repo_name (stringlengths 6–100) | path (stringlengths 4–294) | copies (stringlengths 1–5) | size (stringlengths 4–6) | content (stringlengths 606–896k) | license (stringclasses, 15 values) | var_hash (int64, -9,223,186,179,200,150,000 to 9,223,291,175B) | doc_hash (int64, -9,223,304,365,658,930,000 to 9,223,309,051B) | line_mean (float64, 3.5–99.8) | line_max (int64, 13–999) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|
Goamaral/SCC | inputWindow.py | 1 | 31922 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'inputWindow.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(708, 428)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.List = QtGui.QVBoxLayout()
self.List.setObjectName(_fromUtf8("List"))
self.listItem_3 = QtGui.QWidget(self.centralwidget)
self.listItem_3.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_3.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_3.setObjectName(_fromUtf8("listItem_3"))
self.horizontalLayout_5 = QtGui.QHBoxLayout(self.listItem_3)
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.nameLabel_3 = QtGui.QLabel(self.listItem_3)
self.nameLabel_3.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_3.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_3.setFont(font)
self.nameLabel_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_3.setObjectName(_fromUtf8("nameLabel_3"))
self.horizontalLayout_5.addWidget(self.nameLabel_3)
self.nameLabel_27 = QtGui.QLabel(self.listItem_3)
self.nameLabel_27.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_27.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_27.setFont(font)
self.nameLabel_27.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_27.setObjectName(_fromUtf8("nameLabel_27"))
self.horizontalLayout_5.addWidget(self.nameLabel_27)
self.mediaChegadaA = QtGui.QLineEdit(self.listItem_3)
self.mediaChegadaA.setMinimumSize(QtCore.QSize(50, 25))
self.mediaChegadaA.setMaximumSize(QtCore.QSize(50, 25))
self.mediaChegadaA.setText(_fromUtf8(""))
self.mediaChegadaA.setObjectName(_fromUtf8("mediaChegadaA"))
self.horizontalLayout_5.addWidget(self.mediaChegadaA)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem)
self.List.addWidget(self.listItem_3)
self.listItem_6 = QtGui.QWidget(self.centralwidget)
self.listItem_6.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_6.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_6.setObjectName(_fromUtf8("listItem_6"))
self.horizontalLayout_7 = QtGui.QHBoxLayout(self.listItem_6)
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.nameLabel_7 = QtGui.QLabel(self.listItem_6)
self.nameLabel_7.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_7.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_7.setFont(font)
self.nameLabel_7.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_7.setObjectName(_fromUtf8("nameLabel_7"))
self.horizontalLayout_7.addWidget(self.nameLabel_7)
self.nameLabel_8 = QtGui.QLabel(self.listItem_6)
self.nameLabel_8.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_8.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_8.setFont(font)
self.nameLabel_8.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_8.setObjectName(_fromUtf8("nameLabel_8"))
self.horizontalLayout_7.addWidget(self.nameLabel_8)
self.mediaPerfuracaoA = QtGui.QLineEdit(self.listItem_6)
self.mediaPerfuracaoA.setMinimumSize(QtCore.QSize(50, 25))
self.mediaPerfuracaoA.setMaximumSize(QtCore.QSize(50, 25))
self.mediaPerfuracaoA.setText(_fromUtf8(""))
self.mediaPerfuracaoA.setObjectName(_fromUtf8("mediaPerfuracaoA"))
self.horizontalLayout_7.addWidget(self.mediaPerfuracaoA)
self.nameLabel_9 = QtGui.QLabel(self.listItem_6)
self.nameLabel_9.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_9.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_9.setFont(font)
self.nameLabel_9.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_9.setObjectName(_fromUtf8("nameLabel_9"))
self.horizontalLayout_7.addWidget(self.nameLabel_9)
self.desvioPerfuracaoA = QtGui.QLineEdit(self.listItem_6)
self.desvioPerfuracaoA.setMinimumSize(QtCore.QSize(50, 25))
self.desvioPerfuracaoA.setMaximumSize(QtCore.QSize(50, 25))
self.desvioPerfuracaoA.setText(_fromUtf8(""))
self.desvioPerfuracaoA.setObjectName(_fromUtf8("desvioPerfuracaoA"))
self.horizontalLayout_7.addWidget(self.desvioPerfuracaoA)
self.nameLabel_10 = QtGui.QLabel(self.listItem_6)
self.nameLabel_10.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_10.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_10.setFont(font)
self.nameLabel_10.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_10.setObjectName(_fromUtf8("nameLabel_10"))
self.horizontalLayout_7.addWidget(self.nameLabel_10)
self.nMaquinasPerfuracaoA = QtGui.QLineEdit(self.listItem_6)
self.nMaquinasPerfuracaoA.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasPerfuracaoA.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasPerfuracaoA.setText(_fromUtf8(""))
self.nMaquinasPerfuracaoA.setObjectName(_fromUtf8("nMaquinasPerfuracaoA"))
self.horizontalLayout_7.addWidget(self.nMaquinasPerfuracaoA)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem1)
self.List.addWidget(self.listItem_6)
self.listItem_7 = QtGui.QWidget(self.centralwidget)
self.listItem_7.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_7.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_7.setObjectName(_fromUtf8("listItem_7"))
self.horizontalLayout_8 = QtGui.QHBoxLayout(self.listItem_7)
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.nameLabel_11 = QtGui.QLabel(self.listItem_7)
self.nameLabel_11.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_11.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_11.setFont(font)
self.nameLabel_11.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_11.setObjectName(_fromUtf8("nameLabel_11"))
self.horizontalLayout_8.addWidget(self.nameLabel_11)
self.nameLabel_12 = QtGui.QLabel(self.listItem_7)
self.nameLabel_12.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_12.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_12.setFont(font)
self.nameLabel_12.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_12.setObjectName(_fromUtf8("nameLabel_12"))
self.horizontalLayout_8.addWidget(self.nameLabel_12)
self.mediaPolimentoA = QtGui.QLineEdit(self.listItem_7)
self.mediaPolimentoA.setMinimumSize(QtCore.QSize(50, 25))
self.mediaPolimentoA.setMaximumSize(QtCore.QSize(50, 25))
self.mediaPolimentoA.setText(_fromUtf8(""))
self.mediaPolimentoA.setObjectName(_fromUtf8("mediaPolimentoA"))
self.horizontalLayout_8.addWidget(self.mediaPolimentoA)
self.nameLabel_13 = QtGui.QLabel(self.listItem_7)
self.nameLabel_13.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_13.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_13.setFont(font)
self.nameLabel_13.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_13.setObjectName(_fromUtf8("nameLabel_13"))
self.horizontalLayout_8.addWidget(self.nameLabel_13)
self.desvioPolimentoA = QtGui.QLineEdit(self.listItem_7)
self.desvioPolimentoA.setMinimumSize(QtCore.QSize(50, 25))
self.desvioPolimentoA.setMaximumSize(QtCore.QSize(50, 25))
self.desvioPolimentoA.setText(_fromUtf8(""))
self.desvioPolimentoA.setObjectName(_fromUtf8("desvioPolimentoA"))
self.horizontalLayout_8.addWidget(self.desvioPolimentoA)
self.nameLabel_14 = QtGui.QLabel(self.listItem_7)
self.nameLabel_14.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_14.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_14.setFont(font)
self.nameLabel_14.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_14.setObjectName(_fromUtf8("nameLabel_14"))
self.horizontalLayout_8.addWidget(self.nameLabel_14)
self.nMaquinasPolimentoA = QtGui.QLineEdit(self.listItem_7)
self.nMaquinasPolimentoA.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasPolimentoA.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasPolimentoA.setText(_fromUtf8(""))
self.nMaquinasPolimentoA.setObjectName(_fromUtf8("nMaquinasPolimentoA"))
self.horizontalLayout_8.addWidget(self.nMaquinasPolimentoA)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem2)
self.List.addWidget(self.listItem_7)
self.line_2 = QtGui.QFrame(self.centralwidget)
self.line_2.setMinimumSize(QtCore.QSize(5, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.line_2.setFont(font)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.List.addWidget(self.line_2)
self.listItem_4 = QtGui.QWidget(self.centralwidget)
self.listItem_4.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_4.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_4.setObjectName(_fromUtf8("listItem_4"))
self.horizontalLayout_6 = QtGui.QHBoxLayout(self.listItem_4)
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.nameLabel_4 = QtGui.QLabel(self.listItem_4)
self.nameLabel_4.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_4.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_4.setFont(font)
self.nameLabel_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_4.setObjectName(_fromUtf8("nameLabel_4"))
self.horizontalLayout_6.addWidget(self.nameLabel_4)
self.nameLabel_31 = QtGui.QLabel(self.listItem_4)
self.nameLabel_31.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_31.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_31.setFont(font)
self.nameLabel_31.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_31.setObjectName(_fromUtf8("nameLabel_31"))
self.horizontalLayout_6.addWidget(self.nameLabel_31)
self.mediaChegadaB = QtGui.QLineEdit(self.listItem_4)
self.mediaChegadaB.setMinimumSize(QtCore.QSize(50, 25))
self.mediaChegadaB.setMaximumSize(QtCore.QSize(50, 25))
self.mediaChegadaB.setText(_fromUtf8(""))
self.mediaChegadaB.setObjectName(_fromUtf8("mediaChegadaB"))
self.horizontalLayout_6.addWidget(self.mediaChegadaB)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem3)
self.List.addWidget(self.listItem_4)
self.listItem_9 = QtGui.QWidget(self.centralwidget)
self.listItem_9.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_9.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_9.setObjectName(_fromUtf8("listItem_9"))
self.horizontalLayout_13 = QtGui.QHBoxLayout(self.listItem_9)
self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
self.nameLabel_36 = QtGui.QLabel(self.listItem_9)
self.nameLabel_36.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_36.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_36.setFont(font)
self.nameLabel_36.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_36.setObjectName(_fromUtf8("nameLabel_36"))
self.horizontalLayout_13.addWidget(self.nameLabel_36)
self.nameLabel_37 = QtGui.QLabel(self.listItem_9)
self.nameLabel_37.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_37.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_37.setFont(font)
self.nameLabel_37.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_37.setObjectName(_fromUtf8("nameLabel_37"))
self.horizontalLayout_13.addWidget(self.nameLabel_37)
self.mediaPerfuracaoB = QtGui.QLineEdit(self.listItem_9)
self.mediaPerfuracaoB.setMinimumSize(QtCore.QSize(50, 25))
self.mediaPerfuracaoB.setMaximumSize(QtCore.QSize(50, 25))
self.mediaPerfuracaoB.setText(_fromUtf8(""))
self.mediaPerfuracaoB.setObjectName(_fromUtf8("mediaPerfuracaoB"))
self.horizontalLayout_13.addWidget(self.mediaPerfuracaoB)
self.nameLabel_38 = QtGui.QLabel(self.listItem_9)
self.nameLabel_38.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_38.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_38.setFont(font)
self.nameLabel_38.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_38.setObjectName(_fromUtf8("nameLabel_38"))
self.horizontalLayout_13.addWidget(self.nameLabel_38)
self.desvioPerfuracaoB = QtGui.QLineEdit(self.listItem_9)
self.desvioPerfuracaoB.setMinimumSize(QtCore.QSize(50, 25))
self.desvioPerfuracaoB.setMaximumSize(QtCore.QSize(50, 25))
self.desvioPerfuracaoB.setText(_fromUtf8(""))
self.desvioPerfuracaoB.setObjectName(_fromUtf8("desvioPerfuracaoB"))
self.horizontalLayout_13.addWidget(self.desvioPerfuracaoB)
self.nameLabel_39 = QtGui.QLabel(self.listItem_9)
self.nameLabel_39.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_39.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_39.setFont(font)
self.nameLabel_39.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_39.setObjectName(_fromUtf8("nameLabel_39"))
self.horizontalLayout_13.addWidget(self.nameLabel_39)
self.nMaquinasPerfuracaoB = QtGui.QLineEdit(self.listItem_9)
self.nMaquinasPerfuracaoB.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasPerfuracaoB.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasPerfuracaoB.setText(_fromUtf8(""))
self.nMaquinasPerfuracaoB.setObjectName(_fromUtf8("nMaquinasPerfuracaoB"))
self.horizontalLayout_13.addWidget(self.nMaquinasPerfuracaoB)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_13.addItem(spacerItem4)
self.List.addWidget(self.listItem_9)
self.listItem_8 = QtGui.QWidget(self.centralwidget)
self.listItem_8.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_8.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_8.setObjectName(_fromUtf8("listItem_8"))
self.horizontalLayout_10 = QtGui.QHBoxLayout(self.listItem_8)
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.nameLabel_19 = QtGui.QLabel(self.listItem_8)
self.nameLabel_19.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_19.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_19.setFont(font)
self.nameLabel_19.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_19.setObjectName(_fromUtf8("nameLabel_19"))
self.horizontalLayout_10.addWidget(self.nameLabel_19)
self.nameLabel_20 = QtGui.QLabel(self.listItem_8)
self.nameLabel_20.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_20.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_20.setFont(font)
self.nameLabel_20.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_20.setObjectName(_fromUtf8("nameLabel_20"))
self.horizontalLayout_10.addWidget(self.nameLabel_20)
self.mediaPolimentoB = QtGui.QLineEdit(self.listItem_8)
self.mediaPolimentoB.setMinimumSize(QtCore.QSize(50, 25))
self.mediaPolimentoB.setMaximumSize(QtCore.QSize(50, 25))
self.mediaPolimentoB.setText(_fromUtf8(""))
self.mediaPolimentoB.setObjectName(_fromUtf8("mediaPolimentoB"))
self.horizontalLayout_10.addWidget(self.mediaPolimentoB)
self.nameLabel_21 = QtGui.QLabel(self.listItem_8)
self.nameLabel_21.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_21.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_21.setFont(font)
self.nameLabel_21.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_21.setObjectName(_fromUtf8("nameLabel_21"))
self.horizontalLayout_10.addWidget(self.nameLabel_21)
self.desvioPolimentoB = QtGui.QLineEdit(self.listItem_8)
self.desvioPolimentoB.setMinimumSize(QtCore.QSize(50, 25))
self.desvioPolimentoB.setMaximumSize(QtCore.QSize(50, 25))
self.desvioPolimentoB.setText(_fromUtf8(""))
self.desvioPolimentoB.setObjectName(_fromUtf8("desvioPolimentoB"))
self.horizontalLayout_10.addWidget(self.desvioPolimentoB)
self.nameLabel_22 = QtGui.QLabel(self.listItem_8)
self.nameLabel_22.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_22.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_22.setFont(font)
self.nameLabel_22.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_22.setObjectName(_fromUtf8("nameLabel_22"))
self.horizontalLayout_10.addWidget(self.nameLabel_22)
self.nMaquinasPolimentoB = QtGui.QLineEdit(self.listItem_8)
self.nMaquinasPolimentoB.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasPolimentoB.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasPolimentoB.setText(_fromUtf8(""))
self.nMaquinasPolimentoB.setObjectName(_fromUtf8("nMaquinasPolimentoB"))
self.horizontalLayout_10.addWidget(self.nMaquinasPolimentoB)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem5)
self.List.addWidget(self.listItem_8)
self.line = QtGui.QFrame(self.centralwidget)
self.line.setMinimumSize(QtCore.QSize(0, 5))
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.List.addWidget(self.line)
self.listItem_11 = QtGui.QWidget(self.centralwidget)
self.listItem_11.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_11.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_11.setObjectName(_fromUtf8("listItem_11"))
self.horizontalLayout_12 = QtGui.QHBoxLayout(self.listItem_11)
self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
self.nameLabel_23 = QtGui.QLabel(self.listItem_11)
self.nameLabel_23.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_23.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_23.setFont(font)
self.nameLabel_23.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_23.setObjectName(_fromUtf8("nameLabel_23"))
self.horizontalLayout_12.addWidget(self.nameLabel_23)
self.nameLabel_24 = QtGui.QLabel(self.listItem_11)
self.nameLabel_24.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_24.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_24.setFont(font)
self.nameLabel_24.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_24.setObjectName(_fromUtf8("nameLabel_24"))
self.horizontalLayout_12.addWidget(self.nameLabel_24)
self.mediaEnvernizamento = QtGui.QLineEdit(self.listItem_11)
self.mediaEnvernizamento.setMinimumSize(QtCore.QSize(50, 25))
self.mediaEnvernizamento.setMaximumSize(QtCore.QSize(50, 25))
self.mediaEnvernizamento.setText(_fromUtf8(""))
self.mediaEnvernizamento.setObjectName(_fromUtf8("mediaEnvernizamento"))
self.horizontalLayout_12.addWidget(self.mediaEnvernizamento)
self.nameLabel_25 = QtGui.QLabel(self.listItem_11)
self.nameLabel_25.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_25.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_25.setFont(font)
self.nameLabel_25.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_25.setObjectName(_fromUtf8("nameLabel_25"))
self.horizontalLayout_12.addWidget(self.nameLabel_25)
self.desvioEnvernizamento = QtGui.QLineEdit(self.listItem_11)
self.desvioEnvernizamento.setMinimumSize(QtCore.QSize(50, 25))
self.desvioEnvernizamento.setMaximumSize(QtCore.QSize(50, 25))
self.desvioEnvernizamento.setText(_fromUtf8(""))
self.desvioEnvernizamento.setObjectName(_fromUtf8("desvioEnvernizamento"))
self.horizontalLayout_12.addWidget(self.desvioEnvernizamento)
self.nameLabel_26 = QtGui.QLabel(self.listItem_11)
self.nameLabel_26.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_26.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_26.setFont(font)
self.nameLabel_26.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_26.setObjectName(_fromUtf8("nameLabel_26"))
self.horizontalLayout_12.addWidget(self.nameLabel_26)
self.nMaquinasEnvernizamento = QtGui.QLineEdit(self.listItem_11)
self.nMaquinasEnvernizamento.setMinimumSize(QtCore.QSize(50, 25))
self.nMaquinasEnvernizamento.setMaximumSize(QtCore.QSize(50, 25))
self.nMaquinasEnvernizamento.setText(_fromUtf8(""))
self.nMaquinasEnvernizamento.setObjectName(_fromUtf8("nMaquinasEnvernizamento"))
self.horizontalLayout_12.addWidget(self.nMaquinasEnvernizamento)
spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_12.addItem(spacerItem6)
self.List.addWidget(self.listItem_11)
self.verticalLayout_4.addLayout(self.List)
self.footer = QtGui.QWidget(self.centralwidget)
self.footer.setMaximumSize(QtCore.QSize(100000, 50))
self.footer.setObjectName(_fromUtf8("footer"))
self.horizontalLayout = QtGui.QHBoxLayout(self.footer)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.nameLabel_30 = QtGui.QLabel(self.footer)
self.nameLabel_30.setMinimumSize(QtCore.QSize(130, 0))
self.nameLabel_30.setMaximumSize(QtCore.QSize(130, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_30.setFont(font)
self.nameLabel_30.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_30.setObjectName(_fromUtf8("nameLabel_30"))
self.horizontalLayout.addWidget(self.nameLabel_30)
self.tipoLimite = QtGui.QComboBox(self.footer)
self.tipoLimite.setMinimumSize(QtCore.QSize(125, 0))
self.tipoLimite.setMaximumSize(QtCore.QSize(125, 16777215))
self.tipoLimite.setObjectName(_fromUtf8("tipoLimite"))
self.tipoLimite.addItem(_fromUtf8(""))
self.tipoLimite.addItem(_fromUtf8(""))
self.horizontalLayout.addWidget(self.tipoLimite)
self.nameLabel_28 = QtGui.QLabel(self.footer)
self.nameLabel_28.setMinimumSize(QtCore.QSize(50, 0))
self.nameLabel_28.setMaximumSize(QtCore.QSize(50, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_28.setFont(font)
self.nameLabel_28.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_28.setObjectName(_fromUtf8("nameLabel_28"))
self.horizontalLayout.addWidget(self.nameLabel_28)
self.valorLimite = QtGui.QLineEdit(self.footer)
self.valorLimite.setMinimumSize(QtCore.QSize(75, 25))
self.valorLimite.setMaximumSize(QtCore.QSize(75, 25))
self.valorLimite.setText(_fromUtf8(""))
self.valorLimite.setObjectName(_fromUtf8("valorLimite"))
self.horizontalLayout.addWidget(self.valorLimite)
spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem7)
self.nameLabel_29 = QtGui.QLabel(self.footer)
self.nameLabel_29.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_29.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_29.setFont(font)
self.nameLabel_29.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_29.setObjectName(_fromUtf8("nameLabel_29"))
self.horizontalLayout.addWidget(self.nameLabel_29)
self.nRepeticoes = QtGui.QLineEdit(self.footer)
self.nRepeticoes.setMinimumSize(QtCore.QSize(50, 25))
self.nRepeticoes.setMaximumSize(QtCore.QSize(50, 25))
self.nRepeticoes.setText(_fromUtf8(""))
self.nRepeticoes.setObjectName(_fromUtf8("nRepeticoes"))
self.horizontalLayout.addWidget(self.nRepeticoes)
self.botaoSimular = QtGui.QPushButton(self.footer)
self.botaoSimular.setMinimumSize(QtCore.QSize(100, 25))
self.botaoSimular.setMaximumSize(QtCore.QSize(100, 25))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.botaoSimular.setFont(font)
self.botaoSimular.setLayoutDirection(QtCore.Qt.RightToLeft)
self.botaoSimular.setAutoFillBackground(False)
self.botaoSimular.setStyleSheet(_fromUtf8(""))
self.botaoSimular.setFlat(False)
self.botaoSimular.setObjectName(_fromUtf8("botaoSimular"))
self.horizontalLayout.addWidget(self.botaoSimular)
self.verticalLayout_4.addWidget(self.footer)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Descriçao da simulaçao", None))
self.nameLabel_3.setText(_translate("MainWindow", "Peças grandes (A)", None))
self.nameLabel_27.setText(_translate("MainWindow", "Media chegada", None))
self.nameLabel_7.setText(_translate("MainWindow", "Perfuraçao", None))
self.nameLabel_8.setText(_translate("MainWindow", "Media", None))
self.nameLabel_9.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_10.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_11.setText(_translate("MainWindow", "Polimento", None))
self.nameLabel_12.setText(_translate("MainWindow", "Media", None))
self.nameLabel_13.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_14.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_4.setText(_translate("MainWindow", "Peças grandes (B)", None))
self.nameLabel_31.setText(_translate("MainWindow", "Media chegada", None))
self.nameLabel_36.setText(_translate("MainWindow", "Perfuraçao", None))
self.nameLabel_37.setText(_translate("MainWindow", "Media", None))
self.nameLabel_38.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_39.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_19.setText(_translate("MainWindow", "Polimento", None))
self.nameLabel_20.setText(_translate("MainWindow", "Media", None))
self.nameLabel_21.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_22.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_23.setText(_translate("MainWindow", "Envernizamento", None))
self.nameLabel_24.setText(_translate("MainWindow", "Media", None))
self.nameLabel_25.setText(_translate("MainWindow", "Desvio padrao", None))
self.nameLabel_26.setText(_translate("MainWindow", "Nº maquinas", None))
self.nameLabel_30.setText(_translate("MainWindow", "Limites da simulacao", None))
self.tipoLimite.setItemText(0, _translate("MainWindow", "Tempo simulacao", None))
self.tipoLimite.setItemText(1, _translate("MainWindow", "Nº Clientes", None))
self.nameLabel_28.setText(_translate("MainWindow", "Valor", None))
self.nameLabel_29.setText(_translate("MainWindow", "Nº Repeticoes", None))
self.botaoSimular.setText(_translate("MainWindow", "Simular", None))
| mit | 6,655,440,768,663,493,000 | -5,365,176,729,364,893,000 | 55.576241 | 105 | 0.701934 | false |
kirca/odoo | addons/point_of_sale/wizard/pos_session_opening.py | 40 | 5025 |
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.addons.point_of_sale.point_of_sale import pos_session
class pos_session_opening(osv.osv_memory):
_name = 'pos.session.opening'
_columns = {
'pos_config_id' : fields.many2one('pos.config', 'Point of Sale', required=True),
'pos_session_id' : fields.many2one('pos.session', 'PoS Session'),
'pos_state' : fields.related('pos_session_id', 'state',
type='selection',
selection=pos_session.POS_SESSION_STATE,
string='Session Status', readonly=True),
'pos_state_str' : fields.char('Status', 32, readonly=True),
'show_config' : fields.boolean('Show Config', readonly=True),
'pos_session_name' : fields.related('pos_session_id', 'name',
type='char', size=64, readonly=True),
'pos_session_username' : fields.related('pos_session_id', 'user_id', 'name',
type='char', size=64, readonly=True)
}
def open_ui(self, cr, uid, ids, context=None):
context = context or {}
data = self.browse(cr, uid, ids[0], context=context)
context['active_id'] = data.pos_session_id.id
return {
'type' : 'ir.actions.act_url',
'url': '/pos/web/',
'target': 'self',
}
def open_existing_session_cb_close(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0], context=context)
self.pool.get('pos.session').signal_cashbox_control(cr, uid, [wizard.pos_session_id.id])
return self.open_session_cb(cr, uid, ids, context)
def open_session_cb(self, cr, uid, ids, context=None):
assert len(ids) == 1, "you can open only one session at a time"
proxy = self.pool.get('pos.session')
wizard = self.browse(cr, uid, ids[0], context=context)
if not wizard.pos_session_id:
values = {
'user_id' : uid,
'config_id' : wizard.pos_config_id.id,
}
session_id = proxy.create(cr, uid, values, context=context)
s = proxy.browse(cr, uid, session_id, context=context)
if s.state=='opened':
return self.open_ui(cr, uid, ids, context=context)
return self._open_session(session_id)
return self._open_session(wizard.pos_session_id.id)
def open_existing_session_cb(self, cr, uid, ids, context=None):
assert len(ids) == 1
wizard = self.browse(cr, uid, ids[0], context=context)
return self._open_session(wizard.pos_session_id.id)
def _open_session(self, session_id):
return {
'name': _('Session'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'pos.session',
'res_id': session_id,
'view_id': False,
'type': 'ir.actions.act_window',
}
def on_change_config(self, cr, uid, ids, config_id, context=None):
result = {
'pos_session_id': False,
'pos_state': False,
'pos_state_str' : '',
'pos_session_username' : False,
'pos_session_name' : False,
}
if not config_id:
return {'value' : result}
proxy = self.pool.get('pos.session')
session_ids = proxy.search(cr, uid, [
('state', '!=', 'closed'),
('config_id', '=', config_id),
('user_id', '=', uid),
], context=context)
if session_ids:
session = proxy.browse(cr, uid, session_ids[0], context=context)
result['pos_state'] = str(session.state)
result['pos_state_str'] = dict(pos_session.POS_SESSION_STATE).get(session.state, '')
result['pos_session_id'] = session.id
result['pos_session_name'] = session.name
result['pos_session_username'] = session.user_id.name
return {'value' : result}
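    # Illustrative note about on_change_config() above (not part of the
    # original file): in the companion form view this onchange is typically
    # wired to the config field, eg
    #
    #   <field name="pos_config_id" on_change="on_change_config(pos_config_id)"/>
    #
    # The XML line is an assumption about the wizard's view; it only shows how
    # the returned {'value': ...} dict is consumed by the client to refresh
    # the related session fields.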
def default_get(self, cr, uid, fieldnames, context=None):
so = self.pool.get('pos.session')
session_ids = so.search(cr, uid, [('state','<>','closed'), ('user_id','=',uid)], context=context)
if session_ids:
result = so.browse(cr, uid, session_ids[0], context=context).config_id.id
else:
current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
result = current_user.pos_config and current_user.pos_config.id or False
if not result:
r = self.pool.get('pos.config').search(cr, uid, [], context=context)
result = r and r[0] or False
count = self.pool.get('pos.config').search_count(cr, uid, [('state', '=', 'active')], context=context)
show_config = bool(count > 1)
return {
'pos_config_id' : result,
'show_config' : show_config,
}
| agpl-3.0 | -5,262,621,265,106,220,000 | 5,510,088,865,381,394,000 | 42.318966 | 110 | 0.54408 | false |
Product-Foundry/vaultier | vaultier/nodes/business/permissions.py | 3 | 1731 | from rest_framework import permissions
from accounts.models import Member
def _has_membership(user, node):
return Member.objects.filter(node=node.get_root(), user=user).exists()
def _get_membership(user, node):
if not _has_membership(user, node):
return
return Member.objects.to_node(user, node)
class NodePermission(permissions.BasePermission):
"""
    Prepared permissions for Nodes: read access on GET, update access on
    PUT/PATCH/DELETE, resolved against the requesting user's membership.
"""
def has_permission(self, request, view):
"""
Grant permission
"""
parent = view.kwargs.get('parent') or view.kwargs.get('node')
if request.method == "GET" and parent:
member = _get_membership(request.user, parent)
if not member:
return
return parent.acl.has_permission('read', member)
return True
def has_object_permission(self, request, view, obj):
"""
Grant object permission
"""
member = _get_membership(request.user, obj)
if not member:
return
if request.method == "GET":
return obj.acl.has_permission('read', member)
if request.method in ('PUT', 'PATCH', 'DELETE'):
return obj.acl.has_permission('update', member)
class PolicyPermission(NodePermission):
def has_object_permission(self, request, view, obj):
node = view.kwargs.get('node')
member = _get_membership(request.user, obj)
if not member:
return
if view.action == "retrieve":
return node.acl.has_permission('read', member)
if view.action in ('update', 'partial_update'):
return node.acl.has_permission('update', member)
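# --- Illustrative usage sketch (not part of the original module) -----------
# Both permission classes above are meant to be listed in a DRF view's
# `permission_classes`. The viewset below is hypothetical ("Node" and
# "NodeSerializer" are assumed project names); only the permission hookup is
# the point:
#
#   from rest_framework import viewsets
#
#   class NodeViewSet(viewsets.ModelViewSet):
#       queryset = Node.objects.all()
#       serializer_class = NodeSerializer
#       permission_classes = (NodePermission,)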
| bsd-3-clause | -8,660,251,953,157,208,000 | -6,159,527,964,268,803,000 | 28.338983 | 74 | 0.608319 | false |
ennoborg/gramps | gramps/gen/plug/_pluginreg.py | 2 | 47851 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This module provides the base class for plugin registration.
It provides an object containing data about the plugin (version, filename, ...)
and a register for the data of all plugins.
"""
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
import os
import sys
import re
import traceback
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ...version import VERSION as GRAMPSVERSION, VERSION_TUPLE
from ..const import IMAGE_DIR
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
import logging
LOG = logging.getLogger('._manager')
#-------------------------------------------------------------------------
#
# PluginData
#
#-------------------------------------------------------------------------
#a plugin is stable or unstable
STABLE = 0
UNSTABLE = 1
STATUS = [STABLE, UNSTABLE]
STATUSTEXT = {STABLE: _('Stable'), UNSTABLE: _('Unstable')}
#possible plugin types
REPORT = 0
QUICKREPORT = 1 # deprecated
QUICKVIEW = 1
TOOL = 2
IMPORT = 3
EXPORT = 4
DOCGEN = 5
GENERAL = 6
MAPSERVICE = 7
VIEW = 8
RELCALC = 9
GRAMPLET = 10
SIDEBAR = 11
DATABASE = 12
PTYPE = [REPORT , QUICKREPORT, TOOL, IMPORT, EXPORT, DOCGEN, GENERAL,
MAPSERVICE, VIEW, RELCALC, GRAMPLET, SIDEBAR, DATABASE]
PTYPE_STR = {
REPORT: _('Report') ,
QUICKREPORT: _('Quickreport'),
TOOL: _('Tool'),
IMPORT: _('Importer'),
EXPORT: _('Exporter'),
DOCGEN: _('Doc creator'),
GENERAL: _('Plugin lib'),
MAPSERVICE: _('Map service'),
VIEW: _('Gramps View'),
RELCALC: _('Relationships'),
GRAMPLET: _('Gramplet'),
SIDEBAR: _('Sidebar'),
DATABASE: _('Database'),
}
#possible report categories
CATEGORY_TEXT = 0
CATEGORY_DRAW = 1
CATEGORY_CODE = 2
CATEGORY_WEB = 3
CATEGORY_BOOK = 4
CATEGORY_GRAPHVIZ = 5
REPORT_CAT = [ CATEGORY_TEXT, CATEGORY_DRAW, CATEGORY_CODE,
CATEGORY_WEB, CATEGORY_BOOK, CATEGORY_GRAPHVIZ]
#possible tool categories
TOOL_DEBUG = -1
TOOL_ANAL = 0
TOOL_DBPROC = 1
TOOL_DBFIX = 2
TOOL_REVCTL = 3
TOOL_UTILS = 4
TOOL_CAT = [ TOOL_DEBUG, TOOL_ANAL, TOOL_DBPROC, TOOL_DBFIX, TOOL_REVCTL,
TOOL_UTILS]
#possible quickreport categories
CATEGORY_QR_MISC = -1
CATEGORY_QR_PERSON = 0
CATEGORY_QR_FAMILY = 1
CATEGORY_QR_EVENT = 2
CATEGORY_QR_SOURCE = 3
CATEGORY_QR_PLACE = 4
CATEGORY_QR_REPOSITORY = 5
CATEGORY_QR_NOTE = 6
CATEGORY_QR_DATE = 7
CATEGORY_QR_MEDIA = 8
CATEGORY_QR_CITATION = 9
CATEGORY_QR_SOURCE_OR_CITATION = 10
# Modes for generating reports
REPORT_MODE_GUI = 1 # Standalone report using GUI
REPORT_MODE_BKI = 2 # Book Item interface using GUI
REPORT_MODE_CLI = 4 # Command line interface (CLI)
REPORT_MODES = [REPORT_MODE_GUI, REPORT_MODE_BKI, REPORT_MODE_CLI]
# Modes for running tools
TOOL_MODE_GUI = 1 # Standard tool using GUI
TOOL_MODE_CLI = 2 # Command line interface (CLI)
TOOL_MODES = [TOOL_MODE_GUI, TOOL_MODE_CLI]
# possible view orders
START = 1
END = 2
#-------------------------------------------------------------------------
#
# Functions and classes
#
#-------------------------------------------------------------------------
def myint(s):
"""
Protected version of int()
"""
try:
v = int(s)
except:
v = s
return v
def version(sversion):
"""
Return the tuple version of a string version.
"""
return tuple([myint(x or "0") for x in (sversion + "..").split(".")])
def valid_plugin_version(plugin_version_string):
"""
Checks to see if string is a valid version string for this version
of Gramps.
"""
if not isinstance(plugin_version_string, str): return False
dots = plugin_version_string.count(".")
if dots == 1:
plugin_version = tuple(map(int, plugin_version_string.split(".", 1)))
return plugin_version == VERSION_TUPLE[:2]
elif dots == 2:
plugin_version = tuple(map(int, plugin_version_string.split(".", 2)))
return (plugin_version[:2] == VERSION_TUPLE[:2] and
plugin_version <= VERSION_TUPLE)
return False
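# --- Illustrative sketch (not part of the original module) -----------------
# A hedged demonstration of the two helpers above; the literal version string
# is an assumption used purely for illustration.
def _demo_version_checks():
    """Show the padded tuple form and a validity check (illustrative only)."""
    # version() pads missing components with zeros, eg "3.4.5" -> (3, 4, 5, 0, 0)
    padded = version("3.4.5")
    # A "x.y" string is valid only when it equals VERSION_TUPLE[:2]; an
    # "x.y.z" string must match the first two components and not exceed
    # VERSION_TUPLE.
    current_ok = valid_plugin_version("%d.%d" % VERSION_TUPLE[:2])
    return padded, current_ok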
class PluginData:
"""
This is the base class for all plugin data objects.
The workflow is:
1. plugin manager reads all register files, and stores plugin data
objects in a plugin register
2. when plugin is needed, the plugin register creates the plugin, and
the manager stores this, after which it can be executed.
Attributes present for all plugins
.. attribute:: id
A unique identifier for the plugin. This is eg used to store the plugin
settings.
.. attribute:: name
A friendly name to call this plugin (normally translated)
.. attribute:: name_accell
       A friendly name to call this plugin (normally translated), with an
       accelerator present (eg '_Descendant report', with D as the
       accelerator key)
.. attribute:: description
A friendly description of what the plugin does
.. attribute:: version
The version of the plugin
.. attribute:: status
The status of the plugin, STABLE or UNSTABLE
UNSTABLE is only visible in development code, not in release
.. attribute:: fname
The python file where the plugin implementation can be found
.. attribute:: fpath
The python path where the plugin implementation can be found
.. attribute:: ptype
       The plugin type. One of REPORT, QUICKREPORT, TOOL, IMPORT,
       EXPORT, DOCGEN, GENERAL, MAPSERVICE, VIEW, RELCALC, GRAMPLET,
       SIDEBAR, DATABASE
.. attribute:: authors
List of authors of the plugin, default=[]
.. attribute:: authors_email
List of emails of the authors of the plugin, default=[]
.. attribute:: supported
Bool value indicating if the plugin is still supported, default=True
.. attribute:: load_on_reg
       bool value; if True, the plugin is loaded on Gramps startup. Only set
       this value if the plugin must be loaded immediately on startup
       (eg for testing), default=False
.. attribute: icons
New stock icons to register. A list of tuples (stock_id, icon_label),
eg:
[('gramps_myplugin', _('My Plugin')),
('gramps_myplugin_open', _('Open Plugin')]
The icon directory must contain the directories scalable, 48x48, 22x22
and 16x16 with the icons, eg:
scalable/gramps_myplugin.svg
48x48/gramps_myplugin.png
22x22/gramps_myplugin.png
.. attribute: icondir
The directory to use for the icons. If icondir is not set or None, it
reverts to the plugindirectory itself.
Attributes for RELCALC plugins:
.. attribute:: relcalcclass
The class in the module that is the relationcalc class
.. attribute:: lang_list
List of languages this plugin handles
Attributes for REPORT plugins:
.. attribute:: require_active
       Bool, whether the report requires an active person to be set or not
.. attribute:: reportclass
The class in the module that is the report class
.. attribute:: report_modes
       The report modes: list of REPORT_MODE_GUI, REPORT_MODE_BKI, REPORT_MODE_CLI
Attributes for REPORT and TOOL and QUICKREPORT and VIEW plugins
.. attribute:: category
Or the report category the plugin belongs to, default=CATEGORY_TEXT
or the tool category a plugin belongs to, default=TOOL_UTILS
or the quickreport category a plugin belongs to, default=CATEGORY_QR_PERSON
or the view category a plugin belongs to,
default=("Miscellaneous", _("Miscellaneous"))
Attributes for REPORT and TOOL and DOCGEN plugins
.. attribute:: optionclass
The class in the module that is the option class
Attributes for TOOL plugins
.. attribute:: toolclass
The class in the module that is the tool class
.. attribute:: tool_modes
The tool modes: list of TOOL_MODE_GUI, TOOL_MODE_CLI
Attributes for DOCGEN plugins
.. attribute :: docclass
       The class in the module that is derived from BaseDoc
.. attribute :: paper
bool, Indicates whether the plugin uses paper or not, default=True
.. attribute :: style
bool, Indicates whether the plugin uses styles or not, default=True
Attribute for DOCGEN, EXPORT plugins
.. attribute :: extension
str, The file extension to use for output produced by the docgen/export,
default=''
Attributes for QUICKREPORT plugins
.. attribute:: runfunc
The function that executes the quick report
Attributes for MAPSERVICE plugins
.. attribute:: mapservice
The class in the module that is a mapservice
Attributes for EXPORT plugins
.. attribute:: export_function
Function that produces the export
.. attribute:: export_options
Class to set options
.. attribute:: export_options_title
Title for the option page
Attributes for IMPORT plugins
.. attribute:: import_function
Function that starts an import
Attributes for GRAMPLET plugins
.. attribute:: gramplet
The function or class that defines the gramplet.
.. attribute:: height
The height the gramplet should have in a column on GrampletView,
default = 200
.. attribute:: detached_height
The height the gramplet should have detached, default 300
.. attribute:: detached_width
The width the gramplet should have detached, default 400
.. attribute:: expand
If the attributed should be expanded on start, default False
.. attribute:: gramplet_title
Title to use for the gramplet, default = _('Gramplet')
.. attribute:: navtypes
Navigation types that the gramplet is appropriate for, default = []
.. attribute:: help_url
The URL where documentation for the URL can be found
Attributes for VIEW plugins
.. attribute:: viewclass
A class of type ViewCreator that holds the needed info of the
view to be created: icon, viewclass that derives from pageview, ...
.. attribute:: stock_icon
The icon in the toolbar or sidebar used to select the view
Attributes for SIDEBAR plugins
.. attribute:: sidebarclass
The class that defines the sidebar.
.. attribute:: menu_label
       A label to use on the selection menu.
Attributes for VIEW and SIDEBAR plugins
.. attribute:: order
order can be START or END. Default is END. For END, on registering,
the plugin is appended to the list of plugins. If START, then the
plugin is prepended. Only set START if you want a plugin to be the
first in the order of plugins
Attributes for DATABASE plugins
.. attribute:: databaseclass
The class in the module that is the database class
.. attribute:: reset_system
Boolean to indicate that the system (sys.modules) should
be reset.
"""
def __init__(self):
#read/write attribute
self.directory = None
#base attributes
self._id = None
self._name = None
self._name_accell = None
self._version = None
self._gramps_target_version = None
self._description = None
self._status = UNSTABLE
self._fname = None
self._fpath = None
self._ptype = None
self._authors = []
self._authors_email = []
self._supported = True
self._load_on_reg = False
self._icons = []
self._icondir = None
self._depends_on = []
self._include_in_listing = True
#derived var
self.mod_name = None
#RELCALC attr
self._relcalcclass = None
self._lang_list = None
#REPORT attr
self._reportclass = None
self._require_active = True
self._report_modes = [REPORT_MODE_GUI]
#REPORT and TOOL and GENERAL attr
self._category = None
#REPORT and TOOL attr
self._optionclass = None
#TOOL attr
self._toolclass = None
self._tool_modes = [TOOL_MODE_GUI]
#DOCGEN attr
self._paper = True
self._style = True
self._extension = ''
#QUICKREPORT attr
self._runfunc = None
#MAPSERVICE attr
self._mapservice = None
#EXPORT attr
self._export_function = None
self._export_options = None
self._export_options_title = ''
#IMPORT attr
self._import_function = None
#GRAMPLET attr
self._gramplet = None
self._height = 200
self._detached_height = 300
self._detached_width = 400
self._expand = False
self._gramplet_title = _('Gramplet')
self._navtypes = []
self._orientation = None
self._help_url = None
#VIEW attr
self._viewclass = None
self._stock_icon = None
#SIDEBAR attr
self._sidebarclass = None
self._menu_label = ''
#VIEW and SIDEBAR attr
self._order = END
#DATABASE attr
self._databaseclass = None
self._reset_system = False
#GENERAL attr
self._data = []
self._process = None
def _set_id(self, id):
self._id = id
def _get_id(self):
return self._id
def _set_name(self, name):
self._name = name
def _get_name(self):
return self._name
def _set_name_accell(self, name):
self._name_accell = name
def _get_name_accell(self):
if self._name_accell is None:
return self._name
else:
return self._name_accell
def _set_description(self, description):
self._description = description
def _get_description(self):
return self._description
def _set_version(self, version):
self._version = version
def _get_version(self):
return self._version
def _set_gramps_target_version(self, version):
self._gramps_target_version = version
def _get_gramps_target_version(self):
return self._gramps_target_version
def _set_status(self, status):
if status not in STATUS:
raise ValueError('plugin status cannot be %s' % str(status))
self._status = status
def _get_status(self):
return self._status
def _set_fname(self, fname):
self._fname = fname
def _get_fname(self):
return self._fname
def _set_fpath(self, fpath):
self._fpath = fpath
def _get_fpath(self):
return self._fpath
def _set_ptype(self, ptype):
if ptype not in PTYPE:
raise ValueError('Plugin type cannot be %s' % str(ptype))
elif self._ptype is not None:
raise ValueError('Plugin type may not be changed')
self._ptype = ptype
if self._ptype == REPORT:
self._category = CATEGORY_TEXT
elif self._ptype == TOOL:
self._category = TOOL_UTILS
elif self._ptype == QUICKREPORT:
self._category = CATEGORY_QR_PERSON
elif self._ptype == VIEW:
self._category = ("Miscellaneous", _("Miscellaneous"))
#if self._ptype == DOCGEN:
# self._load_on_reg = True
def _get_ptype(self):
return self._ptype
def _set_authors(self, authors):
if not authors or not isinstance(authors, list):
return
self._authors = authors
def _get_authors(self):
return self._authors
def _set_authors_email(self, authors_email):
if not authors_email or not isinstance(authors_email, list):
return
self._authors_email = authors_email
def _get_authors_email(self):
return self._authors_email
def _set_supported(self, supported):
if not isinstance(supported, bool):
raise ValueError('Plugin must have supported=True or False')
self._supported = supported
def _get_supported(self):
return self._supported
def _set_load_on_reg(self, load_on_reg):
if not isinstance(load_on_reg, bool):
raise ValueError('Plugin must have load_on_reg=True or False')
self._load_on_reg = load_on_reg
def _get_load_on_reg(self):
return self._load_on_reg
def _get_icons(self):
return self._icons
def _set_icons(self, icons):
if not isinstance(icons, list):
raise ValueError('Plugin must have icons as a list')
self._icons = icons
def _get_icondir(self):
return self._icondir
def _set_icondir(self, icondir):
self._icondir = icondir
def _get_depends_on(self):
return self._depends_on
def _set_depends_on(self, depends):
if not isinstance(depends, list):
raise ValueError('Plugin must have depends_on as a list')
self._depends_on = depends
def _get_include_in_listing(self):
return self._include_in_listing
def _set_include_in_listing(self, include):
if not isinstance(include, bool):
raise ValueError('Plugin must have include_in_listing as a bool')
self._include_in_listing = include
id = property(_get_id, _set_id)
name = property(_get_name, _set_name)
name_accell = property(_get_name_accell, _set_name_accell)
description = property(_get_description, _set_description)
version = property(_get_version, _set_version)
gramps_target_version = property(_get_gramps_target_version,
_set_gramps_target_version)
status = property(_get_status, _set_status)
fname = property(_get_fname, _set_fname)
fpath = property(_get_fpath, _set_fpath)
ptype = property(_get_ptype, _set_ptype)
authors = property(_get_authors, _set_authors)
authors_email = property(_get_authors_email, _set_authors_email)
supported = property(_get_supported, _set_supported)
load_on_reg = property(_get_load_on_reg, _set_load_on_reg)
icons = property(_get_icons, _set_icons)
icondir = property(_get_icondir, _set_icondir)
depends_on = property(_get_depends_on, _set_depends_on)
include_in_listing = property(_get_include_in_listing, _set_include_in_listing)
def statustext(self):
return STATUSTEXT[self.status]
#type specific plugin attributes
#RELCALC attributes
def _set_relcalcclass(self, relcalcclass):
if not self._ptype == RELCALC:
raise ValueError('relcalcclass may only be set for RELCALC plugins')
self._relcalcclass = relcalcclass
def _get_relcalcclass(self):
return self._relcalcclass
def _set_lang_list(self, lang_list):
if not self._ptype == RELCALC:
raise ValueError('relcalcclass may only be set for RELCALC plugins')
self._lang_list = lang_list
def _get_lang_list(self):
return self._lang_list
relcalcclass = property(_get_relcalcclass, _set_relcalcclass)
lang_list = property(_get_lang_list, _set_lang_list)
#REPORT attributes
def _set_require_active(self, require_active):
if not self._ptype == REPORT:
raise ValueError('require_active may only be set for REPORT plugins')
if not isinstance(require_active, bool):
raise ValueError('Report must have require_active=True or False')
self._require_active = require_active
def _get_require_active(self):
return self._require_active
def _set_reportclass(self, reportclass):
if not self._ptype == REPORT:
raise ValueError('reportclass may only be set for REPORT plugins')
self._reportclass = reportclass
def _get_reportclass(self):
return self._reportclass
def _set_report_modes(self, report_modes):
if not self._ptype == REPORT:
raise ValueError('report_modes may only be set for REPORT plugins')
if not isinstance(report_modes, list):
raise ValueError('report_modes must be a list')
self._report_modes = [x for x in report_modes if x in REPORT_MODES]
if not self._report_modes:
raise ValueError('report_modes not a valid list of modes')
def _get_report_modes(self):
return self._report_modes
#REPORT or TOOL or QUICKREPORT or GENERAL attributes
def _set_category(self, category):
if self._ptype not in [REPORT, TOOL, QUICKREPORT, VIEW, GENERAL]:
raise ValueError('category may only be set for ' \
'REPORT/TOOL/QUICKREPORT/VIEW/GENERAL plugins')
self._category = category
def _get_category(self):
return self._category
#REPORT OR TOOL attributes
def _set_optionclass(self, optionclass):
if not (self._ptype == REPORT or self.ptype == TOOL or self._ptype == DOCGEN):
raise ValueError('optionclass may only be set for REPORT/TOOL/DOCGEN plugins')
self._optionclass = optionclass
def _get_optionclass(self):
return self._optionclass
#TOOL attributes
def _set_toolclass(self, toolclass):
if not self._ptype == TOOL:
raise ValueError('toolclass may only be set for TOOL plugins')
self._toolclass = toolclass
def _get_toolclass(self):
return self._toolclass
def _set_tool_modes(self, tool_modes):
if not self._ptype == TOOL:
raise ValueError('tool_modes may only be set for TOOL plugins')
if not isinstance(tool_modes, list):
raise ValueError('tool_modes must be a list')
self._tool_modes = [x for x in tool_modes if x in TOOL_MODES]
if not self._tool_modes:
raise ValueError('tool_modes not a valid list of modes')
def _get_tool_modes(self):
return self._tool_modes
require_active = property(_get_require_active, _set_require_active)
reportclass = property(_get_reportclass, _set_reportclass)
report_modes = property(_get_report_modes, _set_report_modes)
category = property(_get_category, _set_category)
optionclass = property(_get_optionclass, _set_optionclass)
toolclass = property(_get_toolclass, _set_toolclass)
tool_modes = property(_get_tool_modes, _set_tool_modes)
#DOCGEN attributes
def _set_paper(self, paper):
if not self._ptype == DOCGEN:
raise ValueError('paper may only be set for DOCGEN plugins')
if not isinstance(paper, bool):
raise ValueError('Plugin must have paper=True or False')
self._paper = paper
def _get_paper(self):
return self._paper
def _set_style(self, style):
if not self._ptype == DOCGEN:
raise ValueError('style may only be set for DOCGEN plugins')
if not isinstance(style, bool):
raise ValueError('Plugin must have style=True or False')
self._style = style
def _get_style(self):
return self._style
def _set_extension(self, extension):
if not (self._ptype == DOCGEN or self._ptype == EXPORT
or self._ptype == IMPORT):
raise ValueError('extension may only be set for DOCGEN/EXPORT/'\
'IMPORT plugins')
self._extension = extension
def _get_extension(self):
return self._extension
paper = property(_get_paper, _set_paper)
style = property(_get_style, _set_style)
extension = property(_get_extension, _set_extension)
#QUICKREPORT attributes
def _set_runfunc(self, runfunc):
if not self._ptype == QUICKREPORT:
raise ValueError('runfunc may only be set for QUICKREPORT plugins')
self._runfunc = runfunc
def _get_runfunc(self):
return self._runfunc
runfunc = property(_get_runfunc, _set_runfunc)
#MAPSERVICE attributes
def _set_mapservice(self, mapservice):
if not self._ptype == MAPSERVICE:
raise ValueError('mapservice may only be set for MAPSERVICE plugins')
self._mapservice = mapservice
def _get_mapservice(self):
return self._mapservice
mapservice = property(_get_mapservice, _set_mapservice)
#EXPORT attributes
def _set_export_function(self, export_function):
if not self._ptype == EXPORT:
raise ValueError('export_function may only be set for EXPORT plugins')
self._export_function = export_function
def _get_export_function(self):
return self._export_function
def _set_export_options(self, export_options):
if not self._ptype == EXPORT:
raise ValueError('export_options may only be set for EXPORT plugins')
self._export_options = export_options
def _get_export_options(self):
return self._export_options
def _set_export_options_title(self, export_options_title):
if not self._ptype == EXPORT:
raise ValueError('export_options_title may only be set for EXPORT plugins')
self._export_options_title = export_options_title
def _get_export_options_title(self):
return self._export_options_title
export_function = property(_get_export_function, _set_export_function)
export_options = property(_get_export_options, _set_export_options)
export_options_title = property(_get_export_options_title,
_set_export_options_title)
#IMPORT attributes
def _set_import_function(self, import_function):
if not self._ptype == IMPORT:
raise ValueError('import_function may only be set for IMPORT plugins')
self._import_function = import_function
def _get_import_function(self):
return self._import_function
import_function = property(_get_import_function, _set_import_function)
#GRAMPLET attributes
def _set_gramplet(self, gramplet):
if not self._ptype == GRAMPLET:
raise ValueError('gramplet may only be set for GRAMPLET plugins')
self._gramplet = gramplet
def _get_gramplet(self):
return self._gramplet
def _set_height(self, height):
if not self._ptype == GRAMPLET:
raise ValueError('height may only be set for GRAMPLET plugins')
if not isinstance(height, int):
            raise ValueError('Plugin must have height as an integer')
self._height = height
def _get_height(self):
return self._height
def _set_detached_height(self, detached_height):
if not self._ptype == GRAMPLET:
raise ValueError('detached_height may only be set for GRAMPLET plugins')
if not isinstance(detached_height, int):
            raise ValueError('Plugin must have detached_height as an integer')
self._detached_height = detached_height
def _get_detached_height(self):
return self._detached_height
def _set_detached_width(self, detached_width):
if not self._ptype == GRAMPLET:
raise ValueError('detached_width may only be set for GRAMPLET plugins')
if not isinstance(detached_width, int):
            raise ValueError('Plugin must have detached_width as an integer')
self._detached_width = detached_width
def _get_detached_width(self):
return self._detached_width
def _set_expand(self, expand):
if not self._ptype == GRAMPLET:
raise ValueError('expand may only be set for GRAMPLET plugins')
if not isinstance(expand, bool):
raise ValueError('Plugin must have expand as a bool')
self._expand = expand
def _get_expand(self):
return self._expand
def _set_gramplet_title(self, gramplet_title):
if not self._ptype == GRAMPLET:
raise ValueError('gramplet_title may only be set for GRAMPLET plugins')
if not isinstance(gramplet_title, str):
raise ValueError('gramplet_title is type %s, string or unicode required' % type(gramplet_title))
self._gramplet_title = gramplet_title
def _get_gramplet_title(self):
return self._gramplet_title
def _set_help_url(self, help_url):
if not self._ptype == GRAMPLET:
raise ValueError('help_url may only be set for GRAMPLET plugins')
self._help_url = help_url
def _get_help_url(self):
return self._help_url
def _set_navtypes(self, navtypes):
if not self._ptype == GRAMPLET:
raise ValueError('navtypes may only be set for GRAMPLET plugins')
self._navtypes = navtypes
def _get_navtypes(self):
return self._navtypes
def _set_orientation(self, orientation):
if not self._ptype == GRAMPLET:
raise ValueError('orientation may only be set for GRAMPLET plugins')
self._orientation = orientation
def _get_orientation(self):
return self._orientation
gramplet = property(_get_gramplet, _set_gramplet)
height = property(_get_height, _set_height)
detached_height = property(_get_detached_height, _set_detached_height)
detached_width = property(_get_detached_width, _set_detached_width)
expand = property(_get_expand, _set_expand)
gramplet_title = property(_get_gramplet_title, _set_gramplet_title)
navtypes = property(_get_navtypes, _set_navtypes)
orientation = property(_get_orientation, _set_orientation)
help_url = property(_get_help_url, _set_help_url)
def _set_viewclass(self, viewclass):
if not self._ptype == VIEW:
raise ValueError('viewclass may only be set for VIEW plugins')
self._viewclass = viewclass
def _get_viewclass(self):
return self._viewclass
def _set_stock_icon(self, stock_icon):
if not self._ptype == VIEW:
raise ValueError('stock_icon may only be set for VIEW plugins')
self._stock_icon = stock_icon
def _get_stock_icon(self):
return self._stock_icon
viewclass = property(_get_viewclass, _set_viewclass)
stock_icon = property(_get_stock_icon, _set_stock_icon)
#SIDEBAR attributes
def _set_sidebarclass(self, sidebarclass):
if not self._ptype == SIDEBAR:
raise ValueError('sidebarclass may only be set for SIDEBAR plugins')
self._sidebarclass = sidebarclass
def _get_sidebarclass(self):
return self._sidebarclass
def _set_menu_label(self, menu_label):
if not self._ptype == SIDEBAR:
raise ValueError('menu_label may only be set for SIDEBAR plugins')
self._menu_label = menu_label
def _get_menu_label(self):
return self._menu_label
sidebarclass = property(_get_sidebarclass, _set_sidebarclass)
menu_label = property(_get_menu_label, _set_menu_label)
#VIEW and SIDEBAR attributes
def _set_order(self, order):
if not self._ptype in (VIEW, SIDEBAR):
raise ValueError('order may only be set for VIEW and SIDEBAR plugins')
self._order = order
def _get_order(self):
return self._order
order = property(_get_order, _set_order)
#DATABASE attributes
def _set_databaseclass(self, databaseclass):
if not self._ptype == DATABASE:
raise ValueError('databaseclass may only be set for DATABASE plugins')
self._databaseclass = databaseclass
def _get_databaseclass(self):
return self._databaseclass
def _set_reset_system(self, reset_system):
if not self._ptype == DATABASE:
raise ValueError('reset_system may only be set for DATABASE plugins')
self._reset_system = reset_system
def _get_reset_system(self):
return self._reset_system
databaseclass = property(_get_databaseclass, _set_databaseclass)
reset_system = property(_get_reset_system, _set_reset_system)
#GENERAL attr
def _set_data(self, data):
if not self._ptype in (GENERAL,):
raise ValueError('data may only be set for GENERAL plugins')
self._data = data
def _get_data(self):
return self._data
def _set_process(self, process):
if not self._ptype in (GENERAL,):
raise ValueError('process may only be set for GENERAL plugins')
self._process = process
def _get_process(self):
return self._process
data = property(_get_data, _set_data)
process = property(_get_process, _set_process)
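# --- Illustrative sketch, not part of the original module ---
# Every setter above follows the same type-guarded property pattern: it first
# checks self._ptype and raises ValueError when the attribute does not apply
# to that plugin type. The helper below (never called here) shows the effect;
# the attribute values are made up for illustration.
def _example_type_guarded_properties():
    pdata = PluginData()
    pdata.ptype = DOCGEN
    pdata.paper = True                 # allowed: paper is a DOCGEN attribute
    try:
        pdata.gramplet = 'MyGramplet'  # rejected: gramplet is GRAMPLET-only
    except ValueError as err:
        return str(err)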
def newplugin():
"""
    Create a new PluginData object and add it to the list of registered
    plugins.
:returns: a newly created PluginData which is already part of the register
"""
gpr = PluginRegister.get_instance()
pgd = PluginData()
gpr.add_plugindata(pgd)
return pgd
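# Hedged usage sketch (illustrative only, never called): a registration file
# could build a PluginData by hand via newplugin() and then assign attributes
# one by one. The chosen plugin type and modes are just examples.
def _example_newplugin_usage():
    pgd = newplugin()                  # already added to the PluginRegister
    pgd.ptype = TOOL
    pgd.tool_modes = [TOOL_MODE_GUI, TOOL_MODE_CLI]
    return pgd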
def register(ptype, **kwargs):
"""
Convenience function to register a new plugin using a dictionary as input.
    The register function calls newplugin(), and uses the dictionary kwargs
    to assign data to the PluginData that newplugin() created, as in:
    plugindata.key = data
:param ptype: the plugin type, one of REPORT, TOOL, ...
    :param kwargs: dictionary whose keys are attributes of the plugin and
                   whose values are the data to assign
:returns: a newly created PluginData which is already part of the register
and which has kwargs assigned as attributes
"""
plg = newplugin()
plg.ptype = ptype
for prop in kwargs:
#check it is a valid attribute with getattr
getattr(plg, prop)
#set the value
setattr(plg, prop, kwargs[prop])
return plg
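# Hedged sketch of how a *.gpr.py registration file might call register().
# The keyword names correspond to PluginData attributes used elsewhere in
# this module; the concrete values ('example.report', 'ExampleReport') are
# invented for illustration and the helper is never called here.
def _example_register_call():
    return register(
        REPORT,
        id='example.report',
        category=CATEGORY_TEXT,
        reportclass='ExampleReport',
        report_modes=[REPORT_MODE_GUI, REPORT_MODE_CLI],
        status=STABLE,
        gramps_target_version=GRAMPSVERSION,
    )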
def make_environment(**kwargs):
env = {
'newplugin': newplugin,
'register': register,
'STABLE': STABLE,
'UNSTABLE': UNSTABLE,
'REPORT': REPORT,
'QUICKREPORT': QUICKREPORT,
'TOOL': TOOL,
'IMPORT': IMPORT,
'EXPORT': EXPORT,
'DOCGEN': DOCGEN,
'GENERAL': GENERAL,
'MAPSERVICE': MAPSERVICE,
'VIEW': VIEW,
'RELCALC': RELCALC,
'GRAMPLET': GRAMPLET,
'SIDEBAR': SIDEBAR,
'CATEGORY_TEXT': CATEGORY_TEXT,
'CATEGORY_DRAW': CATEGORY_DRAW,
'CATEGORY_CODE': CATEGORY_CODE,
'CATEGORY_WEB': CATEGORY_WEB,
'CATEGORY_BOOK': CATEGORY_BOOK,
'CATEGORY_GRAPHVIZ': CATEGORY_GRAPHVIZ,
'TOOL_DEBUG': TOOL_DEBUG,
'TOOL_ANAL': TOOL_ANAL,
'TOOL_DBPROC': TOOL_DBPROC,
'TOOL_DBFIX': TOOL_DBFIX,
'TOOL_REVCTL': TOOL_REVCTL,
'TOOL_UTILS': TOOL_UTILS,
'CATEGORY_QR_MISC': CATEGORY_QR_MISC,
'CATEGORY_QR_PERSON': CATEGORY_QR_PERSON,
'CATEGORY_QR_FAMILY': CATEGORY_QR_FAMILY,
'CATEGORY_QR_EVENT': CATEGORY_QR_EVENT,
'CATEGORY_QR_SOURCE': CATEGORY_QR_SOURCE,
'CATEGORY_QR_CITATION': CATEGORY_QR_CITATION,
'CATEGORY_QR_SOURCE_OR_CITATION': CATEGORY_QR_SOURCE_OR_CITATION,
'CATEGORY_QR_PLACE': CATEGORY_QR_PLACE,
'CATEGORY_QR_MEDIA': CATEGORY_QR_MEDIA,
'CATEGORY_QR_REPOSITORY': CATEGORY_QR_REPOSITORY,
'CATEGORY_QR_NOTE': CATEGORY_QR_NOTE,
'CATEGORY_QR_DATE': CATEGORY_QR_DATE,
'REPORT_MODE_GUI': REPORT_MODE_GUI,
'REPORT_MODE_BKI': REPORT_MODE_BKI,
'REPORT_MODE_CLI': REPORT_MODE_CLI,
'TOOL_MODE_GUI': TOOL_MODE_GUI,
'TOOL_MODE_CLI': TOOL_MODE_CLI,
'DATABASE': DATABASE,
'GRAMPSVERSION': GRAMPSVERSION,
'START': START,
'END': END,
'IMAGE_DIR': IMAGE_DIR,
}
env.update(kwargs)
return env
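# Minimal sketch (assumptions flagged inline) of how this environment is
# consumed: scan_dir() below compiles a *.gpr.py file and executes it with
# these names in scope, roughly equivalent to this helper. The identity
# translation function is an assumption; the real code uses glocale.
def _example_exec_registration(source_text, filename='example.gpr.py'):
    env = make_environment(_=lambda text: text)   # assumed identity gettext
    exec(compile(source_text, filename, 'exec'), env, {'uistate': None})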
#-------------------------------------------------------------------------
#
# PluginRegister
#
#-------------------------------------------------------------------------
class PluginRegister:
"""
PluginRegister is a Singleton which holds plugin data
    .. attribute:: stable_only
Bool, include stable plugins only or not. Default True
"""
__instance = None
def get_instance():
""" Use this function to get the instance of the PluginRegister """
if PluginRegister.__instance is None:
PluginRegister.__instance = 1 # Set to 1 for __init__()
PluginRegister.__instance = PluginRegister()
return PluginRegister.__instance
get_instance = staticmethod(get_instance)
def __init__(self):
""" This function should only be run once by get_instance() """
        if PluginRegister.__instance != 1:
raise Exception("This class is a singleton. "
"Use the get_instance() method")
self.stable_only = True
if __debug__:
self.stable_only = False
self.__plugindata = []
self.__id_to_pdata = {}
def add_plugindata(self, plugindata):
""" This is used to add an entry to the registration list. The way it
is used, this entry is not yet filled in, so we cannot use the id to
add to the __id_to_pdata dict at this time. """
self.__plugindata.append(plugindata)
def scan_dir(self, dir, filenames, uistate=None):
"""
The dir name will be scanned for plugin registration code, which will
        be loaded into :class:`PluginData` objects if they satisfy some checks.
:returns: A list with :class:`PluginData` objects
"""
# if the directory does not exist, do nothing
if not (os.path.isdir(dir) or os.path.islink(dir)):
return []
ext = r".gpr.py"
extlen = -len(ext)
pymod = re.compile(r"^(.*)\.py$")
for filename in filenames:
if not filename[extlen:] == ext:
continue
lenpd = len(self.__plugindata)
full_filename = os.path.join(dir, filename)
try:
with open(full_filename, "r", encoding='utf-8') as fd:
stream = fd.read()
except Exception as msg:
print(_('ERROR: Failed reading plugin registration %(filename)s') % \
{'filename' : filename})
print(msg)
continue
if os.path.exists(os.path.join(os.path.dirname(full_filename),
'locale')):
try:
local_gettext = glocale.get_addon_translator(full_filename).gettext
except ValueError:
print(_('WARNING: Plugin %(plugin_name)s has no translation'
' for any of your configured languages, using US'
' English instead') %
{'plugin_name' : filename.split('.')[0] })
local_gettext = glocale.translation.gettext
else:
local_gettext = glocale.translation.gettext
try:
exec (compile(stream, filename, 'exec'),
make_environment(_=local_gettext), {'uistate': uistate})
for pdata in self.__plugindata[lenpd:]:
# should not be duplicate IDs in different plugins
assert pdata.id not in self.__id_to_pdata
# if pdata.id in self.__id_to_pdata:
# print("Error: %s is duplicated!" % pdata.id)
self.__id_to_pdata[pdata.id] = pdata
except ValueError as msg:
print(_('ERROR: Failed reading plugin registration %(filename)s') % \
{'filename' : filename})
print(msg)
self.__plugindata = self.__plugindata[:lenpd]
except:
print(_('ERROR: Failed reading plugin registration %(filename)s') % \
{'filename' : filename})
print("".join(traceback.format_exception(*sys.exc_info())))
self.__plugindata = self.__plugindata[:lenpd]
#check if:
# 1. plugin exists, if not remove, otherwise set module name
# 2. plugin not stable, if stable_only=True, remove
# 3. TOOL_DEBUG only if __debug__ True
rmlist = []
ind = lenpd-1
for plugin in self.__plugindata[lenpd:]:
#LOG.warning("\nPlugin scanned %s at registration", plugin.id)
ind += 1
plugin.directory = dir
if not valid_plugin_version(plugin.gramps_target_version):
print(_('ERROR: Plugin file %(filename)s has a version of '
'"%(gramps_target_version)s" which is invalid for Gramps '
'"%(gramps_version)s".' %
{'filename': os.path.join(dir, plugin.fname),
'gramps_version': GRAMPSVERSION,
'gramps_target_version': plugin.gramps_target_version,}
))
rmlist.append(ind)
continue
if not plugin.status == STABLE and self.stable_only:
rmlist.append(ind)
continue
if plugin.ptype == TOOL and plugin.category == TOOL_DEBUG \
and not __debug__:
rmlist.append(ind)
continue
if plugin.fname is None:
continue
match = pymod.match(plugin.fname)
if not match:
rmlist.append(ind)
print(_('ERROR: Wrong python file %(filename)s in register file '
'%(regfile)s') % {
'filename': os.path.join(dir, plugin.fname),
'regfile': os.path.join(dir, filename)
})
continue
if not os.path.isfile(os.path.join(dir, plugin.fname)):
rmlist.append(ind)
print(_('ERROR: Python file %(filename)s in register file '
'%(regfile)s does not exist') % {
'filename': os.path.join(dir, plugin.fname),
'regfile': os.path.join(dir, filename)
})
continue
module = match.groups()[0]
plugin.mod_name = module
plugin.fpath = dir
#LOG.warning("\nPlugin added %s at registration", plugin.id)
rmlist.reverse()
for ind in rmlist:
del self.__id_to_pdata[self.__plugindata[ind].id]
del self.__plugindata[ind]
def get_plugin(self, id):
"""
Return the :class:`PluginData` for the plugin with id
"""
assert(len(self.__id_to_pdata) == len(self.__plugindata))
# if len(self.__id_to_pdata) != len(self.__plugindata):
# print(len(self.__id_to_pdata), len(self.__plugindata))
return self.__id_to_pdata.get(id, None)
def type_plugins(self, ptype):
"""
Return a list of :class:`PluginData` that are of type ptype
"""
return [self.get_plugin(id) for id in
set([x.id for x in self.__plugindata if x.ptype == ptype])]
def report_plugins(self, gui=True):
"""
Return a list of gui or cli :class:`PluginData` that are of type REPORT
:param gui: bool, if True then gui plugin, otherwise cli plugin
"""
if gui:
return [x for x in self.type_plugins(REPORT) if REPORT_MODE_GUI
in x.report_modes]
else:
return [x for x in self.type_plugins(REPORT) if REPORT_MODE_CLI
in x.report_modes]
def tool_plugins(self, gui=True):
"""
Return a list of :class:`PluginData` that are of type TOOL
"""
if gui:
return [x for x in self.type_plugins(TOOL) if TOOL_MODE_GUI
in x.tool_modes]
else:
return [x for x in self.type_plugins(TOOL) if TOOL_MODE_CLI
in x.tool_modes]
def bookitem_plugins(self):
"""
        Return a list of REPORT :class:`PluginData` that can be used as a
        bookitem.
"""
return [x for x in self.type_plugins(REPORT) if REPORT_MODE_BKI
in x.report_modes]
def quickreport_plugins(self):
"""
Return a list of :class:`PluginData` that are of type QUICKREPORT
"""
return self.type_plugins(QUICKREPORT)
def import_plugins(self):
"""
Return a list of :class:`PluginData` that are of type IMPORT
"""
return self.type_plugins(IMPORT)
def export_plugins(self):
"""
Return a list of :class:`PluginData` that are of type EXPORT
"""
return self.type_plugins(EXPORT)
def docgen_plugins(self):
"""
Return a list of :class:`PluginData` that are of type DOCGEN
"""
return self.type_plugins(DOCGEN)
def general_plugins(self, category=None):
"""
Return a list of :class:`PluginData` that are of type GENERAL
"""
plugins = self.type_plugins(GENERAL)
if category:
return [plugin for plugin in plugins
if plugin.category == category]
return plugins
def mapservice_plugins(self):
"""
Return a list of :class:`PluginData` that are of type MAPSERVICE
"""
return self.type_plugins(MAPSERVICE)
def view_plugins(self):
"""
Return a list of :class:`PluginData` that are of type VIEW
"""
return self.type_plugins(VIEW)
def relcalc_plugins(self):
"""
Return a list of :class:`PluginData` that are of type RELCALC
"""
return self.type_plugins(RELCALC)
def gramplet_plugins(self):
"""
Return a list of :class:`PluginData` that are of type GRAMPLET
"""
return self.type_plugins(GRAMPLET)
def sidebar_plugins(self):
"""
Return a list of :class:`PluginData` that are of type SIDEBAR
"""
return self.type_plugins(SIDEBAR)
def database_plugins(self):
"""
Return a list of :class:`PluginData` that are of type DATABASE
"""
return self.type_plugins(DATABASE)
def filter_load_on_reg(self):
"""
Return a list of :class:`PluginData` that have load_on_reg == True
"""
return [self.get_plugin(id) for id in
set([x.id for x in self.__plugindata
if x.load_on_reg == True])]
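# Hedged usage sketch (illustrative, never called): once registration files
# have been scanned, callers can query the singleton register. The directory
# and file name below are assumptions, not paths from the original project.
def _example_query_register():
    gpr = PluginRegister.get_instance()
    gpr.scan_dir('/path/to/plugins', ['example.gpr.py'])   # hypothetical dir
    gui_reports = gpr.report_plugins(gui=True)    # plugins with REPORT_MODE_GUI
    cli_tools = gpr.tool_plugins(gui=False)       # plugins with TOOL_MODE_CLI
    return gui_reports, cli_tools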
| gpl-2.0 | 1,529,820,115,045,267,700 | -3,750,738,081,154,043,000 | 34.262343 | 108 | 0.596518 | false |
iglootools/genconf | genconf/filegenerator/_filegenerator.py | 2 | 2425 | """
Copyright 2011 Sami Dalouche
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import codecs
from genconf.filegenerator._defaulteventlistener import DefaultEventListener
from genconf.filegenerator._defaulterrorlistener import DefaultErrorListener
from genconf.manifest import TemplateNotFoundException, TemplateProcessingException
class FileGenerator(object):
def __init__(self, template_loader, targetdir):
assert template_loader is not None, "template_loader is required"
assert targetdir is not None, "targetdir is required"
self._template_loader = template_loader
self._targetdir = targetdir
def generate_files(self, manifest, error_listener=DefaultErrorListener(), event_listener=DefaultEventListener()):
profiles = manifest.concrete_profiles()
for p in profiles:
event_listener.on_before_profile(p)
for f in p.output_files:
filename = os.path.join(self._targetdir, f.target_path)
event_listener.on_before_file_update(filename)
try:
directory = os.path.dirname(filename)
if not os.path.exists(directory):
os.makedirs(directory)
content = f.render(self._template_loader)
with codecs.open(filename, "wb", encoding="utf-8") as f:
f.write(content)
event_listener.on_after_file_update(filename, content)
except TemplateNotFoundException as e:
error_listener.on_template_not_found(e)
except TemplateProcessingException as e:
error_listener.on_template_processing_error(e)
except Exception as e:
error_listener.on_write_error(filename, e)
event_listener.on_after_profile(p) | apache-2.0 | -6,481,493,706,391,407,000 | 2,719,925,360,520,896,500 | 46.568627 | 117 | 0.652371 | false |
wgcv/SWW-Crashphone | lib/python2.7/site-packages/django/contrib/auth/management/commands/createsuperuser.py | 63 | 7419 | """
Management utility to create superusers.
"""
from __future__ import unicode_literals
import getpass
import sys
from optparse import make_option
from django.contrib.auth import get_user_model
from django.contrib.auth.management import get_default_username
from django.core import exceptions
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.encoding import force_str
from django.utils.six.moves import input
from django.utils.text import capfirst
class NotRunningInTTYException(Exception):
pass
class Command(BaseCommand):
def __init__(self, *args, **kwargs):
# Options are defined in an __init__ method to support swapping out
# custom user models in tests.
super(Command, self).__init__(*args, **kwargs)
self.UserModel = get_user_model()
self.username_field = self.UserModel._meta.get_field(self.UserModel.USERNAME_FIELD)
self.option_list = BaseCommand.option_list + (
make_option('--%s' % self.UserModel.USERNAME_FIELD, dest=self.UserModel.USERNAME_FIELD, default=None,
help='Specifies the login for the superuser.'),
make_option('--noinput', action='store_false', dest='interactive', default=True,
help=('Tells Django to NOT prompt the user for input of any kind. '
'You must use --%s with --noinput, along with an option for '
'any other required field. Superusers created with --noinput will '
' not be able to log in until they\'re given a valid password.' %
self.UserModel.USERNAME_FIELD)),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Specifies the database to use. Default is "default".'),
) + tuple(
make_option('--%s' % field, dest=field, default=None,
help='Specifies the %s for the superuser.' % field)
for field in self.UserModel.REQUIRED_FIELDS
)
option_list = BaseCommand.option_list
help = 'Used to create a superuser.'
def execute(self, *args, **options):
self.stdin = options.get('stdin', sys.stdin) # Used for testing
return super(Command, self).execute(*args, **options)
def handle(self, *args, **options):
username = options.get(self.UserModel.USERNAME_FIELD, None)
interactive = options.get('interactive')
verbosity = int(options.get('verbosity', 1))
database = options.get('database')
# If not provided, create the user with an unusable password
password = None
user_data = {}
# Do quick and dirty validation if --noinput
if not interactive:
try:
if not username:
raise CommandError("You must use --%s with --noinput." %
self.UserModel.USERNAME_FIELD)
username = self.username_field.clean(username, None)
for field_name in self.UserModel.REQUIRED_FIELDS:
if options.get(field_name):
field = self.UserModel._meta.get_field(field_name)
user_data[field_name] = field.clean(options[field_name], None)
else:
raise CommandError("You must use --%s with --noinput." % field_name)
except exceptions.ValidationError as e:
raise CommandError('; '.join(e.messages))
else:
# Prompt for username/password, and any other required fields.
# Enclose this whole thing in a try/except to trap for a
# keyboard interrupt and exit gracefully.
default_username = get_default_username()
try:
if hasattr(self.stdin, 'isatty') and not self.stdin.isatty():
raise NotRunningInTTYException("Not running in a TTY")
# Get a username
verbose_field_name = self.username_field.verbose_name
while username is None:
if not username:
input_msg = capfirst(verbose_field_name)
if default_username:
input_msg = "%s (leave blank to use '%s')" % (
input_msg, default_username)
raw_value = input(force_str('%s: ' % input_msg))
if default_username and raw_value == '':
raw_value = default_username
try:
username = self.username_field.clean(raw_value, None)
except exceptions.ValidationError as e:
self.stderr.write("Error: %s" % '; '.join(e.messages))
username = None
continue
try:
self.UserModel._default_manager.db_manager(database).get_by_natural_key(username)
except self.UserModel.DoesNotExist:
pass
else:
self.stderr.write("Error: That %s is already taken." %
verbose_field_name)
username = None
for field_name in self.UserModel.REQUIRED_FIELDS:
field = self.UserModel._meta.get_field(field_name)
user_data[field_name] = options.get(field_name)
while user_data[field_name] is None:
raw_value = input(force_str('%s: ' % capfirst(field.verbose_name)))
try:
user_data[field_name] = field.clean(raw_value, None)
except exceptions.ValidationError as e:
self.stderr.write("Error: %s" % '; '.join(e.messages))
user_data[field_name] = None
# Get a password
while password is None:
if not password:
password = getpass.getpass()
password2 = getpass.getpass(force_str('Password (again): '))
if password != password2:
self.stderr.write("Error: Your passwords didn't match.")
password = None
continue
if password.strip() == '':
self.stderr.write("Error: Blank passwords aren't allowed.")
password = None
continue
except KeyboardInterrupt:
self.stderr.write("\nOperation cancelled.")
sys.exit(1)
except NotRunningInTTYException:
self.stdout.write(
"Superuser creation skipped due to not running in a TTY. "
"You can run `manage.py createsuperuser` in your project "
"to create one manually."
)
if username:
user_data[self.UserModel.USERNAME_FIELD] = username
user_data['password'] = password
self.UserModel._default_manager.db_manager(database).create_superuser(**user_data)
if verbosity >= 1:
self.stdout.write("Superuser created successfully.")
| apache-2.0 | -8,387,353,485,464,456,000 | -3,042,738,459,695,277,600 | 44.796296 | 113 | 0.544683 | false |
redbear/micropython | tests/extmod/ure1.py | 35 | 1169 | try:
import ure as re
except ImportError:
import re
r = re.compile(".+")
m = r.match("abc")
print(m.group(0))
try:
m.group(1)
except IndexError:
print("IndexError")
r = re.compile("(.+)1")
m = r.match("xyz781")
print(m.group(0))
print(m.group(1))
try:
m.group(2)
except IndexError:
print("IndexError")
r = re.compile("[a-cu-z]")
m = r.match("a")
print(m.group(0))
m = r.match("z")
print(m.group(0))
m = r.match("d")
print(m)
m = r.match("A")
print(m)
print("===")
r = re.compile("[^a-cu-z]")
m = r.match("a")
print(m)
m = r.match("z")
print(m)
m = r.match("d")
print(m.group(0))
m = r.match("A")
print(m.group(0))
r = re.compile("o+")
m = r.search("foobar")
print(m.group(0))
try:
m.group(1)
except IndexError:
print("IndexError")
m = re.match(".*", "foo")
print(m.group(0))
m = re.search("w.r", "hello world")
print(m.group(0))
m = re.match('a+?', 'ab'); print(m.group(0))
m = re.match('a*?', 'ab'); print(m.group(0))
m = re.match('^ab$', 'ab'); print(m.group(0))
m = re.match('a|b', 'b'); print(m.group(0))
m = re.match('a|b|c', 'c'); print(m.group(0))
try:
re.compile("*")
except:
print("Caught invalid regex")
| mit | -5,902,244,517,122,516,000 | -4,176,366,136,636,077,000 | 15.942029 | 45 | 0.568007 | false |
PrismTech/opensplice | build/scripts/overnight/python/DBMSConnect.py | 2 | 12435 | import sys
import os
import json
import shutil
import subprocess
import fileinput
import platform
import time
from shutil import copy
import example_logparser
from example_exceptions import LogCheckFail
from Example import Example
from Example import ExeThread
import pdb
"""
Class specific to the DBMSConnect example. It is very different from all
other examples: it has a different directory structure and also runs more
than a simple publisher/subscriber.
"""
class dbmsconnect (Example):
def __init__(self, host, logger):
super(dbmsconnect, self).__init__(host, logger, "dbmsconnect", "services")
with open ('examples.json') as data_file:
data = json.load(data_file)
self.odbcMsgBoard_params = data["services"]["dbmsconnect"]["params"]["odbcMsgBoard_params"]
self.odbcChatter1_params = data["services"]["dbmsconnect"]["params"]["odbcChatter1_params"]
self.odbcChatter2_params = data["services"]["dbmsconnect"]["params"]["odbcChatter2_params"]
self.cppChatter1_params = data["services"]["dbmsconnect"]["params"]["cppChatter1_params"]
self.cppChatter2_params = data["services"]["dbmsconnect"]["params"]["cppChatter2_params"]
self.odbcChatterQuit_params = data["services"]["dbmsconnect"]["params"]["odbcChatterQuit_params"]
self.cppChatterQuit_params = data["services"]["dbmsconnect"]["params"]["cppChatterQuit_params"]
super(dbmsconnect, self).setPath(os.path.join(os.environ['OSPL_HOME'], 'examples', 'services', 'dbmsconnect', 'SQL', 'C++', 'ODBC'))
if os.environ['EXRUNTYPE'] == "shm":
self.uri = "file://" + os.path.join(os.environ['OSPL_HOME'], 'examples', 'services', 'dbmsconnect', self.shm_uri)
else:
self.uri = "file://" + os.path.join(os.environ['OSPL_HOME'], 'examples', 'services', 'dbmsconnect', self.sp_uri)
self.runDBMSConnect = self.host.runExample(self.expath, self.name, "")
def runExample(self):
print "In runExample for " + self.expath + ": " + self.name
currPath = os.getcwd()
try:
self.exdir = "servicesdbmsconnectSQLCPPODBC"
exSfx = ""
if self.host.isWindows():
exSfx = ".exe"
os.putenv("ODBC_LIB_NAME", "odbc32")
else:
os.putenv("ODBC_LIB_NAME", "odbc")
msg = "NONE"
result = "PASS"
dsn = self.odbcMsgBoard_params[0]
os.putenv("MY_DSN", dsn);
os.environ["MY_DSN"]= dsn;
os.putenv("OSPL_URI", self.uri)
os.environ["OSPL_URI"] = self.uri
try:
self.convertConfig()
self.setLogPathAndLogs("", "")
odbcMsgBoardLog = os.path.join(self.pPath, 'odbcMsgBoard.log')
odbcChatter1Log = os.path.join(self.pPath, 'odbcChatter1.log')
odbcChatter2Log = os.path.join(self.pPath, 'odbcChatter2.log')
odbcChatterQuitLog = os.path.join(self.pPath, 'odbcChatterQuit.log')
cppMsgBoardLog = os.path.join(self.pPath, 'cppMsgBoard.log')
cppChatter1Log = os.path.join(self.pPath, 'cppChatter1.log')
cppChatter2Log = os.path.join(self.pPath, 'cppChatter2.log')
cppChatterQuitLog = os.path.join(self.pPath, 'cppChatterQuit.log')
with open ('examples.json') as data_file:
data = json.load(data_file)
odbcMsgBoardName = data[self.expath][self.name]["executables"]["odbc"]["msgBoardName"]
odbcChatterName = data[self.expath][self.name]["executables"]["odbc"]["chatterName"]
cppMsgBoardName = data[self.expath][self.name]["executables"]["cpp"]["msgBoardName"]
cppChatterName = data[self.expath][self.name]["executables"]["cpp"]["chatterName"]
odbcmsgboard_conds_file = data[self.expath][self.name]["log_conditions_file"]["odbcmsgboard_conds"]
cppmsgboard_conds_file = data[self.expath][self.name]["log_conditions_file"]["msgboard_conds"]
odbcchatter_conds_file = data[self.expath][self.name]["log_conditions_file"]["odbcchatter_conds"]
chatter_conds_file = data[self.expath][self.name]["log_conditions_file"]["chatter_conds"]
odbcmsgboard_conds = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'yaml', odbcmsgboard_conds_file)
odbcchatter_conds = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'yaml', odbcchatter_conds_file)
cppmsgboard_conds = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'yaml', cppmsgboard_conds_file)
cppchatter_conds = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'yaml', chatter_conds_file)
if odbcMsgBoardName != "":
if self.classpath == "":
odbcMsgBoardExe = os.path.join(self.pPath, odbcMsgBoardName) + exSfx
if not os.path.isfile (odbcMsgBoardExe):
msg = "MissingExecutable: " + odbcMsgBoardExe
else:
odbcMsgBoardExe = odbcMsgBoardName
if odbcChatterName != "":
if self.classpath == "":
odbcChatterNameExe = os.path.join(self.pPath, odbcChatterName) + exSfx
if not os.path.isfile (odbcChatterNameExe):
msg = "MissingExecutable: " + odbcChatterNameExe
else:
odbcChatterNameExe = odbcChatterName
cppPath = os.path.join(os.environ['OSPL_HOME'], 'examples', "dcps", "Tutorial", "cpp", "standalone")
if cppMsgBoardName != "":
if self.classpath == "":
cppMsgBoardExe = os.path.join(cppPath, cppMsgBoardName) + exSfx
if not os.path.isfile (cppMsgBoardExe):
msg = "MissingExecutable: " + cppMsgBoardExe
else:
cppMsgBoardExe = cppMsgBoardName
if cppChatterName != "":
if self.classpath == "":
cppChatterNameExe = os.path.join(cppPath, cppChatterName) + exSfx
if not os.path.isfile (cppChatterNameExe):
msg = "MissingExecutable: " + cppChatterNameExe
else:
cppChatterNameExe = cppChatterName
if msg == "NONE":
odbcMsgBoard_Thread = ExeThread(self.classpath, odbcMsgBoardLog, "", odbcMsgBoardExe, self.odbcMsgBoard_params, self.example_timeout * 2)
odbcChatter1_Thread = ExeThread(self.classpath, odbcChatter1Log, "", odbcChatterNameExe, self.odbcChatter1_params, self.example_timeout)
odbcChatter2_Thread = ExeThread(self.classpath, odbcChatter2Log, "", odbcChatterNameExe, self.odbcChatter2_params, self.example_timeout)
cppMsgBoard_Thread = ExeThread(self.classpath, cppMsgBoardLog, "", cppMsgBoardExe, "", self.example_timeout * 2)
cppChatter1_Thread = ExeThread(self.classpath, cppChatter1Log, "", cppChatterNameExe, self.cppChatter1_params, self.example_timeout)
cppChatter2_Thread = ExeThread(self.classpath, cppChatter2Log, "", cppChatterNameExe, self.cppChatter2_params, self.example_timeout)
odbcChatterQuit_Thread = ExeThread(self.classpath, odbcChatterQuitLog, "", odbcChatterNameExe, self.odbcChatterQuit_params, self.example_timeout)
cppChatterQuit_Thread = ExeThread(self.classpath, cppChatterQuitLog, "", cppChatterNameExe, self.cppChatterQuit_params, self.example_timeout)
os.chdir(self.pPath)
self.startOSPL()
cppMsgBoard_Thread.start()
odbcMsgBoard_Thread.start()
time.sleep(5)
odbcChatter1_Thread.start()
odbcChatter2_Thread.start()
cppChatter1_Thread.start()
cppChatter2_Thread.start()
odbcChatter1_Thread.join(self.example_timeout)
odbcChatter2_Thread.join(self.example_timeout)
cppChatter1_Thread.join(self.example_timeout)
cppChatter2_Thread.join(self.example_timeout)
time.sleep(10)
odbcChatterQuit_Thread.start()
cppChatterQuit_Thread.start()
odbcChatterQuit_Thread.join(self.example_timeout)
cppChatterQuit_Thread.join(self.example_timeout)
cppMsgBoard_Thread.join(self.example_timeout)
odbcMsgBoard_Thread.join(self.example_timeout)
except Exception as ex:
msg = "Exception running ", str(ex)
try:
self.stopOSPL()
except Exception as ex:
print "Exception stopping OpenSplice ", str(ex)
if msg == "NONE":
try:
#Allow time for all messages to be written to log
time.sleep (15)
super(dbmsconnect, self).copyLogs()
if os.path.isfile (self.ospl_error_log):
msg = "ospl-error.log found"
print "checking odbcMsgBoardLog with odbcmsgboard_conds", odbcMsgBoardLog, odbcmsgboard_conds
self.checkResults(odbcMsgBoardLog, odbcmsgboard_conds)
print "checking odbcChatter1Log with odbcchatter_conds", odbcChatter1Log, odbcchatter_conds
self.checkResults(odbcChatter1Log, odbcchatter_conds)
print "checking odbcChatter2Log with odbcchatter_conds", odbcChatter2Log, odbcchatter_conds
self.checkResults(odbcChatter2Log, odbcchatter_conds)
self.checkResults(cppMsgBoardLog, cppmsgboard_conds)
self.checkResults(cppChatter1Log, cppchatter_conds)
self.checkResults(cppChatter2Log, cppchatter_conds)
self.checkOSPLInfoLog(self.ospl_info_log)
except LogCheckFail as lf:
reason = str(lf)
if "OpenSpliceDDS Warnings" in reason:
msg = "LogCheckFail: OpenSpliceDDS Warnings in ospl-info.log"
else:
msg = "LogCheckFail: " + str(lf)
except Exception:
msg = "Exception checking logs " + str(sys.exc_info()[0])
logdir = os.path.join(os.environ['LOGDIR'], "examples", "run_" + os.environ['EXRUNTYPE'], self.exdir)
dbmsconnLog = os.path.join(self.pPath, 'dbmsconnect.log')
print "dbmsconnect.log is ", dbmsconnLog
copy(dbmsconnLog, logdir)
if msg != "NONE":
result = "FAIL"
try:
self.writeResult (result, self.exdir, "", msg)
except Exception as ex:
print "Exception writing result", str(ex)
try:
self.cleanUp()
except Exception as ex:
print "Exception cleaning up", str(ex)
except Exception as ex:
print "Unexpected exception ", str(ex)
finally:
os.chdir(currPath)
def convertConfig(self):
if os.environ['EXRUNTYPE'] == "shm":
uri = self.shm_uri
else:
uri = self.sp_uri
fcfg = os.path.join(os.environ['OSPL_HOME'], 'examples', 'services', 'dbmsconnect', uri)
forig = os.path.join(os.environ['OSPL_HOME'], 'examples', 'services', 'dbmsconnect', uri+'.orig')
os.rename(fcfg, forig)
if self.host.name != "default":
hn = self.host.name
else:
hn = platform.uname()[1]
prefix = hn[:16].replace('-', '_') + '_'
fout = open(fcfg, "w")
for line in fileinput.input(forig):
fout.write(line.replace("Sql", prefix))
fout.close()
| gpl-3.0 | 806,010,241,945,947,900 | 4,646,934,810,231,063,000 | 46.281369 | 165 | 0.567029 | false |
MakeHer/edx-platform | cms/djangoapps/contentstore/views/tests/test_course_index.py | 25 | 36973 | """
Unit tests for getting the list of courses and the course outline.
"""
import ddt
import json
import lxml
import datetime
import mock
import pytz
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.test.utils import override_settings
from django.utils.translation import ugettext as _
from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url, reverse_library_url, add_instructor, reverse_usage_url
from contentstore.views.course import (
course_outline_initial_state, reindex_course_and_check_access, _deprecated_blocks_info
)
from contentstore.views.item import create_xblock_info, VisibilityState
from course_action_state.managers import CourseRerunUIStateManager
from course_action_state.models import CourseRerunState
from opaque_keys.edx.locator import CourseLocator
from search.api import perform_search
from student.auth import has_course_author_access
from student.tests.factories import UserFactory
from util.date_utils import get_default_time_display
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, LibraryFactory
class TestCourseIndex(CourseTestCase):
"""
Unit tests for getting the list of courses and the course outline.
"""
def setUp(self):
"""
Add a course with odd characters in the fields
"""
super(TestCourseIndex, self).setUp()
# had a problem where index showed course but has_access failed to retrieve it for non-staff
self.odd_course = CourseFactory.create(
org='test.org_1-2',
number='test-2.3_course',
display_name='dotted.course.name-2',
)
def check_index_and_outline(self, authed_client):
"""
Test getting the list of courses and then pulling up their outlines
"""
index_url = '/home/'
index_response = authed_client.get(index_url, {}, HTTP_ACCEPT='text/html')
parsed_html = lxml.html.fromstring(index_response.content)
course_link_eles = parsed_html.find_class('course-link')
self.assertGreaterEqual(len(course_link_eles), 2)
for link in course_link_eles:
self.assertRegexpMatches(
link.get("href"),
'course/{}'.format(settings.COURSE_KEY_PATTERN)
)
# now test that url
outline_response = authed_client.get(link.get("href"), {}, HTTP_ACCEPT='text/html')
# ensure it has the expected 2 self referential links
outline_parsed = lxml.html.fromstring(outline_response.content)
outline_link = outline_parsed.find_class('course-link')[0]
self.assertEqual(outline_link.get("href"), link.get("href"))
course_menu_link = outline_parsed.find_class('nav-course-courseware-outline')[0]
self.assertEqual(course_menu_link.find("a").get("href"), link.get("href"))
def test_libraries_on_course_index(self):
"""
Test getting the list of libraries from the course listing page
"""
# Add a library:
lib1 = LibraryFactory.create()
index_url = '/home/'
index_response = self.client.get(index_url, {}, HTTP_ACCEPT='text/html')
parsed_html = lxml.html.fromstring(index_response.content)
library_link_elements = parsed_html.find_class('library-link')
self.assertEqual(len(library_link_elements), 1)
link = library_link_elements[0]
self.assertEqual(
link.get("href"),
reverse_library_url('library_handler', lib1.location.library_key),
)
# now test that url
outline_response = self.client.get(link.get("href"), {}, HTTP_ACCEPT='text/html')
self.assertEqual(outline_response.status_code, 200)
def test_is_staff_access(self):
"""
Test that people with is_staff see the courses and can navigate into them
"""
self.check_index_and_outline(self.client)
def test_negative_conditions(self):
"""
Test the error conditions for the access
"""
outline_url = reverse_course_url('course_handler', self.course.id)
# register a non-staff member and try to delete the course branch
non_staff_client, _ = self.create_non_staff_authed_user_client()
response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 403)
def test_course_staff_access(self):
"""
Make and register course_staff and ensure they can access the courses
"""
course_staff_client, course_staff = self.create_non_staff_authed_user_client()
for course in [self.course, self.odd_course]:
permission_url = reverse_course_url('course_team_handler', course.id, kwargs={'email': course_staff.email})
self.client.post(
permission_url,
data=json.dumps({"role": "staff"}),
content_type="application/json",
HTTP_ACCEPT="application/json",
)
# test access
self.check_index_and_outline(course_staff_client)
def test_json_responses(self):
outline_url = reverse_course_url('course_handler', self.course.id)
chapter = ItemFactory.create(parent_location=self.course.location, category='chapter', display_name="Week 1")
lesson = ItemFactory.create(parent_location=chapter.location, category='sequential', display_name="Lesson 1")
subsection = ItemFactory.create(
parent_location=lesson.location,
category='vertical',
display_name='Subsection 1'
)
ItemFactory.create(parent_location=subsection.location, category="video", display_name="My Video")
resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
# First spot check some values in the root response
self.assertEqual(json_response['category'], 'course')
self.assertEqual(json_response['id'], unicode(self.course.location))
self.assertEqual(json_response['display_name'], self.course.display_name)
self.assertTrue(json_response['published'])
self.assertIsNone(json_response['visibility_state'])
# Now verify the first child
children = json_response['child_info']['children']
self.assertTrue(len(children) > 0)
first_child_response = children[0]
self.assertEqual(first_child_response['category'], 'chapter')
self.assertEqual(first_child_response['id'], unicode(chapter.location))
self.assertEqual(first_child_response['display_name'], 'Week 1')
self.assertTrue(json_response['published'])
self.assertEqual(first_child_response['visibility_state'], VisibilityState.unscheduled)
self.assertTrue(len(first_child_response['child_info']['children']) > 0)
# Finally, validate the entire response for consistency
self.assert_correct_json_response(json_response)
def test_notifications_handler_get(self):
state = CourseRerunUIStateManager.State.FAILED
action = CourseRerunUIStateManager.ACTION
should_display = True
# try when no notification exists
notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': 1,
})
resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')
# verify that we get an empty dict out
self.assertEquals(resp.status_code, 400)
# create a test notification
rerun_state = CourseRerunState.objects.update_state(
course_key=self.course.id,
new_state=state,
allow_not_found=True
)
CourseRerunState.objects.update_should_display(
entry_id=rerun_state.id,
user=UserFactory(),
should_display=should_display
)
# try to get information on this notification
notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': rerun_state.id,
})
resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
self.assertEquals(json_response['state'], state)
self.assertEquals(json_response['action'], action)
self.assertEquals(json_response['should_display'], should_display)
def test_notifications_handler_dismiss(self):
state = CourseRerunUIStateManager.State.FAILED
should_display = True
rerun_course_key = CourseLocator(org='testx', course='test_course', run='test_run')
# add an instructor to this course
user2 = UserFactory()
add_instructor(rerun_course_key, self.user, user2)
# create a test notification
rerun_state = CourseRerunState.objects.update_state(
course_key=rerun_course_key,
new_state=state,
allow_not_found=True
)
CourseRerunState.objects.update_should_display(
entry_id=rerun_state.id,
user=user2,
should_display=should_display
)
# try to get information on this notification
notification_dismiss_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': rerun_state.id,
})
resp = self.client.delete(notification_dismiss_url)
self.assertEquals(resp.status_code, 200)
with self.assertRaises(CourseRerunState.DoesNotExist):
# delete nofications that are dismissed
CourseRerunState.objects.get(id=rerun_state.id)
self.assertFalse(has_course_author_access(user2, rerun_course_key))
def assert_correct_json_response(self, json_response):
"""
Asserts that the JSON response is syntactically consistent
"""
self.assertIsNotNone(json_response['display_name'])
self.assertIsNotNone(json_response['id'])
self.assertIsNotNone(json_response['category'])
self.assertTrue(json_response['published'])
if json_response.get('child_info', None):
for child_response in json_response['child_info']['children']:
self.assert_correct_json_response(child_response)
def test_course_updates_invalid_url(self):
"""
Tests the error conditions for the invalid course updates URL.
"""
# Testing the response code by passing slash separated course id whose format is valid but no course
# having this id exists.
invalid_course_key = '{}_blah_blah_blah'.format(self.course.id)
course_updates_url = reverse_course_url('course_info_handler', invalid_course_key)
response = self.client.get(course_updates_url)
self.assertEqual(response.status_code, 404)
# Testing the response code by passing split course id whose format is valid but no course
# having this id exists.
split_course_key = CourseLocator(org='orgASD', course='course_01213', run='Run_0_hhh_hhh_hhh')
course_updates_url_split = reverse_course_url('course_info_handler', split_course_key)
response = self.client.get(course_updates_url_split)
self.assertEqual(response.status_code, 404)
# Testing the response by passing split course id whose format is invalid.
invalid_course_id = 'invalid.course.key/{}'.format(split_course_key)
course_updates_url_split = reverse_course_url('course_info_handler', invalid_course_id)
response = self.client.get(course_updates_url_split)
self.assertEqual(response.status_code, 404)
def test_course_index_invalid_url(self):
"""
Tests the error conditions for the invalid course index URL.
"""
# Testing the response code by passing slash separated course key, no course
# having this key exists.
invalid_course_key = '{}_some_invalid_run'.format(self.course.id)
course_outline_url = reverse_course_url('course_handler', invalid_course_key)
response = self.client.get_html(course_outline_url)
self.assertEqual(response.status_code, 404)
# Testing the response code by passing split course key, no course
# having this key exists.
split_course_key = CourseLocator(org='invalid_org', course='course_01111', run='Run_0_invalid')
course_outline_url_split = reverse_course_url('course_handler', split_course_key)
response = self.client.get_html(course_outline_url_split)
self.assertEqual(response.status_code, 404)
def test_course_outline_with_display_course_number_as_none(self):
"""
        Tests course outline when the 'display_coursenumber' field is None.
"""
# Change 'display_coursenumber' field to None and update the course.
self.course.display_coursenumber = None
updated_course = self.update_course(self.course, self.user.id)
# Assert that 'display_coursenumber' field has been changed successfully.
self.assertEqual(updated_course.display_coursenumber, None)
# Perform GET request on course outline url with the course id.
course_outline_url = reverse_course_url('course_handler', updated_course.id)
response = self.client.get_html(course_outline_url)
# Assert that response code is 200.
self.assertEqual(response.status_code, 200)
# Assert that 'display_course_number' is being set to "" (as display_coursenumber was None).
self.assertIn('display_course_number: ""', response.content)
@ddt.ddt
class TestCourseOutline(CourseTestCase):
"""
Unit tests for the course outline.
"""
def setUp(self):
"""
Set up the for the course outline tests.
"""
super(TestCourseOutline, self).setUp()
self.chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Week 1"
)
self.sequential = ItemFactory.create(
parent_location=self.chapter.location, category='sequential', display_name="Lesson 1"
)
self.vertical = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Subsection 1'
)
self.video = ItemFactory.create(
parent_location=self.vertical.location, category="video", display_name="My Video"
)
def test_json_responses(self):
"""
Verify the JSON responses returned for the course.
"""
outline_url = reverse_course_url('course_handler', self.course.id)
resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
# First spot check some values in the root response
self.assertEqual(json_response['category'], 'course')
self.assertEqual(json_response['id'], unicode(self.course.location))
self.assertEqual(json_response['display_name'], self.course.display_name)
self.assertTrue(json_response['published'])
self.assertIsNone(json_response['visibility_state'])
# Now verify the first child
children = json_response['child_info']['children']
self.assertTrue(len(children) > 0)
first_child_response = children[0]
self.assertEqual(first_child_response['category'], 'chapter')
self.assertEqual(first_child_response['id'], unicode(self.chapter.location))
self.assertEqual(first_child_response['display_name'], 'Week 1')
self.assertTrue(json_response['published'])
self.assertEqual(first_child_response['visibility_state'], VisibilityState.unscheduled)
self.assertTrue(len(first_child_response['child_info']['children']) > 0)
# Finally, validate the entire response for consistency
self.assert_correct_json_response(json_response)
def assert_correct_json_response(self, json_response):
"""
Asserts that the JSON response is syntactically consistent
"""
self.assertIsNotNone(json_response['display_name'])
self.assertIsNotNone(json_response['id'])
self.assertIsNotNone(json_response['category'])
self.assertTrue(json_response['published'])
if json_response.get('child_info', None):
for child_response in json_response['child_info']['children']:
self.assert_correct_json_response(child_response)
def test_course_outline_initial_state(self):
course_module = modulestore().get_item(self.course.location)
course_structure = create_xblock_info(
course_module,
include_child_info=True,
include_children_predicate=lambda xblock: not xblock.category == 'vertical'
)
# Verify that None is returned for a non-existent locator
self.assertIsNone(course_outline_initial_state('no-such-locator', course_structure))
# Verify that the correct initial state is returned for the test chapter
chapter_locator = unicode(self.chapter.location)
initial_state = course_outline_initial_state(chapter_locator, course_structure)
self.assertEqual(initial_state['locator_to_show'], chapter_locator)
expanded_locators = initial_state['expanded_locators']
self.assertIn(unicode(self.sequential.location), expanded_locators)
self.assertIn(unicode(self.vertical.location), expanded_locators)
def test_start_date_on_page(self):
"""
Verify that the course start date is included on the course outline page.
"""
def _get_release_date(response):
"""Return the release date from the course page"""
parsed_html = lxml.html.fromstring(response.content)
return parsed_html.find_class('course-status')[0].find_class('status-release-value')[0].text_content()
def _assert_settings_link_present(response):
"""
Asserts there's a course settings link on the course page by the course release date.
"""
parsed_html = lxml.html.fromstring(response.content)
settings_link = parsed_html.find_class('course-status')[0].find_class('action-edit')[0].find('a')
self.assertIsNotNone(settings_link)
self.assertEqual(settings_link.get('href'), reverse_course_url('settings_handler', self.course.id))
outline_url = reverse_course_url('course_handler', self.course.id)
response = self.client.get(outline_url, {}, HTTP_ACCEPT='text/html')
# A course with the default release date should display as "Unscheduled"
self.assertEqual(_get_release_date(response), 'Unscheduled')
_assert_settings_link_present(response)
self.course.start = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc)
modulestore().update_item(self.course, ModuleStoreEnum.UserID.test)
response = self.client.get(outline_url, {}, HTTP_ACCEPT='text/html')
self.assertEqual(_get_release_date(response), get_default_time_display(self.course.start))
_assert_settings_link_present(response)
def _create_test_data(self, course_module, create_blocks=False, publish=True, block_types=None):
"""
Create data for test.
"""
if create_blocks:
for block_type in block_types:
ItemFactory.create(
parent_location=self.vertical.location,
category=block_type,
display_name='{} Problem'.format(block_type)
)
if not publish:
self.store.unpublish(self.vertical.location, self.user.id)
course_module.advanced_modules.extend(block_types)
def _verify_deprecated_info(self, course_id, advanced_modules, info, deprecated_block_types):
"""
Verify deprecated info.
"""
expected_blocks = []
for block_type in deprecated_block_types:
expected_blocks.append(
[
reverse_usage_url('container_handler', self.vertical.location),
'{} Problem'.format(block_type)
]
)
self.assertEqual(info['block_types'], deprecated_block_types)
self.assertEqual(
info['block_types_enabled'],
any(component in advanced_modules for component in deprecated_block_types)
)
self.assertItemsEqual(info['blocks'], expected_blocks)
self.assertEqual(
info['advance_settings_url'],
reverse_course_url('advanced_settings_handler', course_id)
)
@ddt.data(
{'publish': True},
{'publish': False},
)
@ddt.unpack
def test_verify_deprecated_warning_message_with_single_feature(self, publish):
"""
Verify deprecated warning info for single deprecated feature.
"""
block_types = ['notes']
with override_settings(DEPRECATED_BLOCK_TYPES=block_types):
course_module = modulestore().get_item(self.course.location)
self._create_test_data(course_module, create_blocks=True, block_types=block_types, publish=publish)
info = _deprecated_blocks_info(course_module, block_types)
self._verify_deprecated_info(
course_module.id,
course_module.advanced_modules,
info,
block_types
)
def test_verify_deprecated_warning_message_with_multiple_features(self):
"""
Verify deprecated warning info for multiple deprecated features.
"""
block_types = ['notes', 'lti']
with override_settings(DEPRECATED_BLOCK_TYPES=block_types):
course_module = modulestore().get_item(self.course.location)
self._create_test_data(course_module, create_blocks=True, block_types=block_types)
info = _deprecated_blocks_info(course_module, block_types)
self._verify_deprecated_info(course_module.id, course_module.advanced_modules, info, block_types)
@ddt.data(
{'delete_vertical': True},
{'delete_vertical': False},
)
@ddt.unpack
def test_deprecated_blocks_list_updated_correctly(self, delete_vertical):
"""
        Verify that the list of deprecated blocks shown on the banner is updated correctly.
        Here is the scenario: the list of deprecated blocks shown on the banner
        contains both published and un-published blocks. That list should be
        updated when we delete un-published block(s). This behavior should be
        the same whether we delete an unpublished vertical or an unpublished
        problem.
"""
block_types = ['notes']
course_module = modulestore().get_item(self.course.location)
vertical1 = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Vert1 Subsection1'
)
problem1 = ItemFactory.create(
parent_location=vertical1.location,
category='notes',
display_name='notes problem in vert1',
publish_item=False
)
info = _deprecated_blocks_info(course_module, block_types)
# info['blocks'] should be empty here because there is nothing
# published or un-published present
self.assertEqual(info['blocks'], [])
vertical2 = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Vert2 Subsection1'
)
ItemFactory.create(
parent_location=vertical2.location,
category='notes',
display_name='notes problem in vert2',
            publish_item=True
)
# At this point CourseStructure will contain both the above
# published and un-published verticals
info = _deprecated_blocks_info(course_module, block_types)
self.assertItemsEqual(
info['blocks'],
[
[reverse_usage_url('container_handler', vertical1.location), 'notes problem in vert1'],
[reverse_usage_url('container_handler', vertical2.location), 'notes problem in vert2']
]
)
# Delete the un-published vertical or problem so that CourseStructure updates its data
if delete_vertical:
self.store.delete_item(vertical1.location, self.user.id)
else:
self.store.delete_item(problem1.location, self.user.id)
info = _deprecated_blocks_info(course_module, block_types)
# info['blocks'] should only contain the info about vertical2 which is published.
# There shouldn't be any info present about un-published vertical1
self.assertEqual(
info['blocks'],
[[reverse_usage_url('container_handler', vertical2.location), 'notes problem in vert2']]
)
class TestCourseReIndex(CourseTestCase):
"""
Unit tests for the course outline.
"""
SUCCESSFUL_RESPONSE = _("Course has been successfully reindexed.")
def setUp(self):
"""
Set up the for the course outline tests.
"""
super(TestCourseReIndex, self).setUp()
self.course.start = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc)
modulestore().update_item(self.course, self.user.id)
self.chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Week 1"
)
self.sequential = ItemFactory.create(
parent_location=self.chapter.location, category='sequential', display_name="Lesson 1"
)
self.vertical = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Subsection 1'
)
self.video = ItemFactory.create(
parent_location=self.vertical.location, category="video", display_name="My Video"
)
self.html = ItemFactory.create(
parent_location=self.vertical.location, category="html", display_name="My HTML",
data="<div>This is my unique HTML content</div>",
)
def test_reindex_course(self):
"""
Verify that course gets reindexed.
"""
index_url = reverse_course_url('course_search_index_handler', self.course.id)
response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json')
        # The GET request should succeed and return the reindex success message
self.assertIn(self.SUCCESSFUL_RESPONSE, response.content)
self.assertEqual(response.status_code, 200)
response = self.client.post(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.content, '')
self.assertEqual(response.status_code, 405)
self.client.logout()
response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 302)
def test_negative_conditions(self):
"""
Test the error conditions for the access
"""
index_url = reverse_course_url('course_search_index_handler', self.course.id)
        # register a non-staff member and try to trigger a reindex
non_staff_client, _ = self.create_non_staff_authed_user_client()
response = non_staff_client.get(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 403)
def test_empty_content_type(self):
"""
Test json content type is set if '' is selected
"""
index_url = reverse_course_url('course_search_index_handler', self.course.id)
response = self.client.get(index_url, {}, CONTENT_TYPE='')
        # Even with an empty content type, the GET request should return the success message
self.assertIn(self.SUCCESSFUL_RESPONSE, response.content)
self.assertEqual(response.status_code, 200)
@mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary')
def test_reindex_course_search_index_error(self, mock_index_dictionary):
"""
Test json response with mocked error data for html
"""
# set mocked exception response
err = SearchIndexingError
mock_index_dictionary.return_value = err
index_url = reverse_course_url('course_search_index_handler', self.course.id)
# Start manual reindex and check error in response
response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 500)
def test_reindex_json_responses(self):
"""
Test json response with real data
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# Start manual reindex
reindex_course_and_check_access(self.course.id, self.user)
# Check results remain the same
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
@mock.patch('xmodule.video_module.VideoDescriptor.index_dictionary')
def test_reindex_video_error_json_responses(self, mock_index_dictionary):
"""
Test json response with mocked error data for video
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = SearchIndexingError
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
@mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary')
def test_reindex_html_error_json_responses(self, mock_index_dictionary):
"""
Test json response with mocked error data for html
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = SearchIndexingError
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
@mock.patch('xmodule.seq_module.SequenceDescriptor.index_dictionary')
def test_reindex_seq_error_json_responses(self, mock_index_dictionary):
"""
Test json response with mocked error data for sequence
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
@mock.patch('xmodule.modulestore.mongo.base.MongoModuleStore.get_course')
def test_reindex_no_item(self, mock_get_course):
"""
Test system logs an error if no item found.
"""
# set mocked exception response
err = ItemNotFoundError
mock_get_course.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
def test_reindex_no_permissions(self):
# register a non-staff member and try to delete the course branch
user2 = UserFactory()
with self.assertRaises(PermissionDenied):
reindex_course_and_check_access(self.course.id, user2)
def test_indexing_responses(self):
"""
Test do_course_reindex response with real data
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# Start manual reindex
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
# Check results are the same following reindex
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
@mock.patch('xmodule.video_module.VideoDescriptor.index_dictionary')
def test_indexing_video_error_responses(self, mock_index_dictionary):
"""
Test do_course_reindex response with mocked error data for video
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
@mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary')
def test_indexing_html_error_responses(self, mock_index_dictionary):
"""
Test do_course_reindex response with mocked error data for html
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
@mock.patch('xmodule.seq_module.SequenceDescriptor.index_dictionary')
def test_indexing_seq_error_responses(self, mock_index_dictionary):
"""
Test do_course_reindex response with mocked error data for sequence
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
@mock.patch('xmodule.modulestore.mongo.base.MongoModuleStore.get_course')
def test_indexing_no_item(self, mock_get_course):
"""
Test system logs an error if no item found.
"""
# set mocked exception response
err = ItemNotFoundError
mock_get_course.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
| agpl-3.0 | 3,770,301,057,566,557,000 | -5,300,367,601,251,159,000 | 41.158495 | 119 | 0.644687 | false |
robbiet480/home-assistant | homeassistant/components/xs1/__init__.py | 8 | 2700 | """Support for the EZcontrol XS1 gateway."""
import asyncio
import logging
import voluptuous as vol
import xs1_api_client
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DOMAIN = "xs1"
ACTUATORS = "actuators"
SENSORS = "sensors"
# define configuration parameters
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=80): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_USERNAME): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
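# Illustrative sketch (not part of the original integration): CONFIG_SCHEMA is a
# voluptuous Schema, so it can be called directly to validate a raw config
# mapping. The host and credentials below are hypothetical.
def _example_validated_config():
    """Return a validated example config dict; illustrative only."""
    return CONFIG_SCHEMA(
        {
            DOMAIN: {
                CONF_HOST: "192.168.1.50",
                CONF_PORT: "80",
                CONF_SSL: False,
                CONF_USERNAME: "admin",
                CONF_PASSWORD: "secret",
            }
        }
    )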
XS1_COMPONENTS = ["climate", "sensor", "switch"]
# Lock used to limit the amount of concurrent update requests
# as the XS1 Gateway can only handle a very
# small amount of concurrent requests
UPDATE_LOCK = asyncio.Lock()
def setup(hass, config):
"""Set up XS1 Component."""
_LOGGER.debug("Initializing XS1")
host = config[DOMAIN][CONF_HOST]
port = config[DOMAIN][CONF_PORT]
ssl = config[DOMAIN][CONF_SSL]
user = config[DOMAIN].get(CONF_USERNAME)
password = config[DOMAIN].get(CONF_PASSWORD)
# initialize XS1 API
try:
xs1 = xs1_api_client.XS1(
host=host, port=port, ssl=ssl, user=user, password=password
)
except ConnectionError as error:
_LOGGER.error(
"Failed to create XS1 API client because of a connection error: %s", error,
)
return False
_LOGGER.debug("Establishing connection to XS1 gateway and retrieving data...")
hass.data[DOMAIN] = {}
actuators = xs1.get_all_actuators(enabled=True)
sensors = xs1.get_all_sensors(enabled=True)
hass.data[DOMAIN][ACTUATORS] = actuators
hass.data[DOMAIN][SENSORS] = sensors
_LOGGER.debug("Loading components for XS1 platform...")
# Load components for supported devices
for component in XS1_COMPONENTS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
class XS1DeviceEntity(Entity):
"""Representation of a base XS1 device."""
def __init__(self, device):
"""Initialize the XS1 device."""
self.device = device
async def async_update(self):
"""Retrieve latest device state."""
async with UPDATE_LOCK:
await self.hass.async_add_executor_job(self.device.update)
| apache-2.0 | 636,340,912,472,140,400 | 2,212,344,759,170,845,200 | 26.55102 | 87 | 0.651481 | false |
dhruv13J/scikit-learn | sklearn/decomposition/nmf.py | 15 | 19103 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
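# For illustration: norm(np.array([3.0, 4.0])) == 5.0. squared_norm computes
# np.dot(x, x) on the raveled array, which the linked post reports to be faster
# than going through np.linalg.norm.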
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
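# Worked example: for x = [1, 0, 0, 0], sqrt_n = 2 and both the L1 and L2 norms
# are 1, so _sparseness(x) = (2 - 1) / (2 - 1) = 1 (maximally sparse); for a
# constant vector such as [1, 1, 1, 1] the measure is 0.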
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
n_components : array, [n_components, n_features]
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
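# Illustrative sketch (not in the original module): a minimal example of calling
# _initialize_nmf directly on a hypothetical non-negative matrix to inspect the
# NNDSVD starting point for W and H.
def _nndsvd_init_example():
    """Illustrative only: return (W, H) for a small random non-negative X."""
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 4))
    # variant='ar' fills exact zeros with small random values, which helps avoid
    # stalled updates in the subsequent optimization.
    W, H = _initialize_nmf(X, n_components=2, variant='ar', random_state=0)
    assert W.shape == (6, 2) and H.shape == (2, 4)
    return W, H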
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
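# Illustrative sketch (not in the original module): solving a tiny non-negative
# least squares subproblem min ||WH - V||_2 over H >= 0 with the projected
# gradient solver above; shapes and tolerances are arbitrary.
def _nls_subproblem_example():
    """Illustrative only: recover a small non-negative H from V = dot(W, H_true)."""
    rng = np.random.RandomState(0)
    W = np.abs(rng.randn(5, 3))
    H_true = np.abs(rng.randn(3, 4))
    V = np.dot(W, H_true)
    H0 = np.ones((3, 4))
    H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)
    return H, grad, n_iter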
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvdar' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
    def fit_transform(self, X, y=None):
        """Learn an NMF model for the data X and return the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
    def fit(self, X, y=None, **params):
        """Learn an NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause | 1,742,061,618,097,098,200 | 6,544,974,695,453,297,000 | 31.488095 | 87 | 0.55295 | false |
toinbis/369old | src/web369/conf/base.py | 1 | 2325 | from pkg_resources import resource_filename
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'web369',
'USER': 'root',
'PASSWORD': '',
}
}
TIME_ZONE = 'Europe/Vilnius'
LANGUAGE_CODE = 'lt'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
STATIC_URL = '/static/'
STATIC_ROOT = resource_filename('web369', '../../var/htdocs/static')
STATICFILES_DIRS = (
resource_filename('web369', 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = resource_filename('web369', '../../var/htdocs/media')
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
SECRET_KEY = 'SBX*YTL!cANetM&uFTf6R5Je(@PX3!rtgo)kgwNT'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'web369.urls.default'
TEMPLATE_DIRS = (
resource_filename('web369', 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
# 'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 'south',
'web369',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/tmp/django_cache',
'TIMEOUT': 60,
'OPTIONS': {
'MAX_ENTRIES': 1000
}
}
}
# Word count will be updated when new documents are scraped:
LIVE_WORD_COUNT = True
| bsd-3-clause | -9,112,336,378,150,907,000 | -1,226,696,044,276,672,800 | 23.734043 | 73 | 0.667097 | false |
ZhangXinNan/tensorflow | tensorflow/python/ops/parallel_for/pfor.py | 2 | 101653 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compiled parallel-for loop."""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import flags
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
flags.DEFINE_bool(
"op_conversion_fallback_to_while_loop", False,
"If true, falls back to using a while loop for ops for "
"which a converter is not defined.")
def _stack(t, length):
"""stacks `t` `length` times."""
ones = array_ops.ones_like(array_ops.shape(t))
multiples = array_ops.concat([length, ones], 0)
t = array_ops.tile(array_ops.expand_dims(t, 0), multiples)
return wrap(t, True)
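# For illustration: with t of shape [2] and length == loop_len_vector == [3],
# _stack tiles t along a new leading dimension to shape [3, 2] and marks the
# result as stacked, i.e. the loop-invariant value is replicated per iteration.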
# The following stateful ops can be safely called once, and with the same
# signature as the unconverted version, if their inputs are loop invariant.
# TODO(agarwal): implement a strategy for converting Variable reads/writes. The
# plan is to map each read/write in the loop_fn to a corresponding merged
# read/write in the converted graph. Writes need to be mergeable (e.g.
# AssignAdd) to be used in `pfor`. Given a certain read/write order in the
# loop_fn, doing a one-to-one conversion will simulate executing such
# instructions in lock-step across all iterations.
passthrough_stateful_ops = set([
"VariableV2",
"VarHandleOp",
"ReadVariableOp",
"StackV2",
"TensorArrayWriteV3",
"TensorArrayReadV3",
"TensorArraySizeV3",
])
def _is_stateful_pfor_op(op):
if isinstance(op, WhileOp):
return op.is_stateful
if op.type == "Const":
    # Const doesn't have an op_def.
return False
if op.type in passthrough_stateful_ops:
return False
assert hasattr(op, "op_def") and op.op_def is not None, op
return op.op_def.is_stateful
# pylint: disable=protected-access
class WhileOp(object):
"""Object for storing state for converting the outputs of a while_loop."""
def __init__(self, exit_node, pfor_ops):
"""Initializer.
Args:
exit_node: A tensor output from the while_loop.
pfor_ops: list of ops inside the current pfor loop.
"""
self._pfor_ops = set(pfor_ops)
self._pfor_op_ids = set([x._id for x in pfor_ops])
assert isinstance(exit_node, ops.Tensor)
self._while_context = exit_node.op._get_control_flow_context()
assert isinstance(self._while_context, control_flow_ops.WhileContext)
self._context_name = self._while_context.name
self._condition = self._while_context.pivot.op.inputs[0]
# Parts of an external while_loop could be created inside a pfor loop.
# However for the purpose here, we declare such loops to be external. Also
# note that we check if the condition was created inside or outside to
# determine if the while_loop was first created inside or outside.
# TODO(agarwal): check that the Enter and Exit of this loop are unstacked.
self._is_inside_loop = self.op_is_inside_loop(self._condition.op)
if self._is_inside_loop:
for e in self._while_context.loop_exits:
assert self.op_is_inside_loop(e.op)
# Note the code below tries to reverse engineer an existing while_loop graph
# by assuming the following pattern of nodes.
#
# NextIteration <---- Body <--- Enter
# | ^
# V ___| Y
# Enter -> Merge -> Switch___
# ^ | N
# | V
# LoopCond Exit
    # Note that elements in the lists below correspond one-to-one with each
    # other, i.e. these lists are the same size, and the i_th entry corresponds
# to different Operations/Tensors of a single cycle as illustrated above.
# List of Switch ops (ops.Operation) that feed into an Exit Node.
self._exit_switches = []
# List of inputs (ops.Tensor) to NextIteration.
self._body_outputs = []
# List of list of control inputs of the NextIteration nodes.
self._next_iter_control_inputs = []
# List of Merge ops (ops.Operation).
self._enter_merges = []
# List of output (ops.Tensor) of Exit nodes.
self._outputs = []
# List of Enter Tensors.
# There are two types of Enter nodes:
# - The Enter nodes that are used in the `loop_vars` argument to
# `while_loop` (see
# https://www.tensorflow.org/api_docs/python/tf/while_loop). We collect
# these Enter nodes immediately below by tracing backwards from the Exit
# nodes via Exit <- Switch <- Merge <- Enter. You can see this chain in the
# diagram above. This allows us to have a 1:1 correspondence between the
# self._outputs and the first elements in self._enters.
# - The Enter nodes that are used only by the body. They don't appear in the
# `loop_vars` and are not returned from the `while_loop`. In Python code,
# they are usually captured by the body lambda. We collect them below by
# iterating over all the ops in the graph. They are appended to the end of
# self._enters or self._direct_enters, and don't correspond to any outputs
# in self._outputs. Note that we keep the resource/variant Enter nodes in
# self._direct_enters and the constructed while_loop's body uses them
# directly as opposed to passing them as loop variables. This is done
# because the while_body cannot partition the resource/variant Tensors, so
# it has to leave them unchanged.
self._enters = []
self._direct_enters = []
for e in self._while_context.loop_exits:
self._outputs.append(e.op.outputs[0])
switch = e.op.inputs[0].op
assert switch.type == "Switch", switch
self._exit_switches.append(switch)
merge = switch.inputs[0].op
assert merge.type == "Merge", merge
self._enter_merges.append(merge)
enter = merge.inputs[0].op
assert enter.type == "Enter", enter
self._enters.append(enter.outputs[0])
next_iter = merge.inputs[1].op
assert next_iter.type == "NextIteration", next_iter
self._body_outputs.append(next_iter.inputs[0])
self._next_iter_control_inputs.append(next_iter.control_inputs)
# Collect all the Enter nodes that are not part of `loop_vars`, the second
# category described above.
# Also track whether the loop body has any stateful ops.
self._is_stateful = False
for op in ops.get_default_graph().get_operations():
# TODO(agarwal): make sure this works with nested case.
control_flow_context = op._get_control_flow_context()
if control_flow_context is None:
continue
if control_flow_context.name == self._context_name:
self._is_stateful |= _is_stateful_pfor_op(op)
if op.type == "Enter":
output = op.outputs[0]
if output not in self._enters:
if output.dtype in (dtypes.resource, dtypes.variant):
if output not in self._direct_enters:
self._direct_enters.append(output)
else:
self._enters.append(output)
def __str__(self):
"""String representation."""
return "while_loop(%s)" % self.name
@property
def inputs(self):
"""Input to all the Enter nodes."""
return [x.op.inputs[0] for x in self._enters + self._direct_enters]
@property
def control_inputs(self):
"""Control input to all the Enter nodes."""
control_inputs = []
for x in self._enters + self._direct_enters:
control_inputs.extend(x.op.control_inputs)
return control_inputs
@property
def outputs(self):
"""Outputs of all the Exit nodes."""
return self._outputs
@property
def name(self):
"""Context name for the while loop."""
return self._context_name
@property
def is_inside_loop(self):
"""Returns true if the while_loop was created inside the pfor."""
return self._is_inside_loop
def op_is_inside_loop(self, op):
"""True if op was created inside the pfor loop body."""
assert isinstance(op, ops.Operation)
# Note that we use self._pfor_op_ids for the check and not self._pfor_ops
# since it appears there tensorflow API could return different python
# objects representing the same Operation node.
return op._id in self._pfor_op_ids
@property
def is_stateful(self):
return self._is_stateful
@property
def pfor_converter(self):
"""Return a converter for the while loop."""
return self
def _init_pfor(self, parent_pfor, indices, cond_stacked, inputs,
inputs_stacked):
"""Create a PFor object for converting parts of the while_loop.
Args:
parent_pfor: PFor object being used for converting the while_loop.
indices: int32 Tensor of ids for the iterations that are still active
(i.e. did not exit the while_loop).
cond_stacked: True if the while_loop condition is stacked.
inputs: list of input Tensors corresponding 1-to-1 with self._enters. Note
that these Tensors are a subset of the loop variables for the generated
while_loop.
inputs_stacked: List of booleans corresponding 1-to-1 with `inputs`,
indicating if the value is stacked or not.
Returns:
A PFor instance. The instance is initialized by adding conversion mappings
of nodes that will be external to the conversion that the returned
instance will be used for. e.g. Enter nodes as well as Merge and Switch
outputs are mapped to converted values.
"""
num_outputs = len(self._outputs)
assert len(inputs) == len(self._enters)
assert len(inputs_stacked) == len(self._enters)
loop_var = parent_pfor.loop_var
loop_len = array_ops.size(indices)
pfor = PFor(
loop_var,
loop_len,
pfor_ops=self._pfor_ops,
all_indices=indices,
all_indices_partitioned=cond_stacked)
# Map all inputs of Enter nodes in self._direct_enters to their converted
# values.
for enter in self._direct_enters:
enter_input = enter.op.inputs[0]
converted_enter, stacked, is_sparse_stacked = parent_pfor._convert_helper(
enter_input)
# Since these are resources / variants, they should be unstacked.
assert not stacked and not is_sparse_stacked, (enter, converted_enter)
pfor._add_conversion(enter, wrap(converted_enter, False))
# Map all Enter nodes to the inputs.
for enter, inp, stacked in zip(self._enters, inputs, inputs_stacked):
pfor._add_conversion(enter, wrap(inp, stacked))
# Map outputs of Switch and Merge.
for i in range(num_outputs):
wrapped_inp = wrap(inputs[i], inputs_stacked[i])
merge = self._enter_merges[i]
pfor._add_conversion(merge.outputs[0], wrapped_inp)
# Note that second output of Merge is typically not used, except possibly
# as a control dependency. To avoid trying to output the correct value, we
# employ a hack here. We output a dummy invalid value with an incorrect
# dtype. This will allow control dependency to work but if using it as an
# input, it should typically lead to errors during graph construction due
# to dtype mismatch.
# TODO(agarwal): Check in the original graph to see if there are any
# consumers of this Tensor that use it as an input.
pfor._add_conversion(merge.outputs[1],
wrap(constant_op.constant(-1.0), False))
switch = self._exit_switches[i]
# Don't need to worry about switch.output[0] which will feed to Exit node.
pfor._add_conversion(switch.outputs[1], wrapped_inp)
return pfor
def _convert_enter(self, parent_pfor, enter):
"""Converts an Enter node."""
inp, stacked, _ = parent_pfor._convert_helper(enter.op.inputs[0])
control_inputs = [
parent_pfor._convert_helper(x).t for x in enter.op.control_inputs
]
if control_inputs:
with ops.control_dependencies(control_inputs):
inp = array_ops.identity(inp)
return inp, stacked
  def _maybe_stacked(self, cache, inp):
    """Heuristic to figure out if converting `inp` leads to a stacked value.
Args:
cache: map from Tensor to boolean indicating stacked/unstacked.
inp: input Tensor.
Returns:
True if `inp` could get stacked. If the function returns False, the
converted value should be guaranteed to be unstacked. If returning True,
it may or may not be stacked.
"""
if inp in cache:
return cache[inp]
if not self.op_is_inside_loop(inp.op):
return False
op = inp.op
output = False
    if op.type in [
        "Shape",
        "Rank",
        "ShapeN",
"ZerosLike",
"TensorArrayV3",
"TensorArraySizeV3",
]:
output = False
elif _is_stateful_pfor_op(op):
# This may be fairly aggressive.
output = True
elif op.type == "Exit":
# This may be fairly aggressive.
output = True
else:
for t in op.inputs:
if self._maybe_stacked(cache, t):
output = True
break
cache[inp] = output
return output
def _create_init_values(self, pfor_input):
"""Create arguments passed to converted while_loop."""
with ops.name_scope("while_init"):
loop_len_vector = pfor_input.pfor.loop_len_vector
loop_len = loop_len_vector[0]
num_outputs = len(self._outputs)
inputs = []
maybe_stacked_cache = {}
# Convert all the Enters. Need to do this before checking for stacking
# below.
for i, enter in enumerate(self._enters):
inp, stacked = self._convert_enter(pfor_input.pfor, enter)
inputs.append(inp)
maybe_stacked_cache[enter] = stacked
# Since this enter node is part of the `loop_vars`, it corresponds to an
        # output and its preceding switch. We mark this switch's output with the
        # same stackedness, to act as the base case for the logic below. Below, we will
# be going through the body figuring out which inputs might need to be
# stacked and which inputs can safely remain unstacked.
if i < num_outputs:
maybe_stacked_cache[self._exit_switches[i].outputs[1]] = stacked
# Shape invariants for init_values corresponding to self._enters.
input_shape_invariants = []
# TensorArrays for outputs of converted while loop
output_tas = []
# Shape invariants for output TensorArrays.
ta_shape_invariants = []
# List of booleans indicating stackness of inputs, i.e. tensors
# corresponding to self._enters.
inputs_stacked = []
for i, inp in enumerate(inputs):
enter = self._enters[i]
inp_stacked = self._maybe_stacked(maybe_stacked_cache, enter)
# Note that even when an input is unstacked, the body could make it
        # stacked. We use a heuristic below to figure out if the body may be making
# it stacked.
if i < num_outputs:
body_output = self._body_outputs[i]
if enter.op in self._pfor_ops:
body_output_stacked = self._maybe_stacked(maybe_stacked_cache,
body_output)
else:
# If constructed outside of pfor loop, then the output would not be
# stacked.
body_output_stacked = False
if body_output_stacked and not inp_stacked:
inp = _stack(inp, loop_len_vector).t
inputs[i] = inp
inp_stacked = True
# TODO(agarwal): other attributes for the TensorArray ?
output_tas.append(tensor_array_ops.TensorArray(inp.dtype, loop_len))
ta_shape_invariants.append(tensor_shape.TensorShape(None))
inputs_stacked.append(inp_stacked)
input_shape_invariants.append(tensor_shape.TensorShape(None))
# See documentation for __call__ for the structure of init_values.
init_values = [True, pfor_input.pfor.all_indices] + inputs + output_tas
# TODO(agarwal): try stricter shape invariants
shape_invariants = (
[tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None)
] + input_shape_invariants + ta_shape_invariants)
return init_values, inputs_stacked, shape_invariants
def _process_cond_unstacked(self, conditions, indices, inputs, output_tas):
"""Handles case when condition is unstacked.
Note that all iterations end together. So we don't need to partition the
inputs. When all iterations are done, we write the inputs to the
TensorArrays. Note that we only write to index 0 of output_tas. Since all
iterations end together, they can all be output together.
"""
not_all_done = array_ops.reshape(conditions, [])
new_output_tas = []
# pylint: disable=cell-var-from-loop
for i, out_ta in enumerate(output_tas):
inp = inputs[i]
new_output_tas.append(
control_flow_ops.cond(not_all_done,
lambda: out_ta,
lambda: out_ta.write(0, inp)))
# pylint: enable=cell-var-from-loop
return not_all_done, indices, inputs, new_output_tas
def _process_cond_stacked(self, conditions, indices, inputs, inputs_stacked,
output_tas):
num_outputs = len(self._outputs)
# Compute if all iterations are done.
not_all_done = math_ops.reduce_any(conditions)
conditions_int = math_ops.cast(conditions, dtypes.int32)
# Partition the indices.
done_indices, new_indices = data_flow_ops.dynamic_partition(
indices, conditions_int, 2)
new_inputs = []
new_output_tas = []
for i, (inp, stacked) in enumerate(zip(inputs, inputs_stacked)):
# Partition the inputs.
if stacked:
done_inp, new_inp = data_flow_ops.dynamic_partition(
inp, conditions_int, 2)
else:
# TODO(agarwal): avoid this stacking. See TODO earlier in
# _process_cond_unstacked.
done_inp = _stack(inp, [array_ops.size(done_indices)]).t
new_inp = inp
new_inputs.append(new_inp)
# For iterations that are done, write them to TensorArrays.
if i < num_outputs:
out_ta = output_tas[i]
# Note that done_indices can be empty. done_inp should also be empty in
# that case.
new_output_tas.append(out_ta.scatter(done_indices, done_inp))
return not_all_done, new_indices, new_inputs, new_output_tas
def _process_body(self, pfor_input, inputs_stacked,
new_indices, cond_stacked, new_inputs,
not_all_done):
"""Convert the body function."""
def true_fn(control_inputs, body_pfor, body_output, stacked):
"""Converts the body function for all but last iteration.
This essentially converts body_output. Additionally, it needs to handle
any control dependencies on the NextIteration node. So it creates another
Identity node with the converted dependencies.
"""
converted_control_inp = []
for x in control_inputs:
for t in x.outputs:
converted_control_inp.append(body_pfor._convert_helper(t).t)
if stacked:
# Note convert always does the stacking.
output = body_pfor.convert(body_output)
else:
output, convert_stacked, _ = body_pfor._convert_helper(body_output)
assert convert_stacked == stacked, body_output
with ops.control_dependencies(converted_control_inp):
return array_ops.identity(output)
body_pfor = self._init_pfor(pfor_input.pfor, new_indices,
cond_stacked, new_inputs,
inputs_stacked)
new_outputs = []
for i, (body_output, stacked) in enumerate(
zip(self._body_outputs, inputs_stacked)):
control_inp = self._next_iter_control_inputs[i]
out_dtype = body_output.dtype
# Note that we want to run the body only if not all pfor iterations are
# done. If all are done, we return empty tensors since these values will
# not be used. Notice that the value returned by the loop is based on
# TensorArrays and not directly on these returned values.
# pylint: disable=cell-var-from-loop
new_output = control_flow_ops.cond(
not_all_done,
lambda: true_fn(control_inp, body_pfor, body_output, stacked),
lambda: constant_op.constant([], dtype=out_dtype))
# pylint: enable=cell-var-from-loop
new_outputs.append(new_output)
return new_outputs
def __call__(self, pfor_input):
"""Converter for the while_loop.
The conversion of a while_loop is another while_loop.
The arguments to this converted while_loop are as follows:
not_all_done: Boolean scalar Tensor indicating if all the pfor iterations
are done.
indices: int32 1-D Tensor storing the id of the iterations that are not
done.
args: Remaining arguments. These can be divided into 3 categories:
- First set of arguments are the tensors that correspond to the initial
elements of self._enters. The elements that appear in original while
loop's `loop_vars`.
- The second set of arguments are the tensors that correspond to the
remaining elements of self._enters. These are the tensors that directly
enter the original while loop body.
- Finally, the last set of arguments are TensorArrays. These TensorArrays
correspond to the outputs of the original while_loop, i.e. to the
elements in self._outputs. Each TensorArray has `PFor.loop_len`
elements, i.e. the number of pfor iterations. At the end, the i'th
element of each TensorArray will contain the output computed by the
i'th iteration of pfor. Note that elements can be written into these
tensors arrays in any order, depending on when the corresponding pfor
iteration is done.
If the original while_loop had `k` tensors in its `loop_vars` and its body
directly captured `m` tensors, the `args` will contain `2 * k + m` values.
In each iteration, the while_loop body recomputes the condition for all
active pfor iterations to see which of them are now done. It then partitions
all the inputs and passes them along to the converted body. Values for all
the iterations that are done are written to TensorArrays indexed by the pfor
iteration number. When all iterations are done, the TensorArrays are stacked
to get the final value.
Args:
pfor_input: A PForInput object corresponding to the output of any Exit
node from this while loop.
Returns:
List of converted outputs.
"""
# Create init_values that will be passed to the while_loop.
init_values, inputs_stacked, shape_invariants = self._create_init_values(
pfor_input)
# Note that we use a list as a hack since we need the nested function body
# to set the value of cond_is_stacked. python2.x doesn't support nonlocal
# variables.
cond_is_stacked = [None]
def cond(not_all_done, *_):
return not_all_done
def body(not_all_done, indices, *args):
      # See documentation for __call__ for the structure of *args.
num_enters = len(self._enters)
inputs = args[:num_enters]
output_tas = args[num_enters:]
# TODO(agarwal): see which outputs have consumers and only populate the
# TensorArrays corresponding to those. Or do those paths get trimmed out
# from inside the while_loop body?
assert len(inputs) >= len(output_tas)
assert len(inputs) == len(inputs_stacked)
# Convert condition
with ops.name_scope("while_cond"):
# Note that we set cond_stacked to True here. At this point we don't
# know if it could be loop invariant, hence the conservative value is
# to assume stacked.
cond_pfor = self._init_pfor(pfor_input.pfor, indices,
cond_stacked=True,
inputs=inputs,
inputs_stacked=inputs_stacked)
conditions, cond_stacked, _ = cond_pfor._convert_helper(self._condition)
cond_is_stacked[0] = cond_stacked
# Recompute the new condition, write outputs of done iterations, and
# partition the inputs if needed.
if not cond_stacked:
(not_all_done, new_indices,
new_inputs, new_output_tas) = self._process_cond_unstacked(
conditions, indices, inputs, output_tas)
else:
(not_all_done, new_indices,
new_inputs, new_output_tas) = self._process_cond_stacked(
conditions, indices, inputs, inputs_stacked, output_tas)
# Convert body
with ops.name_scope("while_body"):
# Compute the outputs from the body.
new_outputs = self._process_body(pfor_input, inputs_stacked,
new_indices, cond_stacked, new_inputs,
not_all_done)
# Note that the first num_outputs new values of inputs are computed using
# the body. Rest of them were direct Enters into the condition/body and
# the partitioning done earlier is sufficient to give the new value.
num_outputs = len(self._outputs)
new_args = ([not_all_done, new_indices] + new_outputs + list(
new_inputs[num_outputs:]) + new_output_tas)
return tuple(new_args)
while_outputs = control_flow_ops.while_loop(
cond, body, init_values, shape_invariants=shape_invariants)
output_tas = while_outputs[-len(self._outputs):]
outputs = []
assert cond_is_stacked[0] is not None
for inp_stacked, ta in zip(inputs_stacked, output_tas):
if cond_is_stacked[0]:
outputs.append(wrap(ta.stack(), True))
else:
# Note that if while_loop condition is unstacked, all iterations exit at
# the same time and we wrote those outputs in index 0 of the tensor
# array.
outputs.append(wrap(ta.read(0), inp_stacked))
return outputs
class _PforInput(object):
"""Input object passed to registered pfor converters."""
def __init__(self, pfor, op, inputs):
"""Creates a _PforInput object.
Args:
pfor: PFor converter object.
op: the Operation object that is being converted.
inputs: list of WrappedTensor objects representing converted values of the
inputs of `op`.
"""
self.pfor = pfor
self._op = op
self._inputs = inputs
def stack_inputs(self, stack_indices=None):
"""Stacks unstacked inputs at `stack_indices`.
Args:
stack_indices: indices of inputs at which stacking is done. If None,
stacking is done at all indices.
"""
if stack_indices is None:
stack_indices = range(len(self._inputs))
length = self.pfor.loop_len_vector
for i in stack_indices:
inp = self._inputs[i]
if not inp.is_stacked:
self._inputs[i] = _stack(inp.t, length)
def expanddim_inputs_for_broadcast(self):
"""Reshapes stacked inputs to prepare them for broadcast.
Since stacked inputs have an extra leading dimension, automatic broadcasting
rules could incorrectly try to expand dimensions before that leading
dimension. To avoid that, we reshape these stacked inputs to the maximum
rank they will need to be broadcasted to.
"""
if not self._inputs:
return
# Find max rank
def _get_rank(x):
rank = array_ops.rank(x.t)
if not x.is_stacked:
rank += 1
return rank
ranks = [_get_rank(x) for x in self._inputs]
max_rank = ranks[0]
for rank in ranks[1:]:
max_rank = math_ops.maximum(rank, max_rank)
for i, inp in enumerate(self._inputs):
if inp.is_stacked:
shape = array_ops.shape(inp.t)
rank_diff = array_ops.reshape(max_rank - ranks[i], [1])
ones = array_ops.tile([1], rank_diff)
new_shape = array_ops.concat([shape[:1], ones, shape[1:]], axis=0)
self._inputs[i] = wrap(array_ops.reshape(inp.t, new_shape), True)
@property
def inputs(self):
return self._inputs
@property
def num_inputs(self):
return len(self._inputs)
def input(self, index):
assert len(self._inputs) > index, (index, self._inputs)
return self._inputs[index]
def stacked_input(self, index):
t, is_stacked, _ = self.input(index)
if not is_stacked:
op_type = self.op_type
op_def = getattr(self._op, "op_def", None)
if op_def is None:
input_name = "at index %d" % index
else:
input_name = "\"%s\"" % op_def.input_arg[index].name
      raise ValueError("Input %s of op \"%s\" expected to be not loop invariant"
                       ".\nError while converting op %s "
"with converted inputs\n%s" % (input_name, op_type,
self._op, self.inputs))
return t
def unstacked_input(self, index):
t, is_stacked, _ = self.input(index)
if is_stacked:
op_type = self.op_type
op_def = getattr(self._op, "op_def", None)
if op_def is None:
input_name = "at index %d" % index
else:
input_name = "\"%s\"" % op_def.input_arg[index].name
      raise ValueError("Input %s of op \"%s\" expected to be loop invariant"
                       ".\nError while converting op %s "
"with converted inputs\n%s" % (input_name, op_type,
self._op, self.inputs))
return t
@property
def op(self):
return self._op
@property
def op_type(self):
return self._op.type
def get_attr(self, attr):
return self._op.get_attr(attr)
@property
def outputs(self):
return self._op.outputs
def output(self, index):
assert index < len(self._op.outputs)
return self._op.outputs[index]
_pfor_converter_registry = {}
class RegisterPFor(object):
"""Utility to register converters for pfor.
Usage:
@RegisterPFor(foo_op_type)
def _foo_converter(pfor_input):
...
The above will register conversion function `_foo_converter` for handling
  conversion of `foo_op_type`. During conversion, the registered function will be
  called with a single argument of type `_PforInput` which will contain state
needed for the conversion. This registered function should output a list of
WrappedTensor object with the same length as the number of outputs of op being
converted. If the op had zero outputs, then it should return a ops.Operation
object.
"""
def __init__(self, op_type):
"""Creates an object to register a converter for op with type `op_type`."""
self.op_type = op_type
def __call__(self, converter):
name = self.op_type
assert name not in _pfor_converter_registry, "Re-registering %s " % name
_pfor_converter_registry[name] = converter
return converter
class RegisterPForWithArgs(RegisterPFor):
"""Utility to register converters for pfor.
Usage:
  @RegisterPForWithArgs(foo_op_type, foo=value, ....)
def _foo_converter(pfor_input, foo=None, ....):
...
See RegisterPFor for details on the conversion function.
`RegisterPForWithArgs` allows binding extra arguments to the
conversion function at registration time.
"""
def __init__(self, op_type, *args, **kw_args):
super(RegisterPForWithArgs, self).__init__(op_type)
self._args = args
self._kw_args = kw_args
def __call__(self, converter):
def _f(pfor_input):
return converter(pfor_input, self.op_type, *self._args, **self._kw_args)
super(RegisterPForWithArgs, self).__call__(_f)
return converter
def _create_op(op_type, inputs, op_dtypes, attrs=None):
"""Utility to create an op."""
return ops.get_default_graph().create_op(
op_type, inputs, op_dtypes, attrs=attrs, compute_device=True)
WrappedTensor = collections.namedtuple("WrappedTensor",
["t", "is_stacked", "is_sparse_stacked"])
"""Wrapper around the result of a Tensor conversion.
The additional fields are useful for keeping track of the conversion state as
data flows through the ops in the loop body. For every op whose output is a
Tensor, its converter should return either a WrappedTensor or a list of
WrappedTensors.
Args:
t: The converted tensor
is_stacked: True if the tensor is stacked, i.e. represents the results of all
the iterations of the loop, where each row i of the tensor corresponds to
that op's output on iteration i of the loop. False if the tensor is not
    stacked, i.e. represents the result of the op for a single iteration of
the loop, where the result does not vary between iterations.
is_sparse_stacked: True if the tensor corresponds to a component tensor
(indices, values, or dense_shape) of a sparse tensor, and has been logically
stacked via a sparse conversion.
"""
def wrap(tensor, is_stacked=True, is_sparse_stacked=False):
"""Helper to create a WrappedTensor object."""
assert isinstance(is_stacked, bool)
assert isinstance(is_sparse_stacked, bool)
assert isinstance(tensor, ops.Tensor)
assert not is_sparse_stacked or is_stacked, ("If the wrapped tensor is "
"stacked via a sparse "
"conversion, it must also be "
"stacked.")
return WrappedTensor(tensor, is_stacked, is_sparse_stacked)
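# Illustrative sketch of the wrapping convention (hypothetical converter body,
# not part of the registry): a converter for an elementwise op could return
#   [wrap(math_ops.negative(pfor_input.stacked_input(0)), True)]
# to mark its single output as varying across iterations, while a converter
# whose output is loop invariant would return wrap(output, False).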
def _fallback_converter(pfor_input):
logging.warn("Using a while_loop for converting %s", pfor_input.op_type)
output_dtypes = [x.dtype for x in pfor_input.outputs]
iters = pfor_input.pfor.loop_len_vector[0]
def while_body(i, *ta_list):
"""Body of while loop."""
inputs = [
x[i, ...] if stacked else x for x, stacked, _ in pfor_input.inputs
]
op_outputs = _create_op(
pfor_input.op_type,
inputs,
output_dtypes,
attrs=pfor_input.op.node_def.attr).outputs
outputs = []
for out, ta in zip(op_outputs, ta_list):
assert isinstance(out, ops.Tensor)
outputs.append(ta.write(i, array_ops.expand_dims(out, 0)))
return tuple([i + 1] + outputs)
ta_list = control_flow_ops.while_loop(
lambda i, *ta: i < iters, while_body, [0] + [
tensor_array_ops.TensorArray(dtype, iters) for dtype in output_dtypes
])[1:]
return tuple([wrap(ta.concat(), True) for ta in ta_list])
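# Note on the fallback above (a rough sketch of its behavior): for an op with a
# single stacked input of shape [N, ...], the while_loop runs the original
# kernel N times on slices input[i, ...], writes each expanded result to a
# TensorArray, and ta.concat() reassembles a stacked [N, ...] output. This
# trades vectorization for generality.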
class PFor(object):
"""Implementation of rewrite of parallel-for loops.
This class takes a DAG or a set of DAGs representing the body of a
parallel-for loop, and adds new operations to the graph that implements
functionality equivalent to running that loop body for a specified number of
iterations. This new set of nodes may or may not use a tensorflow loop
construct.
The process of conversion does not delete or change any existing operations.
It only adds operations that efficiently implement the equivalent
functionality. We refer to the added ops as "converted ops".
The conversion process uses a simple greedy heuristic. It walks the loop body
and tries to express the functionality of running each node in a loop with a
new set of nodes. When converting an op several cases are possible:
- The op is not inside the loop body. Hence it can be used as is.
- The op does not depend on the iteration number and is stateless. In this
case, it can be used as is.
- The op is not stateful, and depends on iteration number only through control
dependencies. In this case, we can create a single op with same inputs and
attributes, but with "converted" control dependencies.
- The op is not stateful, and all its inputs are loop invariant. In this
case, similar to above, we can create a single op with same inputs and
attributes, but with "converted" control dependencies.
- The op is stateful or at least one of the inputs is not loop invariant. In
this case, we run the registered converter for that op to create a set of
converted ops. All nodes in the set will have converted control dependencies
corresponding to control dependencies of the original op. If the op returned
multiple outputs, "converted outputs" could be produced by different ops in
this set.
"""
def __init__(self,
loop_var,
loop_len,
pfor_ops,
all_indices=None,
all_indices_partitioned=False):
"""Creates an object to rewrite a parallel-for loop.
Args:
loop_var: ops.Tensor output of a Placeholder operation. The value should
be an int32 scalar representing the loop iteration number.
loop_len: A scalar or scalar Tensor representing the number of iterations
the loop is run for.
pfor_ops: List of all ops inside the loop body.
all_indices: If not None, an int32 vector with size `loop_len`
representing the iteration ids that are still active. These values
should be unique and sorted. However they may not be contiguous. This is
typically the case when inside a control flow construct which has
partitioned the indices of the iterations that are being converted.
all_indices_partitioned: If True, this object is being constructed from a
control flow construct where not all the pfor iterations are guaranteed
to be active.
"""
assert isinstance(loop_var, ops.Tensor)
assert loop_var.op.type == "Placeholder"
self._loop_var = loop_var
loop_len_value = tensor_util.constant_value(loop_len)
if loop_len_value is not None:
loop_len = loop_len_value
self._loop_len_vector = array_ops.reshape(loop_len, [1])
self._all_indices_partitioned = all_indices_partitioned
if all_indices_partitioned:
assert all_indices is not None
self.all_indices = (
math_ops.range(loop_len) if all_indices is None else all_indices)
self._conversion_map = {}
self._conversion_map[loop_var] = wrap(self.all_indices, True)
self._pfor_ops = set(pfor_ops)
self._pfor_op_ids = set([x._id for x in pfor_ops])
def op_is_inside_loop(self, op):
"""True if op was created inside the pfor loop body."""
assert isinstance(op, ops.Operation)
# Note that we use self._pfor_op_ids for the check and not self._pfor_ops
    # since it appears the tensorflow API could return different python
# objects representing the same Operation node.
return op._id in self._pfor_op_ids
def _convert_sparse(self, y):
"""Returns the converted value corresponding to SparseTensor y.
For SparseTensors, instead of stacking the component tensors separately,
resulting in component tensors with shapes (N, m, rank), (N, m), and (N,
rank) respectively for indices, values, and dense_shape (where N is the loop
length and m is the number of sparse tensor values per loop iter), we want
to logically stack the SparseTensors, to create a SparseTensor whose
components are size (N * m, rank + 1), (N * m, ), and (rank + 1,)
respectively.
Here, we try to get the conversion of each component tensor.
If the tensors are stacked via a sparse conversion, return the resulting
SparseTensor composed of the converted components. Otherwise, the component
tensors are either unstacked or stacked naively. In the latter case, we
unstack the component tensors to reform loop_len SparseTensor elements,
then correctly batch them.
The unstacked tensors must have the same rank. Each dimension of each
SparseTensor will expand to be the largest among all SparseTensor elements
for that dimension. For example, if there are N SparseTensors of rank 3
being stacked, with N dense shapes, where the i_th shape is (x_i, y_i, z_i),
the new dense shape will be (N, max_i(x_i), max_i(y_i), max_i(z_i)).
Args:
y: A tf.SparseTensor.
Returns:
A tf.SparseTensor that is the converted value corresponding to y.
"""
outputs = [
self._convert_helper(t) for t in (y.indices, y.values, y.dense_shape)
]
assert all(isinstance(o, WrappedTensor) for o in outputs)
if all(w.is_sparse_stacked for w in outputs):
return sparse_tensor.SparseTensor(*[w.t for w in outputs])
assert not any(w.is_sparse_stacked for w in outputs), (
"Error converting SparseTensor. All components should be logically "
"stacked, or none.")
# If component tensors were not sparsely stacked, they are either unstacked
# or stacked without knowledge that they are components of sparse tensors.
# In this case, we have to restack them.
return self._restack_sparse_tensor_logically(
*[self._unwrap_or_tile(w) for w in outputs])
def _restack_sparse_tensor_logically(self, indices, values, shape):
sparse_tensor_rank = indices.get_shape()[-1].value
if sparse_tensor_rank is not None:
sparse_tensor_rank += 1
def map_fn(args):
res = gen_sparse_ops.serialize_sparse(
args[0], args[1], args[2], out_type=dtypes.variant)
return res
# Applies a map function to the component tensors to serialize each
# sparse tensor element and batch them all, then deserializes the batch.
# TODO(rachelim): Try to do this without map_fn -- add the right offsets
# to shape and indices tensors instead.
result = functional_ops.map_fn(
map_fn, [indices, values, shape], dtype=dtypes.variant)
return sparse_ops.deserialize_sparse(
result, dtype=values.dtype, rank=sparse_tensor_rank)
def _unwrap_or_tile(self, wrapped_tensor):
"""Given a wrapped tensor, unwrap if stacked. Otherwise, tiles it."""
output, is_stacked = wrapped_tensor.t, wrapped_tensor.is_stacked
if is_stacked:
return output
else:
return _stack(output, self._loop_len_vector).t
def convert(self, y):
"""Returns the converted value corresponding to y.
Args:
      y: An ops.Tensor or an ops.Operation object. If the latter, y should not
        have any outputs.
Returns:
If y does not need to be converted, it returns y as is. Else it returns
the "converted value" corresponding to y.
"""
if isinstance(y, sparse_tensor.SparseTensor):
return self._convert_sparse(y)
output = self._convert_helper(y)
if isinstance(output, WrappedTensor):
assert isinstance(y, ops.Tensor)
return self._unwrap_or_tile(output)
else:
assert isinstance(y, ops.Operation)
assert not y.outputs
assert isinstance(output, ops.Operation)
return output
def _was_converted(self, t):
"""True if t is not a conversion of itself."""
converted_t = self._conversion_map[t]
return converted_t.t is not t
def _add_conversion(self, old_output, new_output):
self._conversion_map[old_output] = new_output
def _convert_helper(self, op_or_tensor):
stack = [op_or_tensor]
while stack:
y = stack[0]
if y in self._conversion_map:
assert isinstance(self._conversion_map[y],
(WrappedTensor, ops.Operation))
stack.pop(0)
continue
if isinstance(y, ops.Operation):
assert not y.outputs, (
"We only support converting Operation objects with no outputs. "
"Got %s", y)
y_op = y
else:
assert isinstance(y, ops.Tensor), y
y_op = y.op
is_while_loop = y_op.type == "Exit"
if is_while_loop:
while_op = WhileOp(y, pfor_ops=self._pfor_ops)
is_inside_loop = while_op.is_inside_loop
# If all nodes in the while_loop graph were created inside the pfor, we
# treat the whole loop subgraph as a single op (y_op) and try to convert
# it. For while_loops that are created completely or partially outside,
# we treat them as external and should be able to simply return the Exit
# node output as is without needing any conversion. Note that for
# while_loops that are partially constructed inside, we assume they will
# be loop invariant. If that is not the case, it will create runtime
# errors since the converted graph would depend on the self._loop_var
# placeholder.
if is_inside_loop:
y_op = while_op
else:
is_inside_loop = self.op_is_inside_loop(y_op)
# If this op was not created inside the loop body, we will return as is.
# 1. Convert inputs and control inputs.
def _add_to_stack(x):
if x not in self._conversion_map:
stack.insert(0, x)
return True
else:
return False
if is_inside_loop:
added_to_stack = False
for inp in y_op.inputs:
added_to_stack |= _add_to_stack(inp)
for cinp in y_op.control_inputs:
if cinp.outputs:
for t in cinp.outputs:
added_to_stack |= _add_to_stack(t)
else:
added_to_stack |= _add_to_stack(cinp)
if added_to_stack:
continue
converted_inputs = [self._conversion_map[inp] for inp in y_op.inputs]
some_input_converted = any(
[self._was_converted(x) for x in y_op.inputs])
some_input_stacked = any([x.is_stacked for x in converted_inputs])
converted_control_ops = set()
some_control_input_converted = False
for cinp in y_op.control_inputs:
if cinp.outputs:
for t in cinp.outputs:
converted_t = self._conversion_map[t]
if self._was_converted(t):
some_control_input_converted = True
converted_control_ops.add(converted_t.t.op)
else:
converted_cinp = self._conversion_map[cinp]
assert isinstance(converted_cinp, ops.Operation)
if converted_cinp != cinp:
some_control_input_converted = True
converted_control_ops.add(converted_cinp)
converted_control_ops = list(converted_control_ops)
is_stateful = _is_stateful_pfor_op(y_op)
else:
converted_inputs = []
converted_control_ops = []
logging.vlog(3, "converting op:%s\ninputs:%s\ncontrol_inputs:%s", y_op,
converted_inputs, converted_control_ops)
# 2. Convert y_op
      # If converting a while_loop, we let the while_loop converter deal with
# putting the control dependencies appropriately.
control_dependencies = [] if is_while_loop else converted_control_ops
with ops.control_dependencies(control_dependencies), ops.name_scope(
y_op.name + "/pfor/"):
# None of the inputs and control inputs were converted.
if (not is_inside_loop or
(not is_stateful and not some_input_converted and
not some_control_input_converted)):
if y == y_op:
assert not isinstance(y_op, WhileOp)
new_outputs = y_op
else:
new_outputs = [wrap(x, False) for x in y_op.outputs]
elif not (is_stateful or is_while_loop or some_input_stacked):
          # All inputs are unstacked or unconverted but some control inputs are
# converted.
# TODO(rachelim): Handle the case where some inputs are sparsely
# stacked (i.e. any([x.is_sparse_stacked for x in converted_inputs]))
new_op = _create_op(y_op.type, [x.t for x in converted_inputs],
[x.dtype for x in y_op.outputs],
y_op.node_def.attr)
if y == y_op:
new_outputs = new_op
else:
new_outputs = [wrap(x, False) for x in new_op.outputs]
else:
# Either some inputs are not loop invariant or op is stateful.
if hasattr(y_op, "pfor_converter"):
converter = y_op.pfor_converter
else:
converter = _pfor_converter_registry.get(y_op.type, None)
if converter is None:
if flags.FLAGS.op_conversion_fallback_to_while_loop:
converter = _fallback_converter
else:
raise ValueError(
"No converter defined for %s\n%s\ninputs: %s. "
"\nEither add a converter or set "
"--op_conversion_fallback_to_while_loop=True, "
"which may run slower" % (y_op.type, y_op, converted_inputs))
# TODO(rachelim): Handle the case where some inputs are sparsely
# stacked. We should only call the converter if it supports handling
# those inputs.
new_outputs = converter(_PforInput(self, y_op, converted_inputs))
if isinstance(new_outputs, WrappedTensor):
new_outputs = [new_outputs]
assert isinstance(new_outputs,
(list, tuple, ops.Operation)), new_outputs
logging.vlog(2, "converted %s %s", y_op, new_outputs)
# Insert into self._conversion_map
if y == y_op:
assert isinstance(new_outputs, ops.Operation)
self._add_conversion(y_op, new_outputs)
else:
for old_output, new_output in zip(y_op.outputs, new_outputs):
assert isinstance(new_output, WrappedTensor), (new_output, y, y_op)
self._add_conversion(old_output, new_output)
stack.pop(0)
return self._conversion_map[op_or_tensor]
@property
def loop_len_vector(self):
"""Returns a single element vector whose value is number of iterations."""
return self._loop_len_vector
@property
def loop_var(self):
"""Returns placeholder loop variable."""
return self._loop_var
@property
def pfor_ops(self):
return self._pfor_ops
@property
def all_indices_partitioned(self):
"""all_indices_partitioned property.
Returns:
True if we are inside a control flow construct and not all pfor iterations
may be active.
"""
return self._all_indices_partitioned
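# Illustrative usage sketch (assuming `loop_body_output` is a Tensor computed
# from `loop_var` by the ops collected in `pfor_ops`); this mirrors how the
# higher level pfor entry point drives the conversion:
#   converter = PFor(loop_var=loop_var, loop_len=8, pfor_ops=pfor_ops)
#   vectorized = converter.convert(loop_body_output)
# `vectorized` then has an extra leading dimension of size 8 holding the
# per-iteration results.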
# nn_ops
def _flatten_first_two_dims(x):
"""Merges first two dimensions."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([[-1], old_shape[2:]], axis=0)
return array_ops.reshape(x, new_shape)
def _unflatten_first_dim(x, first_dim):
"""Splits first dimension into [first_dim, -1]."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([first_dim, [-1], old_shape[1:]], axis=0)
return array_ops.reshape(x, new_shape)
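# Shape sketch for the two helpers above: given a stacked tensor of shape
# [N, B, H, W, C], _flatten_first_two_dims yields [N * B, H, W, C] so the
# underlying kernel sees one large batch, and
# _unflatten_first_dim(result, [N]) splits the leading dimension back into
# [N, B, ...].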
def _inputs_with_flattening(pfor_input, input_indices):
"""Stacks and flattens first dim of inputs at indices `input_indices`."""
if input_indices is None:
input_indices = []
pfor_input.stack_inputs(stack_indices=input_indices)
inputs = []
for i in range(pfor_input.num_inputs):
if i in input_indices:
inp = pfor_input.stacked_input(i)
inp = _flatten_first_two_dims(inp)
else:
inp = pfor_input.unstacked_input(i)
inputs.append(inp)
return inputs
@RegisterPForWithArgs("Conv2D", dims=[0])
@RegisterPForWithArgs("AvgPool", dims=[0])
@RegisterPForWithArgs("MaxPool", dims=[0])
@RegisterPForWithArgs("MaxPoolGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("SoftmaxCrossEntropyWithLogits", dims=[0, 1])
def _convert_flatten_batch(pfor_input, op_type, dims):
del op_type
inputs = _inputs_with_flattening(pfor_input, dims)
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs]
return [wrap(x, True) for x in outputs]
_channel_flatten_input_cache = {}
def _channel_flatten_input(x, data_format):
"""Merge the stack dimension with the channel dimension.
If S is pfor's stacking dimension, then,
- for SNCHW, we transpose to NSCHW. If N dimension has size 1, the transpose
should be cheap.
- for SNHWC, we transpose to NHWCS.
We then merge the S and C dimension.
Args:
x: ops.Tensor to transform.
data_format: "NCHW" or "NHWC".
Returns:
    A 3-element tuple of the transformed value, the transpose order and the
    reshape shape needed to transform back to the original layout.
"""
graph = ops.get_default_graph()
cache_key = (graph, x, data_format)
if cache_key not in _channel_flatten_input_cache:
x_shape = array_ops.shape(x)
if data_format == b"NCHW":
order = [1, 0, 2, 3, 4]
shape = array_ops.concat([x_shape[1:2], [-1], x_shape[3:]], axis=0)
reverse_order = order
else:
order = [1, 2, 3, 0, 4]
shape = array_ops.concat([x_shape[1:4], [-1]], axis=0)
reverse_order = [3, 0, 1, 2, 4]
# Move S dimension next to C dimension.
x = array_ops.transpose(x, order)
reverse_shape = array_ops.shape(x)
# Reshape to merge the S and C dimension.
x = array_ops.reshape(x, shape)
outputs = x, reverse_order, reverse_shape
_channel_flatten_input_cache[cache_key] = outputs
else:
outputs = _channel_flatten_input_cache[cache_key]
return outputs
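# Rough shape trace for _channel_flatten_input with a stacked NHWC input of
# shape [S, N, H, W, C]:
#   transpose with order [1, 2, 3, 0, 4]  ->  [N, H, W, S, C]
#   reshape with shape   [N, H, W, -1]    ->  [N, H, W, S * C]
# so per-iteration channels stay distinct inside the merged channel dimension;
# reverse_shape and reverse_order undo the reshape and transpose afterwards.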
# Note that with training=True, running FusedBatchNorm on individual examples
# is very different from running FusedBatchNorm on a batch of those examples.
# This is because, for the latter case, the operation can be considered as first
# computing the mean and variance over all the examples and then using these
# to scale all those examples. This creates a data dependency between these
# different "iterations" since the inputs to the scaling step depends on the
# statistics coming from all these inputs.
# As with other kernels, the conversion here effectively runs the kernel
# independently for each iteration, and returns outputs by stacking outputs from
# each of those iterations.
@RegisterPFor("FusedBatchNorm")
def _convert_fused_batch_norm(pfor_input):
is_training = pfor_input.get_attr("is_training")
# When BatchNorm is used with training=False, mean and variance are provided
# externally and used as is by the op. Thus, we can merge the S and N
# dimensions as we do for regular operations.
# When BatchNorm is used with training=True, mean and variance are computed
# for each channel across the batch dimension (first one). If we merge S and N
# dimensions, mean and variances will be computed over a larger set. So, we
# merge the S and C dimensions instead.
if not is_training:
# We return zeros for batch_mean and batch_variance output. Note that CPU
# and GPU seem to have different behavior for those two outputs. CPU outputs
# zero because these values are not used during inference. GPU outputs
# something, probably real means and variances.
inputs = _inputs_with_flattening(pfor_input, [0])
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
y = outputs[0]
n = pfor_input.pfor.loop_len_vector
y = _unflatten_first_dim(y, n)
mean = pfor_input.unstacked_input(3)
zeros = array_ops.zeros_like(mean)
return [wrap(y, True), wrap(zeros, False), wrap(zeros, False)]
pfor_input.stack_inputs()
data_format = pfor_input.get_attr("data_format")
# We merge the first dimension with the "C" dimension, run FusedBatchNorm, and
# then transpose back.
x = pfor_input.stacked_input(0)
x, reverse_order, reverse_shape = _channel_flatten_input(x, data_format)
# Note that we stack all the other inputs as well so that they are the same
# size as the new size of the channel dimension.
inputs = [x] + [
array_ops.reshape(pfor_input.stacked_input(i), [-1])
for i in range(1, pfor_input.num_inputs)
]
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
y = outputs[0]
y = array_ops.reshape(y, reverse_shape)
y = array_ops.transpose(y, reverse_order)
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
outputs = [y] + outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("FusedBatchNormGrad")
def _convert_fused_batch_norm_grad(pfor_input):
pfor_input.stack_inputs()
data_format = pfor_input.get_attr("data_format")
y_backprop = pfor_input.stacked_input(0)
y_backprop, _, _ = _channel_flatten_input(y_backprop, data_format)
x = pfor_input.stacked_input(1)
x, x_reverse_order, x_reverse_shape = _channel_flatten_input(x, data_format)
inputs = [y_backprop, x] + [
array_ops.reshape(pfor_input.stacked_input(i), [-1])
for i in range(2, pfor_input.num_inputs)
]
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
x_backprop = outputs[0]
x_backprop = array_ops.reshape(x_backprop, x_reverse_shape)
x_backprop = array_ops.transpose(x_backprop, x_reverse_order)
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
outputs = [x_backprop] + outputs
return [wrap(output, True) for output in outputs]
@RegisterPForWithArgs("Conv2DBackpropInput", flatten_dims=[2], shape_dim=0)
@RegisterPForWithArgs("AvgPoolGrad", flatten_dims=[1], shape_dim=0)
def _convert_flatten_batch_shape_input(pfor_input, op_type, flatten_dims,
shape_dim):
del op_type
inputs = _inputs_with_flattening(pfor_input, flatten_dims)
n = pfor_input.pfor.loop_len_vector
# Adjust the `input_sizes` input.
ones = array_ops.ones(
[array_ops.shape(inputs[shape_dim])[0] - 1], dtype=n.dtype)
inputs[shape_dim] *= array_ops.concat([n, ones], axis=0)
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
outputs = [_unflatten_first_dim(x, n) for x in outputs]
return [wrap(x, True) for x in outputs]
@RegisterPFor("Conv2DBackpropFilter")
def _convert_conv2d_backprop_filter(pfor_input):
pfor_input.stack_inputs(stack_indices=[2])
inputs, inputs_stacked, _ = pfor_input.input(0)
filter_sizes = pfor_input.unstacked_input(1)
grads = pfor_input.stacked_input(2)
strides = pfor_input.get_attr("strides")
padding = pfor_input.get_attr("padding")
use_cudnn_on_gpu = pfor_input.get_attr("use_cudnn_on_gpu")
data_format = pfor_input.get_attr("data_format")
dilations = pfor_input.get_attr("dilations")
if inputs_stacked:
# TODO(agarwal): Implement this efficiently.
logging.warn("Conv2DBackpropFilter uses a while_loop. Fix that!")
def while_body(i, ta):
inp_i = inputs[i, ...]
grad_i = grads[i, ...]
output = nn_ops.conv2d_backprop_filter(
inp_i,
filter_sizes,
grad_i,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations)
return i + 1, ta.write(i, array_ops.expand_dims(output, 0))
n = array_ops.reshape(pfor_input.pfor.loop_len_vector, [])
_, ta = control_flow_ops.while_loop(
lambda i, ta: i < n, while_body,
(0, tensor_array_ops.TensorArray(inputs.dtype, n)))
output = ta.concat()
return wrap(output, True)
else:
# We merge the stack dimension with the channel dimension of the gradients
# and pretend we had a larger filter (see change to filter_sizes below).
# Once the filter backprop is computed, we reshape and transpose back
# appropriately.
grads, _, _ = _channel_flatten_input(grads, data_format)
n = pfor_input.pfor.loop_len_vector
old_filter_sizes = filter_sizes
filter_sizes *= array_ops.concat([[1, 1, 1], n], axis=0)
output = nn_ops.conv2d_backprop_filter(
inputs,
filter_sizes,
grads,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations)
new_filter_shape = array_ops.concat([old_filter_sizes[:3], n, [-1]], axis=0)
output = array_ops.reshape(output, new_filter_shape)
output = array_ops.transpose(output, [3, 0, 1, 2, 4])
return wrap(output, True)
# array_ops
@RegisterPForWithArgs("Identity", array_ops.identity)
@RegisterPForWithArgs("StopGradient", array_ops.stop_gradient)
def _convert_identity(pfor_input, op_type, op_func):
del op_type
return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)
@RegisterPFor("Reshape")
def _convert_reshape(pfor_input):
t = pfor_input.stacked_input(0)
shape = pfor_input.unstacked_input(1)
new_dim = array_ops.shape(t)[:1]
new_shape = array_ops.concat([new_dim, shape], axis=0)
return wrap(array_ops.reshape(t, new_shape), True)
@RegisterPFor("ExpandDims")
def _convert_expanddims(pfor_input):
t = pfor_input.stacked_input(0)
dim = pfor_input.unstacked_input(1)
dim += math_ops.cast(dim >= 0, dtypes.int32)
return wrap(array_ops.expand_dims(t, axis=dim), True)
@RegisterPFor("Slice")
def _convert_slice(pfor_input):
t = pfor_input.stacked_input(0)
begin = pfor_input.unstacked_input(1)
size = pfor_input.unstacked_input(2)
begin = array_ops.concat([[0], begin], axis=0)
size = array_ops.concat([[-1], size], axis=0)
return wrap(array_ops.slice(t, begin, size), True)
@RegisterPFor("Tile")
def _convert_tile(pfor_input):
t = pfor_input.stacked_input(0)
multiples = pfor_input.unstacked_input(1)
multiples = array_ops.concat([[1], multiples], 0)
return wrap(array_ops.tile(t, multiples), True)
@RegisterPFor("Pack")
def _convert_pack(pfor_input):
pfor_input.stack_inputs()
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
return wrap(
array_ops.stack([x.t for x in pfor_input.inputs], axis=axis), True)
@RegisterPFor("Unpack")
def _convert_unpack(pfor_input):
value = pfor_input.stacked_input(0)
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
num = pfor_input.get_attr("num")
return [wrap(x, True) for x in array_ops.unstack(value, axis=axis, num=num)]
@RegisterPFor("Pad")
def _convert_pad(pfor_input):
t = pfor_input.stacked_input(0)
paddings = pfor_input.unstacked_input(1)
paddings = array_ops.concat([[[0, 0]], paddings], 0)
return wrap(array_ops.pad(t, paddings, mode="CONSTANT"), True)
@RegisterPFor("Split")
def _convert_split(pfor_input):
split_dim = pfor_input.unstacked_input(0)
t = pfor_input.stacked_input(1)
num_split = pfor_input.get_attr("num_split")
split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
return [wrap(x, True) for x in array_ops.split(t, num_split, axis=split_dim)]
@RegisterPFor("Transpose")
def _convert_transpose(pfor_input):
t = pfor_input.stacked_input(0)
perm = pfor_input.unstacked_input(1)
new_perm = array_ops.concat([[0], perm + 1], axis=0)
return wrap(array_ops.transpose(t, new_perm), True)
@RegisterPFor("ZerosLike")
def _convert_zeroslike(pfor_input):
t = pfor_input.stacked_input(0)
shape = array_ops.shape(t)[1:]
return wrap(array_ops.zeros(shape, dtype=t.dtype), False)
@RegisterPFor("Gather")
@RegisterPFor("GatherV2")
def _convert_gather(pfor_input):
param, param_stacked, _ = pfor_input.input(0)
indices, indices_stacked, _ = pfor_input.input(1)
op_type = pfor_input.op_type
if op_type == "Gather":
validate_indices = pfor_input.get_attr("validate_indices")
axis = 0
else:
validate_indices = None
axis = pfor_input.unstacked_input(2)
axis_value = tensor_util.constant_value(axis)
if axis_value is not None:
axis = axis_value
if indices_stacked and not param_stacked:
if indices == pfor_input.pfor.all_indices and axis == 0:
param_shape0 = param.shape[0].value
indices_shape0 = indices.shape[0].value
if param_shape0 is not None and indices_shape0 == param_shape0:
# Note that with loops and conditionals, indices may not be contiguous.
# However they will be sorted and unique. So if the shape matches, then
# it must be picking up all the rows of param.
return wrap(param, True)
# TODO(agarwal): use array_ops.slice here.
output = array_ops.gather(
param, indices, validate_indices=validate_indices, axis=axis)
if axis != 0:
axis = control_flow_ops.cond(
axis < 0, lambda: axis + array_ops.rank(param), lambda: axis)
order = array_ops.concat(
[[axis],
math_ops.range(axis),
math_ops.range(axis + 1, array_ops.rank(output))],
axis=0)
output = control_flow_ops.cond(
math_ops.equal(axis, 0), lambda: output,
lambda: array_ops.transpose(output, order))
return wrap(output, True)
if param_stacked:
loop_len_vector = pfor_input.pfor.loop_len_vector
pfor_input.stack_inputs(stack_indices=[1])
indices = pfor_input.stacked_input(1)
param_flat = _flatten_first_two_dims(param)
# Recompute indices to handle stacked param.
indices_offset = math_ops.range(
loop_len_vector[0]) * array_ops.shape(param)[1]
# Reshape indices_offset to allow broadcast addition
ones = array_ops.ones([array_ops.rank(indices) - 1], dtype=dtypes.int32)
new_shape = array_ops.concat([loop_len_vector, ones], axis=0)
indices_offset = array_ops.reshape(indices_offset, new_shape)
indices += indices_offset
# TODO(agarwal): handle axis != 0. May need to transpose param or
# array_ops.gather_nd.
if isinstance(axis, ops.Tensor):
axis_value = tensor_util.constant_value(axis)
else:
try:
axis_value = int(axis)
except TypeError:
axis_value = None
msg = ("Gather, where indices and param are both loop dependent, currently "
"requires axis=0")
if axis_value is not None and axis_value != 0:
raise ValueError("Error while converting %s. %s. Got axis=%d" %
(pfor_input.op, msg, axis))
with ops.control_dependencies(
[check_ops.assert_equal(axis, 0, message=msg)]):
output = array_ops.gather(param_flat, indices)
return wrap(output, True)
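# Sketch of the stacked-param indexing above: with param of shape [N, M, ...]
# flattened to [N * M, ...], iteration i's index j is remapped to i * M + j via
# indices_offset, so a single gather over the flattened param picks the correct
# row for every iteration.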
@RegisterPFor("ConcatV2")
def _convert_concatv2(pfor_input):
n = pfor_input.num_inputs
pfor_input.stack_inputs(stack_indices=range(n - 1))
axis = pfor_input.unstacked_input(n - 1)
axis += math_ops.cast(axis >= 0, axis.dtype)
return wrap(
array_ops.concat([x.t for x in pfor_input.inputs[:n - 1]], axis=axis),
True)
@RegisterPFor("StridedSlice")
def _convert_strided_slice(pfor_input):
inp = pfor_input.stacked_input(0)
begin = pfor_input.unstacked_input(1)
end = pfor_input.unstacked_input(2)
strides = pfor_input.unstacked_input(3)
begin_mask = pfor_input.get_attr("begin_mask")
end_mask = pfor_input.get_attr("end_mask")
ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
new_axis_mask = pfor_input.get_attr("new_axis_mask")
shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
begin = array_ops.concat([[0], begin], axis=0)
end = array_ops.concat([[0], end], axis=0)
strides = array_ops.concat([[1], strides], axis=0)
begin_mask = begin_mask << 1 | 1
end_mask = end_mask << 1 | 1
ellipsis_mask <<= 1
new_axis_mask <<= 1
shrink_axis_mask <<= 1
return wrap(
array_ops.strided_slice(
inp,
begin,
end,
strides,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask), True)
@RegisterPFor("StridedSliceGrad")
def _convert_strided_slice_grad(pfor_input):
shape = pfor_input.unstacked_input(0)
begin = pfor_input.unstacked_input(1)
end = pfor_input.unstacked_input(2)
strides = pfor_input.unstacked_input(3)
dy = pfor_input.stacked_input(4)
begin_mask = pfor_input.get_attr("begin_mask")
end_mask = pfor_input.get_attr("end_mask")
ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
new_axis_mask = pfor_input.get_attr("new_axis_mask")
shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
begin = array_ops.concat([[0], begin], axis=0)
end = array_ops.concat([[0], end], axis=0)
strides = array_ops.concat([[1], strides], axis=0)
begin_mask = begin_mask << 1 | 1
end_mask = end_mask << 1 | 1
ellipsis_mask <<= 1
new_axis_mask <<= 1
shrink_axis_mask <<= 1
return wrap(
array_ops.strided_slice_grad(
shape,
begin,
end,
strides,
dy,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask), True)
# math_ops
@RegisterPFor("MatMul")
def _convert_matmul(pfor_input):
# TODO(agarwal): Check if tiling is faster than two transposes.
a, a_stacked, _ = pfor_input.input(0)
b, b_stacked, _ = pfor_input.input(1)
tr_a = pfor_input.get_attr("transpose_a")
tr_b = pfor_input.get_attr("transpose_b")
if a_stacked and b_stacked:
output = wrap(math_ops.matmul(a, b, adjoint_a=tr_a, adjoint_b=tr_b), True)
return output
elif a_stacked:
if tr_a:
a = array_ops.transpose(a, [0, 2, 1])
if a.shape.is_fully_defined():
x, y, z = a.shape
else:
x, y, z = [
array_ops.reshape(i, [])
for i in array_ops.split(array_ops.shape(a), 3)
]
a = array_ops.reshape(a, [x * y, z])
prod = math_ops.matmul(a, b, transpose_b=tr_b)
return wrap(array_ops.reshape(prod, [x, y, -1]), True)
else:
assert b_stacked
if tr_b:
perm = [2, 0, 1]
b = array_ops.transpose(b, perm)
else:
# As an optimization, if one of the first two dimensions is 1, then we can
# reshape instead of transpose.
# TODO(agarwal): This check can be done inside Transpose kernel.
b_shape = array_ops.shape(b)
min_dim = math_ops.minimum(b_shape[0], b_shape[1])
perm = control_flow_ops.cond(
math_ops.equal(min_dim, 1), lambda: [0, 1, 2], lambda: [1, 0, 2])
new_shape = array_ops.stack([b_shape[1], b_shape[0], b_shape[2]])
b = array_ops.transpose(b, perm)
b = array_ops.reshape(b, new_shape)
if b.shape.is_fully_defined():
x, y, z = b.shape
else:
x, y, z = [
array_ops.reshape(i, [])
for i in array_ops.split(array_ops.shape(b), 3)
]
b = array_ops.reshape(b, [x, y * z])
prod = math_ops.matmul(a, b, transpose_a=tr_a)
prod = array_ops.reshape(prod, [-1, y, z])
prod = array_ops.transpose(prod, [1, 0, 2])
return wrap(prod, True)
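# Shape sketch for the a_stacked branch above: a of shape [x, y, z] (x being
# the loop dimension) is reshaped to [x * y, z], multiplied with the unstacked
# b to get [x * y, w], and reshaped back to [x, y, w].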
@RegisterPFor("BatchMatMul")
def _convert_batch_mat_mul(pfor_input):
# TODO(agarwal): There may be a more efficient way to do this instead of
# stacking the inputs.
pfor_input.stack_inputs()
x = pfor_input.stacked_input(0)
y = pfor_input.stacked_input(1)
adj_x = pfor_input.get_attr("adj_x")
adj_y = pfor_input.get_attr("adj_y")
x = _flatten_first_two_dims(x)
y = _flatten_first_two_dims(y)
output = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
output = _unflatten_first_dim(output, pfor_input.pfor.loop_len_vector)
return wrap(output, True)
@RegisterPForWithArgs("Sum", math_ops.reduce_sum)
@RegisterPForWithArgs("Prod", math_ops.reduce_prod)
@RegisterPForWithArgs("Max", math_ops.reduce_max)
@RegisterPForWithArgs("Min", math_ops.reduce_min)
def _convert_reduction(pfor_input, _, op_func):
t = pfor_input.stacked_input(0)
indices = pfor_input.unstacked_input(1)
# Shift positive indices by one to account for the extra dimension.
indices += math_ops.cast(indices >= 0, dtypes.int32)
keep_dims = pfor_input.get_attr("keep_dims")
return wrap(op_func(t, indices, keepdims=keep_dims), True)
@RegisterPForWithArgs("Cumsum", math_ops.cumsum)
@RegisterPForWithArgs("Cumprod", math_ops.cumprod)
def _convert_cumfoo(pfor_input, _, op_func):
t = pfor_input.stacked_input(0)
axis = pfor_input.unstacked_input(1)
# Shift positive indices by one to account for the extra dimension.
axis += math_ops.cast(axis >= 0, dtypes.int32)
exclusive = pfor_input.get_attr("exclusive")
reverse = pfor_input.get_attr("reverse")
return wrap(op_func(t, axis, exclusive=exclusive, reverse=reverse), True)
@RegisterPFor("BiasAdd")
def _convert_biasadd(pfor_input):
t = pfor_input.stacked_input(0)
bias = pfor_input.unstacked_input(1)
data_format = pfor_input.get_attr("data_format")
if data_format != b"NCHW":
return wrap(nn_ops.bias_add(t, bias, data_format=data_format), True)
shape = array_ops.shape(t)
flattened_shape = array_ops.concat([[-1], shape[2:]], axis=0)
t = array_ops.reshape(t, flattened_shape)
t = nn_ops.bias_add(t, bias, data_format=b"NCHW")
t = array_ops.reshape(t, shape)
return wrap(t, True)
@RegisterPFor("UnsortedSegmentSum")
def _convert_unsortedsegmentsum(pfor_input):
data, data_stacked, _ = pfor_input.input(0)
# TODO(agarwal): handle unstacked?
segment_ids = pfor_input.stacked_input(1)
# TODO(agarwal): handle stacked?
num_segments = pfor_input.unstacked_input(2)
if not data_stacked:
data = _stack(data, pfor_input.pfor.loop_len_vector).t
segment_shape = array_ops.shape(segment_ids)
n = segment_shape[0]
ones = array_ops.ones_like(segment_shape)[1:]
segment_offset = num_segments * math_ops.range(n)
segment_offset = array_ops.reshape(segment_offset,
array_ops.concat([[n], ones], axis=0))
segment_ids += segment_offset
num_segments *= n
output = math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
new_output_shape = array_ops.concat(
[[n, -1], array_ops.shape(output)[1:]], axis=0)
output = array_ops.reshape(output, new_output_shape)
return wrap(output, True)
@RegisterPFor("Cast")
def _convert_cast(pfor_input):
inp = pfor_input.stacked_input(0)
dtype = pfor_input.get_attr("DstT")
return wrap(math_ops.cast(inp, dtype), True)
# Note that ops handled here do not have attributes except "T", and hence don't
# need extra arguments passed to the cwise_op call below.
@RegisterPForWithArgs("Add", math_ops.add)
@RegisterPForWithArgs("Ceil", math_ops.ceil)
@RegisterPForWithArgs("Equal", math_ops.equal)
@RegisterPForWithArgs("NotEqual", math_ops.not_equal)
@RegisterPForWithArgs("Floor", math_ops.floor)
@RegisterPForWithArgs("Greater", math_ops.greater)
@RegisterPForWithArgs("GreaterEqual", math_ops.greater_equal)
@RegisterPForWithArgs("Less", math_ops.less)
@RegisterPForWithArgs("LessEqual", math_ops.less_equal)
@RegisterPForWithArgs("LogicalOr", math_ops.logical_or)
@RegisterPForWithArgs("LogicalAnd", math_ops.logical_and)
@RegisterPForWithArgs("LogicalNot", math_ops.logical_not)
@RegisterPForWithArgs("LogicalXor", math_ops.logical_xor)
@RegisterPForWithArgs("Maximum", math_ops.maximum)
@RegisterPForWithArgs("Minimum", math_ops.minimum)
@RegisterPForWithArgs("Mul", math_ops.multiply)
@RegisterPForWithArgs("Neg", math_ops.negative)
@RegisterPForWithArgs("RealDiv", math_ops.divide)
@RegisterPForWithArgs("Relu", nn_ops.relu)
@RegisterPForWithArgs("Sigmoid", math_ops.sigmoid)
@RegisterPForWithArgs("Square", math_ops.square)
@RegisterPForWithArgs("Sub", math_ops.subtract)
@RegisterPForWithArgs("Tanh", math_ops.tanh)
def _convert_cwise(pfor_input, op_type, op_func):
del op_type
pfor_input.expanddim_inputs_for_broadcast()
return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)
@RegisterPFor("Shape")
def _convert_shape(pfor_input):
out_type = pfor_input.get_attr("out_type")
return wrap(
array_ops.shape(pfor_input.stacked_input(0), out_type=out_type)[1:],
False)
@RegisterPFor("ShapeN")
def _convert_shape_n(pfor_input):
out_type = pfor_input.get_attr("out_type")
shapes = [
array_ops.shape(x, out_type=out_type)[1:]
if stacked else array_ops.shape(x) for x, stacked, _ in pfor_input.inputs
]
return [wrap(x, False) for x in shapes]
@RegisterPFor("Size")
def _convert_size(pfor_input):
out_type = pfor_input.get_attr("out_type")
n = math_ops.cast(pfor_input.pfor.loop_len_vector[0], out_type)
return wrap(
array_ops.size(pfor_input.stacked_input(0), out_type=out_type) // n,
False)
@RegisterPFor("Rank")
def _convert_rank(pfor_input):
return wrap(array_ops.rank(pfor_input.stacked_input(0)) - 1, False)
@RegisterPFor("AddN")
def _convert_addn(pfor_input):
# AddN does not support broadcasting.
pfor_input.stack_inputs()
return wrap(math_ops.add_n([x.t for x in pfor_input.inputs]), True)
@RegisterPFor("BiasAddGrad")
def _convert_biasaddgrad(pfor_input):
grad = pfor_input.stacked_input(0)
fmt = pfor_input.get_attr("data_format")
if fmt == b"NCHW":
output = math_ops.reduce_sum(grad, axis=[1, 3, 4], keepdims=False)
else:
grad_shape = array_ops.shape(grad)
last_dim_shape = grad_shape[-1]
first_dim_shape = grad_shape[0]
output = array_ops.reshape(grad, [first_dim_shape, -1, last_dim_shape])
output = math_ops.reduce_sum(output, axis=[1], keepdims=False)
return wrap(output, True)
# Some required ops are not exposed under the tf namespace. Hence relying on
# _create_op to create them.
@RegisterPForWithArgs("ReluGrad")
@RegisterPForWithArgs("TanhGrad")
@RegisterPForWithArgs("SigmoidGrad")
def _convert_grads(pfor_input, op_type, *args, **kw_args):
del args
del kw_args
# TODO(agarwal): Looks like these ops don't support broadcasting. Hence we
# have to use tiling here.
pfor_input.stack_inputs()
outputs = _create_op(
op_type, [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("Select")
def _convert_select(pfor_input):
pfor_input.stack_inputs()
cond = pfor_input.stacked_input(0)
t = pfor_input.stacked_input(1)
e = pfor_input.stacked_input(2)
cond_rank = array_ops.rank(cond)
cond, t, e = control_flow_ops.cond(
cond_rank > 1, lambda: _inputs_with_flattening(pfor_input, [0, 1, 2]),
lambda: [cond, t, e])
outputs = _create_op(
pfor_input.op_type, [cond, t, e], [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
n = pfor_input.pfor.loop_len_vector
out = control_flow_ops.cond(cond_rank > 1,
lambda: _unflatten_first_dim(outputs[0], n),
lambda: outputs[0])
return [wrap(out, True) for x in outputs]
# random_ops
@RegisterPForWithArgs("RandomUniform")
@RegisterPForWithArgs("RandomUniformInt")
@RegisterPForWithArgs("RandomStandardNormal")
@RegisterPForWithArgs("TruncatedNormal")
@RegisterPForWithArgs("RandomGamma")
@RegisterPForWithArgs("RandomPoissonV2")
def _convert_random(pfor_input, op_type, *args, **kw_args):
del args
del kw_args
inputs = [pfor_input.unstacked_input(i) for i in range(pfor_input.num_inputs)]
# inputs[0] is "shape"
inputs[0] = array_ops.concat(
[pfor_input.pfor.loop_len_vector, inputs[0]], axis=0)
logging.warning(
"Note that %s inside pfor op may not give same output as "
"inside a sequential loop.", op_type)
outputs = _create_op(
op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
# logging_ops
@RegisterPFor("Assert")
def _convert_assert(pfor_input):
cond, cond_stacked, _ = pfor_input.input(0)
if cond_stacked:
cond = math_ops.reduce_all(cond)
data_list = [x.t for x in pfor_input.inputs][1:]
return _create_op("Assert", [cond] + data_list, [],
attrs=pfor_input.op.node_def.attr)
@RegisterPFor("Print")
def _convert_print(pfor_input):
# Note that we don't stack all the inputs. Hence unstacked values are printed
# once here vs multiple times in a while_loop.
pfor_input.stack_inputs([0])
outputs = _create_op(
"Print", [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
# data_flow_ops
# TensorArray conversion is tricky since we don't support arrays of
# TensorArrays. For converting them, we consider two distinct cases:
#
# 1. The array is constructed outside the pfor call, and read/written inside the
# loop.
# This is an easier case since we don't need to make an array of TensorArrays.
# A correctness requirement is that these parallel iterations shouldn't attempt
# to write to the same location. Hence at conversion time we disallow indices to
# be loop-invariant as that would guarantee a collision. Even if the indices are
# not loop-invariant, they could conflict and that would trigger runtime errors.
#
# 2. The array is constructed and used entirely inside each pfor iteration.
# For simplicity, here we require that the indices used for write/scatter are
# "unstacked". Otherwise it becomes hard to merge the TensorArrays created in
# different pfor iterations. We consider two sub_cases:
#
# 2a Elements written to the array are "stacked"
# To simulate multiple TensorArrays, we may increase the dimension of each
# element of the array. i.e. the i_th row of the j_th entry of the converted
# TensorArray corresponds to the j_th entry of the TensorArray in the i_th
# pfor iteration.
#
# 2b Elements written to the array are "unstacked"
# In this case we don't increase the dimensions to avoid redundant tiling. Each
# iteration is trying to write the same value. So we convert that to a single
# write.
#
# Here are some tricks used to implement the above:
# - TensorArrayV3 constructor encodes the element shape as an attr. Instead of
# trying to trace whether future writes are stacked or unstacked in order to set
# this attr, we set it to correspond to unknown shape.
# - We use the "flow" output of the different ops to track whether the array
# elements are stacked or unstacked. If a stacked write/scatter is done, we make
# the flow stacked as well.
# - We use some heuristic traversal of the graph to track whether the
# TensorArray handle was created inside or outside the pfor loop.
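# Illustrative sketch of case 2a: if each of N pfor iterations writes a [3]
# vector to entry j of its logical TensorArray, the converted TensorArray
# stores a single stacked [N, 3] tensor at entry j; a later read at index j
# then comes back stacked and flows through downstream converters as such.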
@RegisterPFor("TensorArrayV3")
def _convert_tensor_array_v3(pfor_input):
size = pfor_input.unstacked_input(0)
dtype = pfor_input.get_attr("dtype")
dynamic_size = pfor_input.get_attr("dynamic_size")
clear_after_read = pfor_input.get_attr("clear_after_read")
identical_element_shapes = pfor_input.get_attr("identical_element_shapes")
tensor_array_name = pfor_input.get_attr("tensor_array_name")
handle, flow = data_flow_ops.tensor_array_v3(
size,
dtype=dtype,
# We don't set element shape since we don't know if writes are stacked or
# not yet.
element_shape=None,
dynamic_size=dynamic_size,
clear_after_read=clear_after_read,
identical_element_shapes=identical_element_shapes,
tensor_array_name=tensor_array_name)
# Note we keep flow unstacked for now since we don't know if writes will be
# stacked or not.
return wrap(handle, False), wrap(flow, False)
@RegisterPFor("TensorArraySizeV3")
def _convert_tensor_array_size_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
flow, flow_stacked, _ = pfor_input.input(1)
if flow_stacked:
flow = _unstack_flow(flow)
size = data_flow_ops.tensor_array_size_v3(handle, flow)
return wrap(size, False)
def _handle_inside_pfor(pfor_input, handle):
"""Returns True if handle was created inside the pfor loop."""
# We use some heuristic to find the original TensorArray creation op.
# The logic should handle the common cases (except cond based subgraphs).
# In theory the user could perform different operations on the handle (like
# Reshape, stack multiple handles, etc) which could break this logic.
# TODO(agarwal): handle Switch/Merge.
while handle.op.type in ("Enter", "Identity"):
handle = handle.op.inputs[0]
if handle.op.type not in [
"TensorArrayV3", "TensorArrayGradV3", "TensorArrayGradWithShape"]:
raise ValueError("Unable to find source for handle %s" % handle)
else:
return pfor_input.pfor.op_is_inside_loop(handle.op)
def _unstack_flow(value):
# TODO(agarwal): consider looking if this is a Tile op then get its input.
# This may avoid running the Tile operations.
return array_ops.gather(value, 0)
@RegisterPFor("TensorArrayReadV3")
def _convert_tensor_array_read_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
index, index_stacked, _ = pfor_input.input(1)
dtype = pfor_input.get_attr("dtype")
flow, flow_stacked, _ = pfor_input.input(2)
if flow_stacked:
flow = _unstack_flow(flow)
is_inside_pfor = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside_pfor:
# Note that if we are inside a control flow construct inside the pfor, and
# only some of the iterations are doing the read (i.e.
# `all_indices_partitioned` is True), then the read operation should only
# return values for the currently active pfor iterations (`all_indices`
# below). Hence, whenever the returned value is stacked (i.e. `flow` is
# stacked), we may need to do an extra gather after reading the values. Also
# note that if `is_inside` is false, then values in the tensor array are
# unstacked. So the check is only needed in this branch.
all_indices = pfor_input.pfor.all_indices
all_indices_partitioned = pfor_input.pfor.all_indices_partitioned
# Note: flow_stacked indicates if values in the TensorArray are stacked or
# not.
if index_stacked:
if flow_stacked:
raise ValueError(
"It looks like TensorArrayReadV3 was called on a TensorArray whose"
" values are not loop-invariant, and the read indices were also"
" not loop invariant. This is currently unsupported.")
value = data_flow_ops.tensor_array_gather_v3(
handle, index, flow, dtype=dtype)
return wrap(value, True)
value = data_flow_ops.tensor_array_read_v3(
handle, index, flow, dtype=dtype)
if flow_stacked and all_indices_partitioned:
value = array_ops.gather(value, all_indices)
return wrap(value, flow_stacked)
# Values in the TensorArray should be unstacked (since different iterations
# couldn't write to the same location). So whether output is stacked or not
# depends on index_stacked.
if index_stacked:
value = data_flow_ops.tensor_array_gather_v3(
handle, index, flow, dtype=dtype)
else:
value = data_flow_ops.tensor_array_read_v3(
handle, index, flow, dtype=dtype)
return wrap(value, index_stacked)
@RegisterPFor("TensorArrayWriteV3")
def _convert_tensor_array_write_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
index, index_stacked, _ = pfor_input.input(1)
value, value_stacked, _ = pfor_input.input(2)
flow, flow_stacked, _ = pfor_input.input(3)
if value_stacked and pfor_input.pfor.all_indices_partitioned:
# Looks like we are in a control flow in a pfor where not all iterations are
# active now. We don't allow that since that could lead to different indices
# having different shapes which will be hard to merge later.
raise ValueError("Writing non loop invariant values to TensorArray from "
"inside a while_loop/cond not supported.")
if flow_stacked:
flow = _unstack_flow(flow)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
if index_stacked:
raise ValueError("Need indices for %s to be loop invariant" % handle)
if not flow_stacked and not value_stacked:
flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
return wrap(flow_out, False)
else:
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
# TODO(agarwal): Note that if flow is unstacked and value is stacked, then
# this may or may not be a safe situation. flow is unstacked both for a
# freshly created TensorArray, as well as after unstacked values are
# written to it. If it is the latter, then we cannot write a stacked value
# now since that may cause runtime errors due to different shapes in the
# array. At the moment we are not able to handle this gracefully and
# distinguish between the two cases. That would require some heuristic
# traversal of the graph to figure out whether all the writes are
# unstacked or not.
flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
else:
if not index_stacked:
raise ValueError("Need indices for %s to be not loop invariant" % handle)
# Note that even when index_stacked is true, actual values in index may
# still not be unique. However that will cause runtime error when executing
# the scatter operation below.
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, index, value, flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
def _transpose_first_two_dims(value):
# TODO(agarwal): optimize if one of the dims == 1.
value_shape = array_ops.shape(value)
v0 = value_shape[0]
v1 = value_shape[1]
value = array_ops.reshape(value, [v0, v1, -1])
value = array_ops.transpose(value, [1, 0, 2])
new_shape = array_ops.concat([[v1, v0], value_shape[2:]], axis=0)
return array_ops.reshape(value, new_shape)
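# Shape sketch for the helper above: applied to a tensor of shape
# [K, N, d1, d2], _transpose_first_two_dims returns shape [N, K, d1, d2],
# swapping the first two dimensions and leaving the rest intact.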
@RegisterPFor("TensorArrayGatherV3")
def _convert_tensor_array_gather_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
indices, indices_stacked, _ = pfor_input.input(1)
indices = array_ops.reshape(indices, [-1])
flow, flow_stacked, _ = pfor_input.input(2)
if flow_stacked:
flow = _unstack_flow(flow)
dtype = pfor_input.get_attr("dtype")
# TODO(agarwal): support element_shape attr?
n = pfor_input.pfor.loop_len_vector
value = data_flow_ops.tensor_array_gather_v3(
handle, indices, flow, dtype=dtype)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
# flow_stacked indicates if values in the TensorArray are stacked or not.
if indices_stacked:
if flow_stacked:
raise ValueError(
"It looks like TensorArrayGatherV3 was called on a TensorArray "
"whose values are not loop-invariant, and the indices were also "
"not loop invariant. This is currently unsupported.")
else:
value = _unflatten_first_dim(value, n)
return wrap(value, True)
else:
if flow_stacked:
# Since elements in this array are stacked and `value` was produced by
# gather, its first two dims are "gathered elements" and "stack
# dimension". Our semantics require these two to be flipped.
value = _transpose_first_two_dims(value)
return wrap(value, flow_stacked)
else:
# Values in the TensorArray should be unstacked (since different iterations
# couldn't write to the same location). So whether output is stacked or not
# depends on indices_stacked.
if indices_stacked:
value = _unflatten_first_dim(value, n)
return wrap(value, indices_stacked)
@RegisterPFor("TensorArrayScatterV3")
def _convert_tensor_array_scatter_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
indices, indices_stacked, _ = pfor_input.input(1)
indices = array_ops.reshape(indices, [-1])
value, value_stacked, _ = pfor_input.input(2)
flow, flow_stacked, _ = pfor_input.input(3)
if flow_stacked:
flow = _unstack_flow(flow)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
if indices_stacked:
raise ValueError("Need indices for %s to be loop invariant" % handle)
# Note that flow_stacked indicates if existing values in the array are
# stacked or not.
if not flow_stacked and not value_stacked:
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return wrap(flow_out, False)
if not value_stacked:
# TODO(agarwal): tile in the second dimension directly instead of
# transposing below.
value = _stack(value, pfor_input.pfor.loop_len_vector).t
value = _transpose_first_two_dims(value)
# TODO(agarwal): Note that if a previous write was unstacked, flow will be
# unstacked, and a stacked value may be written here which may cause
# runtime error due to different elements having different shape. We do
# not try to prevent that.
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
if not indices_stacked:
raise ValueError("Need indices for %s to be not loop invariant" % handle)
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
value = _flatten_first_two_dims(value)
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
@RegisterPFor("TensorArrayGradV3")
def _convert_tensor_array_grad_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
flow, flow_stacked, _ = pfor_input.input(1)
if flow_stacked:
flow = _unstack_flow(flow)
source = pfor_input.get_attr("source")
# TODO(agarwal): For now, we assume that gradients are stacked if the
# TensorArrayGradV3 call is being done inside the pfor. Getting that wrong
# will give runtime error due to incorrect shape being written to the
# accumulator. It is difficult to know in advance if gradients written will be
# stacked or not. Note that flow being stacked is not indicative of the
# gradient being stacked or not. Revisit this later.
shape_to_prepend = pfor_input.pfor.loop_len_vector
grad_handle, flow_out = data_flow_ops.tensor_array_grad_with_shape(
handle=handle,
flow_in=flow,
shape_to_prepend=shape_to_prepend,
source=source)
flow_out = _stack(flow_out, pfor_input.pfor.loop_len_vector).t
return [wrap(grad_handle, False), wrap(flow_out, True)]
# StackV2 conversion is tricky since we don't have arrays of StackV2. So similar
# to TensorArrays, we convert them by changing the dimension of the elements
# inside the stack.
#
# We consider two cases:
#
# 1. StackV2 is constructed and used entirely inside the pfor loop.
# We keep a single Stack and perform the push/pop operations of all the
# iterations in lock-step. We also assume that all the iterations perform these
# operations. In case of dynamic control flow, if only some of the iterations
# try to perform a push/pop, then the conversion may not work correctly and may
# cause undefined behavior.
# TODO(agarwal): test StackV2 with dynamic control flow.
#
# 2. StackV2 is constructed outside the pfor loop.
# Performing stack push/pop in a parallel fashion is ill-defined. However given
# that reading stacks created externally is a common operation when computing
# jacobians, we provide some special semantics here as follows.
# - disallow push operations to the stack
# - pop operations are performed in lock step by all iterations, similar to the
# case when the stack is created inside. A single value is popped during the
# lock-step operation and broadcast to all the iterations. Values in the stack
# are assumed to be loop-invariant.
#
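# A minimal sketch of case 2 (illustrative only -- `pfor` below refers to the
# public parallel_for entry point and is not defined in this file):
#
#   s = data_flow_ops.stack_v2(-1, dtypes.float32)  # created outside pfor
#   # ... values are pushed onto `s` outside the loop ...
#   def loop_fn(i):
#     # Pops happen in lock-step; a single value is popped per step and
#     # broadcast to all iterations (stack values are assumed loop-invariant).
#     return data_flow_ops.stack_pop_v2(s, dtypes.float32)
#   out = pfor(loop_fn, 8)  # a stack_push_v2 on `s` inside loop_fn is rejected
#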
# Some other implementation details:
# We use an ugly logic to find whether values in Stack data structure are
# loop invariant or not. When converting push/pop operations, we keep track of
# whether the last conversion used a stacked value or not (see _stack_cache
# below). As a result if an unstacked value is written first, subsequent stacked
# writes are disallowed when they could have been allowed in theory.
# Map from cache key based on StackV2 handle to a bool indicating whether values
# are stacked or not.
# TODO(agarwal): move _stack_cache inside pfor?
_stack_cache = {}
def _stack_cache_key(pfor_input):
"""Create cache key corresponding to a stack handle."""
op_type = pfor_input.op_type
assert op_type in ["StackPushV2", "StackPopV2"], op_type
orig_handle = pfor_input.op.inputs[0]
while orig_handle.op.type in ["Identity", "Enter"]:
orig_handle = orig_handle.op.inputs[0]
assert orig_handle.op.type == "StackV2", orig_handle.op
return ops.get_default_graph(), pfor_input.pfor, orig_handle
def _stack_handle_inside_pfor(handle, pfor_input):
while handle.op.type in ["Identity", "Enter"]:
handle = handle.op.inputs[0]
assert handle.op.type == "StackV2", (
"Unable to find StackV2 op. Got %s" % handle.op)
return pfor_input.pfor.op_is_inside_loop(handle.op)
@RegisterPFor("StackPushV2")
def _convert_stack_push_v2(pfor_input):
handle = pfor_input.unstacked_input(0)
elem, elem_stacked, _ = pfor_input.input(1)
swap_memory = pfor_input.get_attr("swap_memory")
if not _stack_handle_inside_pfor(pfor_input.op.inputs[0], pfor_input):
raise ValueError("StackPushV2 not allowed on stacks created outside pfor")
stack_cache_key = _stack_cache_key(pfor_input)
stacked = _stack_cache.get(stack_cache_key, None)
if stacked is None:
stacked = elem_stacked
_stack_cache[stack_cache_key] = stacked
else:
# If we previously made it unstacked then we can't revert to being stacked.
if not stacked and elem_stacked:
raise ValueError(
"It looks like the stack was previously determined to be loop"
" invariant, but we are now trying to push a loop dependent value"
" to it. This is currently unsupported.")
if stacked and not elem_stacked:
elem = _stack(elem, pfor_input.pfor.loop_len_vector).t
out = data_flow_ops.stack_push_v2(handle, elem, swap_memory=swap_memory)
return wrap(out, stacked)
# Note that inputs to this convertor will be unstacked. However it should get
# called since it is a stateful op.
@RegisterPFor("StackPopV2")
def _convert_stack_pop_v2(pfor_input):
handle = pfor_input.unstacked_input(0)
stack_cache_key = _stack_cache_key(pfor_input)
stacked = _stack_cache.get(stack_cache_key, None)
# If a StackPushV2 has not been converted yet, we default to unstacked since
  # the push could be outside of pfor, or the convertor may not be called if the
# inputs are unconverted.
if stacked is None:
stacked = False
_stack_cache[stack_cache_key] = False
elem_type = pfor_input.get_attr("elem_type")
out = data_flow_ops.stack_pop_v2(handle, elem_type)
return wrap(out, stacked)
# parsing_ops
@RegisterPFor("DecodeCSV")
def _convert_decode_csv(pfor_input):
lines = pfor_input.stacked_input(0)
record_defaults = [
pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
]
field_delim = pfor_input.get_attr("field_delim")
use_quote_delim = pfor_input.get_attr("use_quote_delim")
select_cols = pfor_input.get_attr("select_cols")
if not select_cols:
select_cols = None
return [
wrap(t, True) for t in parsing_ops.decode_csv(
lines,
record_defaults,
field_delim=field_delim,
use_quote_delim=use_quote_delim,
select_cols=select_cols)
]
@RegisterPFor("ParseSingleExample")
def _convert_parse_single_example(pfor_input):
serialized = pfor_input.stacked_input(0)
dense_defaults = [
pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
]
sparse_keys = pfor_input.get_attr("sparse_keys")
dense_keys = pfor_input.get_attr("dense_keys")
sparse_types = pfor_input.get_attr("sparse_types")
dense_shapes = pfor_input.get_attr("dense_shapes")
output = gen_parsing_ops.parse_example(
serialized=serialized,
names=[],
dense_defaults=dense_defaults,
sparse_keys=sparse_keys,
dense_keys=dense_keys,
sparse_types=sparse_types,
dense_shapes=dense_shapes)
return [wrap(t, True, True) for t in nest.flatten(output)]
| apache-2.0 | 74,779,327,371,697,600 | -3,397,382,959,126,586,400 | 38.83268 | 80 | 0.668637 | false |
danviv/trading-with-python | cookbook/reconstructVXX/reconstructVXX.py | 77 | 3574 | # -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
""" vix future class, used to keep data structures simple """
def __init__(self,series,code=None):
""" code is optional, example '2010_01' """
self.series = series.dropna() # price data
self.settleDate = self.series.index[-1]
self.dt = len(self.series) # roll period (this is default, should be recalculated)
self.code = code # string code 'YYYY_MM'
def monthNr(self):
""" get month nr from the future code """
return int(self.code.split('_')[1])
def dr(self,date):
""" days remaining before settlement, on a given date """
return(sum(self.series.index>date))
def price(self,date):
""" price on a date """
return self.series.get_value(date)
def returns(df):
""" daily return """
return (df/df.shift(1)-1)
def reconstructVXX():
"""
calculate VXX returns
needs a previously preprocessed file vix_futures.csv
"""
dataDir = os.path.expanduser('~')+'/twpData'
X = DataFrame.from_csv(dataDir+'/vix_futures.csv') # raw data table
# build end dates list & futures classes
futures = []
codes = X.columns
endDates = []
for code in codes:
f = Future(X[code],code=code)
print code,':', f.settleDate
endDates.append(f.settleDate)
futures.append(f)
endDates = np.array(endDates)
# set roll period of each future
for i in range(1,len(futures)):
futures[i].dt = futures[i].dr(futures[i-1].settleDate)
# Y is the result table
idx = X.index
Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
'ret','30days_avg'])
# W is the weight matrix
W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)
# for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
# page PS-20
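    # Weighting sketch (same notation as the code below): with dr business days
    # remaining in the front contract and dt days in the roll period,
    #   w1 = 100*dr/dt          (front-month weight)
    #   w2 = 100*(dt-dr)/dt     (second-month weight)
    # and the index return on day N is
    #   ret_N = sum(P_N * W_(N-1)) / sum(P_(N-1) * W_(N-1)) - 1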
for date in idx:
        i = np.nonzero(endDates>=date)[0][0] # find the first future that has not expired
first = futures[i] # first month futures class
second = futures[i+1] # second month futures class
dr = first.dr(date) # number of remaining dates in the first futures contract
dt = first.dt #number of business days in roll period
W.set_value(date,codes[i],100*dr/dt)
W.set_value(date,codes[i+1],100*(dt-dr)/dt)
# this is all just debug info
p1 = first.price(date)
p2 = second.price(date)
w1 = 100*dr/dt
w2 = 100*(dt-dr)/dt
Y.set_value(date,'first',p1)
Y.set_value(date,'second',p2)
Y.set_value(date,'days_left',first.dr(date))
Y.set_value(date,'w1',w1)
Y.set_value(date,'w2',w2)
Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)
valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
Y['ret'] = valCurr/valYest-1 # index return on day N
return Y
##-------------------Main script---------------------------
if __name__=="__main__":
    Y = reconstructVXX()
print Y.head(30)#
Y.to_csv('reconstructedVXX.csv')
| bsd-3-clause | 3,664,734,281,754,243,600 | 2,021,818,789,476,449,300 | 28.033613 | 90 | 0.556519 | false |
plaes/numpy | doc/source/conf.py | 6 | 8773 | # -*- coding: utf-8 -*-
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "0.5":
raise RuntimeError("Sphinx 0.5.dev or newer required")
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.doctest',
'plot_directive']
if sphinx.__version__ >= "0.7":
extensions.append('sphinx.ext.autosummary')
else:
extensions.append('autosummary')
extensions.append('only_directives')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
#master_doc = 'index'
# General substitutions.
project = 'NumPy'
copyright = '2008-2009, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
import numpy
# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = numpy.__version__
print version, release
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'scipy.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s v%s Manual (DRAFT)" % (project, version)
# The name of an image file (within the static path) to place at the top of
# the sidebar.
html_logo = 'scipyshiny_small.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': 'indexsidebar.html'
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
'index': 'indexcontent.html',
}
# If false, no module index is generated.
html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".html").
#html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'numpy'
# Pngmath should try to align formulas properly
pngmath_use_preview = True
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the NumPy community'
latex_documents = [
('reference/index', 'numpy-ref.tex', 'NumPy Reference',
_stdauthor, 'manual'),
('user/index', 'numpy-user.tex', 'NumPy User Guide',
_stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {'http://docs.python.org/dev': None}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Make numpydoc to generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
if sphinx.__version__ >= "0.7":
import glob
autosummary_generate = glob.glob("reference/*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
import math
phi = (math.sqrt(5) + 1)/2
import matplotlib
matplotlib.rcParams.update({
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
})
| bsd-3-clause | -5,250,127,699,582,308,000 | -3,033,504,282,982,411,300 | 31.018248 | 81 | 0.600479 | false |
smallyear/linuxLearn | salt/salt/client/ssh/state.py | 1 | 6047 | # -*- coding: utf-8 -*-
'''
Create ssh executor system
'''
from __future__ import absolute_import
# Import python libs
import os
import tarfile
import tempfile
import json
import shutil
from contextlib import closing
# Import salt libs
import salt.client.ssh.shell
import salt.client.ssh
import salt.utils
import salt.utils.thin
import salt.utils.url
import salt.roster
import salt.state
import salt.loader
import salt.minion
class SSHState(salt.state.State):
'''
Create a State object which wraps the SSH functions for state operations
'''
def __init__(self, opts, pillar=None, wrapper=None):
self.wrapper = wrapper
super(SSHState, self).__init__(opts, pillar)
def load_modules(self, data=None, proxy=None):
'''
Load up the modules for remote compilation via ssh
'''
self.functions = self.wrapper
self.utils = salt.loader.utils(self.opts)
locals_ = salt.loader.minion_mods(self.opts, utils=self.utils)
self.states = salt.loader.states(self.opts, locals_, self.utils)
self.rend = salt.loader.render(self.opts, self.functions)
def check_refresh(self, data, ret):
'''
Stub out check_refresh
'''
return
def module_refresh(self):
'''
Module refresh is not needed, stub it out
'''
return
class SSHHighState(salt.state.BaseHighState):
'''
Used to compile the highstate on the master
'''
stack = []
def __init__(self, opts, pillar=None, wrapper=None, fsclient=None):
self.client = fsclient
salt.state.BaseHighState.__init__(self, opts)
self.state = SSHState(opts, pillar, wrapper)
self.matcher = salt.minion.Matcher(self.opts)
def load_dynamic(self, matches):
'''
Stub out load_dynamic
'''
return
def lowstate_file_refs(chunks, extras=''):
'''
Create a list of file ref objects to reconcile
'''
refs = {}
for chunk in chunks:
if not isinstance(chunk, dict):
continue
saltenv = 'base'
crefs = []
for state in chunk:
if state == '__env__':
saltenv = chunk[state]
elif state.startswith('__'):
continue
crefs.extend(salt_refs(chunk[state]))
if crefs:
if saltenv not in refs:
refs[saltenv] = []
refs[saltenv].append(crefs)
if extras:
extra_refs = extras.split(',')
if extra_refs:
for env in refs:
for x in extra_refs:
refs[env].append([x])
return refs
def salt_refs(data, ret=None):
'''
Pull salt file references out of the states
'''
proto = 'salt://'
if ret is None:
ret = []
if isinstance(data, str):
if data.startswith(proto) and data not in ret:
ret.append(data)
if isinstance(data, list):
for comp in data:
salt_refs(comp, ret)
if isinstance(data, dict):
for comp in data:
salt_refs(data[comp], ret)
return ret
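# Illustrative doctest-style example for salt_refs (hypothetical state data):
#
#   >>> salt_refs({'file': ['salt://conf/app.conf', '/etc/local.conf']})
#   ['salt://conf/app.conf']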
def prep_trans_tar(file_client, chunks, file_refs, pillar=None, id_=None):
'''
Generate the execution package from the saltenv file refs and a low state
data structure
'''
gendir = tempfile.mkdtemp()
trans_tar = salt.utils.mkstemp()
lowfn = os.path.join(gendir, 'lowstate.json')
pillarfn = os.path.join(gendir, 'pillar.json')
sync_refs = [
[salt.utils.url.create('_modules')],
[salt.utils.url.create('_states')],
[salt.utils.url.create('_grains')],
[salt.utils.url.create('_renderers')],
[salt.utils.url.create('_returners')],
[salt.utils.url.create('_output')],
[salt.utils.url.create('_utils')],
]
with salt.utils.fopen(lowfn, 'w+') as fp_:
fp_.write(json.dumps(chunks))
if pillar:
with salt.utils.fopen(pillarfn, 'w+') as fp_:
fp_.write(json.dumps(pillar))
cachedir = os.path.join('salt-ssh', id_)
for saltenv in file_refs:
file_refs[saltenv].extend(sync_refs)
env_root = os.path.join(gendir, saltenv)
if not os.path.isdir(env_root):
os.makedirs(env_root)
for ref in file_refs[saltenv]:
for name in ref:
short = salt.utils.url.parse(name)[0]
path = file_client.cache_file(name, saltenv, cachedir=cachedir)
if path:
tgt = os.path.join(env_root, short)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(path, tgt)
continue
files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
if files:
for filename in files:
fn = filename[filename.find(short) + len(short):]
if fn.startswith('/'):
fn = fn.strip('/')
tgt = os.path.join(
env_root,
short,
fn,
)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(filename, tgt)
continue
try: # cwd may not exist if it was removed but salt was run from it
cwd = os.getcwd()
except OSError:
cwd = None
os.chdir(gendir)
with closing(tarfile.open(trans_tar, 'w:gz')) as tfp:
for root, dirs, files in os.walk(gendir):
for name in files:
full = os.path.join(root, name)
tfp.add(full[len(gendir):].lstrip(os.sep))
if cwd:
os.chdir(cwd)
shutil.rmtree(gendir)
return trans_tar
| apache-2.0 | -2,779,358,893,495,304,000 | -9,012,139,402,469,140,000 | 30.331606 | 79 | 0.539937 | false |
doganov/edx-platform | common/djangoapps/enrollment/data.py | 41 | 9880 | """
Data Aggregation Layer of the Enrollment API. Collects all enrollment specific data into a single
source to be used throughout the API.
"""
import logging
from django.contrib.auth.models import User
from opaque_keys.edx.keys import CourseKey
from enrollment.errors import (
CourseEnrollmentClosedError, CourseEnrollmentFullError,
CourseEnrollmentExistsError, UserNotFoundError, InvalidEnrollmentAttribute
)
from enrollment.serializers import CourseEnrollmentSerializer, CourseSerializer
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.lib.exceptions import CourseNotFoundError
from student.models import (
CourseEnrollment, NonExistentCourseError, EnrollmentClosedError,
CourseFullError, AlreadyEnrolledError, CourseEnrollmentAttribute
)
log = logging.getLogger(__name__)
def get_course_enrollments(user_id):
"""Retrieve a list representing all aggregated data for a user's course enrollments.
Construct a representation of all course enrollment data for a specific user.
Args:
user_id (str): The name of the user to retrieve course enrollment information for.
Returns:
A serializable list of dictionaries of all aggregated enrollment data for a user.
"""
qset = CourseEnrollment.objects.filter(
user__username=user_id,
is_active=True
).order_by('created')
enrollments = CourseEnrollmentSerializer(qset, many=True).data
# Find deleted courses and filter them out of the results
deleted = []
valid = []
for enrollment in enrollments:
if enrollment.get("course_details") is not None:
valid.append(enrollment)
else:
deleted.append(enrollment)
if deleted:
log.warning(
(
u"Course enrollments for user %s reference "
u"courses that do not exist (this can occur if a course is deleted)."
), user_id,
)
return valid
def get_course_enrollment(username, course_id):
"""Retrieve an object representing all aggregated data for a user's course enrollment.
Get the course enrollment information for a specific user and course.
Args:
username (str): The name of the user to retrieve course enrollment information for.
course_id (str): The course to retrieve course enrollment information for.
Returns:
A serializable dictionary representing the course enrollment.
"""
course_key = CourseKey.from_string(course_id)
try:
enrollment = CourseEnrollment.objects.get(
user__username=username, course_id=course_key
)
return CourseEnrollmentSerializer(enrollment).data
except CourseEnrollment.DoesNotExist:
return None
def create_course_enrollment(username, course_id, mode, is_active):
"""Create a new course enrollment for the given user.
Creates a new course enrollment for the specified user username.
Args:
username (str): The name of the user to create a new course enrollment for.
course_id (str): The course to create the course enrollment for.
mode (str): (Optional) The mode for the new enrollment.
is_active (boolean): (Optional) Determines if the enrollment is active.
Returns:
A serializable dictionary representing the new course enrollment.
Raises:
CourseNotFoundError
CourseEnrollmentFullError
EnrollmentClosedError
CourseEnrollmentExistsError
"""
course_key = CourseKey.from_string(course_id)
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
msg = u"Not user with username '{username}' found.".format(username=username)
log.warn(msg)
raise UserNotFoundError(msg)
try:
enrollment = CourseEnrollment.enroll(user, course_key, check_access=True)
return _update_enrollment(enrollment, is_active=is_active, mode=mode)
except NonExistentCourseError as err:
raise CourseNotFoundError(err.message)
except EnrollmentClosedError as err:
raise CourseEnrollmentClosedError(err.message)
except CourseFullError as err:
raise CourseEnrollmentFullError(err.message)
except AlreadyEnrolledError as err:
enrollment = get_course_enrollment(username, course_id)
raise CourseEnrollmentExistsError(err.message, enrollment)
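# Illustrative usage sketch (hypothetical identifiers; the returned dict is
# whatever CourseEnrollmentSerializer produces):
#
#   try:
#       enrollment = create_course_enrollment(
#           "Bob", "course-v1:edX+DemoX+Demo_2015", mode="honor", is_active=True)
#   except CourseEnrollmentExistsError:
#       enrollment = get_course_enrollment("Bob", "course-v1:edX+DemoX+Demo_2015")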
def update_course_enrollment(username, course_id, mode=None, is_active=None):
"""Modify a course enrollment for a user.
Allows updates to a specific course enrollment.
Args:
username (str): The name of the user to retrieve course enrollment information for.
course_id (str): The course to retrieve course enrollment information for.
mode (str): (Optional) If specified, modify the mode for this enrollment.
is_active (boolean): (Optional) Determines if the enrollment is active.
Returns:
A serializable dictionary representing the modified course enrollment.
"""
course_key = CourseKey.from_string(course_id)
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
msg = u"Not user with username '{username}' found.".format(username=username)
log.warn(msg)
raise UserNotFoundError(msg)
try:
enrollment = CourseEnrollment.objects.get(user=user, course_id=course_key)
return _update_enrollment(enrollment, is_active=is_active, mode=mode)
except CourseEnrollment.DoesNotExist:
return None
def add_or_update_enrollment_attr(user_id, course_id, attributes):
"""Set enrollment attributes for the enrollment of given user in the
course provided.
Args:
course_id (str): The Course to set enrollment attributes for.
user_id (str): The User to set enrollment attributes for.
attributes (list): Attributes to be set.
Example:
>>>add_or_update_enrollment_attr(
"Bob",
"course-v1-edX-DemoX-1T2015",
[
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
},
]
)
"""
course_key = CourseKey.from_string(course_id)
user = _get_user(user_id)
enrollment = CourseEnrollment.get_enrollment(user, course_key)
if not _invalid_attribute(attributes) and enrollment is not None:
CourseEnrollmentAttribute.add_enrollment_attr(enrollment, attributes)
def get_enrollment_attributes(user_id, course_id):
"""Retrieve enrollment attributes for given user for provided course.
Args:
user_id: The User to get enrollment attributes for
course_id (str): The Course to get enrollment attributes for.
Example:
>>>get_enrollment_attributes("Bob", "course-v1-edX-DemoX-1T2015")
[
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
},
]
Returns: list
"""
course_key = CourseKey.from_string(course_id)
user = _get_user(user_id)
enrollment = CourseEnrollment.get_enrollment(user, course_key)
return CourseEnrollmentAttribute.get_enrollment_attributes(enrollment)
def _get_user(user_id):
"""Retrieve user with provided user_id
Args:
user_id(str): username of the user for which object is to retrieve
Returns: obj
"""
try:
return User.objects.get(username=user_id)
except User.DoesNotExist:
msg = u"Not user with username '{username}' found.".format(username=user_id)
log.warn(msg)
raise UserNotFoundError(msg)
def _update_enrollment(enrollment, is_active=None, mode=None):
enrollment.update_enrollment(is_active=is_active, mode=mode)
enrollment.save()
return CourseEnrollmentSerializer(enrollment).data
def _invalid_attribute(attributes):
"""Validate enrollment attribute
Args:
attributes(dict): dict of attribute
Return:
list of invalid attributes
"""
invalid_attributes = []
for attribute in attributes:
if "namespace" not in attribute:
msg = u"'namespace' not in enrollment attribute"
log.warn(msg)
invalid_attributes.append("namespace")
raise InvalidEnrollmentAttribute(msg)
if "name" not in attribute:
msg = u"'name' not in enrollment attribute"
log.warn(msg)
invalid_attributes.append("name")
raise InvalidEnrollmentAttribute(msg)
if "value" not in attribute:
msg = u"'value' not in enrollment attribute"
log.warn(msg)
invalid_attributes.append("value")
raise InvalidEnrollmentAttribute(msg)
return invalid_attributes
def get_course_enrollment_info(course_id, include_expired=False):
"""Returns all course enrollment information for the given course.
Based on the course id, return all related course information.
Args:
course_id (str): The course to retrieve enrollment information for.
include_expired (bool): Boolean denoting whether expired course modes
should be included in the returned JSON data.
Returns:
A serializable dictionary representing the course's enrollment information.
Raises:
CourseNotFoundError
"""
course_key = CourseKey.from_string(course_id)
try:
course = CourseOverview.get_from_id(course_key)
except CourseOverview.DoesNotExist:
msg = u"Requested enrollment information for unknown course {course}".format(course=course_id)
log.warning(msg)
raise CourseNotFoundError(msg)
else:
return CourseSerializer(course, include_expired=include_expired).data
| agpl-3.0 | 8,130,703,282,798,424,000 | 8,377,121,538,284,541,000 | 32.378378 | 102 | 0.67581 | false |
cbeck88/fifengine | engine/python/fife/extensions/cegui/ceguibasicapplication.py | 2 | 4813 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
"""
The basic application and main loop.
See the L{ApplicationBase} documentation.
"""
from fife import fife
from fife.extensions.basicapplication import ApplicationBase
import PyCEGUI
class CEGUIEventListener(fife.IKeyListener, fife.ICommandListener):
"""
Default, rudimentary event listener.
Will cause the application to quit on pressing ESC.
"""
def __init__(self, app):
self.app = app
self.engine = app.engine
eventmanager = self.engine.getEventManager()
#eventmanager.setNonConsumableKeys([fife.Key.ESCAPE])
fife.IKeyListener.__init__(self)
eventmanager.addKeyListener(self)
fife.ICommandListener.__init__(self)
eventmanager.addCommandListener(self)
self.quitrequested = False
self.debuggeractive = False
def keyPressed(self, evt):
keyval = evt.getKey().getValue()
if keyval == fife.Key.ESCAPE:
self.app.quit()
def keyReleased(self, evt):
pass
def onCommand(self, command):
if command.getCommandType() == fife.CMD_QUIT_GAME:
self.quitrequested = True
command.consume()
DEFAULT_GUI_DIR = "gui/"
class CEGUIApplicationBase(ApplicationBase):
def __init__(self, setting=None):
super(CEGUIApplicationBase, self).__init__(setting)
self._initGuiManager()
self._loadCEGuiSettings()
def _initGuiManager(self):
settings = self.engine.getSettings()
major_v, minor_v = map(int, PyCEGUI.Version__.split('.')[:2])
#For CEGUI versions lower than 0.8.0 we use the old CEGuiManager
if major_v == 0 and minor_v <= 7:
guimanager = fife.CEGuiManager()
else:
guimanager = fife.CEGui_0Manager()
#transfer ownership to the engine
guimanager.thisown = 0
self.guimanager = guimanager
self.engine.setGuiManager(self.guimanager)
self.engine.getEventManager().addSdlEventListener(self.guimanager)
def _loadCEGuiSettings(self):
self._loadResourcePaths()
def _loadResourcePaths(self):
resourceprovider = PyCEGUI.System.getSingleton().getResourceProvider()
major_v, minor_v = map(int, PyCEGUI.Version__.split('.')[:2])
if major_v == 0 and minor_v <= 7:
resourcetypemap = { "schemes" : PyCEGUI.Scheme.setDefaultResourceGroup,
"imagesets" : PyCEGUI.Imageset.setDefaultResourceGroup,
"fonts" : PyCEGUI.Font.setDefaultResourceGroup,
"layouts" : PyCEGUI.WindowManager.setDefaultResourceGroup,
"looksnfeels" : PyCEGUI.WidgetLookManager.setDefaultResourceGroup,
}
else:
resourcetypemap = { "schemes" : PyCEGUI.Scheme.setDefaultResourceGroup,
"imagesets" : PyCEGUI.ImageManager.setImagesetDefaultResourceGroup,
"fonts" : PyCEGUI.Font.setDefaultResourceGroup,
"layouts" : PyCEGUI.WindowManager.setDefaultResourceGroup,
"looksnfeels" : PyCEGUI.WidgetLookManager.setDefaultResourceGroup,
}
if not self._setting:
for restype, res_setfunc in resourcetypemap.iteritems():
resourceprovider.setResourceGroupDirectory(restype, DEFAULT_GUI_DIR + restype)
res_setfunc(restype)
else:
for restype, res_setfunc in resourcetypemap.iteritems():
path = self._setting.get("CEGUI", restype)
if path:
resourceprovider.setResourceGroupDirectory(restype, path)
res_setfunc(restype)
else:
#set default path
resourceprovider.setResourceGroupDirectory(restype, DEFAULT_GUI_DIR + restype)
res_setfunc(restype)
parser = PyCEGUI.System.getSingleton().getXMLParser()
if parser.isPropertyPresent("SchemaDefaultResourceGroup"):
path = self._setting.get("CEGUI", "schemas")
if path:
				resourceprovider.setResourceGroupDirectory("schemas", path)
else:
				resourceprovider.setResourceGroupDirectory("schemas", DEFAULT_GUI_DIR + "schemas")
parser.setProperty("SchemaDefaultResourceGroup", "schemas")
def createListener(self):
self._listener = CEGUIEventListener(self)
return self._listener
| lgpl-2.1 | 6,788,062,650,499,294,000 | -5,330,431,330,737,653,000 | 31.965753 | 83 | 0.705797 | false |
shuangshuangwang/spark | examples/src/main/python/mllib/gradient_boosting_classification_example.py | 27 | 2446 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Gradient Boosted Trees Classification Example.
"""
from pyspark import SparkContext
# $example on$
from pyspark.mllib.tree import GradientBoostedTrees, GradientBoostedTreesModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonGradientBoostedTreesClassificationExample")
# $example on$
# Load and parse the data file.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a GradientBoostedTrees model.
# Notes: (a) Empty categoricalFeaturesInfo indicates all features are continuous.
# (b) Use more iterations in practice.
model = GradientBoostedTrees.trainClassifier(trainingData,
categoricalFeaturesInfo={}, numIterations=3)
# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testErr = labelsAndPredictions.filter(
lambda lp: lp[0] != lp[1]).count() / float(testData.count())
print('Test Error = ' + str(testErr))
print('Learned classification GBT model:')
print(model.toDebugString())
# Save and load model
model.save(sc, "target/tmp/myGradientBoostingClassificationModel")
sameModel = GradientBoostedTreesModel.load(sc,
"target/tmp/myGradientBoostingClassificationModel")
# $example off$
| apache-2.0 | 5,524,290,731,269,656,000 | -8,417,121,389,715,896,000 | 44.296296 | 98 | 0.715454 | false |
rapidpro/chatpro | chatpro/rooms/models.py | 1 | 2494 | from __future__ import absolute_import, unicode_literals
from chatpro.profiles.tasks import sync_org_contacts
from dash.orgs.models import Org
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Room(models.Model):
"""
Corresponds to a RapidPro contact group
"""
uuid = models.CharField(max_length=36, unique=True)
org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name='rooms')
name = models.CharField(verbose_name=_("Name"), max_length=128, blank=True,
help_text=_("Name of this room"))
users = models.ManyToManyField(User, verbose_name=_("Users"), related_name='rooms',
help_text=_("Users who can chat in this room"))
managers = models.ManyToManyField(User, verbose_name=_("Managers"), related_name='manage_rooms',
help_text=_("Users who can manage contacts in this room"))
is_active = models.BooleanField(default=True, help_text="Whether this room is active")
@classmethod
def create(cls, org, name, uuid):
return cls.objects.create(org=org, name=name, uuid=uuid)
@classmethod
def get_all(cls, org):
return cls.objects.filter(org=org, is_active=True)
@classmethod
def update_room_groups(cls, org, group_uuids):
"""
Updates an org's chat rooms based on the selected groups UUIDs
"""
# de-activate rooms not included
org.rooms.exclude(uuid__in=group_uuids).update(is_active=False)
# fetch group details
groups = org.get_temba_client().get_groups()
group_names = {group.uuid: group.name for group in groups}
for group_uuid in group_uuids:
existing = org.rooms.filter(uuid=group_uuid).first()
if existing:
existing.name = group_names[group_uuid]
existing.is_active = True
existing.save()
else:
cls.create(org, group_names[group_uuid], group_uuid)
sync_org_contacts.delay(org.id)
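    # Illustrative call (hypothetical group UUIDs): keep exactly these two
    # RapidPro groups as the org's active chat rooms and re-sync contacts:
    #
    #   Room.update_room_groups(org, ['0ad52f48-aaaa-...', '9b1d33e2-bbbb-...'])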
def get_contacts(self):
return self.contacts.filter(is_active=True)
def get_users(self):
return self.users.filter(is_active=True).select_related('profile')
def get_managers(self):
return self.managers.filter(is_active=True).select_related('profile')
def __unicode__(self):
return self.name
| bsd-3-clause | -2,753,239,283,656,423,000 | 6,641,329,029,471,571,000 | 34.628571 | 100 | 0.631917 | false |
sbalde/edxplatform | common/djangoapps/third_party_auth/tests/specs/test_testshib.py | 24 | 12276 | """
Third_party_auth integration tests using a mock version of the TestShib provider
"""
import json
import unittest
import httpretty
from mock import patch
from django.core.urlresolvers import reverse
from openedx.core.lib.json_utils import EscapedEdxJSONEncoder
from student.tests.factories import UserFactory
from third_party_auth.tasks import fetch_saml_metadata
from third_party_auth.tests import testutil
TESTSHIB_ENTITY_ID = 'https://idp.testshib.org/idp/shibboleth'
TESTSHIB_METADATA_URL = 'https://mock.testshib.org/metadata/testshib-providers.xml'
TESTSHIB_SSO_URL = 'https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO'
TPA_TESTSHIB_LOGIN_URL = '/auth/login/tpa-saml/?auth_entry=login&next=%2Fdashboard&idp=testshib'
TPA_TESTSHIB_REGISTER_URL = '/auth/login/tpa-saml/?auth_entry=register&next=%2Fdashboard&idp=testshib'
TPA_TESTSHIB_COMPLETE_URL = '/auth/complete/tpa-saml/'
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
class TestShibIntegrationTest(testutil.SAMLTestCase):
"""
TestShib provider Integration Test, to test SAML functionality
"""
def setUp(self):
super(TestShibIntegrationTest, self).setUp()
self.login_page_url = reverse('signin_user')
self.register_page_url = reverse('register_user')
self.enable_saml(
private_key=self._get_private_key(),
public_key=self._get_public_key(),
entity_id="https://saml.example.none",
)
# Mock out HTTP requests that may be made to TestShib:
httpretty.enable()
def metadata_callback(_request, _uri, headers):
""" Return a cached copy of TestShib's metadata by reading it from disk """
return (200, headers, self.read_data_file('testshib_metadata.xml'))
httpretty.register_uri(httpretty.GET, TESTSHIB_METADATA_URL, content_type='text/xml', body=metadata_callback)
self.addCleanup(httpretty.disable)
self.addCleanup(httpretty.reset)
# Configure the SAML library to use the same request ID for every request.
# Doing this and freezing the time allows us to play back recorded request/response pairs
uid_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.generate_unique_id', return_value='TESTID')
uid_patch.start()
self.addCleanup(uid_patch.stop)
def test_login_before_metadata_fetched(self):
self._configure_testshib_provider(fetch_metadata=False)
# The user goes to the login page, and sees a button to login with TestShib:
self._check_login_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(TPA_TESTSHIB_LOGIN_URL)
# The user should be redirected to back to the login page:
self.assertEqual(try_login_response.status_code, 302)
self.assertEqual(try_login_response['Location'], self.url_prefix + self.login_page_url)
# When loading the login page, the user will see an error message:
response = self.client.get(self.login_page_url)
self.assertEqual(response.status_code, 200)
self.assertIn('Authentication with TestShib is currently unavailable.', response.content)
def test_register(self):
self._configure_testshib_provider()
self._freeze_time(timestamp=1434326820) # This is the time when the saved request/response was recorded.
# The user goes to the register page, and sees a button to register with TestShib:
self._check_register_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(TPA_TESTSHIB_REGISTER_URL)
# The user should be redirected to TestShib:
self.assertEqual(try_login_response.status_code, 302)
self.assertTrue(try_login_response['Location'].startswith(TESTSHIB_SSO_URL))
# Now the user will authenticate with the SAML provider
testshib_response = self._fake_testshib_login_and_return()
# We should be redirected to the register screen since this account is not linked to an edX account:
self.assertEqual(testshib_response.status_code, 302)
self.assertEqual(testshib_response['Location'], self.url_prefix + self.register_page_url)
register_response = self.client.get(self.register_page_url)
# We'd now like to see if the "You've successfully signed into TestShib" message is
# shown, but it's managed by a JavaScript runtime template, and we can't run JS in this
# type of test, so we just check for the variable that triggers that message.
self.assertIn('"currentProvider": "TestShib"', register_response.content)
self.assertIn('"errorMessage": null', register_response.content)
# Now do a crude check that the data (e.g. email) from the provider is displayed in the form:
self.assertIn('"defaultValue": "[email protected]"', register_response.content)
self.assertIn('"defaultValue": "Me Myself And I"', register_response.content)
# Now complete the form:
ajax_register_response = self.client.post(
reverse('user_api_registration'),
{
'email': '[email protected]',
'name': 'Myself',
'username': 'myself',
'honor_code': True,
}
)
self.assertEqual(ajax_register_response.status_code, 200)
# Then the AJAX will finish the third party auth:
continue_response = self.client.get(TPA_TESTSHIB_COMPLETE_URL)
# And we should be redirected to the dashboard:
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(continue_response['Location'], self.url_prefix + reverse('dashboard'))
# Now check that we can login again:
self.client.logout()
self.verify_user_email('[email protected]')
self._test_return_login()
def test_login(self):
self._configure_testshib_provider()
self._freeze_time(timestamp=1434326820) # This is the time when the saved request/response was recorded.
user = UserFactory.create()
# The user goes to the login page, and sees a button to login with TestShib:
self._check_login_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(TPA_TESTSHIB_LOGIN_URL)
# The user should be redirected to TestShib:
self.assertEqual(try_login_response.status_code, 302)
self.assertTrue(try_login_response['Location'].startswith(TESTSHIB_SSO_URL))
# Now the user will authenticate with the SAML provider
testshib_response = self._fake_testshib_login_and_return()
# We should be redirected to the login screen since this account is not linked to an edX account:
self.assertEqual(testshib_response.status_code, 302)
self.assertEqual(testshib_response['Location'], self.url_prefix + self.login_page_url)
login_response = self.client.get(self.login_page_url)
# We'd now like to see if the "You've successfully signed into TestShib" message is
# shown, but it's managed by a JavaScript runtime template, and we can't run JS in this
# type of test, so we just check for the variable that triggers that message.
self.assertIn('"currentProvider": "TestShib"', login_response.content)
self.assertIn('"errorMessage": null', login_response.content)
# Now the user enters their username and password.
# The AJAX on the page will log them in:
ajax_login_response = self.client.post(
reverse('user_api_login_session'),
{'email': user.email, 'password': 'test'}
)
self.assertEqual(ajax_login_response.status_code, 200)
# Then the AJAX will finish the third party auth:
continue_response = self.client.get(TPA_TESTSHIB_COMPLETE_URL)
# And we should be redirected to the dashboard:
self.assertEqual(continue_response.status_code, 302)
self.assertEqual(continue_response['Location'], self.url_prefix + reverse('dashboard'))
# Now check that we can login again:
self.client.logout()
self._test_return_login()
def _test_return_login(self):
""" Test logging in to an account that is already linked. """
# Make sure we're not logged in:
dashboard_response = self.client.get(reverse('dashboard'))
self.assertEqual(dashboard_response.status_code, 302)
# The user goes to the login page, and sees a button to login with TestShib:
self._check_login_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(TPA_TESTSHIB_LOGIN_URL)
# The user should be redirected to TestShib:
self.assertEqual(try_login_response.status_code, 302)
self.assertTrue(try_login_response['Location'].startswith(TESTSHIB_SSO_URL))
# Now the user will authenticate with the SAML provider
login_response = self._fake_testshib_login_and_return()
# There will be one weird redirect required to set the login cookie:
self.assertEqual(login_response.status_code, 302)
self.assertEqual(login_response['Location'], self.url_prefix + TPA_TESTSHIB_COMPLETE_URL)
# And then we should be redirected to the dashboard:
login_response = self.client.get(TPA_TESTSHIB_COMPLETE_URL)
self.assertEqual(login_response.status_code, 302)
self.assertEqual(login_response['Location'], self.url_prefix + reverse('dashboard'))
# Now we are logged in:
dashboard_response = self.client.get(reverse('dashboard'))
self.assertEqual(dashboard_response.status_code, 200)
def _freeze_time(self, timestamp):
""" Mock the current time for SAML, so we can replay canned requests/responses """
now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp)
now_patch.start()
self.addCleanup(now_patch.stop)
def _check_login_page(self):
""" Load the login form and check that it contains a TestShib button """
response = self.client.get(self.login_page_url)
self.assertEqual(response.status_code, 200)
self.assertIn("TestShib", response.content)
self.assertIn(json.dumps(TPA_TESTSHIB_LOGIN_URL, cls=EscapedEdxJSONEncoder), response.content)
return response
def _check_register_page(self):
""" Load the login form and check that it contains a TestShib button """
response = self.client.get(self.register_page_url)
self.assertEqual(response.status_code, 200)
self.assertIn("TestShib", response.content)
self.assertIn(json.dumps(TPA_TESTSHIB_REGISTER_URL, cls=EscapedEdxJSONEncoder), response.content)
return response
def _configure_testshib_provider(self, **kwargs):
""" Enable and configure the TestShib SAML IdP as a third_party_auth provider """
fetch_metadata = kwargs.pop('fetch_metadata', True)
kwargs.setdefault('name', 'TestShib')
kwargs.setdefault('enabled', True)
kwargs.setdefault('idp_slug', 'testshib')
kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)
kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)
kwargs.setdefault('icon_class', 'fa-university')
kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName
self.configure_saml_provider(**kwargs)
if fetch_metadata:
self.assertTrue(httpretty.is_enabled())
num_changed, num_failed, num_total = fetch_saml_metadata()
self.assertEqual(num_failed, 0)
self.assertEqual(num_changed, 1)
self.assertEqual(num_total, 1)
def _fake_testshib_login_and_return(self):
""" Mocked: the user logs in to TestShib and then gets redirected back """
# The SAML provider (TestShib) will authenticate the user, then get the browser to POST a response:
return self.client.post(
TPA_TESTSHIB_COMPLETE_URL,
content_type='application/x-www-form-urlencoded',
data=self.read_data_file('testshib_response.txt'),
)
| agpl-3.0 | -5,539,982,520,250,926,000 | 3,057,243,957,862,852,000 | 52.373913 | 117 | 0.678478 | false |
JoKnopp/wp-import | test/test_postgresql.py | 1 | 4427 | # -*- coding: UTF-8 -*-
# © Copyright 2009 Wolodja Wentland. All Rights Reserved.
# This file is part of wp-import.
#
# wp-import is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wp-import is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with wp-import. If not, see <http://www.gnu.org/licenses/>.
"""Tests for wp_import.postgresql
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
import tempfile
from nose.tools import *
import wp_import.utils as wpi_utils
import wp_import.postgresql as wpi_psql
PREFIX = os.path.join(*os.path.split(os.path.dirname(__file__))[:-1])
TEST_DATA_DIR = os.path.join(PREFIX, 'test', 'data')
DOWNLOAD_DIR = os.path.join(TEST_DATA_DIR, 'download')
EXPECTED_STMTS = {
'categorylinks': [
"""INSERT INTO "categorylinks" VALUES """ \
"(130,'Linux','Linux\u5185\u6838','2006-07-25T19:03:22Z')"],
'langlinks': [
"""INSERT INTO "langlinks" VALUES """ \
"(43017,'af','Dante Alighieri')"],
'pagelinks': [
"""INSERT INTO "pagelinks" VALUES (12,0,'P/NP\u554f\u984c')"""],
'redirect': [
"""INSERT INTO "redirect" VALUES (71247,0,'ASCII\u827a\u672f')"""]}
class FakeOptions(object):
pass
def test_insert_statements():
fn_pat = re.compile(
r'''(?P<language>\w+)wiki-(?P<date>\d{8})-(?P<table>[\w_]+).*''')
for dump_path in sorted(wpi_utils.find('*.sql.gz', DOWNLOAD_DIR)):
filename = os.path.basename(dump_path)
mat = fn_pat.match(filename)
stmts = list(wpi_psql.insert_statements(dump_path))
eq_(list(wpi_psql.insert_statements(dump_path)),
EXPECTED_STMTS[mat.group('table')])
def test_categorylink_pipeline():
for file_path in wpi_utils.find('*categorylinks*.sql.gz', DOWNLOAD_DIR):
with wpi_utils.open_compressed(file_path) as cl_file:
eq_(list(wpi_psql.categorylinks_pipeline(cl_file)),
EXPECTED_STMTS['categorylinks'])
def test_psql_quotation():
eq_(list(wpi_psql.psql_quotation(['f `b`', 'baz', 'shrubbery ``'])),
['f "b"', 'baz', 'shrubbery ""'])
def test_timestamp_to_iso_8601():
eq_(list(wpi_psql.timestamp_to_iso_8601([',20080218135752) foo'])),
[",'2008-02-18T13:57:52Z') foo"])
def test_parse_pgpass():
with tempfile.NamedTemporaryFile() as tmp_f:
tmp_f.write('*:*:*:*:GrailQuest\n')
tmp_f.seek(0)
eq_(wpi_psql._parse_pgpass(tmp_f.name).next(),
{'user': '*', 'host': '*', 'port': '*', 'database': '*',
'password': 'GrailQuest'})
tmp_f.write('hostname:port:database:username:password\n')
tmp_f.seek(0)
eq_(wpi_psql._parse_pgpass(tmp_f.name).next(),
{'user': 'username', 'host': 'hostname', 'port': 'port',
'database': 'database',
'password': 'password'})
def test_password_from_pgpass():
with tempfile.NamedTemporaryFile() as tmp_f:
options = FakeOptions()
options.pg_passfile = tmp_f.name
options.pg_user = 'KingArthur'
options.pg_port = '2342'
options.pg_host = 'Camelot'
# test generic pgpass line
tmp_f.write('*:*:*:*:GrailQuest\n')
tmp_f.seek(0)
eq_(wpi_psql.password_from_pgpass(options),
'GrailQuest')
# test specific pgpass line
tmp_f.write('Camelot:2342:postgres:KingArthur:GrailQuest\n')
tmp_f.seek(0)
eq_(wpi_psql.password_from_pgpass(options),
'GrailQuest')
# test pick most specific
tmp_f.write('Jerusalem:2342:postgres:Brian:Jehova\n')
tmp_f.write('Camelot:2342:postgres:KingArthur:GrailQuest\n')
tmp_f.write('*:*:*:*:UnladenSwallow\n')
tmp_f.seek(0)
eq_(wpi_psql.password_from_pgpass(options),
'GrailQuest')
tmp_f.write('*:*:*:*\n')
tmp_f.seek(0)
assert_raises(KeyError, wpi_psql.password_from_pgpass,
options=options)
| gpl-3.0 | 5,818,950,594,050,159,000 | -4,506,169,342,648,843,300 | 33.578125 | 76 | 0.615906 | false |
ep1cman/workload-automation | wlauto/instrumentation/daq/__init__.py | 2 | 20324 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0613,E1101,access-member-before-definition,attribute-defined-outside-init
from __future__ import division
import os
import sys
import csv
import shutil
import tempfile
from collections import OrderedDict, defaultdict
from string import ascii_lowercase
from multiprocessing import Process, Queue
from wlauto import Instrument, Parameter
from wlauto.core import signal
from wlauto.exceptions import ConfigError, InstrumentError, DeviceError
from wlauto.utils.misc import ensure_directory_exists as _d
from wlauto.utils.types import list_of_ints, list_of_strs, boolean
# pylint: disable=wrong-import-position,wrong-import-order
daqpower_path = os.path.join(os.path.dirname(__file__), '..', '..', 'external', 'daq_server', 'src')
sys.path.insert(0, daqpower_path)
try:
import daqpower.client as daq # pylint: disable=F0401
from daqpower.config import DeviceConfiguration, ServerConfiguration, ConfigurationError # pylint: disable=F0401
except ImportError, e:
daq, DeviceConfiguration, ServerConfiguration, ConfigurationError = None, None, None, None
import_error_mesg = e.message
sys.path.pop(0)
UNITS = {
'energy': 'Joules',
'power': 'Watts',
'voltage': 'Volts',
}
GPIO_ROOT = '/sys/class/gpio'
TRACE_MARKER_PATH = '/sys/kernel/debug/tracing/trace_marker'
def dict_or_bool(value):
"""
Ensures that either a dictionary or a boolean is used as a parameter.
"""
if isinstance(value, dict):
return value
return boolean(value)
class Daq(Instrument):
name = 'daq'
description = """
DAQ instrument obtains the power consumption of the target device's core
    measured by a National Instruments Data Acquisition (DAQ) device.
WA communicates with a DAQ device server running on a Windows machine
(Please refer to :ref:`daq_setup`) over a network. You must specify the IP
address and port the server is listening on in the config file as follows ::
daq_server_host = '10.1.197.176'
daq_server_port = 45677
These values will be output by the server when you run it on Windows.
You must also specify the values of resistors (in Ohms) across which the
voltages are measured (Please refer to :ref:`daq_setup`). The values should be
specified as a list with an entry for each resistor, e.g.::
daq_resistor_values = [0.005, 0.005]
In addition to this mandatory configuration, you can also optionally specify the
following::
:daq_labels: Labels to be used for ports. Defaults to ``'PORT_<pnum>'``, where
'pnum' is the number of the port.
:daq_device_id: The ID under which the DAQ is registered with the driver.
Defaults to ``'Dev1'``.
:daq_v_range: Specifies the voltage range for the SOC voltage channel on the DAQ
(please refer to :ref:`daq_setup` for details). Defaults to ``2.5``.
:daq_dv_range: Specifies the voltage range for the resistor voltage channel on
the DAQ (please refer to :ref:`daq_setup` for details).
Defaults to ``0.2``.
:daq_sampling_rate: DAQ sampling rate. DAQ will take this many samples each
                        second. Please note that this may be limited by your DAQ model
                        and the number of ports you're measuring (again, see
:ref:`daq_setup`). Defaults to ``10000``.
:daq_channel_map: Represents mapping from logical AI channel number to physical
connector on the DAQ (varies between DAQ models). The default
assumes DAQ 6363 and similar with AI channels on connectors
0-7 and 16-23.
"""
parameters = [
Parameter('server_host', kind=str, default='localhost',
global_alias='daq_server_host',
description='The host address of the machine that runs the daq Server which the '
'instrument communicates with.'),
Parameter('server_port', kind=int, default=45677,
global_alias='daq_server_port',
                  description='The port number of the DAQ server that the daq instrument '
                              'communicates with.'),
Parameter('device_id', kind=str, default='Dev1',
global_alias='daq_device_id',
description='The ID under which the DAQ is registered with the driver.'),
Parameter('v_range', kind=float, default=2.5,
global_alias='daq_v_range',
description='Specifies the voltage range for the SOC voltage channel on the DAQ '
'(please refer to :ref:`daq_setup` for details).'),
Parameter('dv_range', kind=float, default=0.2,
global_alias='daq_dv_range',
description='Specifies the voltage range for the resistor voltage channel on '
'the DAQ (please refer to :ref:`daq_setup` for details).'),
Parameter('sampling_rate', kind=int, default=10000,
global_alias='daq_sampling_rate',
description='DAQ sampling rate. DAQ will take this many samples each '
                              'second. Please note that this may be limited by your DAQ model '
                              'and the number of ports you\'re measuring (again, see '
':ref:`daq_setup`)'),
Parameter('resistor_values', kind=list, mandatory=True,
global_alias='daq_resistor_values',
description='The values of resistors (in Ohms) across which the voltages are measured on '
'each port.'),
Parameter('channel_map', kind=list_of_ints, default=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23),
global_alias='daq_channel_map',
description='Represents mapping from logical AI channel number to physical '
'connector on the DAQ (varies between DAQ models). The default '
'assumes DAQ 6363 and similar with AI channels on connectors '
'0-7 and 16-23.'),
Parameter('labels', kind=list_of_strs,
global_alias='daq_labels',
                  description='List of port labels. If specified, the length of the list must match '
'the length of ``resistor_values``. Defaults to "PORT_<pnum>", where '
'"pnum" is the number of the port.'),
Parameter('negative_samples', default='keep', allowed_values=['keep', 'zero', 'drop', 'abs'],
global_alias='daq_negative_samples',
description="""
Specifies how negative power samples should be handled. The following
methods are possible:
:keep: keep them as they are
:zero: turn negative values to zero
:drop: drop samples if they contain negative values. *warning:* this may result in
port files containing different numbers of samples
                    :abs: take the absolute value of negative samples
"""),
Parameter('gpio_sync', kind=int, constraint=lambda x: x > 0,
description="""
If specified, the instrument will simultaneously set the
specified GPIO pin high and put a marker into ftrace. This is
                  to facilitate syncing kernel trace events to DAQ power
trace.
"""),
Parameter('merge_channels', kind=dict_or_bool, default=False,
description="""
If set to ``True``, channels with consecutive letter suffixes will be summed.
e.g. If you have channels A7a, A7b, A7c, A15a, A15b they will be summed to A7, A15
You can also manually specify the name of channels to be merged and the name of the
result like so:
merge_channels:
A15: [A15dvfs, A15ram]
NonCPU: [GPU, RoS, Mem]
                      In the above example the DAQ channels labeled A15dvfs and A15ram will be summed
                      together with the result saved as 'A15', and GPU, RoS and Mem will be summed to 'NonCPU'.
""")
]
def initialize(self, context):
status, devices = self._execute_command('list_devices')
if status == daq.Status.OK and not devices:
raise InstrumentError('DAQ: server did not report any devices registered with the driver.')
self._results = OrderedDict()
self.gpio_path = None
if self.gpio_sync:
if not self.device.file_exists(GPIO_ROOT):
raise InstrumentError('GPIO sysfs not enabled on the device.')
try:
export_path = self.device.path.join(GPIO_ROOT, 'export')
self.device.set_sysfile_value(export_path, self.gpio_sync, verify=False)
pin_root = self.device.path.join(GPIO_ROOT, 'gpio{}'.format(self.gpio_sync))
direction_path = self.device.path.join(pin_root, 'direction')
self.device.set_sysfile_value(direction_path, 'out')
self.gpio_path = self.device.path.join(pin_root, 'value')
self.device.set_sysfile_value(self.gpio_path, 0, verify=False)
signal.connect(self.insert_start_marker, signal.BEFORE_WORKLOAD_EXECUTION, priority=11)
signal.connect(self.insert_stop_marker, signal.AFTER_WORKLOAD_EXECUTION, priority=11)
except DeviceError as e:
raise InstrumentError('Could not configure GPIO on device: {}'.format(e))
def setup(self, context):
self.logger.debug('Initialising session.')
self._execute_command('configure', config=self.device_config)
def slow_start(self, context):
self.logger.debug('Starting collecting measurements.')
self._execute_command('start')
def slow_stop(self, context):
self.logger.debug('Stopping collecting measurements.')
self._execute_command('stop')
def update_result(self, context): # pylint: disable=R0914
self.logger.debug('Downloading data files.')
output_directory = _d(os.path.join(context.output_directory, 'daq'))
self._execute_command('get_data', output_directory=output_directory)
if self.merge_channels:
self._merge_channels(context)
for entry in os.listdir(output_directory):
context.add_iteration_artifact('DAQ_{}'.format(os.path.splitext(entry)[0]),
path=os.path.join('daq', entry),
kind='data',
                                           description='DAQ power measurements.')
port = os.path.splitext(entry)[0]
path = os.path.join(output_directory, entry)
key = (context.spec.id, context.spec.label, context.current_iteration)
if key not in self._results:
self._results[key] = {}
temp_file = os.path.join(tempfile.gettempdir(), entry)
writer, wfh = None, None
with open(path) as fh:
if self.negative_samples != 'keep':
wfh = open(temp_file, 'wb')
writer = csv.writer(wfh)
reader = csv.reader(fh)
metrics = reader.next()
if writer:
writer.writerow(metrics)
self._metrics |= set(metrics)
rows = _get_rows(reader, writer, self.negative_samples)
data = zip(*rows)
if writer:
wfh.close()
shutil.move(temp_file, os.path.join(output_directory, entry))
n = len(data[0])
means = [s / n for s in map(sum, data)]
for metric, value in zip(metrics, means):
metric_name = '{}_{}'.format(port, metric)
context.result.add_metric(metric_name, round(value, 3), UNITS[metric])
self._results[key][metric_name] = round(value, 3)
energy = sum(data[metrics.index('power')]) * (self.sampling_rate / 1000000)
context.result.add_metric('{}_energy'.format(port), round(energy, 3), UNITS['energy'])
def teardown(self, context):
self.logger.debug('Terminating session.')
self._execute_command('close')
def finalize(self, context):
if self.gpio_path:
unexport_path = self.device.path.join(GPIO_ROOT, 'unexport')
self.device.set_sysfile_value(unexport_path, self.gpio_sync, verify=False)
def validate(self): # pylint: disable=too-many-branches
if not daq:
raise ImportError(import_error_mesg)
self._results = None
self._metrics = set()
if self.labels:
if len(self.labels) != len(self.resistor_values):
raise ConfigError('Number of DAQ port labels does not match the number of resistor values.')
duplicates = set([x for x in self.labels if self.labels.count(x) > 1])
if len(duplicates) > 0:
raise ConfigError('Duplicate labels: {}'.format(', '.join(duplicates)))
else:
self.labels = ['PORT_{}'.format(i) for i, _ in enumerate(self.resistor_values)]
self.server_config = ServerConfiguration(host=self.server_host,
port=self.server_port)
self.device_config = DeviceConfiguration(device_id=self.device_id,
v_range=self.v_range,
dv_range=self.dv_range,
sampling_rate=self.sampling_rate,
resistor_values=self.resistor_values,
channel_map=self.channel_map,
labels=self.labels)
try:
self.server_config.validate()
self.device_config.validate()
except ConfigurationError, ex:
raise ConfigError('DAQ configuration: ' + ex.message) # Re-raise as a WA error
self.grouped_suffixes = defaultdict(str)
if isinstance(self.merge_channels, bool):
if self.merge_channels:
# Create a dict of potential prefixes and a list of their suffixes
grouped_suffixes = defaultdict(list)
for label in sorted(self.labels):
if len(label) > 1:
grouped_suffixes[label[:-1]].append(label)
                # Only merge channels if more than one channel has the same prefix and the
                # suffixes are consecutive letters starting with 'a'.
self.label_map = {}
for channel, suffixes in grouped_suffixes.iteritems():
if len(suffixes) > 1:
if "".join([s[-1] for s in suffixes]) in ascii_lowercase[:len(suffixes)]:
self.label_map[channel] = suffixes
elif isinstance(self.merge_channels, dict):
# Check if given channel names match labels
for old_names in self.merge_channels.values():
for name in old_names:
if name not in self.labels:
raise ConfigError("No channel with label {} specified".format(name))
self.label_map = self.merge_channels # pylint: disable=redefined-variable-type
self.merge_channels = True
else: # Should never reach here
raise AssertionError("``merge_channels`` is of invalid type")
def before_overall_results_processing(self, context):
if self._results:
headers = ['id', 'workload', 'iteration']
metrics = ['{}_{}'.format(p, m) for p in self.labels for m in sorted(self._metrics)]
headers += metrics
rows = [headers]
for key, value in self._results.iteritems():
rows.append(list(key) + [value[m] for m in metrics])
outfile = os.path.join(context.output_directory, 'daq_power.csv')
with open(outfile, 'wb') as fh:
writer = csv.writer(fh)
writer.writerows(rows)
def insert_start_marker(self, context):
if self.gpio_path:
command = 'echo DAQ_START_MARKER > {}; echo 1 > {}'.format(TRACE_MARKER_PATH, self.gpio_path)
self.device.execute(command, as_root=self.device.is_rooted)
def insert_stop_marker(self, context):
if self.gpio_path:
command = 'echo DAQ_STOP_MARKER > {}; echo 0 > {}'.format(TRACE_MARKER_PATH, self.gpio_path)
self.device.execute(command, as_root=self.device.is_rooted)
def _execute_command(self, command, **kwargs):
# pylint: disable=E1101
q = Queue()
p = Process(target=_send_daq_command, args=(q, self.server_config, command), kwargs=kwargs)
p.start()
result = q.get()
p.join()
if result.status == daq.Status.OK:
pass # all good
elif result.status == daq.Status.OKISH:
self.logger.debug(result.message)
elif result.status == daq.Status.ERROR:
raise InstrumentError('DAQ: {}'.format(result.message))
else:
raise InstrumentError('DAQ: Unexpected result: {} - {}'.format(result.status, result.message))
return (result.status, result.data)
def _merge_channels(self, context): # pylint: disable=r0914
output_directory = _d(os.path.join(context.output_directory, 'daq'))
for name, labels in self.label_map.iteritems():
summed = None
for label in labels:
path = os.path.join(output_directory, "{}.csv".format(label))
with open(path) as fh:
reader = csv.reader(fh)
metrics = reader.next()
rows = _get_rows(reader, None, self.negative_samples)
if summed:
summed = [[x + y for x, y in zip(a, b)] for a, b in zip(rows, summed)]
else:
summed = rows
output_path = os.path.join(output_directory, "{}.csv".format(name))
with open(output_path, 'wb') as wfh:
writer = csv.writer(wfh)
writer.writerow(metrics)
for row in summed:
writer.writerow(row)
def _send_daq_command(q, *args, **kwargs):
result = daq.execute_command(*args, **kwargs)
q.put(result)
def _get_rows(reader, writer, negative_samples):
rows = []
for row in reader:
row = map(float, row)
if negative_samples == 'keep':
rows.append(row)
elif negative_samples == 'zero':
def nonneg(v):
return v if v >= 0 else 0
rows.append([nonneg(v) for v in row])
elif negative_samples == 'drop':
if all(v >= 0 for v in row):
rows.append(row)
elif negative_samples == 'abs':
rows.append([abs(v) for v in row])
else:
raise AssertionError(negative_samples) # should never get here
if writer:
writer.writerow(row)
return rows
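if __name__ == '__main__':
    # Minimal self-check sketch, not part of the original instrument: it shows
    # how _get_rows applies each negative_samples policy.  The sample values
    # are made up, and running this directly still requires the wlauto
    # environment because of the imports at the top of the module.
    _sample = [['0.1', '-0.2', '0.3'], ['0.4', '0.5', '-0.6']]
    for _mode in ('keep', 'zero', 'drop', 'abs'):
        _rows = _get_rows(iter(_sample), None, _mode)
        print('{}: {}'.format(_mode, _rows))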
| apache-2.0 | 7,794,858,963,362,287,000 | 3,180,209,004,612,539,000 | 47.047281 | 117 | 0.575182 | false |
BadDNA/anolis | web/env/lib/python2.6/site-packages/pip-0.7.2-py2.6.egg/pip/locations.py | 3 | 1508 | """Locations where we look for configs, install stuff, etc"""
import sys
import os
from distutils import sysconfig
if getattr(sys, 'real_prefix', None):
## FIXME: is build/ a good name?
build_prefix = os.path.join(sys.prefix, 'build')
src_prefix = os.path.join(sys.prefix, 'src')
else:
## FIXME: this isn't a very good default
build_prefix = os.path.join(os.getcwd(), 'build')
src_prefix = os.path.join(os.getcwd(), 'src')
# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_python_lib()
user_dir = os.path.expanduser('~')
if sys.platform == 'win32':
bin_py = os.path.join(sys.prefix, 'Scripts')
# buildout uses 'bin' on Windows too?
if not os.path.exists(bin_py):
bin_py = os.path.join(sys.prefix, 'bin')
user_dir = os.environ.get('APPDATA', user_dir) # Use %APPDATA% for roaming
default_storage_dir = os.path.join(user_dir, 'pip')
default_config_file = os.path.join(default_storage_dir, 'pip.ini')
default_log_file = os.path.join(default_storage_dir, 'pip.log')
else:
bin_py = os.path.join(sys.prefix, 'bin')
default_storage_dir = os.path.join(user_dir, '.pip')
default_config_file = os.path.join(default_storage_dir, 'pip.conf')
default_log_file = os.path.join(default_storage_dir, 'pip.log')
# Forcing to use /usr/local/bin for standard Mac OS X framework installs
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
bin_py = '/usr/local/bin'
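# Illustrative examples (not part of the original module) of how the defaults
# above typically resolve; the exact values depend on sys.prefix and platform:
#
#   Linux/macOS: bin_py ~ '<sys.prefix>/bin', default_config_file ~ '~/.pip/pip.conf'
#   Windows:     bin_py ~ '<sys.prefix>\\Scripts', default_config_file ~ '%APPDATA%\\pip\\pip.ini'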
| bsd-3-clause | -3,149,701,016,085,922,300 | -6,594,143,615,335,219,000 | 40.888889 | 78 | 0.666446 | false |
bgris/ODL_bgris | lib/python3.5/site-packages/qtconsole/mainwindow.py | 7 | 31388 | """The Qt MainWindow for the QtConsole
This is a tabbed pseudo-terminal of Jupyter sessions, with a menu bar for
common actions.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
import webbrowser
from threading import Thread
from qtconsole.qt import QtGui,QtCore
from qtconsole.usage import gui_reference
def background(f):
"""call a function in a simple thread, to prevent blocking"""
t = Thread(target=f)
t.start()
return t
class MainWindow(QtGui.QMainWindow):
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
def __init__(self, app,
confirm_exit=True,
new_frontend_factory=None, slave_frontend_factory=None,
):
""" Create a tabbed MainWindow for managing FrontendWidgets
Parameters
----------
app : reference to QApplication parent
confirm_exit : bool, optional
Whether we should prompt on close of tabs
new_frontend_factory : callable
A callable that returns a new JupyterWidget instance, attached to
its own running kernel.
slave_frontend_factory : callable
A callable that takes an existing JupyterWidget, and returns a new
JupyterWidget instance, attached to the same kernel.
"""
super(MainWindow, self).__init__()
self._kernel_counter = 0
self._app = app
self.confirm_exit = confirm_exit
self.new_frontend_factory = new_frontend_factory
self.slave_frontend_factory = slave_frontend_factory
self.tab_widget = QtGui.QTabWidget(self)
self.tab_widget.setDocumentMode(True)
self.tab_widget.setTabsClosable(True)
self.tab_widget.tabCloseRequested[int].connect(self.close_tab)
self.setCentralWidget(self.tab_widget)
# hide tab bar at first, since we have no tabs:
self.tab_widget.tabBar().setVisible(False)
# prevent focus in tab bar
self.tab_widget.setFocusPolicy(QtCore.Qt.NoFocus)
def update_tab_bar_visibility(self):
""" update visibility of the tabBar depending of the number of tab
0 or 1 tab, tabBar hidden
2+ tabs, tabBar visible
send a self.close if number of tab ==0
need to be called explicitly, or be connected to tabInserted/tabRemoved
"""
if self.tab_widget.count() <= 1:
self.tab_widget.tabBar().setVisible(False)
else:
self.tab_widget.tabBar().setVisible(True)
if self.tab_widget.count()==0 :
self.close()
@property
def next_kernel_id(self):
"""constantly increasing counter for kernel IDs"""
c = self._kernel_counter
self._kernel_counter += 1
return c
@property
def active_frontend(self):
return self.tab_widget.currentWidget()
def create_tab_with_new_frontend(self):
"""create a new frontend and attach it to a new tab"""
widget = self.new_frontend_factory()
self.add_tab_with_frontend(widget)
def create_tab_with_current_kernel(self):
"""create a new frontend attached to the same kernel as the current tab"""
current_widget = self.tab_widget.currentWidget()
current_widget_index = self.tab_widget.indexOf(current_widget)
current_widget_name = self.tab_widget.tabText(current_widget_index)
widget = self.slave_frontend_factory(current_widget)
if 'slave' in current_widget_name:
# don't keep stacking slaves
name = current_widget_name
else:
name = '(%s) slave' % current_widget_name
self.add_tab_with_frontend(widget,name=name)
def close_tab(self,current_tab):
""" Called when you need to try to close a tab.
It takes the number of the tab to be closed as argument, or a reference
to the widget inside this tab
"""
# let's be sure "tab" and "closing widget" are respectively the index
# of the tab to close and a reference to the frontend to close
if type(current_tab) is not int :
current_tab = self.tab_widget.indexOf(current_tab)
closing_widget=self.tab_widget.widget(current_tab)
        # when trying to be closed, the widget might re-send a request to be
        # closed again, but it will be deleted when the event is processed. So
        # we need to check that the widget still exists and skip if not. One
        # example of this is when 'exit' is sent in a slave tab. 'exit' will be
        # re-sent by this function on the master widget, which asks all slave
        # widgets to exit
if closing_widget is None:
return
#get a list of all slave widgets on the same kernel.
slave_tabs = self.find_slave_widgets(closing_widget)
keepkernel = None #Use the prompt by default
if hasattr(closing_widget,'_keep_kernel_on_exit'): #set by exit magic
keepkernel = closing_widget._keep_kernel_on_exit
        # If the signal was sent by the exit magic (_keep_kernel_on_exit exists and is not None)
        # we set the local slave tabs' _hidden to True to avoid prompting for kernel
        # restart when they get the signal, and then "forward" the 'exit'
# to the main window
if keepkernel is not None:
for tab in slave_tabs:
tab._hidden = True
if closing_widget in slave_tabs:
try :
self.find_master_tab(closing_widget).execute('exit')
except AttributeError:
self.log.info("Master already closed or not local, closing only current tab")
self.tab_widget.removeTab(current_tab)
self.update_tab_bar_visibility()
return
kernel_client = closing_widget.kernel_client
kernel_manager = closing_widget.kernel_manager
if keepkernel is None and not closing_widget._confirm_exit:
# don't prompt, just terminate the kernel if we own it
# or leave it alone if we don't
keepkernel = closing_widget._existing
if keepkernel is None: #show prompt
if kernel_client and kernel_client.channels_running:
title = self.window().windowTitle()
cancel = QtGui.QMessageBox.Cancel
okay = QtGui.QMessageBox.Ok
if closing_widget._may_close:
msg = "You are closing the tab : "+'"'+self.tab_widget.tabText(current_tab)+'"'
info = "Would you like to quit the Kernel and close all attached Consoles as well?"
justthis = QtGui.QPushButton("&No, just this Tab", self)
justthis.setShortcut('N')
closeall = QtGui.QPushButton("&Yes, close all", self)
closeall.setShortcut('Y')
# allow ctrl-d ctrl-d exit, like in terminal
closeall.setShortcut('Ctrl+D')
box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
title, msg)
box.setInformativeText(info)
box.addButton(cancel)
box.addButton(justthis, QtGui.QMessageBox.NoRole)
box.addButton(closeall, QtGui.QMessageBox.YesRole)
box.setDefaultButton(closeall)
box.setEscapeButton(cancel)
pixmap = QtGui.QPixmap(self._app.icon.pixmap(QtCore.QSize(64,64)))
box.setIconPixmap(pixmap)
reply = box.exec_()
if reply == 1: # close All
for slave in slave_tabs:
background(slave.kernel_client.stop_channels)
self.tab_widget.removeTab(self.tab_widget.indexOf(slave))
kernel_manager.shutdown_kernel()
self.tab_widget.removeTab(current_tab)
background(kernel_client.stop_channels)
elif reply == 0: # close Console
if not closing_widget._existing:
# Have kernel: don't quit, just close the tab
closing_widget.execute("exit True")
self.tab_widget.removeTab(current_tab)
background(kernel_client.stop_channels)
else:
reply = QtGui.QMessageBox.question(self, title,
"Are you sure you want to close this Console?"+
"\nThe Kernel and other Consoles will remain active.",
okay|cancel,
defaultButton=okay
)
if reply == okay:
self.tab_widget.removeTab(current_tab)
elif keepkernel: #close console but leave kernel running (no prompt)
self.tab_widget.removeTab(current_tab)
background(kernel_client.stop_channels)
else: #close console and kernel (no prompt)
self.tab_widget.removeTab(current_tab)
if kernel_client and kernel_client.channels_running:
for slave in slave_tabs:
background(slave.kernel_client.stop_channels)
self.tab_widget.removeTab(self.tab_widget.indexOf(slave))
if kernel_manager:
kernel_manager.shutdown_kernel()
background(kernel_client.stop_channels)
self.update_tab_bar_visibility()
def add_tab_with_frontend(self,frontend,name=None):
""" insert a tab with a given frontend in the tab bar, and give it a name
"""
if not name:
name = 'kernel %i' % self.next_kernel_id
self.tab_widget.addTab(frontend,name)
self.update_tab_bar_visibility()
self.make_frontend_visible(frontend)
frontend.exit_requested.connect(self.close_tab)
def next_tab(self):
self.tab_widget.setCurrentIndex((self.tab_widget.currentIndex()+1))
def prev_tab(self):
self.tab_widget.setCurrentIndex((self.tab_widget.currentIndex()-1))
def make_frontend_visible(self,frontend):
widget_index=self.tab_widget.indexOf(frontend)
if widget_index > 0 :
self.tab_widget.setCurrentIndex(widget_index)
def find_master_tab(self,tab,as_list=False):
"""
Try to return the frontend that owns the kernel attached to the given widget/tab.
        Only finds frontends owned by the current application. Selection
        based on the port of the kernel might be inaccurate if several kernels
        on different IPs use the same port number.
This function does the conversion tabNumber/widget if needed.
Might return None if no master widget (non local kernel)
Will crash if more than 1 masterWidget
        When as_list is set to True, always return a list of widget(s) owning
        the kernel. The list might be empty or contain several widgets.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_client
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
        # widgets that are candidates to be the owner of the kernel all share the connection file of the current widget
        # and should have a _may_close attribute
filtered_widget_list = [ widget for widget in widget_list if
widget.kernel_client.connection_file == km.connection_file and
hasattr(widget,'_may_close') ]
# the master widget is the one that may close the kernel
master_widget= [ widget for widget in filtered_widget_list if widget._may_close]
if as_list:
return master_widget
assert(len(master_widget)<=1 )
if len(master_widget)==0:
return None
return master_widget[0]
def find_slave_widgets(self,tab):
"""return all the frontends that do not own the kernel attached to the given widget/tab.
Only find frontends owned by the current application. Selection
based on connection file of the kernel.
This function does the conversion tabNumber/widget if needed.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_client
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
        # widgets that are candidates not to be the owner of the kernel share the connection file of the current widget
filtered_widget_list = ( widget for widget in widget_list if
widget.kernel_client.connection_file == km.connection_file)
        # Get a list of all widgets owning the same kernel and remove them from
        # the previous candidates. (better using sets ?)
master_widget_list = self.find_master_tab(tab, as_list=True)
slave_list = [widget for widget in filtered_widget_list if widget not in master_widget_list]
return slave_list
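    # Illustrative note (not part of the original source): with three tabs
    # sharing one connection file, where only the first tab's widget owns the
    # kernel (_may_close == True), find_master_tab() returns that widget for
    # any of the three tabs, and find_slave_widgets() on it returns the other
    # two.  The tab layout described here is hypothetical.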
# Populate the menu bar with common actions and shortcuts
def add_menu_action(self, menu, action, defer_shortcut=False):
"""Add action to menu as well as self
So that when the menu bar is invisible, its actions are still available.
If defer_shortcut is True, set the shortcut context to widget-only,
where it will avoid conflict with shortcuts already bound to the
widgets themselves.
"""
menu.addAction(action)
self.addAction(action)
if defer_shortcut:
action.setShortcutContext(QtCore.Qt.WidgetShortcut)
def init_menu_bar(self):
#create menu in the order they should appear in the menu bar
self.init_file_menu()
self.init_edit_menu()
self.init_view_menu()
self.init_kernel_menu()
self.init_window_menu()
self.init_help_menu()
def init_file_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
self.new_kernel_tab_act = QtGui.QAction("New Tab with &New kernel",
self,
shortcut="Ctrl+T",
triggered=self.create_tab_with_new_frontend)
self.add_menu_action(self.file_menu, self.new_kernel_tab_act)
self.slave_kernel_tab_act = QtGui.QAction("New Tab with Sa&me kernel",
self,
shortcut="Ctrl+Shift+T",
triggered=self.create_tab_with_current_kernel)
self.add_menu_action(self.file_menu, self.slave_kernel_tab_act)
self.file_menu.addSeparator()
self.close_action=QtGui.QAction("&Close Tab",
self,
shortcut=QtGui.QKeySequence.Close,
triggered=self.close_active_frontend
)
self.add_menu_action(self.file_menu, self.close_action)
self.export_action=QtGui.QAction("&Save to HTML/XHTML",
self,
shortcut=QtGui.QKeySequence.Save,
triggered=self.export_action_active_frontend
)
self.add_menu_action(self.file_menu, self.export_action, True)
self.file_menu.addSeparator()
printkey = QtGui.QKeySequence(QtGui.QKeySequence.Print)
if printkey.matches("Ctrl+P") and sys.platform != 'darwin':
# Only override the default if there is a collision.
# Qt ctrl = cmd on OSX, so the match gets a false positive on OSX.
printkey = "Ctrl+Shift+P"
self.print_action = QtGui.QAction("&Print",
self,
shortcut=printkey,
triggered=self.print_action_active_frontend)
self.add_menu_action(self.file_menu, self.print_action, True)
if sys.platform != 'darwin':
# OSX always has Quit in the Application menu, only add it
# to the File menu elsewhere.
self.file_menu.addSeparator()
self.quit_action = QtGui.QAction("&Quit",
self,
shortcut=QtGui.QKeySequence.Quit,
triggered=self.close,
)
self.add_menu_action(self.file_menu, self.quit_action)
def init_edit_menu(self):
self.edit_menu = self.menuBar().addMenu("&Edit")
self.undo_action = QtGui.QAction("&Undo",
self,
shortcut=QtGui.QKeySequence.Undo,
statusTip="Undo last action if possible",
triggered=self.undo_active_frontend
)
self.add_menu_action(self.edit_menu, self.undo_action)
self.redo_action = QtGui.QAction("&Redo",
self,
shortcut=QtGui.QKeySequence.Redo,
statusTip="Redo last action if possible",
triggered=self.redo_active_frontend)
self.add_menu_action(self.edit_menu, self.redo_action)
self.edit_menu.addSeparator()
self.cut_action = QtGui.QAction("&Cut",
self,
shortcut=QtGui.QKeySequence.Cut,
triggered=self.cut_active_frontend
)
self.add_menu_action(self.edit_menu, self.cut_action, True)
self.copy_action = QtGui.QAction("&Copy",
self,
shortcut=QtGui.QKeySequence.Copy,
triggered=self.copy_active_frontend
)
self.add_menu_action(self.edit_menu, self.copy_action, True)
self.copy_raw_action = QtGui.QAction("Copy (&Raw Text)",
self,
shortcut="Ctrl+Shift+C",
triggered=self.copy_raw_active_frontend
)
self.add_menu_action(self.edit_menu, self.copy_raw_action, True)
self.paste_action = QtGui.QAction("&Paste",
self,
shortcut=QtGui.QKeySequence.Paste,
triggered=self.paste_active_frontend
)
self.add_menu_action(self.edit_menu, self.paste_action, True)
self.edit_menu.addSeparator()
selectall = QtGui.QKeySequence(QtGui.QKeySequence.SelectAll)
if selectall.matches("Ctrl+A") and sys.platform != 'darwin':
# Only override the default if there is a collision.
# Qt ctrl = cmd on OSX, so the match gets a false positive on OSX.
selectall = "Ctrl+Shift+A"
self.select_all_action = QtGui.QAction("Select &All",
self,
shortcut=selectall,
triggered=self.select_all_active_frontend
)
self.add_menu_action(self.edit_menu, self.select_all_action, True)
def init_view_menu(self):
self.view_menu = self.menuBar().addMenu("&View")
if sys.platform != 'darwin':
# disable on OSX, where there is always a menu bar
self.toggle_menu_bar_act = QtGui.QAction("Toggle &Menu Bar",
self,
shortcut="Ctrl+Shift+M",
statusTip="Toggle visibility of menubar",
triggered=self.toggle_menu_bar)
self.add_menu_action(self.view_menu, self.toggle_menu_bar_act)
fs_key = "Ctrl+Meta+F" if sys.platform == 'darwin' else "F11"
self.full_screen_act = QtGui.QAction("&Full Screen",
self,
shortcut=fs_key,
statusTip="Toggle between Fullscreen and Normal Size",
triggered=self.toggleFullScreen)
self.add_menu_action(self.view_menu, self.full_screen_act)
self.view_menu.addSeparator()
self.increase_font_size = QtGui.QAction("Zoom &In",
self,
shortcut=QtGui.QKeySequence.ZoomIn,
triggered=self.increase_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.increase_font_size, True)
self.decrease_font_size = QtGui.QAction("Zoom &Out",
self,
shortcut=QtGui.QKeySequence.ZoomOut,
triggered=self.decrease_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.decrease_font_size, True)
self.reset_font_size = QtGui.QAction("Zoom &Reset",
self,
shortcut="Ctrl+0",
triggered=self.reset_font_size_active_frontend
)
self.add_menu_action(self.view_menu, self.reset_font_size, True)
self.view_menu.addSeparator()
self.clear_action = QtGui.QAction("&Clear Screen",
self,
shortcut='Ctrl+L',
statusTip="Clear the console",
triggered=self.clear_active_frontend)
self.add_menu_action(self.view_menu, self.clear_action)
self.pager_menu = self.view_menu.addMenu("&Pager")
hsplit_action = QtGui.QAction(".. &Horizontal Split",
self,
triggered=lambda: self.set_paging_active_frontend('hsplit'))
vsplit_action = QtGui.QAction(" : &Vertical Split",
self,
triggered=lambda: self.set_paging_active_frontend('vsplit'))
inside_action = QtGui.QAction(" &Inside Pager",
self,
triggered=lambda: self.set_paging_active_frontend('inside'))
self.pager_menu.addAction(hsplit_action)
self.pager_menu.addAction(vsplit_action)
self.pager_menu.addAction(inside_action)
def init_kernel_menu(self):
self.kernel_menu = self.menuBar().addMenu("&Kernel")
# Qt on OSX maps Ctrl to Cmd, and Meta to Ctrl
# keep the signal shortcuts to ctrl, rather than
# platform-default like we do elsewhere.
ctrl = "Meta" if sys.platform == 'darwin' else "Ctrl"
self.interrupt_kernel_action = QtGui.QAction("&Interrupt current Kernel",
self,
triggered=self.interrupt_kernel_active_frontend,
shortcut=ctrl+"+C",
)
self.add_menu_action(self.kernel_menu, self.interrupt_kernel_action)
self.restart_kernel_action = QtGui.QAction("&Restart current Kernel",
self,
triggered=self.restart_kernel_active_frontend,
shortcut=ctrl+"+.",
)
self.add_menu_action(self.kernel_menu, self.restart_kernel_action)
self.kernel_menu.addSeparator()
self.confirm_restart_kernel_action = QtGui.QAction("&Confirm kernel restart",
self,
checkable=True,
checked=self.active_frontend.confirm_restart,
triggered=self.toggle_confirm_restart_active_frontend
)
self.add_menu_action(self.kernel_menu, self.confirm_restart_kernel_action)
self.tab_widget.currentChanged.connect(self.update_restart_checkbox)
def init_window_menu(self):
self.window_menu = self.menuBar().addMenu("&Window")
if sys.platform == 'darwin':
# add min/maximize actions to OSX, which lacks default bindings.
self.minimizeAct = QtGui.QAction("Mini&mize",
self,
shortcut="Ctrl+m",
statusTip="Minimize the window/Restore Normal Size",
triggered=self.toggleMinimized)
# maximize is called 'Zoom' on OSX for some reason
self.maximizeAct = QtGui.QAction("&Zoom",
self,
shortcut="Ctrl+Shift+M",
statusTip="Maximize the window/Restore Normal Size",
triggered=self.toggleMaximized)
self.add_menu_action(self.window_menu, self.minimizeAct)
self.add_menu_action(self.window_menu, self.maximizeAct)
self.window_menu.addSeparator()
prev_key = "Ctrl+Shift+Left" if sys.platform == 'darwin' else "Ctrl+PgUp"
self.prev_tab_act = QtGui.QAction("Pre&vious Tab",
self,
shortcut=prev_key,
statusTip="Select previous tab",
triggered=self.prev_tab)
self.add_menu_action(self.window_menu, self.prev_tab_act)
next_key = "Ctrl+Shift+Right" if sys.platform == 'darwin' else "Ctrl+PgDown"
self.next_tab_act = QtGui.QAction("Ne&xt Tab",
self,
shortcut=next_key,
statusTip="Select next tab",
triggered=self.next_tab)
self.add_menu_action(self.window_menu, self.next_tab_act)
def init_help_menu(self):
        # Please keep the Help menu in Mac OS even if empty. It will
        # automatically contain a search field to search inside menus.
        # Please keep it spelled in English, as long as Qt doesn't support
        # a QAction.MenuRole like HelpMenuRole; otherwise it will lose
        # this search field functionality.
self.help_menu = self.menuBar().addMenu("&Help")
# Help Menu
self.help_action = QtGui.QAction("Show &QtConsole help", self,
triggered=self._show_help)
self.online_help_action = QtGui.QAction("Open online &help", self,
triggered=self._open_online_help)
self.add_menu_action(self.help_menu, self.help_action)
self.add_menu_action(self.help_menu, self.online_help_action)
def _set_active_frontend_focus(self):
# this is a hack, self.active_frontend._control seems to be
# a private member. Unfortunately this is the only method
# to set focus reliably
QtCore.QTimer.singleShot(200, self.active_frontend._control.setFocus)
# minimize/maximize/fullscreen actions:
def toggle_menu_bar(self):
menu_bar = self.menuBar()
if menu_bar.isVisible():
menu_bar.setVisible(False)
else:
menu_bar.setVisible(True)
def toggleMinimized(self):
if not self.isMinimized():
self.showMinimized()
else:
self.showNormal()
def _show_help(self):
self.active_frontend._page(gui_reference)
def _open_online_help(self):
filename="http://ipython.org/ipython-doc/stable/index.html"
webbrowser.open(filename, new=1, autoraise=True)
def toggleMaximized(self):
if not self.isMaximized():
self.showMaximized()
else:
self.showNormal()
    # Minimizing/maximizing while in full screen gives a bug
    # when going out of full screen, at least on OSX
def toggleFullScreen(self):
if not self.isFullScreen():
self.showFullScreen()
if sys.platform == 'darwin':
self.maximizeAct.setEnabled(False)
self.minimizeAct.setEnabled(False)
else:
self.showNormal()
if sys.platform == 'darwin':
self.maximizeAct.setEnabled(True)
self.minimizeAct.setEnabled(True)
def set_paging_active_frontend(self, paging):
self.active_frontend._set_paging(paging)
def close_active_frontend(self):
self.close_tab(self.active_frontend)
def restart_kernel_active_frontend(self):
self.active_frontend.request_restart_kernel()
def interrupt_kernel_active_frontend(self):
self.active_frontend.request_interrupt_kernel()
def toggle_confirm_restart_active_frontend(self):
widget = self.active_frontend
widget.confirm_restart = not widget.confirm_restart
self.confirm_restart_kernel_action.setChecked(widget.confirm_restart)
def update_restart_checkbox(self):
if self.active_frontend is None:
return
widget = self.active_frontend
self.confirm_restart_kernel_action.setChecked(widget.confirm_restart)
def clear_active_frontend(self):
self.active_frontend.clear()
def cut_active_frontend(self):
widget = self.active_frontend
if widget.can_cut():
widget.cut()
def copy_active_frontend(self):
widget = self.active_frontend
widget.copy()
def copy_raw_active_frontend(self):
self.active_frontend._copy_raw_action.trigger()
def paste_active_frontend(self):
widget = self.active_frontend
if widget.can_paste():
widget.paste()
def undo_active_frontend(self):
self.active_frontend.undo()
def redo_active_frontend(self):
self.active_frontend.redo()
def print_action_active_frontend(self):
self.active_frontend.print_action.trigger()
def export_action_active_frontend(self):
self.active_frontend.export_action.trigger()
def select_all_active_frontend(self):
self.active_frontend.select_all_action.trigger()
def increase_font_size_active_frontend(self):
self.active_frontend.increase_font_size.trigger()
def decrease_font_size_active_frontend(self):
self.active_frontend.decrease_font_size.trigger()
def reset_font_size_active_frontend(self):
self.active_frontend.reset_font_size.trigger()
#---------------------------------------------------------------------------
# QWidget interface
#---------------------------------------------------------------------------
def closeEvent(self, event):
""" Forward the close event to every tabs contained by the windows
"""
if self.tab_widget.count() == 0:
# no tabs, just close
event.accept()
return
        # Do Not loop on the widget count as it changes while closing
title = self.window().windowTitle()
cancel = QtGui.QMessageBox.Cancel
okay = QtGui.QMessageBox.Ok
accept_role = QtGui.QMessageBox.AcceptRole
if self.confirm_exit:
if self.tab_widget.count() > 1:
msg = "Close all tabs, stop all kernels, and Quit?"
else:
msg = "Close console, stop kernel, and Quit?"
info = "Kernels not started here (e.g. notebooks) will be left alone."
closeall = QtGui.QPushButton("&Quit", self)
closeall.setShortcut('Q')
box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
title, msg)
box.setInformativeText(info)
box.addButton(cancel)
box.addButton(closeall, QtGui.QMessageBox.YesRole)
box.setDefaultButton(closeall)
box.setEscapeButton(cancel)
pixmap = QtGui.QPixmap(self._app.icon.pixmap(QtCore.QSize(64,64)))
box.setIconPixmap(pixmap)
reply = box.exec_()
else:
reply = okay
if reply == cancel:
event.ignore()
return
if reply == okay or reply == accept_role:
while self.tab_widget.count() >= 1:
# prevent further confirmations:
widget = self.active_frontend
widget._confirm_exit = False
self.close_tab(widget)
event.accept()
| gpl-3.0 | -4,229,790,121,018,733,000 | -4,680,110,330,256,560,000 | 39.189501 | 118 | 0.592551 | false |
CSC-ORG/Dynamic-Dashboard-2015 | engine/lib/python2.7/site-packages/django/contrib/contenttypes/tests/tests.py | 12 | 10973 | from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.views import shortcut
from django.contrib.sites.shortcuts import get_current_site
from django.core.management import call_command
from django.http import HttpRequest, Http404
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.test.utils import override_system_checks
from django.utils import six
from .models import ConcreteModel, ProxyModel, FooWithoutUrl, FooWithUrl, FooWithBrokenAbsoluteUrl
class ContentTypesTests(TestCase):
def setUp(self):
ContentType.objects.clear_cache()
def tearDown(self):
ContentType.objects.clear_cache()
def test_lookup_cache(self):
"""
Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model, ID
or natural key -- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# A second hit, though, won't hit the DB, nor will a lookup by ID
# or natural key
with self.assertNumQueries(0):
ct = ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(0):
ContentType.objects.get_for_id(ct.id)
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# The same should happen with a lookup by natural key
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
# And a second hit shouldn't hit the DB
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
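    # Illustrative sketch (not part of the original tests): the usage pattern
    # the cache above makes cheap.  ``Article`` is a hypothetical model.
    #
    #   ct = ContentType.objects.get_for_model(Article)  # first call: one DB query
    #   ct = ContentType.objects.get_for_model(Article)  # second call: served from cache
    #   ContentType.objects.clear_cache()                # next call hits the DB again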
def test_get_for_models_empty_cache(self):
# Empty cache.
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_partial_cache(self):
# Partial cache
ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_full_cache(self):
# Full cache
ContentType.objects.get_for_model(ContentType)
ContentType.objects.get_for_model(FooWithUrl)
with self.assertNumQueries(0):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_concrete_model(self):
"""
Make sure the `for_concrete_model` kwarg correctly works
with concrete, proxy and deferred models
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(ProxyModel))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(ConcreteModel,
for_concrete_model=False))
proxy_model_ct = ContentType.objects.get_for_model(ProxyModel,
for_concrete_model=False)
self.assertNotEqual(concrete_model_ct, proxy_model_ct)
        # Make sure deferred models are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredConcreteModel))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredConcreteModel,
for_concrete_model=False))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredProxyModel))
self.assertEqual(proxy_model_ct,
ContentType.objects.get_for_model(DeferredProxyModel,
for_concrete_model=False))
def test_get_for_concrete_models(self):
"""
Make sure the `for_concrete_models` kwarg correctly works
with concrete, proxy and deferred models.
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel)
self.assertEqual(cts, {
ConcreteModel: concrete_model_ct,
ProxyModel: concrete_model_ct,
})
proxy_model_ct = ContentType.objects.get_for_model(ProxyModel,
for_concrete_model=False)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel,
for_concrete_models=False)
self.assertEqual(cts, {
ConcreteModel: concrete_model_ct,
ProxyModel: proxy_model_ct,
})
        # Make sure deferred models are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
cts = ContentType.objects.get_for_models(DeferredConcreteModel,
DeferredProxyModel)
self.assertEqual(cts, {
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: concrete_model_ct,
})
cts = ContentType.objects.get_for_models(DeferredConcreteModel,
DeferredProxyModel,
for_concrete_models=False)
self.assertEqual(cts, {
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: proxy_model_ct,
})
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_shortcut_view(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns a complete URL regardless of whether the sites
framework is installed
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithUrl)
obj = FooWithUrl.objects.create(name="john")
with self.modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'}):
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://%s/users/john/" % get_current_site(request).domain,
response._headers.get("location")[1])
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/",
response._headers.get("location")[1])
def test_shortcut_view_without_get_absolute_url(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns 404 when get_absolute_url is not defined.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithoutUrl)
obj = FooWithoutUrl.objects.create(name="john")
self.assertRaises(Http404, shortcut, request, user_ct.id, obj.id)
def test_shortcut_view_with_broken_get_absolute_url(self):
"""
Check that the shortcut view does not catch an AttributeError raised
by the model's get_absolute_url method.
Refs #8997.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithBrokenAbsoluteUrl)
obj = FooWithBrokenAbsoluteUrl.objects.create(name="john")
self.assertRaises(AttributeError, shortcut, request, user_ct.id, obj.id)
def test_missing_model(self):
"""
Ensures that displaying content types in admin (or anywhere) doesn't
break on leftover content type records in the DB for which no model
is defined anymore.
"""
ct = ContentType.objects.create(
name='Old model',
app_label='contenttypes',
model='OldModel',
)
self.assertEqual(six.text_type(ct), 'Old model')
self.assertIsNone(ct.model_class())
# Make sure stale ContentTypes can be fetched like any other object.
# Before Django 1.6 this caused a NoneType error in the caching mechanism.
# Instead, just return the ContentType object and let the app detect stale states.
ct_fetched = ContentType.objects.get_for_id(ct.pk)
self.assertIsNone(ct_fetched.model_class())
class MigrateTests(TestCase):
@skipUnlessDBFeature('can_rollback_ddl')
@override_system_checks([])
def test_unmigrating_first_migration_post_migrate_signal(self):
"""
#24075 - When unmigrating an app before its first migration,
post_migrate signal handler must be aware of the missing tables.
"""
try:
with override_settings(
INSTALLED_APPS=["django.contrib.contenttypes"],
MIGRATION_MODULES={'contenttypes': 'django.contrib.contenttypes.migrations'},
):
call_command("migrate", "contenttypes", "zero", verbosity=0)
finally:
call_command("migrate", verbosity=0)
| mit | 5,235,191,377,694,328,000 | -7,212,269,128,712,130,000 | 40.564394 | 98 | 0.615875 | false |
koichi626/hadoop-gpu | hadoop-gpu-0.20.1/build/contrib/hod/testing/testModule.py | 182 | 2187 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, os, sys, re, threading, time
myDirectory = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myDirectory)
sys.path.append(rootDirectory)
from testing.lib import BaseTestSuite
excludes = ['test_MINITEST3']
# All test-case classes should have the naming convention test_.*
class test_MINITEST1(unittest.TestCase):
def setUp(self):
pass
# All testMethods have to have their names start with 'test'
def testSuccess(self):
pass
def testFailure(self):
pass
def tearDown(self):
pass
class test_MINITEST2(unittest.TestCase):
def setUp(self):
pass
# All testMethods have to have their names start with 'test'
def testSuccess(self):
pass
def testFailure(self):
pass
def tearDown(self):
pass
class test_MINITEST3(unittest.TestCase):
def setUp(self):
pass
# All testMethods have to have their names start with 'test'
def testSuccess(self):
pass
def testFailure(self):
pass
def tearDown(self):
pass
class ModuleTestSuite(BaseTestSuite):
def __init__(self):
# suite setup
BaseTestSuite.__init__(self, __name__, excludes)
pass
def cleanUp(self):
# suite tearDown
pass
def RunModuleTests():
# modulename_suite
suite = ModuleTestSuite()
testResult = suite.runTests()
suite.cleanUp()
return testResult
if __name__ == "__main__":
RunModuleTests()
| apache-2.0 | -1,651,924,329,709,446,000 | 9,093,919,238,872,610,000 | 23.852273 | 73 | 0.717878 | false |
sonnyhu/numpy | numpy/lib/info.py | 61 | 6353 | """
Basic functions used by several sub-packages and
useful to have in the main name-space.
Type Handling
-------------
================ ===================
iscomplexobj Test for complex object, scalar result
isrealobj Test for real object, scalar result
iscomplex Test for complex elements, array result
isreal Test for real elements, array result
imag Imaginary part
real Real part
real_if_close Turns complex number with tiny imaginary part to real
isneginf Tests for negative infinity, array result
isposinf Tests for positive infinity, array result
isnan Tests for nans, array result
isinf Tests for infinity, array result
isfinite Tests for finite numbers, array result
isscalar True if argument is a scalar
nan_to_num Replaces NaN's with 0 and infinities with large numbers
cast Dictionary of functions to force cast to each type
common_type Determine the minimum common type code for a group
of arrays
mintypecode Return minimal allowed common typecode.
================ ===================
Index Tricks
------------
================ ===================
mgrid Method which allows easy construction of N-d
'mesh-grids'
``r_`` Append and construct arrays: turns slice objects into
ranges and concatenates them, for 2d arrays appends rows.
index_exp Konrad Hinsen's index_expression class instance which
can be useful for building complicated slicing syntax.
================ ===================
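Example (added for illustration; output shown for an integer array, which
prints the same across NumPy versions):
  >>> import numpy as np
  >>> np.r_[0:3, 10, [20, 21]]
  array([ 0,  1,  2, 10, 20, 21])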
Useful Functions
----------------
================ ===================
select Extension of where to multiple conditions and choices
extract Extract 1d array from flattened array according to mask
insert Insert 1d array of values into Nd array according to mask
linspace Evenly spaced samples in linear space
logspace Evenly spaced samples in logarithmic space
fix Round x to nearest integer towards zero
mod Modulo mod(x,y) = x % y except keeps sign of y
amax Array maximum along axis
amin Array minimum along axis
ptp Array max-min along axis
cumsum Cumulative sum along axis
prod Product of elements along axis
cumprod          Cumulative product along axis
diff Discrete differences along axis
angle Returns angle of complex argument
unwrap Unwrap phase along given axis (1-d algorithm)
sort_complex Sort a complex-array (based on real, then imaginary)
trim_zeros Trim the leading and trailing zeros from 1D array.
vectorize A class that wraps a Python function taking scalar
arguments into a generalized function which can handle
arrays of arguments using the broadcast rules of
                  numeric Python.
================ ===================
Shape Manipulation
------------------
================ ===================
squeeze          Return the input with length-one dimensions removed.
atleast_1d Force arrays to be > 1D
atleast_2d Force arrays to be > 2D
atleast_3d Force arrays to be > 3D
vstack Stack arrays vertically (row on row)
hstack Stack arrays horizontally (column on column)
column_stack Stack 1D arrays as columns into 2D array
dstack Stack arrays depthwise (along third dimension)
stack Stack arrays along a new axis
split Divide array into a list of sub-arrays
hsplit Split into columns
vsplit Split into rows
dsplit Split along third dimension
================ ===================
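Example (added for illustration; integer output prints the same across
NumPy versions):
  >>> import numpy as np
  >>> np.atleast_2d(3).shape
  (1, 1)
  >>> np.vstack([[1, 2], [3, 4]])
  array([[1, 2],
         [3, 4]])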
Matrix (2D Array) Manipulations
-------------------------------
================ ===================
fliplr 2D array with columns flipped
flipud 2D array with rows flipped
rot90 Rotate a 2D array a multiple of 90 degrees
eye Return a 2D array with ones down a given diagonal
diag Construct a 2D array from a vector, or return a given
diagonal from a 2D array.
mat Construct a Matrix
bmat Build a Matrix from blocks
================ ===================
Polynomials
-----------
================ ===================
poly1d A one-dimensional polynomial class
poly Return polynomial coefficients from roots
roots Find roots of polynomial given coefficients
polyint Integrate polynomial
polyder Differentiate polynomial
polyadd Add polynomials
polysub          Subtract polynomials
polymul Multiply polynomials
polydiv Divide polynomials
polyval Evaluate polynomial at given argument
================ ===================
Import Tricks
-------------
================ ===================
ppimport Postpone module import until trying to use it
ppimport_attr Postpone module import until trying to use its attribute
ppresolve Import postponed module and return it.
================ ===================
Machine Arithmetics
-------------------
================ ===================
machar_single Single precision floating point arithmetic parameters
machar_double Double precision floating point arithmetic parameters
================ ===================
Threading Tricks
----------------
================ ===================
ParallelExec Execute commands in parallel thread.
================ ===================
1D Array Set Operations
-----------------------
Set operations for 1D numeric arrays based on sort() function.
================ ===================
ediff1d Array difference (auxiliary function).
unique Unique elements of an array.
intersect1d Intersection of 1D arrays with unique elements.
setxor1d Set exclusive-or of 1D arrays with unique elements.
in1d Test whether elements in a 1D array are also present in
another array.
union1d Union of 1D arrays with unique elements.
setdiff1d Set difference of 1D arrays with unique elements.
================ ===================
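Example (added for illustration; integer output prints the same across
NumPy versions):
  >>> import numpy as np
  >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
  array([1, 3])
  >>> np.union1d([-1, 0, 1], [-2, 0, 2])
  array([-2, -1,  0,  1,  2])
  >>> np.setdiff1d([1, 2, 3, 4], [3, 4, 5, 6])
  array([1, 2])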
"""
from __future__ import division, absolute_import, print_function
depends = ['core', 'testing']
global_symbols = ['*']
| bsd-3-clause | -72,330,796,601,429,970 | 4,860,785,237,831,287,000 | 40.796053 | 74 | 0.587439 | false |
fillycheezstake/MissionPlanner | ExtLibs/Mavlink/mavgen.py | 34 | 3007 | #!/usr/bin/env python
'''
parse a MAVLink protocol XML file and generate a python implementation
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
def mavgen(opts, args) :
"""Generate mavlink message formatters and parsers (C and Python ) using options
and args where args are a list of xml files. This function allows python
scripts under Windows to control mavgen using the same interface as
shell scripts under Unix"""
import sys, textwrap, os
import mavparse
import mavgen_python
import mavgen_c
import mavgen_csharp
xml = []
for fname in args:
print("Parsing %s" % fname)
xml.append(mavparse.MAVXML(fname, opts.wire_protocol))
# expand includes
for x in xml[:]:
for i in x.include:
fname = os.path.join(os.path.dirname(x.filename), i)
print("Parsing %s" % fname)
xml.append(mavparse.MAVXML(fname, opts.wire_protocol))
# include message lengths and CRCs too
for idx in range(0, 256):
if x.message_lengths[idx] == 0:
x.message_lengths[idx] = xml[-1].message_lengths[idx]
x.message_crcs[idx] = xml[-1].message_crcs[idx]
x.message_names[idx] = xml[-1].message_names[idx]
# work out max payload size across all includes
largest_payload = 0
for x in xml:
if x.largest_payload > largest_payload:
largest_payload = x.largest_payload
for x in xml:
x.largest_payload = largest_payload
if mavparse.check_duplicates(xml):
sys.exit(1)
print("Found %u MAVLink message types in %u XML files" % (
mavparse.total_msgs(xml), len(xml)))
if opts.language == 'python':
mavgen_python.generate(opts.output, xml)
elif opts.language == 'C':
mavgen_c.generate(opts.output, xml)
elif opts.language == 'csharp':
mavgen_csharp.generate(opts.output, xml)
else:
print("Unsupported language %s" % opts.language)
if __name__=="__main__":
import sys, textwrap, os
from optparse import OptionParser
# allow import from the parent directory, where mavutil.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import mavparse
import mavgen_python
import mavgen_c
parser = OptionParser("mavgen.py [options] <XML files>")
parser.add_option("-o", "--output", dest="output", default="mavlink", help="output base name")
parser.add_option("--lang", dest="language", default="python", help="language to generate")
parser.add_option("--wire-protocol", dest="wire_protocol", default=mavparse.PROTOCOL_0_9, help="wire protocol version")
(opts, args) = parser.parse_args()
if len(args) < 1:
parser.error("You must supply at least one MAVLink XML protocol definition")
mavgen(opts, args)
| gpl-3.0 | -489,112,228,600,236,000 | -5,704,498,024,162,618,000 | 33.376471 | 123 | 0.619554 | false |
zzicewind/nova | nova/tests/unit/objects/test_virt_cpu_topology.py | 94 | 1397 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import objects
from nova.tests.unit.objects import test_objects
_top_dict = {
'sockets': 2,
'cores': 4,
'threads': 8
}
class _TestVirtCPUTopologyObject(object):
def test_object_from_dict(self):
top_obj = objects.VirtCPUTopology.from_dict(_top_dict)
self.compare_obj(top_obj, _top_dict)
def test_object_to_dict(self):
top_obj = objects.VirtCPUTopology()
top_obj.sockets = 2
top_obj.cores = 4
top_obj.threads = 8
spec = top_obj.to_dict()
self.assertEqual(_top_dict, spec)
class TestVirtCPUTopologyObject(test_objects._LocalTest,
_TestVirtCPUTopologyObject):
pass
class TestRemoteVirtCPUTopologyObject(test_objects._RemoteTest,
_TestVirtCPUTopologyObject):
pass
| apache-2.0 | -2,882,342,805,870,533,600 | 645,407,905,576,104,100 | 29.369565 | 78 | 0.670007 | false |
ChristianKniep/QNIB | serverfiles/usr/local/lib/networkx-1.6/build/lib/networkx/algorithms/tests/test_product.py | 3 | 10011 | import networkx as nx
from networkx import tensor_product,cartesian_product,lexicographic_product,strong_product
from nose.tools import assert_raises, assert_true, assert_equal
def test_tensor_product_raises():
G = nx.DiGraph()
H = nx.Graph()
assert_raises(nx.NetworkXError,tensor_product,G,H)
def test_tensor_product_null():
null=nx.null_graph()
empty10=nx.empty_graph(10)
K3=nx.complete_graph(3)
K10=nx.complete_graph(10)
P3=nx.path_graph(3)
P10=nx.path_graph(10)
# null graph
G=tensor_product(null,null)
assert_true(nx.is_isomorphic(G,null))
# null_graph X anything = null_graph and v.v.
G=tensor_product(null,empty10)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(null,K3)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(null,K10)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(null,P3)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(null,P10)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(empty10,null)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(K3,null)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(K10,null)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(P3,null)
assert_true(nx.is_isomorphic(G,null))
G=tensor_product(P10,null)
assert_true(nx.is_isomorphic(G,null))
def test_tensor_product_size():
P5 = nx.path_graph(5)
K3 = nx.complete_graph(3)
K5 = nx.complete_graph(5)
G=tensor_product(P5,K3)
assert_equal(nx.number_of_nodes(G),5*3)
G=tensor_product(K3,K5)
assert_equal(nx.number_of_nodes(G),3*5)
def test_tensor_product_classic_result():
K2 = nx.complete_graph(2)
G = nx.petersen_graph()
G = tensor_product(G,K2)
assert_true(nx.is_isomorphic(G,nx.desargues_graph()))
G = nx.cycle_graph(5)
G = tensor_product(G,K2)
assert_true(nx.is_isomorphic(G,nx.cycle_graph(10)))
G = nx.tetrahedral_graph()
G = tensor_product(G,K2)
assert_true(nx.is_isomorphic(G,nx.cubical_graph()))
def test_tensor_product_random():
G = nx.erdos_renyi_graph(10,2/10.)
H = nx.erdos_renyi_graph(10,2/10.)
GH = tensor_product(G,H)
for (u_G,u_H) in GH.nodes_iter():
for (v_G,v_H) in GH.nodes_iter():
if H.has_edge(u_H,v_H) and G.has_edge(u_G,v_G):
assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
else:
assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
def test_cartesian_product_multigraph():
G=nx.MultiGraph()
G.add_edge(1,2,key=0)
G.add_edge(1,2,key=1)
H=nx.MultiGraph()
H.add_edge(3,4,key=0)
H.add_edge(3,4,key=1)
GH=cartesian_product(G,H)
assert_equal( set(GH) , set([(1, 3), (2, 3), (2, 4), (1, 4)]))
assert_equal( set(GH.edges(keys=True)) ,
set([((1, 3), (2, 3), 0), ((1, 3), (2, 3), 1),
((1, 3), (1, 4), 0), ((1, 3), (1, 4), 1),
((2, 3), (2, 4), 0), ((2, 3), (2, 4), 1),
((2, 4), (1, 4), 0), ((2, 4), (1, 4), 1)]))
def test_cartesian_product_raises():
G = nx.DiGraph()
H = nx.Graph()
assert_raises(nx.NetworkXError,cartesian_product,G,H)
def test_cartesian_product_null():
null=nx.null_graph()
empty10=nx.empty_graph(10)
K3=nx.complete_graph(3)
K10=nx.complete_graph(10)
P3=nx.path_graph(3)
P10=nx.path_graph(10)
# null graph
G=cartesian_product(null,null)
assert_true(nx.is_isomorphic(G,null))
# null_graph X anything = null_graph and v.v.
G=cartesian_product(null,empty10)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(null,K3)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(null,K10)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(null,P3)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(null,P10)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(empty10,null)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(K3,null)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(K10,null)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(P3,null)
assert_true(nx.is_isomorphic(G,null))
G=cartesian_product(P10,null)
assert_true(nx.is_isomorphic(G,null))
def test_cartesian_product_size():
# order(GXH)=order(G)*order(H)
K5=nx.complete_graph(5)
P5=nx.path_graph(5)
K3=nx.complete_graph(3)
G=cartesian_product(P5,K3)
assert_equal(nx.number_of_nodes(G),5*3)
assert_equal(nx.number_of_edges(G),
nx.number_of_edges(P5)*nx.number_of_nodes(K3)+
nx.number_of_edges(K3)*nx.number_of_nodes(P5))
G=cartesian_product(K3,K5)
assert_equal(nx.number_of_nodes(G),3*5)
assert_equal(nx.number_of_edges(G),
nx.number_of_edges(K5)*nx.number_of_nodes(K3)+
nx.number_of_edges(K3)*nx.number_of_nodes(K5))
def test_cartesian_product_classic():
# test some classic product graphs
P2 = nx.path_graph(2)
P3 = nx.path_graph(3)
# cube = 2-path X 2-path
G=cartesian_product(P2,P2)
G=cartesian_product(P2,G)
assert_true(nx.is_isomorphic(G,nx.cubical_graph()))
# 3x3 grid
G=cartesian_product(P3,P3)
assert_true(nx.is_isomorphic(G,nx.grid_2d_graph(3,3)))
def test_cartesian_product_random():
G = nx.erdos_renyi_graph(10,2/10.)
H = nx.erdos_renyi_graph(10,2/10.)
GH = cartesian_product(G,H)
for (u_G,u_H) in GH.nodes_iter():
for (v_G,v_H) in GH.nodes_iter():
if (u_G==v_G and H.has_edge(u_H,v_H)) or \
(u_H==v_H and G.has_edge(u_G,v_G)):
assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
else:
assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
def test_lexicographic_product_raises():
G = nx.DiGraph()
H = nx.Graph()
assert_raises(nx.NetworkXError,lexicographic_product,G,H)
def test_lexicographic_product_null():
null=nx.null_graph()
empty10=nx.empty_graph(10)
K3=nx.complete_graph(3)
K10=nx.complete_graph(10)
P3=nx.path_graph(3)
P10=nx.path_graph(10)
# null graph
G=lexicographic_product(null,null)
assert_true(nx.is_isomorphic(G,null))
# null_graph X anything = null_graph and v.v.
G=lexicographic_product(null,empty10)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(null,K3)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(null,K10)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(null,P3)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(null,P10)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(empty10,null)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(K3,null)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(K10,null)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(P3,null)
assert_true(nx.is_isomorphic(G,null))
G=lexicographic_product(P10,null)
assert_true(nx.is_isomorphic(G,null))
def test_lexicographic_product_size():
K5=nx.complete_graph(5)
P5=nx.path_graph(5)
K3=nx.complete_graph(3)
G=lexicographic_product(P5,K3)
assert_equal(nx.number_of_nodes(G),5*3)
G=lexicographic_product(K3,K5)
assert_equal(nx.number_of_nodes(G),3*5)
# No easily found classic results for the lexicographic product
def test_lexicographic_product_random():
G = nx.erdos_renyi_graph(10,2/10.)
H = nx.erdos_renyi_graph(10,2/10.)
GH = lexicographic_product(G,H)
for (u_G,u_H) in GH.nodes_iter():
for (v_G,v_H) in GH.nodes_iter():
if G.has_edge(u_G,v_G) or (u_G==v_G and H.has_edge(u_H,v_H)):
assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
else:
assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
def test_strong_product_raises():
G = nx.DiGraph()
H = nx.Graph()
assert_raises(nx.NetworkXError,strong_product,G,H)
def test_strong_product_null():
null=nx.null_graph()
empty10=nx.empty_graph(10)
K3=nx.complete_graph(3)
K10=nx.complete_graph(10)
P3=nx.path_graph(3)
P10=nx.path_graph(10)
# null graph
G=strong_product(null,null)
assert_true(nx.is_isomorphic(G,null))
# null_graph X anything = null_graph and v.v.
G=strong_product(null,empty10)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(null,K3)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(null,K10)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(null,P3)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(null,P10)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(empty10,null)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(K3,null)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(K10,null)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(P3,null)
assert_true(nx.is_isomorphic(G,null))
G=strong_product(P10,null)
assert_true(nx.is_isomorphic(G,null))
def test_strong_product_size():
K5=nx.complete_graph(5)
P5=nx.path_graph(5)
K3 = nx.complete_graph(3)
G=strong_product(P5,K3)
assert_equal(nx.number_of_nodes(G),5*3)
G=strong_product(K3,K5)
assert_equal(nx.number_of_nodes(G),3*5)
# No easily found classic results for the strong product
def test_strong_product_random():
G = nx.erdos_renyi_graph(10,2/10.)
H = nx.erdos_renyi_graph(10,2/10.)
GH = strong_product(G,H)
for (u_G,u_H) in GH.nodes_iter():
for (v_G,v_H) in GH.nodes_iter():
if (u_G==v_G and H.has_edge(u_H,v_H)) or \
(u_H==v_H and G.has_edge(u_G,v_G)) or \
(G.has_edge(u_G,v_G) and H.has_edge(u_H,v_H)):
assert_true(GH.has_edge((u_G,u_H),(v_G,v_H)))
else:
assert_true(not GH.has_edge((u_G,u_H),(v_G,v_H)))
| gpl-2.0 | -848,277,887,036,620,200 | -542,081,406,554,513,340 | 33.402062 | 90 | 0.625212 | false |
dmacvicar/spacewalk | client/solaris/smartpm/smart/channels/rpm_md_info.py | 6 | 1125 | #
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart import _
kind = "package"
name = _("RPM MetaData")
description = _("""
Repository created with the rpm-metadata project.
""")
fields = [("baseurl", _("Base URL"), str, None,
_("URL where repodata/ subdirectory is found"))]
| gpl-2.0 | -437,067,595,862,892,600 | -8,054,824,722,882,986,000 | 32.088235 | 75 | 0.731556 | false |
hkemmel/tal | affichage.py | 1 | 2209 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 14:34:25 2017
@author: manfred.madelaine
"""
import time
def affStart():
msg1 = "*** Binvenue dans i-Opinion ou Opinion Way ***"
msg2 = "Le logiciel d'analyse et de classification des revues cinématographiques !"
listMsg = []
listMsg.append("")
listMsg.append(msg1)
listMsg.append("")
listMsg.append(msg2)
listMsg.append("")
print(affBox(listMsg, 1, 1, len(msg2)))
delai()
def affEnd():
msg1 = "*** Opinion Way vous remercie de votre viste, à bientôt ! ***"
msg = []
msg.append(msg1)
box = affBox(msg, 1, 1, len(msg1)-1)
print(box)
def affMessage(msg):
deb = "\n\t--- "
fin = " ---\n\n"
print(deb + msg + fin)
delai()
def delai():
time.sleep(0.8)
"""
Display a message inside a box
msg : message to display
x : horizontal offset
y : vertical offset
L : width of the box
"""
def affBox(msg, x, y, L):
box = ""
    # vertical offset
box += multChaine("\n", y)
indiceLine = 0
    # handle each line of the message
for txt in msg:
        # top border
if(indiceLine == 0):
            # horizontal offset
box += "\n" + multChaine("\t", x)
box += multChaine("-", L+3)
        # horizontal offset
box += "\n" + multChaine("\t", x)
esp = ""
mult = 1
        # message body
if(len(txt) < L ):
esp = " "
mult = (L - len(txt)) / 2
box += "| " + multChaine(esp, mult) + txt + multChaine(esp, mult) + " |"
        # bottom border
if(indiceLine == len(msg) - 1 ):
            # horizontal offset
box += "\n" + multChaine("\t", x)
box += multChaine("-", L+3)
indiceLine += 1
box+="\n"
return(box)
def affErr():
affMessage("Votre réponse est incorrecte !")
def multChaine(chaine, mult):
i = 0
msg = ""
while i < mult:
msg += chaine
i += 1
return msg
| gpl-3.0 | 3,184,239,128,160,332,000 | 4,442,744,288,621,361,700 | 19.342593 | 87 | 0.474954 | false |
LuizGsa21/p4-conference-central | models.py | 1 | 7226 | #!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = '[email protected] (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
import datetime
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty(default='')
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.KeyProperty(kind='Conference', repeated=True)
wishList = ndb.KeyProperty(kind='Session', repeated=True)
def toForm(self):
form = ProfileForm(
displayName=self.displayName,
mainEmail=self.mainEmail,
teeShirtSize=getattr(TeeShirtSize, self.teeShirtSize),
conferenceKeysToAttend=[key.urlsafe() for key in self.conferenceKeysToAttend]
)
form.check_initialized()
return form
def toMiniForm(self):
form = ProfileMiniForm(
displayName=self.displayName,
teeShirtSize=getattr(TeeShirtSize, self.teeShirtSize)
)
form.check_initialized()
return form
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
class Conference(ndb.Model):
"""Conference -- Conference object"""
required_fields_schema = ('name', 'organizerUserId', 'startDate', 'endDate')
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty(required=True)
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty(required=True)
month = ndb.IntegerProperty()
endDate = ndb.DateProperty(required=True)
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
@property
def sessions(self):
return Session.query(ancestor=self.key)
def toForm(self, display_name=''):
form = ConferenceForm(
websafeKey=self.key.urlsafe(),
name=self.name,
description=self.description,
organizerUserId=self.organizerUserId,
topics=self.topics,
city=self.city,
startDate=self.startDate.strftime('%Y-%m-%d'),
month=self.month,
endDate=self.endDate.strftime('%Y-%m-%d'),
maxAttendees=self.maxAttendees,
seatsAvailable=self.seatsAvailable,
organizerDisplayName=display_name
)
form.check_initialized()
return form
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) # DateTimeField()
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10) # DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
class Speaker(ndb.Model):
"""Speaker -- Speaker object"""
name = ndb.StringProperty(required=True)
class Session(ndb.Model):
"""Session -- Session object"""
required_fields_schema = ('name', 'speaker', 'duration', 'typeOfSession', 'date', 'startTime')
name = ndb.StringProperty(required=True)
highlights = ndb.StringProperty()
speaker = ndb.StructuredProperty(modelclass=Speaker, required=True)
duration = ndb.IntegerProperty(required=True)
typeOfSession = ndb.StringProperty(required=True)
date = ndb.DateProperty(required=True)
startTime = ndb.TimeProperty(required=True)
def toForm(self):
form = SessionForm(
websafeKey=self.key.urlsafe(),
name=self.name,
highlights=self.highlights,
speaker=self.speaker.name,
duration=self.duration,
typeOfSession=self.typeOfSession,
date=self.date.strftime('%Y-%m-%d'),
startTime=self.startTime.strftime('%H:%M')
)
form.check_initialized()
return form
class SessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
websafeKey = messages.StringField(1)
name = messages.StringField(2)
highlights = messages.StringField(3)
speaker = messages.StringField(4)
duration = messages.IntegerField(5)
typeOfSession = messages.StringField(6)
date = messages.StringField(7)
startTime = messages.StringField(8)
class SessionForms(messages.Message):
"""SessionForm -- multiple SessionForm outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class SessionQueryForm(messages.Message):
"""SessionQueryForm -- Session query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class SessionQueryForms(messages.Message):
"""SessionQueryForms -- multiple SessionQueryForm inbound form message"""
filters = messages.MessageField(SessionQueryForm, 1, repeated=True)
| apache-2.0 | 5,175,814,352,134,603,000 | -7,941,081,598,902,905,000 | 31.696833 | 98 | 0.687517 | false |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/axis.py | 4 | 85084 | """
Classes for the ticks and x and y axis
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import rcParams
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.cbook as cbook
import matplotlib.font_manager as font_manager
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.scale as mscale
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
import numpy as np
import warnings
GRIDLINE_INTERPOLATION_STEPS = 180
class Tick(artist.Artist):
"""
Abstract base class for the axis ticks, grid lines and labels
1 refers to the bottom of the plot for xticks and the left for yticks
2 refers to the top of the plot for xticks and the right for yticks
Publicly accessible attributes:
:attr:`tick1line`
a Line2D instance
:attr:`tick2line`
a Line2D instance
:attr:`gridline`
a Line2D instance
:attr:`label1`
a Text instance
:attr:`label2`
a Text instance
:attr:`gridOn`
        a boolean which determines whether to draw the grid line
:attr:`tick1On`
a boolean which determines whether to draw the 1st tickline
:attr:`tick2On`
a boolean which determines whether to draw the 2nd tickline
:attr:`label1On`
a boolean which determines whether to draw tick label
:attr:`label2On`
a boolean which determines whether to draw tick label
"""
def __init__(self, axes, loc, label,
size=None, # points
width=None,
color=None,
tickdir=None,
pad=None,
labelsize=None,
labelcolor=None,
zorder=None,
gridOn=None, # defaults to axes.grid depending on
# axes.grid.which
tick1On=True,
tick2On=True,
label1On=True,
label2On=False,
major=True,
):
"""
bbox is the Bound2D bounding box in display coords of the Axes
loc is the tick location in data coords
size is the tick size in points
"""
artist.Artist.__init__(self)
if gridOn is None:
if major and (rcParams['axes.grid.which'] in ('both', 'major')):
gridOn = rcParams['axes.grid']
elif (not major) and (rcParams['axes.grid.which']
in ('both', 'minor')):
gridOn = rcParams['axes.grid']
else:
gridOn = False
self.set_figure(axes.figure)
self.axes = axes
name = self.__name__.lower()
self._name = name
self._loc = loc
if size is None:
if major:
size = rcParams['%s.major.size' % name]
else:
size = rcParams['%s.minor.size' % name]
self._size = size
if width is None:
if major:
width = rcParams['%s.major.width' % name]
else:
width = rcParams['%s.minor.width' % name]
self._width = width
if color is None:
color = rcParams['%s.color' % name]
self._color = color
if pad is None:
if major:
pad = rcParams['%s.major.pad' % name]
else:
pad = rcParams['%s.minor.pad' % name]
self._base_pad = pad
if labelcolor is None:
labelcolor = rcParams['%s.color' % name]
self._labelcolor = labelcolor
if labelsize is None:
labelsize = rcParams['%s.labelsize' % name]
self._labelsize = labelsize
if zorder is None:
if major:
zorder = mlines.Line2D.zorder + 0.01
else:
zorder = mlines.Line2D.zorder
self._zorder = zorder
self.apply_tickdir(tickdir)
self.tick1line = self._get_tick1line()
self.tick2line = self._get_tick2line()
self.gridline = self._get_gridline()
self.label1 = self._get_text1()
self.label = self.label1 # legacy name
self.label2 = self._get_text2()
self.gridOn = gridOn
self.tick1On = tick1On
self.tick2On = tick2On
self.label1On = label1On
self.label2On = label2On
self.update_position(loc)
def apply_tickdir(self, tickdir):
"""
Calculate self._pad and self._tickmarkers
"""
pass
def get_tickdir(self):
return self._tickdir
def get_tick_padding(self):
"""
Get the length of the tick outside of the axes.
"""
padding = {
'in': 0.0,
'inout': 0.5,
'out': 1.0
}
return self._size * padding[self._tickdir]
def get_children(self):
children = [self.tick1line, self.tick2line,
self.gridline, self.label1, self.label2]
return children
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
self.gridline.set_clip_path(clippath, transform)
self.stale = True
set_clip_path.__doc__ = artist.Artist.set_clip_path.__doc__
def get_pad_pixels(self):
return self.figure.dpi * self._base_pad / 72.0
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the Tick marks.
This function always returns false. It is more useful to test if the
axis as a whole contains the mouse rather than the set of tick marks.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
return False, {}
def set_pad(self, val):
"""
Set the tick label pad in points
ACCEPTS: float
"""
self._apply_params(pad=val)
self.stale = True
def get_pad(self):
'Get the value of the tick label pad in points'
return self._base_pad
def _get_text1(self):
'Get the default Text 1 instance'
pass
def _get_text2(self):
'Get the default Text 2 instance'
pass
def _get_tick1line(self):
'Get the default line2D instance for tick1'
pass
def _get_tick2line(self):
'Get the default line2D instance for tick2'
pass
def _get_gridline(self):
'Get the default grid Line2d instance for this tick'
pass
def get_loc(self):
'Return the tick location (data coords) as a scalar'
return self._loc
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
self.stale = False
return
renderer.open_group(self.__name__)
if self.gridOn:
self.gridline.draw(renderer)
if self.tick1On:
self.tick1line.draw(renderer)
if self.tick2On:
self.tick2line.draw(renderer)
if self.label1On:
self.label1.draw(renderer)
if self.label2On:
self.label2.draw(renderer)
renderer.close_group(self.__name__)
self.stale = False
def set_label1(self, s):
"""
Set the text of ticklabel
ACCEPTS: str
"""
self.label1.set_text(s)
self.stale = True
set_label = set_label1
def set_label2(self, s):
"""
Set the text of ticklabel2
ACCEPTS: str
"""
self.label2.set_text(s)
self.stale = True
def _set_artist_props(self, a):
a.set_figure(self.figure)
def get_view_interval(self):
'return the view Interval instance for the axis this tick is ticking'
raise NotImplementedError('Derived must override')
def _apply_params(self, **kw):
switchkw = ['gridOn', 'tick1On', 'tick2On', 'label1On', 'label2On']
switches = [k for k in kw if k in switchkw]
for k in switches:
setattr(self, k, kw.pop(k))
newmarker = [k for k in kw if k in ['size', 'width', 'pad', 'tickdir']]
if newmarker:
self._size = kw.pop('size', self._size)
# Width could be handled outside this block, but it is
# convenient to leave it here.
self._width = kw.pop('width', self._width)
self._base_pad = kw.pop('pad', self._base_pad)
# apply_tickdir uses _size and _base_pad to make _pad,
# and also makes _tickmarkers.
self.apply_tickdir(kw.pop('tickdir', self._tickdir))
self.tick1line.set_marker(self._tickmarkers[0])
self.tick2line.set_marker(self._tickmarkers[1])
for line in (self.tick1line, self.tick2line):
line.set_markersize(self._size)
line.set_markeredgewidth(self._width)
# _get_text1_transform uses _pad from apply_tickdir.
trans = self._get_text1_transform()[0]
self.label1.set_transform(trans)
trans = self._get_text2_transform()[0]
self.label2.set_transform(trans)
tick_kw = dict([kv for kv in six.iteritems(kw)
if kv[0] in ['color', 'zorder']])
if tick_kw:
self.tick1line.set(**tick_kw)
self.tick2line.set(**tick_kw)
for k, v in six.iteritems(tick_kw):
setattr(self, '_' + k, v)
label_list = [k for k in six.iteritems(kw)
if k[0] in ['labelsize', 'labelcolor']]
if label_list:
label_kw = dict([(k[5:], v) for (k, v) in label_list])
self.label1.set(**label_kw)
self.label2.set(**label_kw)
for k, v in six.iteritems(label_kw):
                # for labelsize the text objects convert str ('small')
# -> points. grab the integer from the `Text` object
# instead of saving the string representation
v = getattr(self.label1, 'get_' + k)()
setattr(self, '_label' + k, v)
def update_position(self, loc):
'Set the location of tick in data coords with scalar *loc*'
raise NotImplementedError('Derived must override')
def _get_text1_transform(self):
raise NotImplementedError('Derived must override')
def _get_text2_transform(self):
raise NotImplementedError('Derived must override')
class XTick(Tick):
"""
Contains all the Artists needed to make an x tick - the tick line,
the label text and the grid line
"""
__name__ = 'xtick'
def _get_text1_transform(self):
return self.axes.get_xaxis_text1_transform(self._pad)
def _get_text2_transform(self):
return self.axes.get_xaxis_text2_transform(self._pad)
def apply_tickdir(self, tickdir):
if tickdir is None:
tickdir = rcParams['%s.direction' % self._name]
self._tickdir = tickdir
if self._tickdir == 'in':
self._tickmarkers = (mlines.TICKUP, mlines.TICKDOWN)
elif self._tickdir == 'inout':
self._tickmarkers = ('|', '|')
else:
self._tickmarkers = (mlines.TICKDOWN, mlines.TICKUP)
self._pad = self._base_pad + self.get_tick_padding()
self.stale = True
def _get_text1(self):
'Get the default Text instance'
# the y loc is 3 points below the min of y axis
# get the affine as an a,b,c,d,tx,ty list
# x in data coords, y in axes coords
trans, vert, horiz = self._get_text1_transform()
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=self._labelsize),
color=self._labelcolor,
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text 2 instance'
# x in data coords, y in axes coords
trans, vert, horiz = self._get_text2_transform()
t = mtext.Text(
x=0, y=1,
fontproperties=font_manager.FontProperties(size=self._labelsize),
color=self._labelcolor,
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0,), ydata=(0,), color=self._color,
linestyle='None', marker=self._tickmarkers[0],
markersize=self._size,
markeredgewidth=self._width, zorder=self._zorder)
l.set_transform(self.axes.get_xaxis_transform(which='tick1'))
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0,), ydata=(1,),
color=self._color,
linestyle='None',
marker=self._tickmarkers[1],
markersize=self._size,
markeredgewidth=self._width,
zorder=self._zorder)
l.set_transform(self.axes.get_xaxis_transform(which='tick2'))
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0.0, 0.0), ydata=(0, 1.0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
alpha=rcParams['grid.alpha'],
markersize=0)
l.set_transform(self.axes.get_xaxis_transform(which='grid'))
l.get_path()._interpolation_steps = GRIDLINE_INTERPOLATION_STEPS
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar *loc*'
x = loc
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_xdata((x,))
if self.tick2On:
self.tick2line.set_xdata((x,))
if self.gridOn:
self.gridline.set_xdata((x,))
if self.label1On:
self.label1.set_x(x)
if self.label2On:
self.label2.set_x(x)
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
self.stale = True
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
class YTick(Tick):
"""
Contains all the Artists needed to make a Y tick - the tick line,
the label text and the grid line
"""
__name__ = 'ytick'
def _get_text1_transform(self):
return self.axes.get_yaxis_text1_transform(self._pad)
def _get_text2_transform(self):
return self.axes.get_yaxis_text2_transform(self._pad)
def apply_tickdir(self, tickdir):
if tickdir is None:
tickdir = rcParams['%s.direction' % self._name]
self._tickdir = tickdir
if self._tickdir == 'in':
self._tickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)
elif self._tickdir == 'inout':
self._tickmarkers = ('_', '_')
else:
self._tickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)
self._pad = self._base_pad + self.get_tick_padding()
self.stale = True
    # how far from the y axis line the right side of the ticklabels is
def _get_text1(self):
'Get the default Text instance'
# x in axes coords, y in data coords
trans, vert, horiz = self._get_text1_transform()
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=self._labelsize),
color=self._labelcolor,
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text instance'
# x in axes coords, y in data coords
trans, vert, horiz = self._get_text2_transform()
t = mtext.Text(
x=1, y=0,
fontproperties=font_manager.FontProperties(size=self._labelsize),
color=self._labelcolor,
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D((0,), (0,),
color=self._color,
marker=self._tickmarkers[0],
linestyle='None',
markersize=self._size,
markeredgewidth=self._width,
zorder=self._zorder)
l.set_transform(self.axes.get_yaxis_transform(which='tick1'))
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D((1,), (0,),
color=self._color,
marker=self._tickmarkers[1],
linestyle='None',
markersize=self._size,
markeredgewidth=self._width,
zorder=self._zorder)
l.set_transform(self.axes.get_yaxis_transform(which='tick2'))
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D(xdata=(0, 1), ydata=(0, 0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
alpha=rcParams['grid.alpha'],
markersize=0)
l.set_transform(self.axes.get_yaxis_transform(which='grid'))
l.get_path()._interpolation_steps = GRIDLINE_INTERPOLATION_STEPS
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar loc'
y = loc
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_ydata((y,))
if self.tick2On:
self.tick2line.set_ydata((y,))
if self.gridOn:
self.gridline.set_ydata((y, ))
if self.label1On:
self.label1.set_y(y)
if self.label2On:
self.label2.set_y(y)
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
self.stale = True
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
class Ticker(object):
locator = None
formatter = None
class Axis(artist.Artist):
"""
Public attributes
* :attr:`axes.transData` - transform data coords to display coords
* :attr:`axes.transAxes` - transform axis coords to display coords
* :attr:`labelpad` - number of points between the axis and its label
"""
OFFSETTEXTPAD = 3
def __str__(self):
return self.__class__.__name__ \
+ "(%f,%f)" % tuple(self.axes.transAxes.transform_point((0, 0)))
def __init__(self, axes, pickradius=15):
"""
Init the axis with the parent Axes instance
"""
artist.Artist.__init__(self)
self.set_figure(axes.figure)
        # Keep track of setting to the default value, this allows us to know
# if any of the following values is explicitly set by the user, so as
# to not overwrite their settings with any of our 'auto' settings.
self.isDefault_majloc = True
self.isDefault_minloc = True
self.isDefault_majfmt = True
self.isDefault_minfmt = True
self.isDefault_label = True
self.axes = axes
self.major = Ticker()
self.minor = Ticker()
self.callbacks = cbook.CallbackRegistry()
self._autolabelpos = True
self._smart_bounds = False
self.label = self._get_label()
self.labelpad = rcParams['axes.labelpad']
self.offsetText = self._get_offset_text()
self.majorTicks = []
self.minorTicks = []
self.pickradius = pickradius
# Initialize here for testing; later add API
self._major_tick_kw = dict()
self._minor_tick_kw = dict()
self.cla()
self._set_scale('linear')
def set_label_coords(self, x, y, transform=None):
"""
Set the coordinates of the label. By default, the x
coordinate of the y label is determined by the tick label
bounding boxes, but this can lead to poor alignment of
multiple ylabels if there are multiple axes. Ditto for the y
        coordinate of the x label.
You can also specify the coordinate system of the label with
the transform. If None, the default coordinate system will be
        the axes coordinate system: (0, 0) is (left, bottom), (0.5, 0.5)
        is middle, etc.
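
        A minimal usage sketch (``ax`` is assumed here to be an Axes
        instance)::

            ax.xaxis.set_label_coords(0.5, -0.1)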
"""
self._autolabelpos = False
if transform is None:
transform = self.axes.transAxes
self.label.set_transform(transform)
self.label.set_position((x, y))
self.stale = True
def get_transform(self):
return self._scale.get_transform()
def get_scale(self):
return self._scale.name
def _set_scale(self, value, **kwargs):
self._scale = mscale.scale_factory(value, self, **kwargs)
self._scale.set_default_locators_and_formatters(self)
self.isDefault_majloc = True
self.isDefault_minloc = True
self.isDefault_majfmt = True
self.isDefault_minfmt = True
def limit_range_for_scale(self, vmin, vmax):
return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())
def get_children(self):
children = [self.label, self.offsetText]
majorticks = self.get_major_ticks()
minorticks = self.get_minor_ticks()
children.extend(majorticks)
children.extend(minorticks)
return children
def cla(self):
'clear the current axis'
self.set_major_locator(mticker.AutoLocator())
self.set_major_formatter(mticker.ScalarFormatter())
self.set_minor_locator(mticker.NullLocator())
self.set_minor_formatter(mticker.NullFormatter())
self.set_label_text('')
self._set_artist_props(self.label)
        # Keep track of setting to the default value, this allows us to know
# if any of the following values is explicitly set by the user, so as
# to not overwrite their settings with any of our 'auto' settings.
self.isDefault_majloc = True
self.isDefault_minloc = True
self.isDefault_majfmt = True
self.isDefault_minfmt = True
self.isDefault_label = True
# Clear the callback registry for this axis, or it may "leak"
self.callbacks = cbook.CallbackRegistry()
# whether the grids are on
self._gridOnMajor = (rcParams['axes.grid'] and
rcParams['axes.grid.which'] in ('both', 'major'))
self._gridOnMinor = (rcParams['axes.grid'] and
rcParams['axes.grid.which'] in ('both', 'minor'))
self.label.set_text('')
self._set_artist_props(self.label)
self.reset_ticks()
self.converter = None
self.units = None
self.set_units(None)
self.stale = True
def reset_ticks(self):
# build a few default ticks; grow as necessary later; only
# define 1 so properties set on ticks will be copied as they
# grow
cbook.popall(self.majorTicks)
cbook.popall(self.minorTicks)
self.majorTicks.extend([self._get_tick(major=True)])
self.minorTicks.extend([self._get_tick(major=False)])
self._lastNumMajorTicks = 1
self._lastNumMinorTicks = 1
def set_tick_params(self, which='major', reset=False, **kw):
"""
Set appearance parameters for ticks and ticklabels.
For documentation of keyword arguments, see
:meth:`matplotlib.axes.Axes.tick_params`.
"""
dicts = []
if which == 'major' or which == 'both':
dicts.append(self._major_tick_kw)
if which == 'minor' or which == 'both':
dicts.append(self._minor_tick_kw)
kwtrans = self._translate_tick_kw(kw, to_init_kw=True)
for d in dicts:
if reset:
d.clear()
d.update(kwtrans)
if reset:
self.reset_ticks()
else:
if which == 'major' or which == 'both':
for tick in self.majorTicks:
tick._apply_params(**self._major_tick_kw)
if which == 'minor' or which == 'both':
for tick in self.minorTicks:
tick._apply_params(**self._minor_tick_kw)
if 'labelcolor' in kwtrans:
self.offsetText.set_color(kwtrans['labelcolor'])
self.stale = True
@staticmethod
def _translate_tick_kw(kw, to_init_kw=True):
# We may want to move the following function to
# a more visible location; or maybe there already
# is something like this.
def _bool(arg):
if cbook.is_string_like(arg):
if arg.lower() == 'on':
return True
if arg.lower() == 'off':
return False
raise ValueError('String "%s" should be "on" or "off"' % arg)
return bool(arg)
# The following lists may be moved to a more
# accessible location.
kwkeys0 = ['size', 'width', 'color', 'tickdir', 'pad',
'labelsize', 'labelcolor', 'zorder', 'gridOn',
'tick1On', 'tick2On', 'label1On', 'label2On']
kwkeys1 = ['length', 'direction', 'left', 'bottom', 'right', 'top',
'labelleft', 'labelbottom', 'labelright', 'labeltop']
kwkeys = kwkeys0 + kwkeys1
kwtrans = dict()
if to_init_kw:
if 'length' in kw:
kwtrans['size'] = kw.pop('length')
if 'direction' in kw:
kwtrans['tickdir'] = kw.pop('direction')
if 'left' in kw:
kwtrans['tick1On'] = _bool(kw.pop('left'))
if 'bottom' in kw:
kwtrans['tick1On'] = _bool(kw.pop('bottom'))
if 'right' in kw:
kwtrans['tick2On'] = _bool(kw.pop('right'))
if 'top' in kw:
kwtrans['tick2On'] = _bool(kw.pop('top'))
if 'labelleft' in kw:
kwtrans['label1On'] = _bool(kw.pop('labelleft'))
if 'labelbottom' in kw:
kwtrans['label1On'] = _bool(kw.pop('labelbottom'))
if 'labelright' in kw:
kwtrans['label2On'] = _bool(kw.pop('labelright'))
if 'labeltop' in kw:
kwtrans['label2On'] = _bool(kw.pop('labeltop'))
if 'colors' in kw:
c = kw.pop('colors')
kwtrans['color'] = c
kwtrans['labelcolor'] = c
# Maybe move the checking up to the caller of this method.
for key in kw:
if key not in kwkeys:
raise ValueError(
"keyword %s is not recognized; valid keywords are %s"
% (key, kwkeys))
kwtrans.update(kw)
else:
raise NotImplementedError("Inverse translation is deferred")
return kwtrans
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
for child in self.majorTicks + self.minorTicks:
child.set_clip_path(clippath, transform)
self.stale = True
def get_view_interval(self):
'return the Interval instance for this axis view limits'
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
raise NotImplementedError('Derived must override')
def get_data_interval(self):
'return the Interval instance for this axis data limits'
raise NotImplementedError('Derived must override')
def set_data_interval(self):
'''set the axis data limits'''
raise NotImplementedError('Derived must override')
def set_default_intervals(self):
'''set the default limits for the axis data and view interval if they
are not mutated'''
# this is mainly in support of custom object plotting. For
# example, if someone passes in a datetime object, we do not
# know automagically how to set the default min/max of the
# data and view limits. The unit conversion AxisInfo
# interface provides a hook for custom types to register
# default limits through the AxisInfo.default_limits
# attribute, and the derived code below will check for that
# and use it if is available (else just use 0..1)
pass
def _set_artist_props(self, a):
if a is None:
return
a.set_figure(self.figure)
def iter_ticks(self):
"""
Iterate through all of the major and minor ticks.
"""
majorLocs = self.major.locator()
majorTicks = self.get_major_ticks(len(majorLocs))
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i)
for i, val in enumerate(majorLocs)]
minorLocs = self.minor.locator()
minorTicks = self.get_minor_ticks(len(minorLocs))
self.minor.formatter.set_locs(minorLocs)
minorLabels = [self.minor.formatter(val, i)
for i, val in enumerate(minorLocs)]
major_minor = [
(majorTicks, majorLocs, majorLabels),
(minorTicks, minorLocs, minorLabels)]
for group in major_minor:
for tick in zip(*group):
yield tick
def get_ticklabel_extents(self, renderer):
"""
Get the extents of the tick labels on either side
of the axes.
"""
ticks_to_draw = self._update_ticks(renderer)
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
if len(ticklabelBoxes):
bbox = mtransforms.Bbox.union(ticklabelBoxes)
else:
bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)
if len(ticklabelBoxes2):
bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)
else:
bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)
return bbox, bbox2
def set_smart_bounds(self, value):
"""set the axis to have smart bounds"""
self._smart_bounds = value
self.stale = True
def get_smart_bounds(self):
"""get whether the axis has smart bounds"""
return self._smart_bounds
def _update_ticks(self, renderer):
"""
Update ticks (position and labels) using the current data
interval of the axes. Returns a list of ticks that will be
drawn.
"""
interval = self.get_view_interval()
tick_tups = [t for t in self.iter_ticks()]
if self._smart_bounds:
# handle inverted limits
view_low, view_high = min(*interval), max(*interval)
data_low, data_high = self.get_data_interval()
if data_low > data_high:
data_low, data_high = data_high, data_low
locs = [ti[1] for ti in tick_tups]
locs.sort()
locs = np.array(locs)
if len(locs):
if data_low <= view_low:
# data extends beyond view, take view as limit
ilow = view_low
else:
# data stops within view, take best tick
cond = locs <= data_low
good_locs = locs[cond]
if len(good_locs) > 0:
# last tick prior or equal to first data point
ilow = good_locs[-1]
else:
# No ticks (why not?), take first tick
ilow = locs[0]
if data_high >= view_high:
# data extends beyond view, take view as limit
ihigh = view_high
else:
# data stops within view, take best tick
cond = locs >= data_high
good_locs = locs[cond]
if len(good_locs) > 0:
# first tick after or equal to last data point
ihigh = good_locs[0]
else:
# No ticks (why not?), take last tick
ihigh = locs[-1]
tick_tups = [ti for ti in tick_tups
if (ti[1] >= ilow) and (ti[1] <= ihigh)]
# so that we don't lose ticks on the end, expand out the interval ever
# so slightly. The "ever so slightly" is defined to be the width of a
# half of a pixel. We don't want to draw a tick that even one pixel
# outside of the defined axis interval.
if interval[0] <= interval[1]:
interval_expanded = interval
else:
interval_expanded = interval[1], interval[0]
if hasattr(self, '_get_pixel_distance_along_axis'):
# normally, one does not want to catch all exceptions that
# could possibly happen, but it is not clear exactly what
# exceptions might arise from a user's projection (their
# rendition of the Axis object). So, we catch all, with
# the idea that one would rather potentially lose a tick
# from one side of the axis or another, rather than see a
# stack trace.
# We also catch users warnings here. These are the result of
# invalid numpy calculations that may be the result of out of
# bounds on axis with finite allowed intervals such as geo
# projections i.e. Mollweide.
with np.errstate(invalid='ignore'):
try:
ds1 = self._get_pixel_distance_along_axis(
interval_expanded[0], -0.5)
except:
warnings.warn("Unable to find pixel distance along axis "
"for interval padding of ticks; assuming no "
"interval padding needed.")
ds1 = 0.0
if np.isnan(ds1):
ds1 = 0.0
try:
ds2 = self._get_pixel_distance_along_axis(
interval_expanded[1], +0.5)
except:
warnings.warn("Unable to find pixel distance along axis "
"for interval padding of ticks; assuming no "
"interval padding needed.")
ds2 = 0.0
if np.isnan(ds2):
ds2 = 0.0
interval_expanded = (interval_expanded[0] - ds1,
interval_expanded[1] + ds2)
ticks_to_draw = []
for tick, loc, label in tick_tups:
if tick is None:
continue
if not mtransforms.interval_contains(interval_expanded, loc):
continue
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
ticks_to_draw.append(tick)
return ticks_to_draw
def _get_tick_bboxes(self, ticks, renderer):
"""
Given the list of ticks, return two lists of bboxes. One for
        tick label1's and another for tick label2's.
"""
ticklabelBoxes = []
ticklabelBoxes2 = []
for tick in ticks:
if tick.label1On and tick.label1.get_visible():
extent = tick.label1.get_window_extent(renderer)
ticklabelBoxes.append(extent)
if tick.label2On and tick.label2.get_visible():
extent = tick.label2.get_window_extent(renderer)
ticklabelBoxes2.append(extent)
return ticklabelBoxes, ticklabelBoxes2
def get_tightbbox(self, renderer):
"""
Return a bounding box that encloses the axis. It only accounts
tick labels, axis label, and offsetText.
"""
if not self.get_visible():
return
ticks_to_draw = self._update_ticks(renderer)
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
self._update_label_position(ticklabelBoxes, ticklabelBoxes2)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text(self.major.formatter.get_offset())
bb = []
for a in [self.label, self.offsetText]:
if a.get_visible():
bb.append(a.get_window_extent(renderer))
bb.extend(ticklabelBoxes)
bb.extend(ticklabelBoxes2)
bb = [b for b in bb if b.width != 0 or b.height != 0]
if bb:
_bbox = mtransforms.Bbox.union(bb)
return _bbox
else:
return None
def get_tick_padding(self):
values = []
if len(self.majorTicks):
values.append(self.majorTicks[0].get_tick_padding())
if len(self.minorTicks):
values.append(self.minorTicks[0].get_tick_padding())
if len(values):
return max(values)
return 0.0
@allow_rasterization
def draw(self, renderer, *args, **kwargs):
'Draw the axis lines, grid lines, tick lines and labels'
if not self.get_visible():
return
renderer.open_group(__name__)
ticks_to_draw = self._update_ticks(renderer)
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
for tick in ticks_to_draw:
tick.draw(renderer)
# scale up the axis label box to also find the neighbors, not
        # just the tick labels that actually overlap.  Note we need a
        # *copy* of the axis label box because we don't want to scale
# the actual bbox
self._update_label_position(ticklabelBoxes, ticklabelBoxes2)
self.label.draw(renderer)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text(self.major.formatter.get_offset())
self.offsetText.draw(renderer)
if 0: # draw the bounding boxes around the text for debug
for tick in self.majorTicks:
label = tick.label1
mpatches.bbox_artist(label, renderer)
mpatches.bbox_artist(self.label, renderer)
renderer.close_group(__name__)
self.stale = False
def _get_label(self):
raise NotImplementedError('Derived must override')
def _get_offset_text(self):
raise NotImplementedError('Derived must override')
def get_gridlines(self):
'Return the grid lines as a list of Line2D instance'
ticks = self.get_major_ticks()
return cbook.silent_list('Line2D gridline',
[tick.gridline for tick in ticks])
def get_label(self):
'Return the axis label as a Text instance'
return self.label
def get_offset_text(self):
'Return the axis offsetText as a Text instance'
return self.offsetText
def get_pickradius(self):
'Return the depth of the axis used by the picker'
return self.pickradius
def get_majorticklabels(self):
'Return a list of Text instances for the major ticklabels'
ticks = self.get_major_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text major ticklabel', labels1 + labels2)
def get_minorticklabels(self):
'Return a list of Text instances for the minor ticklabels'
ticks = self.get_minor_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text minor ticklabel', labels1 + labels2)
def get_ticklabels(self, minor=False, which=None):
"""
Get the x tick labels as a list of :class:`~matplotlib.text.Text`
instances.
Parameters
----------
minor : bool
If True return the minor ticklabels,
else return the major ticklabels
which : None, ('minor', 'major', 'both')
Overrides `minor`.
Selects which ticklabels to return
Returns
-------
ret : list
List of :class:`~matplotlib.text.Text` instances.
"""
if which is not None:
if which == 'minor':
return self.get_minorticklabels()
elif which == 'major':
return self.get_majorticklabels()
elif which == 'both':
return self.get_majorticklabels() + self.get_minorticklabels()
else:
raise ValueError("`which` must be one of ('minor', 'major', "
"'both') not " + str(which))
if minor:
return self.get_minorticklabels()
return self.get_majorticklabels()
def get_majorticklines(self):
'Return the major tick lines as a list of Line2D instances'
lines = []
ticks = self.get_major_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_minorticklines(self):
'Return the minor tick lines as a list of Line2D instances'
lines = []
ticks = self.get_minor_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_ticklines(self, minor=False):
'Return the tick lines as a list of Line2D instances'
if minor:
return self.get_minorticklines()
return self.get_majorticklines()
def get_majorticklocs(self):
"Get the major tick locations in data coordinates as a numpy array"
return self.major.locator()
def get_minorticklocs(self):
"Get the minor tick locations in data coordinates as a numpy array"
return self.minor.locator()
def get_ticklocs(self, minor=False):
"Get the tick locations in data coordinates as a numpy array"
if minor:
return self.minor.locator()
return self.major.locator()
def _get_tick(self, major):
'return the default tick instance'
raise NotImplementedError('derived must override')
def _copy_tick_props(self, src, dest):
'Copy the props from src tick to dest tick'
if src is None or dest is None:
return
dest.label1.update_from(src.label1)
dest.label2.update_from(src.label2)
dest.tick1line.update_from(src.tick1line)
dest.tick2line.update_from(src.tick2line)
dest.gridline.update_from(src.gridline)
dest.tick1On = src.tick1On
dest.tick2On = src.tick2On
dest.label1On = src.label1On
dest.label2On = src.label2On
def get_label_text(self):
'Get the text of the label'
return self.label.get_text()
def get_major_locator(self):
'Get the locator of the major ticker'
return self.major.locator
def get_minor_locator(self):
'Get the locator of the minor ticker'
return self.minor.locator
def get_major_formatter(self):
'Get the formatter of the major ticker'
return self.major.formatter
def get_minor_formatter(self):
'Get the formatter of the minor ticker'
return self.minor.formatter
def get_major_ticks(self, numticks=None):
'get the tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_major_locator()())
if len(self.majorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.majorTicks)):
tick = self._get_tick(major=True)
self.majorTicks.append(tick)
if self._lastNumMajorTicks < numticks:
protoTick = self.majorTicks[0]
for i in range(self._lastNumMajorTicks, len(self.majorTicks)):
tick = self.majorTicks[i]
if self._gridOnMajor:
tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMajorTicks = numticks
ticks = self.majorTicks[:numticks]
return ticks
def get_minor_ticks(self, numticks=None):
'get the minor tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_minor_locator()())
if len(self.minorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.minorTicks)):
tick = self._get_tick(major=False)
self.minorTicks.append(tick)
if self._lastNumMinorTicks < numticks:
protoTick = self.minorTicks[0]
for i in range(self._lastNumMinorTicks, len(self.minorTicks)):
tick = self.minorTicks[i]
if self._gridOnMinor:
tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMinorTicks = numticks
ticks = self.minorTicks[:numticks]
return ticks
def grid(self, b=None, which='major', **kwargs):
"""
Set the axis grid on or off; b is a boolean. Use *which* =
'major' | 'minor' | 'both' to set the grid for major or minor ticks.
If *b* is *None* and len(kwargs)==0, toggle the grid state. If
*kwargs* are supplied, it is assumed you want the grid on and *b*
will be set to True.
*kwargs* are used to set the line properties of the grids, e.g.,
xax.grid(color='r', linestyle='-', linewidth=2)
"""
if len(kwargs):
b = True
which = which.lower()
if which in ['minor', 'both']:
if b is None:
self._gridOnMinor = not self._gridOnMinor
else:
self._gridOnMinor = b
for tick in self.minorTicks: # don't use get_ticks here!
if tick is None:
continue
tick.gridOn = self._gridOnMinor
if len(kwargs):
tick.gridline.update(kwargs)
self._minor_tick_kw['gridOn'] = self._gridOnMinor
if which in ['major', 'both']:
if b is None:
self._gridOnMajor = not self._gridOnMajor
else:
self._gridOnMajor = b
for tick in self.majorTicks: # don't use get_ticks here!
if tick is None:
continue
tick.gridOn = self._gridOnMajor
if len(kwargs):
tick.gridline.update(kwargs)
self._major_tick_kw['gridOn'] = self._gridOnMajor
self.stale = True
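# Illustrative usage (editor's note, not part of the original source), assuming
# an Axes instance ``ax``:
#
#     ax.yaxis.grid(True, which='major', color='0.8', linestyle='--')
#
# Passing any line-property kwargs implies ``b=True``, as described in the
# docstring above.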
def update_units(self, data):
"""
introspect *data* for units converter and update the
axis.converter instance if necessary. Return *True*
if *data* is registered for unit conversion.
"""
converter = munits.registry.get_converter(data)
if converter is None:
return False
neednew = self.converter != converter
self.converter = converter
default = self.converter.default_units(data, self)
if default is not None and self.units is None:
self.set_units(default)
if neednew:
self._update_axisinfo()
self.stale = True
return True
def _update_axisinfo(self):
"""
check the axis converter for the stored units to see if the
axis info needs to be updated
"""
if self.converter is None:
return
info = self.converter.axisinfo(self.units, self)
if info is None:
return
if info.majloc is not None and \
self.major.locator != info.majloc and self.isDefault_majloc:
self.set_major_locator(info.majloc)
self.isDefault_majloc = True
if info.minloc is not None and \
self.minor.locator != info.minloc and self.isDefault_minloc:
self.set_minor_locator(info.minloc)
self.isDefault_minloc = True
if info.majfmt is not None and \
self.major.formatter != info.majfmt and self.isDefault_majfmt:
self.set_major_formatter(info.majfmt)
self.isDefault_majfmt = True
if info.minfmt is not None and \
self.minor.formatter != info.minfmt and self.isDefault_minfmt:
self.set_minor_formatter(info.minfmt)
self.isDefault_minfmt = True
if info.label is not None and self.isDefault_label:
self.set_label_text(info.label)
self.isDefault_label = True
self.set_default_intervals()
def have_units(self):
return self.converter is not None or self.units is not None
def convert_units(self, x):
if self.converter is None:
self.converter = munits.registry.get_converter(x)
if self.converter is None:
return x
ret = self.converter.convert(x, self.units, self)
return ret
def set_units(self, u):
"""
set the units for axis
ACCEPTS: a units tag
"""
pchanged = False
if u is None:
self.units = None
pchanged = True
else:
if u != self.units:
self.units = u
pchanged = True
if pchanged:
self._update_axisinfo()
self.callbacks.process('units')
self.callbacks.process('units finalize')
self.stale = True
def get_units(self):
'return the units for axis'
return self.units
def set_label_text(self, label, fontdict=None, **kwargs):
""" Sets the text value of the axis label
ACCEPTS: A string value for the label
"""
self.isDefault_label = False
self.label.set_text(label)
if fontdict is not None:
self.label.update(fontdict)
self.label.update(kwargs)
self.stale = True
return self.label
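# Illustrative usage (editor's note, not part of the original source):
#
#     ax.xaxis.set_label_text('time [s]')
#
# Axes.set_xlabel()/set_ylabel() are essentially thin wrappers around this
# method on the corresponding axis.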
def set_major_formatter(self, formatter):
"""
Set the formatter of the major ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.isDefault_majfmt = False
self.major.formatter = formatter
formatter.set_axis(self)
self.stale = True
def set_minor_formatter(self, formatter):
"""
Set the formatter of the minor ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.isDefault_minfmt = False
self.minor.formatter = formatter
formatter.set_axis(self)
self.stale = True
def set_major_locator(self, locator):
"""
Set the locator of the major ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.isDefault_majloc = False
self.major.locator = locator
locator.set_axis(self)
self.stale = True
def set_minor_locator(self, locator):
"""
Set the locator of the minor ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.isDefault_minloc = False
self.minor.locator = locator
locator.set_axis(self)
self.stale = True
def set_pickradius(self, pickradius):
"""
Set the depth of the axis used by the picker
ACCEPTS: a distance in points
"""
self.pickradius = pickradius
def set_ticklabels(self, ticklabels, *args, **kwargs):
"""
Set the text values of the tick labels. Return a list of Text
instances. Use *kwarg* *minor=True* to select minor ticks.
All other kwargs are used to update the text object properties.
As for get_ticklabels, label1 (left or bottom) is
affected for a given tick only if its label1On attribute
is True, and similarly for label2. The list of returned
label text objects consists of all such label1 objects followed
by all such label2 objects.
The input *ticklabels* is assumed to match the set of
tick locations, regardless of the state of label1On and
label2On.
ACCEPTS: sequence of strings or Text objects
"""
get_labels = []
for t in ticklabels:
# try calling get_text() to check whether it is Text object
# if it is Text, get label content
try:
get_labels.append(t.get_text())
# otherwise add the label to the list directly
except AttributeError:
get_labels.append(t)
# replace the ticklabels list with the processed one
ticklabels = get_labels
minor = kwargs.pop('minor', False)
if minor:
self.set_minor_formatter(mticker.FixedFormatter(ticklabels))
ticks = self.get_minor_ticks()
else:
self.set_major_formatter(mticker.FixedFormatter(ticklabels))
ticks = self.get_major_ticks()
ret = []
for tick_label, tick in zip(ticklabels, ticks):
# deal with label1
tick.label1.set_text(tick_label)
tick.label1.update(kwargs)
# deal with label2
tick.label2.set_text(tick_label)
tick.label2.update(kwargs)
# only return visible tick labels
if tick.label1On:
ret.append(tick.label1)
if tick.label2On:
ret.append(tick.label2)
self.stale = True
return ret
def set_ticks(self, ticks, minor=False):
"""
Set the locations of the tick marks from sequence ticks
ACCEPTS: sequence of floats
"""
# XXX if the user changes units, the information will be lost here
ticks = self.convert_units(ticks)
if len(ticks) > 1:
xleft, xright = self.get_view_interval()
if xright > xleft:
self.set_view_interval(min(ticks), max(ticks))
else:
self.set_view_interval(max(ticks), min(ticks))
if minor:
self.set_minor_locator(mticker.FixedLocator(ticks))
return self.get_minor_ticks(len(ticks))
else:
self.set_major_locator(mticker.FixedLocator(ticks))
return self.get_major_ticks(len(ticks))
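# Illustrative usage (editor's note, not part of the original source): fixed
# tick locations and labels are typically set together, e.g.
#
#     ax.xaxis.set_ticks([0.0, 0.5, 1.0])
#     ax.xaxis.set_ticklabels(['low', 'mid', 'high'])
#
# Internally this installs a FixedLocator and a FixedFormatter, as shown above.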
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
raise NotImplementedError('Derived must override')
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
raise NotImplementedError('Derived must override')
def pan(self, numsteps):
'Pan *numsteps* (can be positive or negative)'
self.major.locator.pan(numsteps)
def zoom(self, direction):
"Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out"
self.major.locator.zoom(direction)
def axis_date(self, tz=None):
"""
Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is a :class:`tzinfo` instance or a timezone string.
This timezone is used to create date labels.
"""
# By providing a sample datetime instance with the desired
# timezone, the registered converter can be selected,
# and the "units" attribute, which is the timezone, can
# be set.
import datetime
if isinstance(tz, six.string_types):
import pytz
tz = pytz.timezone(tz)
self.update_units(datetime.datetime(2009, 1, 1, 0, 0, 0, 0, tz))
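# Illustrative usage (editor's note, not part of the original source):
#
#     ax.xaxis.axis_date(tz='Europe/Amsterdam')
#
# registers the date converter for this axis and stores the timezone as its
# units, so date ticks and labels are rendered in that zone.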
def get_tick_space(self):
"""
Return the estimated number of ticks that can fit on the axis.
"""
# Must be overridden in the subclass
raise NotImplementedError()
def get_label_position(self):
"""
Return the label position (top or bottom)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (top or bottom)
ACCEPTS: [ 'top' | 'bottom' ]
"""
raise NotImplementedError()
def get_minpos(self):
raise NotImplementedError()
class XAxis(Axis):
__name__ = 'xaxis'
axis_name = 'x'
def contains(self, mouseevent):
"""Test whether the mouse event occured in the x axis.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
x, y = mouseevent.x, mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes, yaxes = trans.transform_point((x, y))
except ValueError:
return False, {}
l, b = self.axes.transAxes.transform_point((0, 0))
r, t = self.axes.transAxes.transform_point((1, 1))
inaxis = xaxes >= 0 and xaxes <= 1 and (
(y < b and y > b - self.pickradius) or
(y > t and y < t + self.pickradius))
return inaxis, {}
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return XTick(self.axes, 0, '', major=major, **tick_kw)
def _get_label(self):
# x in axes coords, y in display coords (to be updated at draw
# time by _update_label_positions)
label = mtext.Text(x=0.5, y=0,
fontproperties=font_manager.FontProperties(
size=rcParams['axes.labelsize'],
weight=rcParams['axes.labelweight']),
color=rcParams['axes.labelcolor'],
verticalalignment='top',
horizontalalignment='center')
label.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()))
self._set_artist_props(label)
self.label_position = 'bottom'
return label
def _get_offset_text(self):
# x in axes coords, y in display coords (to be updated at draw time)
offsetText = mtext.Text(x=1, y=0,
fontproperties=font_manager.FontProperties(
size=rcParams['xtick.labelsize']),
color=rcParams['xtick.color'],
verticalalignment='top',
horizontalalignment='right')
offsetText.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform())
)
self._set_artist_props(offsetText)
self.offset_text_position = 'bottom'
return offsetText
def _get_pixel_distance_along_axis(self, where, perturb):
"""
Returns the amount, in data coordinates, that a single pixel
corresponds to in the locality given by "where", which is also given
in data coordinates, and is an x coordinate. "perturb" is the amount
to perturb the pixel. Usually +0.5 or -0.5.
Implementing this routine for an axis is optional; if present, it will
ensure that no ticks are lost due to round-off at the extreme ends of
an axis.
"""
# Note that this routine does not work for a polar axis, because of
# the 1e-10 below. To do things correctly, we need to use rmax
# instead of 1e-10 for a polar axis. But since we do not have that
# kind of information at this point, we just don't try to pad anything
# for the theta axis of a polar plot.
if self.axes.name == 'polar':
return 0.0
#
# first figure out the pixel location of the "where" point. We use
# 1e-10 for the y point, so that we remain compatible with log axes.
# transformation from data coords to display coords
trans = self.axes.transData
# transformation from display coords to data coords
transinv = trans.inverted()
pix = trans.transform_point((where, 1e-10))
# perturb the pixel
ptp = transinv.transform_point((pix[0] + perturb, pix[1]))
dx = abs(ptp[0] - where)
return dx
def set_label_position(self, position):
"""
Set the label position (top or bottom)
ACCEPTS: [ 'top' | 'bottom' ]
"""
if position == 'top':
self.label.set_verticalalignment('baseline')
elif position == 'bottom':
self.label.set_verticalalignment('top')
else:
msg = "Position accepts only [ 'top' | 'bottom' ]"
raise ValueError(msg)
self.label_position = position
self.stale = True
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
if not self._autolabelpos:
return
x, y = self.label.get_position()
if self.label_position == 'bottom':
try:
spine = self.axes.spines['bottom']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes + [spinebbox])
bottom = bbox.y0
self.label.set_position(
(x, bottom - self.labelpad * self.figure.dpi / 72.0)
)
else:
try:
spine = self.axes.spines['top']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])
top = bbox.y1
self.label.set_position(
(x, top + self.labelpad * self.figure.dpi / 72.0)
)
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x, y = self.offsetText.get_position()
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
self.offsetText.set_position(
(x, bottom - self.OFFSETTEXTPAD * self.figure.dpi / 72.0)
)
def get_text_heights(self, renderer):
"""
Returns the amount of space one should reserve for text
above and below the axes. Returns a tuple (above, below)
"""
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
above = 0.0
if bbox2.height:
above += bbox2.height + padPixels
below = 0.0
if bbox.height:
below += bbox.height + padPixels
if self.get_label_position() == 'top':
above += self.label.get_window_extent(renderer).height + padPixels
else:
below += self.label.get_window_extent(renderer).height + padPixels
return above, below
def set_ticks_position(self, position):
"""
Set the ticks position (top, bottom, both, default or none)
'both' sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at bottom. 'none'
can be used if you don't want any ticks. 'none' and 'both'
affect only the ticks, not the labels.
ACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]
"""
if position == 'top':
self.set_tick_params(which='both', top=True, labeltop=True,
bottom=False, labelbottom=False)
elif position == 'bottom':
self.set_tick_params(which='both', top=False, labeltop=False,
bottom=True, labelbottom=True)
elif position == 'both':
self.set_tick_params(which='both', top=True,
bottom=True)
elif position == 'none':
self.set_tick_params(which='both', top=False,
bottom=False)
elif position == 'default':
self.set_tick_params(which='both', top=True, labeltop=False,
bottom=True, labelbottom=True)
else:
raise ValueError("invalid position: %s" % position)
self.stale = True
def tick_top(self):
'use ticks only on top'
self.set_ticks_position('top')
def tick_bottom(self):
'use ticks only on bottom'
self.set_ticks_position('bottom')
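# Illustrative usage (editor's note, not part of the original source): the
# convenience wrappers above are shorthand for set_ticks_position(), e.g.
#
#     ax.xaxis.tick_top()                      # same as set_ticks_position('top')
#     ax.xaxis.set_ticks_position('default')   # ticks on both sides, labels at bottom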
def get_ticks_position(self):
"""
Return the ticks position (top, bottom, default or unknown)
"""
majt = self.majorTicks[0]
mT = self.minorTicks[0]
majorTop = ((not majt.tick1On) and majt.tick2On and
(not majt.label1On) and majt.label2On)
minorTop = ((not mT.tick1On) and mT.tick2On and
(not mT.label1On) and mT.label2On)
if majorTop and minorTop:
return 'top'
MajorBottom = (majt.tick1On and (not majt.tick2On) and
majt.label1On and (not majt.label2On))
MinorBottom = (mT.tick1On and (not mT.tick2On) and
mT.label1On and (not mT.label2On))
if MajorBottom and MinorBottom:
return 'bottom'
majorDefault = (majt.tick1On and majt.tick2On and
majt.label1On and (not majt.label2On))
minorDefault = (mT.tick1On and mT.tick2On and
mT.label1On and (not mT.label2On))
if majorDefault and minorDefault:
return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
def set_view_interval(self, vmin, vmax, ignore=False):
"""
If *ignore* is *False*, the order of vmin, vmax
does not matter; the original axis orientation will
be preserved. In addition, the view limits can be
expanded, but will not be reduced. This method is
for mpl internal use; for normal use, see
:meth:`~matplotlib.axes.Axes.set_xlim`.
"""
if ignore:
self.axes.viewLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
if Vmin < Vmax:
self.axes.viewLim.intervalx = (min(vmin, vmax, Vmin),
max(vmin, vmax, Vmax))
else:
self.axes.viewLim.intervalx = (max(vmin, vmax, Vmin),
min(vmin, vmax, Vmax))
def get_minpos(self):
return self.axes.dataLim.minposx
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervalx
def set_data_interval(self, vmin, vmax, ignore=False):
'set the axis data limits'
if ignore:
self.axes.dataLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
self.stale = True
def set_default_intervals(self):
'set the default limits for the axis interval if they are not mutated'
xmin, xmax = 0., 1.
dataMutated = self.axes.dataLim.mutatedx()
viewMutated = self.axes.viewLim.mutatedx()
if not dataMutated or not viewMutated:
if self.converter is not None:
info = self.converter.axisinfo(self.units, self)
if info.default_limits is not None:
valmin, valmax = info.default_limits
xmin = self.converter.convert(valmin, self.units, self)
xmax = self.converter.convert(valmax, self.units, self)
if not dataMutated:
self.axes.dataLim.intervalx = xmin, xmax
if not viewMutated:
self.axes.viewLim.intervalx = xmin, xmax
self.stale = True
def get_tick_space(self):
ends = self.axes.transAxes.transform([[0, 0], [1, 0]])
length = ((ends[1][0] - ends[0][0]) / self.axes.figure.dpi) * 72.0
tick = self._get_tick(True)
# There is a heuristic here that the aspect ratio of tick text
# is no more than 3:1
size = tick.label1.get_size() * 3
if size > 0:
return int(np.floor(length / size))
else:
return 2**31 - 1
class YAxis(Axis):
__name__ = 'yaxis'
axis_name = 'y'
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the y axis.
Returns *True* | *False*
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
x, y = mouseevent.x, mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes, yaxes = trans.transform_point((x, y))
except ValueError:
return False, {}
l, b = self.axes.transAxes.transform_point((0, 0))
r, t = self.axes.transAxes.transform_point((1, 1))
inaxis = yaxes >= 0 and yaxes <= 1 and (
(x < l and x > l - self.pickradius) or
(x > r and x < r + self.pickradius))
return inaxis, {}
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return YTick(self.axes, 0, '', major=major, **tick_kw)
def _get_label(self):
# x in display coords (updated by _update_label_position)
# y in axes coords
label = mtext.Text(x=0, y=0.5,
# todo: get the label position
fontproperties=font_manager.FontProperties(
size=rcParams['axes.labelsize'],
weight=rcParams['axes.labelweight']),
color=rcParams['axes.labelcolor'],
verticalalignment='bottom',
horizontalalignment='center',
rotation='vertical',
rotation_mode='anchor')
label.set_transform(mtransforms.blended_transform_factory(
mtransforms.IdentityTransform(), self.axes.transAxes))
self._set_artist_props(label)
self.label_position = 'left'
return label
def _get_offset_text(self):
# x in display coords, y in axes coords (to be updated at draw time)
offsetText = mtext.Text(x=0, y=0.5,
fontproperties=font_manager.FontProperties(
size=rcParams['ytick.labelsize']
),
color=rcParams['ytick.color'],
verticalalignment='baseline',
horizontalalignment='left')
offsetText.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform())
)
self._set_artist_props(offsetText)
self.offset_text_position = 'left'
return offsetText
def _get_pixel_distance_along_axis(self, where, perturb):
"""
Returns the amount, in data coordinates, that a single pixel
corresponds to in the locality given by *where*, which is also given
in data coordinates, and is a y coordinate.
*perturb* is the amount to perturb the pixel. Usually +0.5 or -0.5.
Implementing this routine for an axis is optional; if present, it will
ensure that no ticks are lost due to round-off at the extreme ends of
an axis.
"""
#
# first figure out the pixel location of the "where" point. We use
# 1e-10 for the x point, so that we remain compatible with log axes.
# transformation from data coords to display coords
trans = self.axes.transData
# transformation from display coords to data coords
transinv = trans.inverted()
pix = trans.transform_point((1e-10, where))
# perturb the pixel
ptp = transinv.transform_point((pix[0], pix[1] + perturb))
dy = abs(ptp[1] - where)
return dy
def set_label_position(self, position):
"""
Set the label position (left or right)
ACCEPTS: [ 'left' | 'right' ]
"""
self.label.set_rotation_mode('anchor')
self.label.set_horizontalalignment('center')
if position == 'left':
self.label.set_verticalalignment('bottom')
elif position == 'right':
self.label.set_verticalalignment('top')
else:
msg = "Position accepts only [ 'left' | 'right' ]"
raise ValueError(msg)
self.label_position = position
self.stale = True
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
if not self._autolabelpos:
return
x, y = self.label.get_position()
if self.label_position == 'left':
try:
spine = self.axes.spines['left']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes + [spinebbox])
left = bbox.x0
self.label.set_position(
(left - self.labelpad * self.figure.dpi / 72.0, y)
)
else:
try:
spine = self.axes.spines['right']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])
right = bbox.x1
self.label.set_position(
(right + self.labelpad * self.figure.dpi / 72.0, y)
)
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x, y = self.offsetText.get_position()
top = self.axes.bbox.ymax
self.offsetText.set_position(
(x, top + self.OFFSETTEXTPAD * self.figure.dpi / 72.0)
)
def set_offset_position(self, position):
x, y = self.offsetText.get_position()
if position == 'left':
x = 0
elif position == 'right':
x = 1
else:
msg = "Position accepts only [ 'left' | 'right' ]"
raise ValueError(msg)
self.offsetText.set_ha(position)
self.offsetText.set_position((x, y))
self.stale = True
def get_text_widths(self, renderer):
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
left = 0.0
if bbox.width:
left += bbox.width + padPixels
right = 0.0
if bbox2.width:
right += bbox2.width + padPixels
if self.get_label_position() == 'left':
left += self.label.get_window_extent(renderer).width + padPixels
else:
right += self.label.get_window_extent(renderer).width + padPixels
return left, right
def set_ticks_position(self, position):
"""
Set the ticks position (left, right, both, default or none)
'both' sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at left. 'none'
can be used if you don't want any ticks. 'none' and 'both'
affect only the ticks, not the labels.
ACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]
"""
if position == 'right':
self.set_tick_params(which='both', right=True, labelright=True,
left=False, labelleft=False)
self.set_offset_position(position)
elif position == 'left':
self.set_tick_params(which='both', right=False, labelright=False,
left=True, labelleft=True)
self.set_offset_position(position)
elif position == 'both':
self.set_tick_params(which='both', right=True,
left=True)
elif position == 'none':
self.set_tick_params(which='both', right=False,
left=False)
elif position == 'default':
self.set_tick_params(which='both', right=True, labelright=False,
left=True, labelleft=True)
else:
raise ValueError("invalid position: %s" % position)
self.stale = True
def tick_right(self):
'use ticks only on right'
self.set_ticks_position('right')
def tick_left(self):
'use ticks only on left'
self.set_ticks_position('left')
def get_ticks_position(self):
"""
Return the ticks position (left, right, both or unknown)
"""
majt = self.majorTicks[0]
mT = self.minorTicks[0]
majorRight = ((not majt.tick1On) and majt.tick2On and
(not majt.label1On) and majt.label2On)
minorRight = ((not mT.tick1On) and mT.tick2On and
(not mT.label1On) and mT.label2On)
if majorRight and minorRight:
return 'right'
majorLeft = (majt.tick1On and (not majt.tick2On) and
majt.label1On and (not majt.label2On))
minorLeft = (mT.tick1On and (not mT.tick2On) and
mT.label1On and (not mT.label2On))
if majorLeft and minorLeft:
return 'left'
majorDefault = (majt.tick1On and majt.tick2On and
majt.label1On and (not majt.label2On))
minorDefault = (mT.tick1On and mT.tick2On and
mT.label1On and (not mT.label2On))
if majorDefault and minorDefault:
return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
def set_view_interval(self, vmin, vmax, ignore=False):
"""
If *ignore* is *False*, the order of vmin, vmax
does not matter; the original axis orientation will
be preserved. In addition, the view limits can be
expanded, but will not be reduced. This method is
for mpl internal use; for normal use, see
:meth:`~matplotlib.axes.Axes.set_ylim`.
"""
if ignore:
self.axes.viewLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
if Vmin < Vmax:
self.axes.viewLim.intervaly = (min(vmin, vmax, Vmin),
max(vmin, vmax, Vmax))
else:
self.axes.viewLim.intervaly = (max(vmin, vmax, Vmin),
min(vmin, vmax, Vmax))
self.stale = True
def get_minpos(self):
return self.axes.dataLim.minposy
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervaly
def set_data_interval(self, vmin, vmax, ignore=False):
'set the axis data limits'
if ignore:
self.axes.dataLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
self.stale = True
def set_default_intervals(self):
'set the default limits for the axis interval if they are not mutated'
ymin, ymax = 0., 1.
dataMutated = self.axes.dataLim.mutatedy()
viewMutated = self.axes.viewLim.mutatedy()
if not dataMutated or not viewMutated:
if self.converter is not None:
info = self.converter.axisinfo(self.units, self)
if info.default_limits is not None:
valmin, valmax = info.default_limits
ymin = self.converter.convert(valmin, self.units, self)
ymax = self.converter.convert(valmax, self.units, self)
if not dataMutated:
self.axes.dataLim.intervaly = ymin, ymax
if not viewMutated:
self.axes.viewLim.intervaly = ymin, ymax
self.stale = True
def get_tick_space(self):
ends = self.axes.transAxes.transform([[0, 0], [0, 1]])
length = ((ends[1][1] - ends[0][1]) / self.axes.figure.dpi) * 72.0
tick = self._get_tick(True)
# Having a spacing of at least 2 just looks good.
size = tick.label1.get_size() * 2.0
if size > 0:
return int(np.floor(length / size))
else:
return 2**31 - 1
| bsd-3-clause | 7,157,633,586,537,087,000 | 439,299,407,017,074,400 | 35.006771 | 79 | 0.559471 | false |
brettwooldridge/buck | third-party/py/pywatchman/pywatchman/encoding.py | 29 | 2957 | # Copyright 2016-present Facebook, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Facebook nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
'''Module to deal with filename encoding on the local system, as returned by
Watchman.'''
import sys
from . import (
compat,
)
if compat.PYTHON3:
default_local_errors = 'surrogateescape'
def get_local_encoding():
if sys.platform == 'win32':
# Watchman always returns UTF-8 encoded strings on Windows.
return 'utf-8'
# On the Python 3 versions we support, sys.getfilesystemencoding never
# returns None.
return sys.getfilesystemencoding()
else:
# Python 2 doesn't support surrogateescape, so use 'strict' by
# default. Users can register a custom surrogateescape error handler and use
# that if they so desire.
default_local_errors = 'strict'
def get_local_encoding():
if sys.platform == 'win32':
# Watchman always returns UTF-8 encoded strings on Windows.
return 'utf-8'
fsencoding = sys.getfilesystemencoding()
if fsencoding is None:
# This is very unlikely to happen, but if it does, just use UTF-8
fsencoding = 'utf-8'
return fsencoding
def encode_local(s):
return s.encode(get_local_encoding(), default_local_errors)
def decode_local(bs):
return bs.decode(get_local_encoding(), default_local_errors)
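# Illustrative usage (editor's note, not part of the original source):
# round-tripping a local filename through the helpers above.
#
#     raw = encode_local(u'some/path')
#     assert decode_local(raw) == u'some/path'
#
# On Windows both directions always use UTF-8, per get_local_encoding().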
| apache-2.0 | -6,414,812,443,931,013,000 | -435,265,079,455,988,740 | 39.506849 | 80 | 0.725736 | false |
gratteur/zim-desktop | zim/config/manager.py | 5 | 10560 | # -*- coding: utf-8 -*-
# Copyright 2013 Jaap Karssenberg <[email protected]>
from __future__ import with_statement
from weakref import WeakValueDictionary
from . import basedirs
from .dicts import INIConfigFile
from zim.fs import FileNotFoundError
from zim.signals import ConnectorMixin, SignalEmitter, SignalHandler
class ConfigManager(object):
'''This class defines an object that manages a set of config files.
The config manager abstracts the lookup of files using the XDG
search paths and ensures that there is only a single instance used
for each config file.
The config manager can switch the config file based on the config
X{profile} that is used. The profile is determined by the notebook
properties. However, this object relies on its creator to set up
the hooks to get the property from the notebook. Changes to the
profile are communicated to all users of the config by means of the
"changed" signals on L{ConfigFile} and L{ConfigDict} objects.
'''
def __init__(self, dir=None, dirs=None, profile=None):
'''Constructor
@param dir: the folder for reading and writing config files,
e.g. a C{Dir} or a C{VirtualConfigBackend} objects.
If no dir is given, the XDG basedirs are used and C{dirs} is
ignored.
@param dirs: list or generator of C{Dir} objects used as
search path when a config file does not exist on C{dir}
@param profile: initial profile name
'''
self.profile = profile
self._config_files = WeakValueDictionary()
self._config_dicts = WeakValueDictionary()
if dir is None:
assert dirs is None, "Do not provide 'dirs' without 'dir'"
self._dir = dir
self._dirs = dirs
def set_profile(self, profile):
'''Set the profile to use for the configuration
@param profile: the profile name or C{None}
'''
assert profile is None or isinstance(profile, basestring)
if profile != self.profile:
self.profile = profile
for path, conffile in self._config_files.items():
if path.startswith('<profile>/'):
file, defaults = self._get_file(path)
conffile.set_files(file, defaults)
# Updates will cascade through the dicts by the
# "changed" signals on various objects
def _get_file(self, filename):
basepath = filename.replace('<profile>/', '')
if self.profile:
path = filename.replace('<profile>/', 'profiles/%s/' % self.profile)
else:
path = basepath
if self._dir:
file = self._dir.file(path)
if self._dirs:
defaults = DefaultFileIter(self._dirs, path)
else:
defaults = DefaultFileIter([], path)
if self.profile and filename.startswith('<profile>/'):
mypath = filename.replace('<profile>/', '')
defaults.extra.insert(0, self._dir.file(mypath))
else:
file = basedirs.XDG_CONFIG_HOME.file('zim/' + path)
defaults = XDGConfigFileIter(basepath)
## Backward compatibility for profiles
if self.profile \
and filename in (
'<profile>/preferences.conf',
'<profile>/style.conf'
):
backwardfile = self._get_backward_file(filename)
defaults.extra.insert(0, backwardfile)
return file, defaults
def _get_backward_file(self, filename):
if filename == '<profile>/preferences.conf':
path = 'profiles/%s.conf' % self.profile
elif filename == '<profile>/style.conf':
path = 'styles/%s.conf' % self.profile
else:
raise AssertionError
if self._dir:
return self._dir.file(path)
else:
return basedirs.XDG_CONFIG_HOME.file('zim/' + path)
def get_config_file(self, filename):
'''Returns a C{ConfigFile} object for C{filename}'''
if filename not in self._config_files:
file, defaults = self._get_file(filename)
config_file = ConfigFile(file, defaults)
self._config_files[filename] = config_file
return self._config_files[filename]
def get_config_dict(self, filename):
'''Returns a C{SectionedConfigDict} object for C{filename}'''
if filename not in self._config_dicts:
file = self.get_config_file(filename)
config_dict = ConfigManagerINIConfigFile(file)
self._config_dicts[filename] = config_dict
return self._config_dicts[filename]
#def get_all_config_files(filename) - iterate multiple values ?
#def get_config_section(filename, section): - return section
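# Illustrative usage (editor's note, not part of the original source; assumes a
# zim Dir object ``config_dir``):
#
#     manager = ConfigManager(dir=config_dir, profile='work')
#     prefs = manager.get_config_dict('<profile>/preferences.conf')
#     manager.set_profile('home')  # re-resolves '<profile>/...' paths and
#                                  # cascades "changed" signals to ``prefs``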
def VirtualConfigManager(**data):
return ConfigManager(VirtualConfigBackend(**data))
class DefaultFileIter(object):
'''Generator for iterating default files
Will yield first the files in C{extra} followed by files that
are based on C{path} and C{dirs}. Yields only existing files.
'''
def __init__(self, dirs, path, extra=None):
self.path = path
self.dirs = dirs
self.extra = extra or []
def __iter__(self):
for file in self.extra:
if file.exists():
yield file
for dir in self.dirs:
file = dir.file(self.path)
if file.exists():
yield file
class XDGConfigDirsIter(object):
'''Generator for iterating XDG config dirs
Yields the "zim" subdir of each XDG config file.
'''
def __iter__(self):
from . import data_dirs # XXX
yield basedirs.XDG_CONFIG_HOME.subdir(('zim'))
for dir in basedirs.XDG_CONFIG_DIRS:
yield dir.subdir(('zim'))
for dir in data_dirs():
yield dir
class XDGConfigFileIter(DefaultFileIter):
'''Like C{DefaultFileIter}, but uses XDG config dirs'''
def __init__(self, path, extra=None):
self.path = path
self.dirs = XDGConfigDirsIter()
self.extra = extra or []
class ConfigManagerINIConfigFile(INIConfigFile):
'''Like L{INIConfigFile} but with autosave when the dict changes'''
def __init__(self, file):
INIConfigFile.__init__(self, file, monitor=True)
self.connect_after('changed', self.on_changed)
# autosave on changing the dict, connect after
# regular handlers to avoid getting stuck with a set
@SignalHandler
def on_changed(self, *a):
with self.on_file_changed.blocked():
self.write()
@SignalHandler
def on_file_changed(self, *a):
with self.on_changed.blocked():
INIConfigFile.on_file_changed(self, *a)
class ConfigFile(ConnectorMixin, SignalEmitter):
'''Container object for a config file
Maps to a "base" file in the home folder, used to write new values,
and an optional default file, which is used for reading only.
@ivar file: the underlying file object for the base config file
in the home folder
@ivar defaults: a generator that yields default files
@note: this class implements a similar API to the L{File} class but
is explicitly not a sub-class of L{File} because config files should
typically not be moved, renamed, etc. It just implements the reading
and writing methods.
@signal: C{changed ()}: emitted when the
underlying file changed (based on C{gio} monitoring support)
or by other file monitors, or when the profile is switched
'''
# TODO __signals__
def __init__(self, file, defaults=None):
self.file = None
self.defaults = None
with self.blocked_signals('changed'):
self.set_files(file, defaults)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.file.path)
def __eq__(self, other):
return isinstance(other, ConfigFile) \
and other.file == self.file
def set_files(self, file, defaults=None):
if self.file:
self.disconnect_from(self.file)
self.file = file
self.defaults = defaults or []
#~ self.connectto(self.file, 'changed', self.on_file_changed)
self.emit('changed')
#~ def on_file_changed(self, file, *a):
#~ print "CONF FILE changed:", file
# TODO verify etag (we didn't write ourselves)
#~ self.emit('changed')
def check_has_changed_on_disk(self):
return True # we do not emit the signal if it is not real...
@property
def basename(self):
return self.file.basename
def touch(self):
'''Ensure the custom file in the home folder exists. Either by
copying a default config file, or touching an empty file.
Intended to be called before trying to edit the file with an
external editor.
'''
if not self.file.exists():
for default in self.defaults:
default.copyto(self.file)
break
else:
self.file.touch() # create empty file
def read(self, fail=False):
'''Read the base file or first default file
@param fail: if C{True} a L{FileNotFoundError} error is raised
when neither the base file or a default file are found. If
C{False} it will return C{''} for a non-existing file.
@returns: file content as a string
'''
try:
return self.file.read()
except FileNotFoundError:
for default in self.defaults:
return default.read()
else:
if fail:
raise
else:
return ''
def readlines(self, fail=False):
'''Read the base file or first default file
@param fail: if C{True} a L{FileNotFoundError} error is raised
when neither the base file or a default file are found. If
C{False} it will return C{[]} for a non-existing file.
@returns: file content as a list of lines
'''
try:
return self.file.readlines()
except FileNotFoundError:
for default in self.defaults:
return default.readlines()
else:
if fail:
raise
else:
return []
def write(self, text):
'''Write base file, see L{File.write()}'''
self.file.write(text)
def writelines(self, lines):
'''Write base file, see L{File.writelines()}'''
self.file.writelines(lines)
def remove(self):
'''Remove user file, leaves default files in place'''
if self.file.exists():
return self.file.remove()
class VirtualConfigBackend(object):
'''Virtual dir, mainly used for testing'''
def __init__(self, **data):
self._data = data
def file(self, path):
return VirtualConfigBackendFile(self._data, path)
class VirtualConfigBackendFile(object):
'''Virtual file, mainly used for testing'''
def __init__(self, data, path):
self._key = path
self._data = data
@property
def path(self):
return '<virtual>/' + self._key
@property
def basename(self):
import os
return os.path.basename(self.path)
def connect(self, handler, *a):
pass
def disconnect(self, handler):
pass
def exists(self):
return self._key in self._data \
and self._data[self._key] is not None
def touch(self):
self._data.setdefault(self._key, '')
def copyto(self, other):
text = self.read()
other.write(text)
def read(self):
try:
text = self._data[self._key]
except KeyError:
raise FileNotFoundError(self)
else:
if text is None:
raise FileNotFoundError(self)
else:
return text
def readlines(self):
text = self.read()
return text.splitlines(True)
def write(self, text):
self._data[self._key] = text or ''
def writelines(self, lines):
self._data[self._key] = ''.join(lines) or ''
def remove(self):
del self._data[self._key]
| gpl-2.0 | 235,828,220,063,419,620 | -3,540,997,099,990,724,600 | 26.007673 | 71 | 0.697917 | false |
PyBossa/pybossa | pybossa/default_settings.py | 1 | 4813 | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
DEBUG = False
# webserver host and port
HOST = '0.0.0.0'
PORT = 5000
SECRET = 'foobar'
SECRET_KEY = 'my-session-secret'
ITSDANGEROUSKEY = 'its-dangerous-key'
## project configuration
BRAND = 'PYBOSSA'
TITLE = 'PYBOSSA'
COPYRIGHT = 'Set Your Institution'
DESCRIPTION = 'Set the description in your config'
TERMSOFUSE = 'http://okfn.org/terms-of-use/'
DATAUSE = 'http://opendatacommons.org/licenses/by/'
LOGO = ''
DEFAULT_LOCALE = 'en'
LOCALES = [('en', 'English'), ('es', u'Español'),
('it', 'Italiano'), ('fr', u'Français'),
('ja', u'日本語'), ('el', u'ελληνικά')]
## Default THEME
THEME = 'default'
## Default number of apps per page
APPS_PER_PAGE = 20
## Default allowed extensions
ALLOWED_EXTENSIONS = ['js', 'css', 'png', 'jpg', 'jpeg', 'gif', 'zip']
UPLOAD_METHOD = 'local'
## Default number of users shown in the leaderboard
LEADERBOARD = 20
## Default configuration for debug toolbar
ENABLE_DEBUG_TOOLBAR = False
# Cache default key prefix
REDIS_SENTINEL = [('localhost', 26379)]
REDIS_MASTER = 'mymaster'
REDIS_DB = 0
REDIS_KEYPREFIX = 'pybossa_cache'
## Default cache timeouts
# Project cache
AVATAR_TIMEOUT = 30 * 24 * 60 * 60
APP_TIMEOUT = 15 * 60
REGISTERED_USERS_TIMEOUT = 15 * 60
ANON_USERS_TIMEOUT = 5 * 60 * 60
STATS_FRONTPAGE_TIMEOUT = APP_TIMEOUT
STATS_APP_TIMEOUT = 12 * 60 * 60
STATS_DRAFT_TIMEOUT = 24 * 60 * 60
N_APPS_PER_CATEGORY_TIMEOUT = 60 * 60
BROWSE_TASKS_TIMEOUT = 3 * 60 * 60
# Category cache
CATEGORY_TIMEOUT = 24 * 60 * 60
# User cache
USER_TIMEOUT = 15 * 60
USER_TOP_TIMEOUT = 24 * 60 * 60
USER_TOTAL_TIMEOUT = 24 * 60 * 60
# Project Presenters
PRESENTERS = ["basic", "image", "sound", "video", "map", "pdf"]
# Default Google Docs spreadsheet template tasks URLs
TEMPLATE_TASKS = {
'image': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdHFEN29mZUF0czJWMUhIejF6dWZXdkE&usp=sharing",
'sound': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEczcWduOXRUb1JUc1VGMmJtc2xXaXc&usp=sharing",
'video': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZ2UGhxSTJjQl9YNVhfUVhGRUdoRWc&usp=sharing",
'map': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdGZnbjdwcnhKRVNlN1dGXy0tTnNWWXc&usp=sharing",
'pdf': "https://docs.google.com/spreadsheet/ccc?key=0AsNlt0WgPAHwdEVVamc0R0hrcjlGdXRaUXlqRXlJMEE&usp=sharing"}
# Rate limits default values
LIMIT = 300
PER = 15 * 60
# Expiration time for password protected project cookies
PASSWD_COOKIE_TIMEOUT = 60 * 30
# Expiration time for account confirmation / password recovery links
ACCOUNT_LINK_EXPIRATION = 5 * 60 * 60
# Disable new account confirmation (via email)
ACCOUNT_CONFIRMATION_DISABLED = True
# Send emails weekly update every
WEEKLY_UPDATE_STATS = 'Sunday'
# Enable Server Sent Events
SSE = False
# Pro user features. False will make the feature available to all regular users,
# while True will make it available only to pro users
PRO_FEATURES = {
'auditlog': True,
'webhooks': True,
'updated_exports': True,
'notify_blog_updates': True,
'project_weekly_report': True,
'autoimporter': True,
'better_stats': True
}
CORS_RESOURCES = {r"/api/*": {"origins": "*",
"allow_headers": ['Content-Type',
'Authorization'],
"max_age": 21600
}}
FAILED_JOBS_RETRIES = 3
FAILED_JOBS_MAILS = 7
FULLTEXTSEARCH_LANGUAGE = 'english'
STRICT_SLASHES = True
# Background jobs default time outs
MINUTE = 60
TIMEOUT = 10 * MINUTE
# OneSignal GCM Sender ID
# DO NOT MODIFY THIS
GCM_SENDER_ID = "482941778795"
# Unpublish inactive projects
UNPUBLISH_PROJECTS = True
# TTL for ZIP files of personal data
TTL_ZIP_SEC_FILES = 3
# Default cryptopan key
CRYPTOPAN_KEY = '32-char-str-for-AES-key-and-pad.'
# Instruct PYBOSSA to generate absolute paths or not for avatars
AVATAR_ABSOLUTE = True
# Spam accounts to avoid
SPAM = []
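# Editor's note (illustrative, not part of the original file): deployments
# normally override these defaults from their own local settings module
# (commonly a settings_local.py pointed to by the app config), for instance:
#
#     DEBUG = False
#     BRAND = 'My Crowdsourcing Site'
#     ACCOUNT_CONFIRMATION_DISABLED = False
#
# Any name left undefined there falls back to the value in this module.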
| agpl-3.0 | -3,004,701,902,484,256,000 | -6,014,766,098,554,078,000 | 28.429448 | 116 | 0.689806 | false |
computersalat/ansible | test/support/integration/plugins/modules/postgresql_query.py | 53 | 10477 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Felix Archambault
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_query
short_description: Run PostgreSQL queries
description:
- Runs arbitrary PostgreSQL queries.
- Can run queries from SQL script files.
- Does not run against backup files. Use M(postgresql_db) with I(state=restore)
to run queries on files made by pg_dump/pg_dumpall utilities.
version_added: '2.8'
options:
query:
description:
- SQL query to run. Variables can be escaped with psycopg2 syntax
U(http://initd.org/psycopg/docs/usage.html).
type: str
positional_args:
description:
- List of values to be passed as positional arguments to the query.
When the value is a list, it will be converted to PostgreSQL array.
- Mutually exclusive with I(named_args).
type: list
elements: raw
named_args:
description:
- Dictionary of key-value arguments to pass to the query.
When the value is a list, it will be converted to PostgreSQL array.
- Mutually exclusive with I(positional_args).
type: dict
path_to_script:
description:
- Path to SQL script on the remote host.
- Returns result of the last query in the script.
- Mutually exclusive with I(query).
type: path
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- login_db
autocommit:
description:
- Execute in autocommit mode when the query can't be run inside a transaction block
(e.g., VACUUM).
- Mutually exclusive with I(check_mode).
type: bool
default: no
version_added: '2.9'
encoding:
description:
- Set the client encoding for the current session (e.g. C(UTF-8)).
- The default is the encoding defined by the database.
type: str
version_added: '2.10'
seealso:
- module: postgresql_db
author:
- Felix Archambault (@archf)
- Andrew Klychkov (@Andersson007)
- Will Rouesnel (@wrouesnel)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Simple select query to acme db
postgresql_query:
db: acme
query: SELECT version()
- name: Select query to db acme with positional arguments and non-default credentials
postgresql_query:
db: acme
login_user: django
login_password: mysecretpass
query: SELECT * FROM acme WHERE id = %s AND story = %s
positional_args:
- 1
- test
- name: Select query to test_db with named_args
postgresql_query:
db: test_db
query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
named_args:
id_val: 1
story_val: test
- name: Insert query to test_table in db test_db
postgresql_query:
db: test_db
query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
- name: Run queries from SQL script using UTF-8 client encoding for session
postgresql_query:
db: test_db
path_to_script: /var/lib/pgsql/test.sql
positional_args:
- 1
encoding: UTF-8
- name: Example of using autocommit parameter
postgresql_query:
db: test_db
query: VACUUM
autocommit: yes
- name: >
Insert data into a column of array type using positional_args.
Note that we use quotes here, the same as for passing JSON, etc.
postgresql_query:
query: INSERT INTO test_table (array_column) VALUES (%s)
positional_args:
- '{1,2,3}'
# Pass list and string vars as positional_args
- name: Set vars
set_fact:
my_list:
- 1
- 2
- 3
my_arr: '{1, 2, 3}'
- name: Select from test table by passing positional_args as arrays
postgresql_query:
query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
positional_args:
- '{{ my_list }}'
- '{{ my_arr|string }}'
'''
RETURN = r'''
query:
description: Query that was tried to be executed.
returned: always
type: str
sample: 'SELECT * FROM bar'
statusmessage:
description: Attribute containing the message returned by the command.
returned: always
type: str
sample: 'INSERT 0 1'
query_result:
description:
- List of dictionaries in column:value form representing returned rows.
returned: changed
type: list
sample: [{"Column": "Value1"},{"Column": "Value2"}]
rowcount:
description: Number of affected rows.
returned: changed
type: int
sample: 5
'''
try:
from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
from psycopg2.extras import DictCursor
except ImportError:
# it is needed for checking 'no result to fetch' in main(),
# psycopg2 availability will be checked by connect_to_db() into
# ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.postgres import (
connect_to_db,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
# ===========================================
# Module execution.
#
def list_to_pg_array(elem):
"""Convert the passed list to PostgreSQL array
represented as a string.
Args:
elem (list): List that needs to be converted.
Returns:
elem (str): String representation of PostgreSQL array.
"""
elem = str(elem).strip('[]')
elem = '{' + elem + '}'
return elem
def convert_elements_to_pg_arrays(obj):
"""Convert list elements of the passed object
to PostgreSQL arrays represented as strings.
Args:
obj (dict or list): Object whose elements need to be converted.
Returns:
obj (dict or list): Object with converted elements.
"""
if isinstance(obj, dict):
for (key, elem) in iteritems(obj):
if isinstance(elem, list):
obj[key] = list_to_pg_array(elem)
elif isinstance(obj, list):
for i, elem in enumerate(obj):
if isinstance(elem, list):
obj[i] = list_to_pg_array(elem)
return obj
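# Illustrative behaviour of the helpers above (editor's note, not part of the
# original module):
#
#     list_to_pg_array([1, 2, 3])                      # -> '{1, 2, 3}'
#     convert_elements_to_pg_arrays({'ids': [1, 2]})   # -> {'ids': '{1, 2}'}
#
# i.e. Python lists become PostgreSQL array literals before being passed to
# cursor.execute().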
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
query=dict(type='str'),
db=dict(type='str', aliases=['login_db']),
positional_args=dict(type='list', elements='raw'),
named_args=dict(type='dict'),
session_role=dict(type='str'),
path_to_script=dict(type='path'),
autocommit=dict(type='bool', default=False),
encoding=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=(('positional_args', 'named_args'),),
supports_check_mode=True,
)
query = module.params["query"]
positional_args = module.params["positional_args"]
named_args = module.params["named_args"]
path_to_script = module.params["path_to_script"]
autocommit = module.params["autocommit"]
encoding = module.params["encoding"]
if autocommit and module.check_mode:
module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
if path_to_script and query:
module.fail_json(msg="path_to_script is mutually exclusive with query")
if positional_args:
positional_args = convert_elements_to_pg_arrays(positional_args)
elif named_args:
named_args = convert_elements_to_pg_arrays(named_args)
if path_to_script:
try:
with open(path_to_script, 'rb') as f:
query = to_native(f.read())
except Exception as e:
module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
if encoding is not None:
db_connection.set_client_encoding(encoding)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Prepare args:
if module.params.get("positional_args"):
arguments = module.params["positional_args"]
elif module.params.get("named_args"):
arguments = module.params["named_args"]
else:
arguments = None
# Set defaults:
changed = False
# Execute query:
try:
cursor.execute(query, arguments)
except Exception as e:
if not autocommit:
db_connection.rollback()
cursor.close()
db_connection.close()
module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (query, arguments, to_native(e)))
statusmessage = cursor.statusmessage
rowcount = cursor.rowcount
try:
query_result = [dict(row) for row in cursor.fetchall()]
except Psycopg2ProgrammingError as e:
if to_native(e) == 'no results to fetch':
query_result = {}
except Exception as e:
module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
if 'SELECT' not in statusmessage:
if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
s = statusmessage.split()
if len(s) == 3:
if statusmessage.split()[2] != '0':
changed = True
elif len(s) == 2:
if statusmessage.split()[1] != '0':
changed = True
else:
changed = True
else:
changed = True
if module.check_mode:
db_connection.rollback()
else:
if not autocommit:
db_connection.commit()
kw = dict(
changed=changed,
query=cursor.query,
statusmessage=statusmessage,
query_result=query_result,
rowcount=rowcount if rowcount >= 0 else 0,
)
cursor.close()
db_connection.close()
module.exit_json(**kw)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,035,124,335,437,452,000 | 9,098,684,490,880,702,000 | 27.782967 | 97 | 0.642455 | false |
Naoto-Imamachi/MIRAGE | scripts/module/preparation/phastcons_score_list.py | 1 | 3683 | #!usr/bin/env python
import sys
import re
import shelve
from parameter.common_parameters import common_parameters
import utils.setting_utils as utils
utils.now_time("phastcons_score_list script starting...")
p = utils.Bunch(common_parameters)
def main():
utils.now_time("Input_file: " + p.phastcons_score_list_db_input)
utils.now_time("Reference_file: " + p.phastcons_score_list_reference)
utils.now_time("Output_file: " + p.phastcons_score_list_db_output)
output_merge = p.phastcons_score_list_db_output + 'phastCons46way_Refseq_for_MIRAGE_CDS.db' #'phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
output_merge_shelve = shelve.open(output_merge)
#for x in ['chr21']:
for x in ['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY','chrM']:
ref_s = p.phastcons_score_list_reference #mirBase, Refseq etc...
ref_file = open(ref_s,'r')
input_s = p.phastcons_score_list_db_input + x + '.phastCons46way_Refseq_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19.db'
output_s = p.phastcons_score_list_db_output + x + '.phastCons46way_Refseq_for_MIRAGE_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
input_shelve = shelve.open(input_s)
output_shelve = shelve.open(output_s)
score_list_dict = {}
for line in ref_file:
line = line.rstrip()
data = line.split("\t")
chrom = data[0]
if not chrom == x:
continue
strand = data[5]
if len(data) >= 12: #12bed format
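                # BED12 columns (0-based): 1 = chromStart, 10 = blockSizes (exon lengths),
                # 11 = blockStarts (offsets from chromStart), so each exon runs from
                # chromStart + blockStart for blockSize bases.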
exon_block = data[10].split(',')
exon_block.pop() #Remove the last item ''
exon_st = data[11].split(',')
exon_st.pop() #Remove the last item ''
name = data[3]
score_list_dict[name] = []
for y in range(len(exon_block)):
st = int(data[1]) + int(exon_st[y])
ed = int(data[1]) + int(exon_st[y]) + int(exon_block[y])
length = ed - st
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
elif len(data) >= 3: #6bed format
st = int(data[1])
ed = int(data[2])
length = ed - st
name = data[3]
score_list_dict[name] = []
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
else:
                print('ERROR: Your BED format file has fewer than three columns.')
                print('BED format files need to have at least three columns [chr, st, ed]...')
sys.exit(1)
output_shelve.update(score_list_dict)
output_merge_shelve.update(score_list_dict)
input_shelve.close()
output_shelve.close()
utils.now_time("phastcons_score_list script was successfully finished!!")
output_merge_shelve.close()
if __name__ == '__main__':
main()
| mit | -5,745,701,975,999,545,000 | 6,637,574,411,123,980,000 | 41.329412 | 203 | 0.524572 | false |
matthiasdiener/spack | var/spack/repos/builtin/packages/paml/package.py | 5 | 2221 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Paml(MakefilePackage):
"""PAML is a package of programs for phylogenetic analyses of DNA or
    protein sequences using maximum likelihood."""
homepage = "http://abacus.gene.ucl.ac.uk/software/paml.html"
url = "http://abacus.gene.ucl.ac.uk/software/paml4.9e.tgz"
version('4.9e', 'ac5a062bfea1f4eaac79008434030acf')
build_directory = 'src'
def install(self, spec, prefix):
mkdirp(prefix.bin)
with working_dir(self.build_directory):
install('baseml', prefix.bin)
install('basemlg', prefix.bin)
install('chi2', prefix.bin)
install('codeml', prefix.bin)
install('evolver', prefix.bin)
install('infinitesites', prefix.bin)
install('mcmctree', prefix.bin)
install('pamp', prefix.bin)
install('yn00', prefix.bin)
install_tree('dat', prefix.dat)
install_tree('Technical', prefix.Technical)
| lgpl-2.1 | 8,972,019,964,686,027,000 | -2,311,976,118,876,116,500 | 41.711538 | 78 | 0.647906 | false |
nyasara/azuremono-docker | IronPython-2.7.4/Lib/encodings/cp861.py | 93 | 35587 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP861.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp861',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
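# Usage sketch (illustrative, not part of the generated codec): once this module is
# picked up by the standard `encodings` search machinery, round-tripping follows the
# tables below, e.g.
#
#   '\x95'.decode('cp861') == u'\xfe'   # LATIN SMALL LETTER THORN
#   u'\xfe'.encode('cp861') == '\x95'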
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00d0, # LATIN CAPITAL LETTER ETH
0x008c: 0x00f0, # LATIN SMALL LETTER ETH
0x008d: 0x00de, # LATIN CAPITAL LETTER THORN
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00fe, # LATIN SMALL LETTER THORN
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x0098: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00a5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00a6: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00a7: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xd0' # 0x008b -> LATIN CAPITAL LETTER ETH
u'\xf0' # 0x008c -> LATIN SMALL LETTER ETH
u'\xde' # 0x008d -> LATIN CAPITAL LETTER THORN
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xfe' # 0x0095 -> LATIN SMALL LETTER THORN
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xdd' # 0x0097 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xfd' # 0x0098 -> LATIN SMALL LETTER Y WITH ACUTE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\u20a7' # 0x009e -> PESETA SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xc1' # 0x00a4 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcd' # 0x00a5 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xd3' # 0x00a6 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xda' # 0x00a7 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a3: 0x009c, # POUND SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c1: 0x00a4, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cd: 0x00a5, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d0: 0x008b, # LATIN CAPITAL LETTER ETH
0x00d3: 0x00a6, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00da: 0x00a7, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x0097, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00de: 0x008d, # LATIN CAPITAL LETTER THORN
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f0: 0x008c, # LATIN SMALL LETTER ETH
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x0098, # LATIN SMALL LETTER Y WITH ACUTE
0x00fe: 0x0095, # LATIN SMALL LETTER THORN
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| mit | 34,420,539,021,711,268 | -7,426,068,439,574,373,000 | 48.984241 | 97 | 0.595667 | false |
alrusdi/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/templates/unicode.py | 39 | 1290 | # -*- coding: utf-8 -*-
from unittest import TestCase
from django.template import Template, TemplateEncodingError, Context
from django.utils.safestring import SafeData
class UnicodeTests(TestCase):
def test_template(self):
# Templates can be created from unicode strings.
t1 = Template(u'ŠĐĆŽćžšđ {{ var }}')
# Templates can also be created from bytestrings. These are assumed to
# be encoded using UTF-8.
s = '\xc5\xa0\xc4\x90\xc4\x86\xc5\xbd\xc4\x87\xc5\xbe\xc5\xa1\xc4\x91 {{ var }}'
t2 = Template(s)
s = '\x80\xc5\xc0'
self.assertRaises(TemplateEncodingError, Template, s)
# Contexts can be constructed from unicode or UTF-8 bytestrings.
c1 = Context({"var": "foo"})
c2 = Context({u"var": "foo"})
c3 = Context({"var": u"Đđ"})
c4 = Context({u"var": "\xc4\x90\xc4\x91"})
# Since both templates and all four contexts represent the same thing,
# they all render the same (and are returned as unicode objects and
# "safe" objects as well, for auto-escaping purposes).
self.assertEqual(t1.render(c3), t2.render(c3))
self.assertTrue(isinstance(t1.render(c3), unicode))
self.assertTrue(isinstance(t1.render(c3), SafeData))
| gpl-3.0 | -8,286,400,658,015,384,000 | 7,699,995,939,455,638,000 | 41.666667 | 88 | 0.644531 | false |
DerekK88/PICwriter | picwriter/components/stripslotconverter.py | 1 | 9317 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import gdspy
import picwriter.toolkit as tk
class StripSlotConverter(tk.Component):
"""Strip-to-Slot Side Converter Cell class. Adiabatically transforms a strip to a slot waveguide mode, with two sections. Section 1 introduces a narrow waveguide alongside the input strip waveguide and gradually lowers the gap between the strip waveguide and narrow side waveguide. Section 2 gradually converts the widths of the two waveguides until they are equal to the slot rail widths.
Args:
* **wgt_input** (WaveguideTemplate): WaveguideTemplate object for the input waveguide (should be either of type `strip` or `slot`).
* **wgt_output** (WaveguideTemplate): WaveguideTemplate object for the output waveguide (should be either of type `strip` or `slot`, opposite of the input type).
* **length1** (float): Length of section 1 that gradually changes the distance between the two waveguides.
* **length2** (float): Length of section 2 that gradually changes the widths of the two waveguides until equal to the slot waveguide rail widths.
* **start_rail_width** (float): Width of the narrow waveguide appearing next to the strip waveguide.
* **end_strip_width** (float): Width of the strip waveguide at the end of `length1` and before `length2`
* **d** (float): Distance between the outer edge of the strip waveguide and the start of the slot waveguide rail.
Keyword Args:
* **input_strip** (Boolean): If `True`, sets the input port to be the strip waveguide side. If `False`, slot waveguide is on the input. Defaults to `None`, in which case the input port waveguide template is used to choose.
* **port** (tuple): Cartesian coordinate of the input port. Defaults to (0,0).
* **direction** (string): Direction that the component will point *towards*, can be of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, OR an angle (float, in radians)
Members:
* **portlist** (dict): Dictionary with the relevant port information
Portlist format:
* portlist['input'] = {'port': (x1,y1), 'direction': 'dir1'}
* portlist['output'] = {'port': (x2, y2), 'direction': 'dir2'}
Where in the above (x1,y1) is the same as the 'port' input, (x2, y2) is the end of the taper, and 'dir1', 'dir2' are of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, *or* an angle in *radians*.
'Direction' points *towards* the waveguide that will connect to it.
Note: The waveguide and cladding layer/datatype are taken from the `wgt_slot` by default.
"""
def __init__(
self,
wgt_input,
wgt_output,
length1,
length2,
start_rail_width,
end_strip_width,
d,
input_strip=None,
port=(0, 0),
direction="EAST",
):
tk.Component.__init__(self, "StripSlotConverter", locals())
self.portlist = {}
if (not isinstance(input_strip, bool)) and (input_strip != None):
raise ValueError(
"Invalid input provided for `input_strip`. Please specify a boolean."
)
if input_strip == None:
# Auto-detect based on wgt_input
self.input_strip = (
wgt_input.wg_type == "strip" or wgt_input.wg_type == "swg"
)
else:
# User-override
self.input_strip = input_strip
if self.input_strip:
self.wgt_strip = wgt_input
self.wgt_slot = wgt_output
else:
self.wgt_strip = wgt_output
self.wgt_slot = wgt_input
self.wg_spec = {
"layer": wgt_output.wg_layer,
"datatype": wgt_output.wg_datatype,
}
self.clad_spec = {
"layer": wgt_output.clad_layer,
"datatype": wgt_output.clad_datatype,
}
self.length1 = length1
self.length2 = length2
self.d = d
self.start_rail_width = start_rail_width
self.end_strip_width = end_strip_width
self.port = port
self.direction = direction
self.__build_cell()
self.__build_ports()
""" Translate & rotate the ports corresponding to this specific component object
"""
self._auto_transform_()
def __build_cell(self):
# Sequentially build all the geometric shapes using polygons
# Add strip waveguide taper for region 1
x0, y0 = (0, 0)
pts = [
(x0, y0 - self.wgt_strip.wg_width / 2.0),
(x0, y0 + self.wgt_strip.wg_width / 2.0),
(
x0 + self.length1,
y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width,
),
(x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0),
]
strip1 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the thin side waveguide for region 1
pts = [
(x0, y0 + self.wgt_strip.wg_width / 2.0 + self.d),
(x0, y0 + self.wgt_strip.wg_width / 2.0 + self.d + self.start_rail_width),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot
+ self.start_rail_width,
),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot,
),
]
thin_strip = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the bottom rail for region 2
pts = [
(
x0 + self.length1,
y0 - self.wgt_strip.wg_width / 2.0 + self.end_strip_width,
),
(x0 + self.length1, y0 - self.wgt_strip.wg_width / 2.0),
(x0 + self.length1 + self.length2, y0 - self.wgt_slot.wg_width / 2.0),
(
x0 + self.length1 + self.length2,
y0 - self.wgt_slot.wg_width / 2.0 + self.wgt_slot.rail,
),
]
rail1 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add the top rail for region 2
pts = [
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot
+ self.start_rail_width,
),
(
x0 + self.length1,
y0
- self.wgt_strip.wg_width / 2.0
+ self.end_strip_width
+ self.wgt_slot.slot,
),
(
x0 + self.length1 + self.length2,
y0 + self.wgt_slot.wg_width / 2.0 - self.wgt_slot.rail,
),
(x0 + self.length1 + self.length2, y0 + self.wgt_slot.wg_width / 2.0),
]
rail2 = gdspy.Polygon(
pts, layer=self.wgt_strip.wg_layer, datatype=self.wgt_strip.wg_datatype
)
# Add a cladding polygon
pts = [
(x0, y0 + self.wgt_strip.clad_width + self.wgt_strip.wg_width / 2.0),
(
x0 + self.length1 + self.length2,
y0 + self.wgt_slot.clad_width + self.wgt_slot.wg_width / 2.0,
),
(
x0 + self.length1 + self.length2,
y0 - self.wgt_slot.clad_width - self.wgt_slot.wg_width / 2.0,
),
(x0, y0 - self.wgt_strip.clad_width - self.wgt_strip.wg_width / 2.0),
]
clad = gdspy.Polygon(
pts, layer=self.wgt_strip.clad_layer, datatype=self.wgt_strip.clad_datatype
)
self.add(strip1)
self.add(thin_strip)
self.add(rail1)
self.add(rail2)
self.add(clad)
def __build_ports(self):
# Portlist format:
# example: example: {'port':(x_position, y_position), 'direction': 'NORTH'}
self.portlist["input"] = {"port": (0, 0), "direction": "WEST"}
self.portlist["output"] = {
"port": (self.length1 + self.length2, 0),
"direction": "EAST",
}
if __name__ == "__main__":
from . import *
top = gdspy.Cell("top")
wgt_strip = WaveguideTemplate(bend_radius=50, wg_type="strip", wg_width=0.7)
wgt_slot = WaveguideTemplate(bend_radius=50, wg_type="slot", wg_width=0.7, slot=0.2)
wg1 = Waveguide([(0, 0), (100, 0)], wgt_strip)
tk.add(top, wg1)
ssc = StripSlotConverter(
wgt_strip,
wgt_slot,
length1=15.0,
length2=15.0,
start_rail_width=0.1,
end_strip_width=0.4,
d=1.0,
**wg1.portlist["output"]
)
tk.add(top, ssc)
(x1, y1) = ssc.portlist["output"]["port"]
wg2 = Waveguide([(x1, y1), (x1 + 100, y1)], wgt_slot)
tk.add(top, wg2)
gdspy.LayoutViewer(cells=top)
# gdspy.write_gds('StripSlotConverter.gds', unit=1.0e-6, precision=1.0e-9)
| mit | -3,337,111,491,968,125,400 | -4,045,885,733,079,358,500 | 36.268 | 396 | 0.545347 | false |
EricForgy/JuliaBox | container/interactive/IJulia/tornado/src/gdrivesync.py | 4 | 9487 | import base64
import shutil
import os
import hashlib
import time
import datetime
import pytz
import isodate
from oauth2client.client import OAuth2Credentials
from oauth2client import GOOGLE_REVOKE_URI, GOOGLE_TOKEN_URI, GOOGLE_AUTH_URI
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
class GDriveSync:
"""Synchronizes folders from Google Drive.
Requires credentials to be provided as base64 encoded JSON representation of OAuth2Credentials, in form field gauth.
If credentials are not found, the Google authentication plugin is invoked
with state as ask_gdrive (/jboxauth/google?state=ask_gdrive). On successful
authentication and authorization, the plugin must call JuliaBox.init_gauth_tok
on the browser with appropriately formatted credentials.
"""
CREDSB64 = None
CREDS = None
GAUTH = None
DRIVE = None
LOCAL_TZ_OFFSET = 0
def __init__(self, loc):
self.loc = loc
with open(os.path.join(loc, '.gdrive')) as f:
self.gfolder = f.read().strip()
def repo_hash(self):
return hashlib.sha1('_'.join([self.loc, self.gfolder])).hexdigest()
def repo_name(self):
return os.path.basename(self.loc) + ' (' + self.gfolder + ')'
def sync(self):
self._sync_folder(self.loc, GDriveSync.folder_id(self.gfolder))
def _sync_folder(self, loc, gfolder):
# list local folder
loc_flist = {}
for f in os.listdir(loc):
if f.startswith('.'):
continue
full_path = os.path.join(loc, f)
is_dir = os.path.isdir(full_path)
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(full_path), pytz.utc)
# + datetime.timedelta(seconds=GDriveSync.LOCAL_TZ_OFFSET)
loc_flist[f] = {'fullpath': full_path, 'is_dir': is_dir, 'mtime': mtime}
# list remote folder
gdrive_flist = {}
for f in GDriveSync.DRIVE.ListFile({'q': "'" + gfolder + "' in parents and trashed=false"}).GetList():
fname = f['title']
full_path = os.path.join(loc, fname)
is_dir = ('application/vnd.google-apps.folder' in f['mimeType'])
mtime = GDriveSync.parse_gdrive_time(f['modifiedDate'])
gdrive_flist[fname] = {'fullpath': full_path, 'is_dir': is_dir, 'mtime': mtime, 'id': f['id']}
parent_spec = [{"kind": "drive#fileLink", "id": gfolder}]
# for all files in local folder
for f, attrs in loc_flist.items():
# if it is a folder
if attrs['is_dir']:
# if file not on remote create remote folder, remove file from local list, add to remote list
if f not in gdrive_flist:
gdrive_file = GDriveSync.DRIVE.CreateFile({
'title': f,
'mimeType': 'application/vnd.google-apps.folder',
'parents': parent_spec,
'modifiedDate': attrs['mtime']
})
gdrive_file.Upload()
gdrive_flist[f] = {
                        'fullpath': attrs['fullpath'],
'is_dir': attrs['is_dir'],
'mtime': attrs['mtime'],
'id': gdrive_file['id']
}
del loc_flist[f]
else: # it is a file
# if file not on remote, upload local file, remove file from local list
if f not in gdrive_flist:
GDriveSync._upload(attrs['fullpath'], parents=parent_spec)
del loc_flist[f]
else:
gf_attrs = gdrive_flist[f]
# if file in remote is older, upload local file
tdiff = (attrs['mtime'] - gf_attrs['mtime']).total_seconds()
# print("existing file tdiff: " + str(tdiff))
if tdiff >= 1:
GDriveSync._upload(attrs['fullpath'], parents=None, remid=gf_attrs['id'])
# if file on remote is newer, download remote file
elif tdiff <= -1:
GDriveSync._download(attrs['fullpath'], gf_attrs['id'])
#else:
# print("already in sync " + attrs['fullpath'])
# remove file from both lists
del loc_flist[f]
del gdrive_flist[f]
# for files remaining in remote list
for f, gf_attrs in gdrive_flist.items():
# create local folder if it does not exist
fullpath = gf_attrs['fullpath']
if gf_attrs['is_dir']:
if not os.path.exists(fullpath):
os.makedirs(fullpath)
# download remote file, remove from remote list
else:
GDriveSync._download(fullpath, gf_attrs['id'])
del gdrive_flist[f]
# gdrive_flist should only have folders if any
# for folders remaining in remote list call _sync_folder recursively on them
for f, gf_attrs in gdrive_flist.items():
self._sync_folder(gf_attrs['fullpath'], gf_attrs['id'])
@staticmethod
def _upload(locpath, parents=None, remid=None):
fname = os.path.basename(locpath)
# print("uploading " + fname + " to " + locpath + ", parents: " + str(parents) + ", remid: " + str(remid))
gdrive_file = GDriveSync.DRIVE.CreateFile({'id': remid}) if (remid is not None) else \
GDriveSync.DRIVE.CreateFile({'title': fname, 'parents': parents})
gdrive_file.SetContentFile(locpath)
gdrive_file.Upload()
GDriveSync._sync_file_time(locpath, gdrive_file)
@staticmethod
def _download(locpath, remid):
# print("downloading " + locpath + " from " + remid)
gdrive_file = GDriveSync.DRIVE.CreateFile({'id': remid})
gdrive_file.GetContentFile(locpath)
GDriveSync._sync_file_time(locpath, gdrive_file)
@staticmethod
def _sync_file_time(locpath, gdrive_file):
gdrive_file.FetchMetadata()
mtime = GDriveSync.parse_gdrive_time(gdrive_file['modifiedDate'])
timestamp = (mtime - datetime.datetime.fromtimestamp(0, pytz.utc)).total_seconds()
# print("setting file time to " + str(mtime) + " timestamp: " + str(timestamp))
os.utime(locpath, (timestamp, timestamp))
@staticmethod
def parse_gdrive_time(tm):
if None != tm:
tm = isodate.parse_datetime(tm)
return tm
@staticmethod
def local_time_offset():
"""Return offset of local zone from GMT"""
if time.localtime().tm_isdst and time.daylight:
return time.altzone
else:
return time.timezone
@staticmethod
def init_creds(credsb64):
GDriveSync.LOCAL_TZ_OFFSET = GDriveSync.local_time_offset()
if GDriveSync.CREDSB64 == credsb64:
return
creds_json = base64.b64decode(credsb64)
creds = OAuth2Credentials.from_json(creds_json)
GDriveSync.CREDS = creds
GDriveSync.CREDSB64 = credsb64
gauth = GoogleAuth()
gauth.settings = {
'client_config_backend': 'settings',
'client_config_file': 'client_secrets.json',
'save_credentials': False,
'oauth_scope': ['https://www.googleapis.com/auth/drive'],
'client_config': {
'client_id': creds.client_id,
'client_secret': creds.client_secret,
'auth_uri': GOOGLE_AUTH_URI,
'token_uri': GOOGLE_TOKEN_URI,
'revoke_uri': GOOGLE_REVOKE_URI,
'redirect_uri': 'http://juliabox.org/jboxauth/google/'
}
}
gauth.LoadClientConfigSettings()
gauth.credentials = creds
GDriveSync.GAUTH = gauth
GDriveSync.DRIVE = GoogleDrive(gauth)
@staticmethod
def folder_name(gfolder):
return gfolder.split('/')[-2]
@staticmethod
def folder_id(gfolder):
return gfolder.split('/')[-1]
@staticmethod
def clone(gfolder, loc, overwrite=False):
if overwrite and os.path.exists(loc):
shutil.rmtree(loc)
# create the folder and .gdrive file
os.mkdir(loc)
with open(os.path.join(loc, '.gdrive'), 'w') as f:
f.write(gfolder)
GDriveSync._clone_gfolder(GDriveSync.folder_id(gfolder), loc)
return GDriveSync(loc)
@staticmethod
def _clone_gfolder(gfolder, loc):
drive = GDriveSync.DRIVE
for f in drive.ListFile({'q': "'" + gfolder + "' in parents and trashed=false"}).GetList():
fpath = os.path.join(loc, f['title'])
if 'application/vnd.google-apps.folder' in f['mimeType']:
os.mkdir(fpath)
GDriveSync._clone_gfolder(f['id'], fpath)
else:
GDriveSync._download(fpath, f['id'])
@staticmethod
def scan_repo_paths(dirs):
repos = []
for d in dirs:
for pth in os.listdir(d):
if pth.startswith('.'):
continue
fpth = os.path.join(d, pth)
if os.path.isdir(fpth):
gdrive_pth = os.path.join(fpth, '.gdrive')
if os.path.isfile(gdrive_pth):
repos.append(fpth)
return repos
| mit | -6,751,839,081,025,678,000 | 9,114,603,090,923,793,000 | 38.694561 | 120 | 0.559608 | false |
dzamie/weasyl | weasyl/blocktag.py | 1 | 4024 | # blocktag.py
from error import PostgresError
import define as d
import profile
import searchtag
from libweasyl import ratings
from weasyl.cache import region
# For blocked tags, `rating` refers to the lowest rating for which that tag is
# blocked; for example, (X, Y, 10) would block tag Y for all ratings, whereas
# (X, Y, 30) would block tag Y for only adult ratings.
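# Illustrative example of that convention: a blocktag row with rating 10 blocks the
# tag on content of every rating (since every content rating is >= 10), while a row
# with rating 30 only blocks it on adult-rated content, as check_list() below shows.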
def check(userid, submitid=None, charid=None, journalid=None):
"""
Returns True if the submission, character, or journal contains a search tag
that the user has blocked, else False.
"""
if not userid:
return False
if submitid:
map_table = "searchmapsubmit"
content_table = "submission"
id_field = "submitid"
target = submitid
elif charid:
map_table = "searchmapchar"
content_table = "character"
id_field = "charid"
target = charid
else:
map_table = "searchmapjournal"
content_table = "journal"
id_field = "journalid"
target = journalid
query = """
SELECT EXISTS (
SELECT 0 FROM {map_table} searchmap
INNER JOIN {content_table} content ON searchmap.targetid = content.{id_field}
WHERE searchmap.targetid = %(id)s
AND content.userid != %(user)s
AND searchmap.tagid IN (
SELECT blocktag.tagid FROM blocktag
WHERE userid = %(user)s AND blocktag.rating <= content.rating)) AS block
""".format(map_table=map_table, content_table=content_table, id_field=id_field)
return d.engine.execute(query, id=target, user=userid).first().block
def check_list(rating, tags, blocked_tags):
return any(rating >= b['rating'] and b['title'] in tags for b in blocked_tags)
def suggest(userid, target):
if not target:
return []
return d.execute("SELECT title FROM searchtag"
" WHERE title LIKE '%s%%' AND tagid NOT IN (SELECT tagid FROM blocktag WHERE userid = %i)"
" ORDER BY title LIMIT 10", [target, userid], options="within")
def select(userid):
return [{
"title": i[0],
"rating": i[1],
} for i in d.execute("SELECT st.title, bt.rating FROM searchtag st "
" INNER JOIN blocktag bt ON st.tagid = bt.tagid"
" WHERE bt.userid = %i"
" ORDER BY st.title", [userid])]
@region.cache_on_arguments()
@d.record_timing
def cached_select(userid):
return select(userid)
def insert(userid, tagid=None, title=None, rating=None):
if rating not in ratings.CODE_MAP:
rating = ratings.GENERAL.code
profile.check_user_rating_allowed(userid, rating)
if tagid:
tag = int(tagid)
try:
d.engine.execute("INSERT INTO blocktag VALUES (%s, %s, %s)", userid, tag, rating)
except PostgresError:
return
elif title:
tag_name = d.get_search_tag(title)
try:
d.engine.execute("""
INSERT INTO blocktag (userid, tagid, rating)
VALUES (
%(user)s,
(SELECT tagid FROM searchtag WHERE title = %(tag_name)s),
%(rating)s
)
""", user=userid, tag_name=tag_name, rating=rating)
except PostgresError:
try:
tag = searchtag.create(title)
except PostgresError:
return
d.engine.execute("INSERT INTO blocktag VALUES (%s, %s, %s)", userid, tag, rating)
cached_select.invalidate(userid)
def remove(userid, tagid=None, title=None):
if tagid:
d.execute("DELETE FROM blocktag WHERE (userid, tagid) = (%i, %i)", [userid, tagid])
elif title:
d.execute("DELETE FROM blocktag WHERE (userid, tagid) = (%i, (SELECT tagid FROM searchtag WHERE title = '%s'))",
[userid, d.get_search_tag(title)])
cached_select.invalidate(userid)
| apache-2.0 | 2,104,018,866,886,099,700 | -7,390,802,647,552,328,000 | 30.193798 | 120 | 0.587227 | false |
phihag/youtube-dl | youtube_dl/extractor/primesharetv.py | 73 | 1853 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
sanitized_Request,
urlencode_postdata,
)
class PrimeShareTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?primeshare\.tv/download/(?P<id>[\da-zA-Z]+)'
_TEST = {
'url': 'http://primeshare.tv/download/238790B611',
'md5': 'b92d9bf5461137c36228009f31533fbc',
'info_dict': {
'id': '238790B611',
'ext': 'mp4',
'title': 'Public Domain - 1960s Commercial - Crest Toothpaste-YKsuFona',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if '>File not exist<' in webpage:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
fields = self._hidden_inputs(webpage)
headers = {
'Referer': url,
'Content-Type': 'application/x-www-form-urlencoded',
}
wait_time = int(self._search_regex(
r'var\s+cWaitTime\s*=\s*(\d+)',
webpage, 'wait time', default=7)) + 1
self._sleep(wait_time, video_id)
req = sanitized_Request(
url, urlencode_postdata(fields), headers)
video_page = self._download_webpage(
req, video_id, 'Downloading video page')
video_url = self._search_regex(
r"url\s*:\s*'([^']+\.primeshare\.tv(?::443)?/file/[^']+)'",
video_page, 'video url')
title = self._html_search_regex(
r'<h1>Watch\s*(?: )?\s*\((.+?)(?:\s*\[\.\.\.\])?\)\s*(?: )?\s*<strong>',
video_page, 'title')
return {
'id': video_id,
'url': video_url,
'title': title,
'ext': 'mp4',
}
| unlicense | 8,634,438,579,241,626,000 | 1,094,559,810,751,178,000 | 28.887097 | 94 | 0.524555 | false |
atruberg/django-custom | django/contrib/admin/templatetags/log.py | 114 | 2125 | from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
def __init__(self, limit, varname, user):
self.limit, self.varname, self.user = limit, varname, user
def __repr__(self):
return "<GetAdminLog Node>"
def render(self, context):
if self.user is None:
context[self.varname] = LogEntry.objects.all().select_related('content_type', 'user')[:self.limit]
else:
user_id = self.user
if not user_id.isdigit():
user_id = context[self.user].pk
context[self.varname] = LogEntry.objects.filter(user__pk__exact=user_id).select_related('content_type', 'user')[:int(self.limit)]
return ''
@register.tag
def get_admin_log(parser, token):
"""
Populates a template variable with the admin log for the given criteria.
Usage::
{% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}
Examples::
{% get_admin_log 10 as admin_log for_user 23 %}
{% get_admin_log 10 as admin_log for_user user %}
{% get_admin_log 10 as admin_log %}
Note that ``context_var_containing_user_obj`` can be a hard-coded integer
(user ID) or the name of a template context variable containing the user
object whose ID you want.
"""
tokens = token.contents.split()
if len(tokens) < 4:
raise template.TemplateSyntaxError(
"'get_admin_log' statements require two arguments")
if not tokens[1].isdigit():
raise template.TemplateSyntaxError(
"First argument to 'get_admin_log' must be an integer")
if tokens[2] != 'as':
raise template.TemplateSyntaxError(
"Second argument to 'get_admin_log' must be 'as'")
if len(tokens) > 4:
if tokens[4] != 'for_user':
raise template.TemplateSyntaxError(
"Fourth argument to 'get_admin_log' must be 'for_user'")
return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(tokens[5] if len(tokens) > 5 else None))
| bsd-3-clause | 6,546,302,768,282,089,000 | 3,222,945,500,978,071,600 | 36.946429 | 141 | 0.628706 | false |
guewen/OpenUpgrade | openerp/service/server.py | 32 | 35650 | #-----------------------------------------------------------
# Threaded, Gevent and Prefork Servers
#-----------------------------------------------------------
import datetime
import errno
import logging
import os
import os.path
import platform
import psutil
import random
if os.name == 'posix':
import resource
else:
resource = None
import select
import signal
import socket
import subprocess
import sys
import threading
import time
import unittest2
import werkzeug.serving
try:
import fcntl
except ImportError:
pass
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
import openerp
from openerp.modules.registry import RegistryManager
from openerp.release import nt_service_name
import openerp.tools.config as config
from openerp.tools.misc import stripped_sys_argv, dumpstacks
_logger = logging.getLogger(__name__)
SLEEP_INTERVAL = 60 # 1 min
#----------------------------------------------------------
# Werkzeug WSGI servers patched
#----------------------------------------------------------
class BaseWSGIServerNoBind(werkzeug.serving.BaseWSGIServer):
""" werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
    uses this class, sets the socket and calls process_request() manually.
"""
def __init__(self, app):
werkzeug.serving.BaseWSGIServer.__init__(self, "1", "1", app)
def server_bind(self):
        # we don't bind because we use the listen socket of PreforkServer#socket
# instead we close the socket
if self.socket:
self.socket.close()
def server_activate(self):
        # don't listen as we use PreforkServer#socket
pass
class RequestHandler(werkzeug.serving.WSGIRequestHandler):
def setup(self):
# flag the current thread as handling a http request
super(RequestHandler, self).setup()
me = threading.currentThread()
me.name = 'openerp.service.http.request.%s' % (me.ident,)
# _reexec() should set LISTEN_* to avoid connection refused during reload time. It
# should also work with systemd socket activation. This is currently untested
# and not yet used.
class ThreadedWSGIServerReloadable(werkzeug.serving.ThreadedWSGIServer):
""" werkzeug Threaded WSGI Server patched to allow reusing a listen socket
    given by the environment; this is used by autoreload to keep the listen
socket open when a reload happens.
"""
def __init__(self, host, port, app):
super(ThreadedWSGIServerReloadable, self).__init__(host, port, app,
handler=RequestHandler)
def server_bind(self):
envfd = os.environ.get('LISTEN_FDS')
if envfd and os.environ.get('LISTEN_PID') == str(os.getpid()):
self.reload_socket = True
self.socket = socket.fromfd(int(envfd), socket.AF_INET, socket.SOCK_STREAM)
# should we os.close(int(envfd)) ? it seem python duplicate the fd.
else:
self.reload_socket = False
super(ThreadedWSGIServerReloadable, self).server_bind()
def server_activate(self):
if not self.reload_socket:
super(ThreadedWSGIServerReloadable, self).server_activate()
#----------------------------------------------------------
# AutoReload watcher
#----------------------------------------------------------
class AutoReload(object):
def __init__(self, server):
self.server = server
self.files = {}
self.modules = {}
import pyinotify
class EventHandler(pyinotify.ProcessEvent):
def __init__(self, autoreload):
self.autoreload = autoreload
def process_IN_CREATE(self, event):
_logger.debug('File created: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
def process_IN_MODIFY(self, event):
_logger.debug('File modified: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
self.wm = pyinotify.WatchManager()
self.handler = EventHandler(self)
self.notifier = pyinotify.Notifier(self.wm, self.handler, timeout=0)
mask = pyinotify.IN_MODIFY | pyinotify.IN_CREATE # IN_MOVED_FROM, IN_MOVED_TO ?
for path in openerp.modules.modules.ad_paths:
_logger.info('Watching addons folder %s', path)
self.wm.add_watch(path, mask, rec=True)
def process_data(self, files):
xml_files = [i for i in files if i.endswith('.xml')]
for i in xml_files:
for path in openerp.modules.modules.ad_paths:
if i.startswith(path):
                    # find out which addons path the file belongs to
                    # and extract its module name
right = i[len(path) + 1:].split('/')
if len(right) < 2:
continue
module = right[0]
self.modules[module] = 1
if self.modules:
_logger.info('autoreload: xml change detected, autoreload activated')
restart()
def process_python(self, files):
# process python changes
py_files = [i for i in files if i.endswith('.py')]
py_errors = []
# TODO keep python errors until they are ok
if py_files:
for i in py_files:
try:
source = open(i, 'rb').read() + '\n'
compile(source, i, 'exec')
except SyntaxError:
py_errors.append(i)
if py_errors:
_logger.info('autoreload: python code change detected, errors found')
for i in py_errors:
_logger.info('autoreload: SyntaxError %s', i)
else:
_logger.info('autoreload: python code updated, autoreload activated')
restart()
def check_thread(self):
# Check if some files have been touched in the addons path.
# If true, check if the touched file belongs to an installed module
# in any of the database used in the registry manager.
while 1:
while self.notifier.check_events(1000):
self.notifier.read_events()
self.notifier.process_events()
l = self.files.keys()
self.files.clear()
self.process_data(l)
self.process_python(l)
def run(self):
t = threading.Thread(target=self.check_thread)
t.setDaemon(True)
t.start()
_logger.info('AutoReload watcher running')
#----------------------------------------------------------
# Servers: Threaded, Gevented and Prefork
#----------------------------------------------------------
class CommonServer(object):
def __init__(self, app):
# TODO Change the xmlrpc_* options to http_*
self.app = app
# config
self.interface = config['xmlrpc_interface'] or '0.0.0.0'
self.port = config['xmlrpc_port']
# runtime
self.pid = os.getpid()
def close_socket(self, sock):
""" Closes a socket instance cleanly
:param sock: the network socket to close
:type sock: socket.socket
"""
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error, e:
            # On OSX, shutting down one side of the socket shuts down both
            # sides, causing an error 57 'Socket is not connected' on the
            # later shutdown of the other side, see
# http://bugs.python.org/issue4397
# note: stdlib fixed test, not behavior
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
raise
sock.close()
class ThreadedServer(CommonServer):
def __init__(self, app):
super(ThreadedServer, self).__init__(app)
self.main_thread_id = threading.currentThread().ident
# Variable keeping track of the number of calls to the signal handler defined
# below. This variable is monitored by ``quit_on_signals()``.
self.quit_signals_received = 0
#self.socket = None
self.httpd = None
def signal_handler(self, sig, frame):
if sig in [signal.SIGINT, signal.SIGTERM]:
# shutdown on kill -INT or -TERM
self.quit_signals_received += 1
if self.quit_signals_received > 1:
# logging.shutdown was already called at this point.
sys.stderr.write("Forced shutdown.\n")
os._exit(0)
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
self.quit_signals_received += 1
def cron_thread(self, number):
while True:
time.sleep(SLEEP_INTERVAL + number) # Steve Reich timing style
registries = openerp.modules.registry.RegistryManager.registries
_logger.debug('cron%d polling for jobs', number)
for db_name, registry in registries.items():
while True and registry.ready:
acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
if not acquired:
break
def cron_spawn(self):
""" Start the above runner function in a daemon thread.
The thread is a typical daemon thread: it will never quit and must be
terminated when the main process exits - with no consequence (the processing
threads it spawns are not marked daemon).
"""
# Force call to strptime just before starting the cron thread
# to prevent time.strptime AttributeError within the thread.
# See: http://bugs.python.org/issue7980
datetime.datetime.strptime('2012-01-01', '%Y-%m-%d')
for i in range(openerp.tools.config['max_cron_threads']):
def target():
self.cron_thread(i)
t = threading.Thread(target=target, name="openerp.service.cron.cron%d" % i)
t.setDaemon(True)
t.start()
_logger.debug("cron%d started!" % i)
def http_thread(self):
def app(e, s):
return self.app(e, s)
self.httpd = ThreadedWSGIServerReloadable(self.interface, self.port, app)
self.httpd.serve_forever()
def http_spawn(self):
t = threading.Thread(target=self.http_thread, name="openerp.service.httpd")
t.setDaemon(True)
t.start()
_logger.info('HTTP service (werkzeug) running on %s:%s', self.interface, self.port)
def start(self, stop=False):
_logger.debug("Setting signal handlers")
if os.name == 'posix':
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
elif os.name == 'nt':
import win32api
win32api.SetConsoleCtrlHandler(lambda sig: self.signal_handler(sig, None), 1)
test_mode = config['test_enable'] or config['test_file']
if not stop or test_mode:
            # some tests need the http daemon to be available...
self.http_spawn()
if not stop:
# only relevant if we are not in "--stop-after-init" mode
self.cron_spawn()
def stop(self):
""" Shutdown the WSGI server. Wait for non deamon threads.
"""
_logger.info("Initiating shutdown")
_logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
if self.httpd:
self.httpd.shutdown()
self.close_socket(self.httpd.socket)
# Manually join() all threads before calling sys.exit() to allow a second signal
# to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
# threading.Thread.join() should not mask signals (at least in python 2.5).
me = threading.currentThread()
_logger.debug('current thread: %r', me)
for thread in threading.enumerate():
_logger.debug('process %r (%r)', thread, thread.isDaemon())
if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:
while thread.isAlive():
_logger.debug('join and sleep')
# Need a busyloop here as thread.join() masks signals
# and would prevent the forced shutdown.
thread.join(0.05)
time.sleep(0.05)
_logger.debug('--')
openerp.modules.registry.RegistryManager.delete_all()
logging.shutdown()
def run(self, preload=None, stop=False):
""" Start the http server and the cron thread then wait for a signal.
        The first SIGINT or SIGTERM signal will initiate a graceful shutdown, while
        a second one, if any, will force an immediate exit.
"""
self.start(stop=stop)
rc = preload_registries(preload)
if stop:
self.stop()
return rc
# Wait for a first signal to be handled. (time.sleep will be interrupted
# by the signal handler.) The try/except is for the win32 case.
try:
while self.quit_signals_received == 0:
time.sleep(60)
except KeyboardInterrupt:
pass
self.stop()
def reload(self):
os.kill(self.pid, signal.SIGHUP)
class GeventServer(CommonServer):
def __init__(self, app):
super(GeventServer, self).__init__(app)
self.port = config['longpolling_port']
self.httpd = None
def watch_parent(self, beat=4):
import gevent
ppid = os.getppid()
while True:
if ppid != os.getppid():
pid = os.getpid()
_logger.info("LongPolling (%s) Parent changed", pid)
# suicide !!
os.kill(pid, signal.SIGTERM)
return
gevent.sleep(beat)
def start(self):
import gevent
from gevent.wsgi import WSGIServer
if os.name == 'posix':
signal.signal(signal.SIGQUIT, dumpstacks)
gevent.spawn(self.watch_parent)
self.httpd = WSGIServer((self.interface, self.port), self.app)
_logger.info('Evented Service (longpolling) running on %s:%s', self.interface, self.port)
self.httpd.serve_forever()
def stop(self):
import gevent
self.httpd.stop()
gevent.shutdown()
def run(self, preload, stop):
self.start()
self.stop()
class PreforkServer(CommonServer):
""" Multiprocessing inspired by (g)unicorn.
PreforkServer (aka Multicorn) currently uses accept(2) as dispatching
    method between workers, but we plan to replace it with a more intelligent
    dispatcher that will parse the first HTTP request line.
"""
def __init__(self, app):
# config
self.address = (config['xmlrpc_interface'] or '0.0.0.0', config['xmlrpc_port'])
self.population = config['workers']
self.timeout = config['limit_time_real']
self.limit_request = config['limit_request']
# working vars
self.beat = 4
self.app = app
self.pid = os.getpid()
self.socket = None
self.workers_http = {}
self.workers_cron = {}
self.workers = {}
self.generation = 0
self.queue = []
self.long_polling_pid = None
def pipe_new(self):
pipe = os.pipe()
for fd in pipe:
# non_blocking
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
# close_on_exec
flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
return pipe
def pipe_ping(self, pipe):
try:
os.write(pipe[1], '.')
except IOError, e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def signal_handler(self, sig, frame):
if len(self.queue) < 5 or sig == signal.SIGCHLD:
self.queue.append(sig)
self.pipe_ping(self.pipe)
else:
_logger.warn("Dropping signal: %s", sig)
def worker_spawn(self, klass, workers_registry):
self.generation += 1
worker = klass(self)
pid = os.fork()
if pid != 0:
worker.pid = pid
self.workers[pid] = worker
workers_registry[pid] = worker
return worker
else:
worker.run()
sys.exit(0)
def long_polling_spawn(self):
nargs = stripped_sys_argv()
cmd = nargs[0]
cmd = os.path.join(os.path.dirname(cmd), "openerp-gevent")
nargs[0] = cmd
popen = subprocess.Popen(nargs)
self.long_polling_pid = popen.pid
def worker_pop(self, pid):
if pid in self.workers:
_logger.debug("Worker (%s) unregistered", pid)
try:
self.workers_http.pop(pid, None)
self.workers_cron.pop(pid, None)
u = self.workers.pop(pid)
u.close()
except OSError:
return
def worker_kill(self, pid, sig):
try:
os.kill(pid, sig)
except OSError, e:
if e.errno == errno.ESRCH:
self.worker_pop(pid)
def process_signals(self):
while len(self.queue):
sig = self.queue.pop(0)
if sig in [signal.SIGINT, signal.SIGTERM]:
raise KeyboardInterrupt
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
raise KeyboardInterrupt
elif sig == signal.SIGQUIT:
# dump stacks on kill -3
self.dumpstacks()
elif sig == signal.SIGTTIN:
# increase number of workers
self.population += 1
elif sig == signal.SIGTTOU:
# decrease number of workers
self.population -= 1
def process_zombie(self):
# reap dead workers
while 1:
try:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if (status >> 8) == 3:
msg = "Critial worker error (%s)"
_logger.critical(msg, wpid)
raise Exception(msg % wpid)
self.worker_pop(wpid)
except OSError, e:
if e.errno == errno.ECHILD:
break
raise
def process_timeout(self):
now = time.time()
for (pid, worker) in self.workers.items():
if worker.watchdog_timeout is not None and \
(now - worker.watchdog_time) >= worker.watchdog_timeout:
_logger.error("Worker (%s) timeout", pid)
self.worker_kill(pid, signal.SIGKILL)
def process_spawn(self):
while len(self.workers_http) < self.population:
self.worker_spawn(WorkerHTTP, self.workers_http)
while len(self.workers_cron) < config['max_cron_threads']:
self.worker_spawn(WorkerCron, self.workers_cron)
if not self.long_polling_pid:
self.long_polling_spawn()
def sleep(self):
try:
# map of fd -> worker
fds = dict([(w.watchdog_pipe[0], w) for k, w in self.workers.items()])
fd_in = fds.keys() + [self.pipe[0]]
# check for ping or internal wakeups
ready = select.select(fd_in, [], [], self.beat)
# update worker watchdogs
for fd in ready[0]:
if fd in fds:
fds[fd].watchdog_time = time.time()
try:
# empty pipe
while os.read(fd, 1):
pass
except OSError, e:
if e.errno not in [errno.EAGAIN]:
raise
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def start(self):
        # wakeup pipe, python doesn't throw EINTR when a syscall is interrupted
# by a signal simulating a pseudo SA_RESTART. We write to a pipe in the
# signal handler to overcome this behaviour
self.pipe = self.pipe_new()
# set signal handlers
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGTTIN, self.signal_handler)
signal.signal(signal.SIGTTOU, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
# listen to socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(0)
self.socket.bind(self.address)
self.socket.listen(8 * self.population)
def stop(self, graceful=True):
if self.long_polling_pid is not None:
# FIXME make longpolling process handle SIGTERM correctly
self.worker_kill(self.long_polling_pid, signal.SIGKILL)
self.long_polling_pid = None
if graceful:
_logger.info("Stopping gracefully")
limit = time.time() + self.timeout
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
while self.workers and time.time() < limit:
self.process_zombie()
time.sleep(0.1)
else:
_logger.info("Stopping forcefully")
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
self.socket.close()
def run(self, preload, stop):
self.start()
rc = preload_registries(preload)
if stop:
self.stop()
return rc
        # Empty the cursor pool, we don't want them to be shared among forked workers.
openerp.sql_db.close_all()
_logger.debug("Multiprocess starting")
while 1:
try:
#_logger.debug("Multiprocess beat (%s)",time.time())
self.process_signals()
self.process_zombie()
self.process_timeout()
self.process_spawn()
self.sleep()
except KeyboardInterrupt:
_logger.debug("Multiprocess clean stop")
self.stop()
break
except Exception, e:
_logger.exception(e)
self.stop(False)
return -1
class Worker(object):
""" Workers """
def __init__(self, multi):
self.multi = multi
self.watchdog_time = time.time()
self.watchdog_pipe = multi.pipe_new()
# Can be set to None if no watchdog is desired.
self.watchdog_timeout = multi.timeout
self.ppid = os.getpid()
self.pid = None
self.alive = True
        # should we rename this into "lifetime"?
self.request_max = multi.limit_request
self.request_count = 0
def setproctitle(self, title=""):
setproctitle('openerp: %s %s %s' % (self.__class__.__name__, self.pid, title))
def close(self):
os.close(self.watchdog_pipe[0])
os.close(self.watchdog_pipe[1])
def signal_handler(self, sig, frame):
self.alive = False
def sleep(self):
try:
select.select([self.multi.socket], [], [], self.multi.beat)
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def process_limit(self):
if resource is None:
return
        # If our parent changed, commit suicide
if self.ppid != os.getppid():
_logger.info("Worker (%s) Parent changed", self.pid)
self.alive = False
# check for lifetime
if self.request_count >= self.request_max:
_logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
self.alive = False
# Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
rss, vms = psutil.Process(os.getpid()).get_memory_info()
if vms > config['limit_memory_soft']:
_logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
self.alive = False # Commit suicide after the request.
# VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))
# SIGXCPU (exceeded CPU time) signal handler will raise an exception.
r = resource.getrusage(resource.RUSAGE_SELF)
cpu_time = r.ru_utime + r.ru_stime
def time_expired(n, stack):
            _logger.info('Worker (%d) CPU time limit (%s) reached.',
                         self.pid, config['limit_time_cpu'])
            # We don't suicide in such a case
raise Exception('CPU time limit exceeded.')
signal.signal(signal.SIGXCPU, time_expired)
soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))
def process_work(self):
pass
def start(self):
self.pid = os.getpid()
self.setproctitle()
_logger.info("Worker %s (%s) alive", self.__class__.__name__, self.pid)
# Reseed the random number generator
random.seed()
        # Prevent fd inheritance (close_on_exec)
flags = fcntl.fcntl(self.multi.socket, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(self.multi.socket, fcntl.F_SETFD, flags)
# reset blocking status
self.multi.socket.setblocking(0)
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def stop(self):
pass
def run(self):
try:
self.start()
while self.alive:
self.process_limit()
self.multi.pipe_ping(self.watchdog_pipe)
self.sleep()
self.process_work()
_logger.info("Worker (%s) exiting. request_count: %s.", self.pid, self.request_count)
self.stop()
except Exception:
_logger.exception("Worker (%s) Exception occured, exiting..." % self.pid)
# should we use 3 to abort everything ?
sys.exit(1)
class WorkerHTTP(Worker):
""" HTTP Request workers """
def process_request(self, client, addr):
client.setblocking(1)
client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Prevent fd inheritance (close_on_exec)
flags = fcntl.fcntl(client, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(client, fcntl.F_SETFD, flags)
# do request using BaseWSGIServerNoBind monkey patched with socket
self.server.socket = client
# tolerate broken pipe when the http client closes the socket before
# receiving the full reply
try:
self.server.process_request(client, addr)
except IOError, e:
if e.errno != errno.EPIPE:
raise
self.request_count += 1
def process_work(self):
try:
client, addr = self.multi.socket.accept()
self.process_request(client, addr)
except socket.error, e:
if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):
raise
def start(self):
Worker.start(self)
self.server = BaseWSGIServerNoBind(self.multi.app)
class WorkerCron(Worker):
""" Cron workers """
def __init__(self, multi):
super(WorkerCron, self).__init__(multi)
# process_work() below process a single database per call.
# The variable db_index is keeping track of the next database to
# process.
self.db_index = 0
def sleep(self):
# Really sleep once all the databases have been processed.
if self.db_index == 0:
interval = SLEEP_INTERVAL + self.pid % 10 # chorus effect
time.sleep(interval)
def _db_list(self):
if config['db_name']:
db_names = config['db_name'].split(',')
else:
db_names = openerp.service.db.exp_list(True)
return db_names
def process_work(self):
rpc_request = logging.getLogger('openerp.netsvc.rpc.request')
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
_logger.debug("WorkerCron (%s) polling for jobs", self.pid)
db_names = self._db_list()
if len(db_names):
self.db_index = (self.db_index + 1) % len(db_names)
db_name = db_names[self.db_index]
self.setproctitle(db_name)
if rpc_request_flag:
start_time = time.time()
start_rss, start_vms = psutil.Process(os.getpid()).get_memory_info()
import openerp.addons.base as base
base.ir.ir_cron.ir_cron._acquire_job(db_name)
openerp.modules.registry.RegistryManager.delete(db_name)
            # don't keep cursors in multi-database mode
if len(db_names) > 1:
openerp.sql_db.close_db(db_name)
if rpc_request_flag:
run_time = time.time() - start_time
end_rss, end_vms = psutil.Process(os.getpid()).get_memory_info()
vms_diff = (end_vms - start_vms) / 1024
logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % \
(db_name, run_time, start_vms / 1024, end_vms / 1024, vms_diff)
_logger.debug("WorkerCron (%s) %s", self.pid, logline)
self.request_count += 1
if self.request_count >= self.request_max and self.request_max < len(db_names):
_logger.error("There are more dabatases to process than allowed "
"by the `limit_request` configuration variable: %s more.",
len(db_names) - self.request_max)
else:
self.db_index = 0
def start(self):
os.nice(10) # mommy always told me to be nice with others...
Worker.start(self)
self.multi.socket.close()
#----------------------------------------------------------
# start/stop public api
#----------------------------------------------------------
server = None
def load_server_wide_modules():
for m in openerp.conf.server_wide_modules:
try:
openerp.modules.module.load_openerp_module(m)
except Exception:
msg = ''
if m == 'web':
msg = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
_logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
def _reexec(updated_modules=None):
"""reexecute openerp-server process with (nearly) the same arguments"""
if openerp.tools.osutil.is_running_as_nt_service():
subprocess.call('net stop {0} && net start {0}'.format(nt_service_name), shell=True)
exe = os.path.basename(sys.executable)
args = stripped_sys_argv()
args += ["-u", ','.join(updated_modules)]
if not args or args[0] != exe:
args.insert(0, exe)
os.execv(sys.executable, args)
def load_test_file_yml(registry, test_file):
with registry.cursor() as cr:
openerp.tools.convert_yaml_import(cr, 'base', file(test_file), 'test', {}, 'init')
if config['test_commit']:
            _logger.info('test %s has been committed', test_file)
cr.commit()
else:
            _logger.info('test %s has been rolled back', test_file)
cr.rollback()
def load_test_file_py(registry, test_file):
# Locate python module based on its filename and run the tests
test_path, _ = os.path.splitext(os.path.abspath(test_file))
for mod_name, mod_mod in sys.modules.items():
if mod_mod:
mod_path, _ = os.path.splitext(getattr(mod_mod, '__file__', ''))
if test_path == mod_path:
suite = unittest2.TestSuite()
for t in unittest2.TestLoader().loadTestsFromModule(mod_mod):
suite.addTest(t)
_logger.log(logging.INFO, 'running tests %s.', mod_mod.__name__)
stream = openerp.modules.module.TestStream()
result = unittest2.TextTestRunner(verbosity=2, stream=stream).run(suite)
success = result.wasSuccessful()
if hasattr(registry._assertion_report,'report_result'):
registry._assertion_report.report_result(success)
if not success:
_logger.error('%s: at least one error occurred in a test', test_file)
def preload_registries(dbnames):
""" Preload a registries, possibly run a test file."""
    # TODO: move all config checks to args, don't check tools.config here
config = openerp.tools.config
test_file = config['test_file']
dbnames = dbnames or []
rc = 0
for dbname in dbnames:
try:
update_module = config['init'] or config['update']
registry = RegistryManager.new(dbname, update_module=update_module)
# run test_file if provided
if test_file:
_logger.info('loading test file %s', test_file)
if test_file.endswith('yml'):
load_test_file_yml(registry, test_file)
elif test_file.endswith('py'):
load_test_file_py(registry, test_file)
if registry._assertion_report.failures:
rc += 1
except Exception:
_logger.critical('Failed to initialize database `%s`.', dbname, exc_info=True)
return -1
return rc
def start(preload=None, stop=False):
""" Start the openerp http server and cron processor.
"""
global server
load_server_wide_modules()
if openerp.evented:
server = GeventServer(openerp.service.wsgi_server.application)
elif config['workers']:
server = PreforkServer(openerp.service.wsgi_server.application)
else:
server = ThreadedServer(openerp.service.wsgi_server.application)
if config['auto_reload']:
autoreload = AutoReload(server)
autoreload.run()
rc = server.run(preload, stop)
# like the legend of the phoenix, all ends with beginnings
if getattr(openerp, 'phoenix', False):
modules = []
if config['auto_reload']:
modules = autoreload.modules.keys()
_reexec(modules)
return rc if rc else 0
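# Illustrative entry-point sketch (an assumption about the command-line front-end,
# which lives outside this module): a caller typically ends up doing roughly
#
#     openerp.tools.config.parse_config(args)
#     rc = openerp.service.server.start(preload=dbnames, stop=config['stop_after_init'])
#     sys.exit(rc)
#
# with the server flavour (threaded, gevent or prefork) chosen inside start()
# from ``openerp.evented`` and ``config['workers']``.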
def restart():
""" Restart the server
"""
if os.name == 'nt':
# run in a thread to let the current thread return response to the caller.
threading.Thread(target=_reexec).start()
else:
os.kill(server.pid, signal.SIGHUP)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,038,134,246,218,642,000 | -9,105,870,641,588,592,000 | 36.684989 | 97 | 0.568583 | false |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/scipy/stats/_binned_statistic.py | 10 | 25912 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import callable, xrange
from scipy._lib._numpy_compat import suppress_warnings
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
      * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``. If the bin edges are specified, the number of bins will
be, (nx = len(bins)-1).
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber: 1-D ndarray of ints
Indices of the bins (corresponding to `bin_edges`) in which each value
of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),
array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),
array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
      * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
If the bin edges are specified, the number of bins will be,
(nx = len(x_edge)-1, ny = len(y_edge)-1).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
    Bin edges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])
>>> ret.statistic
array([[ 2., 1.],
[ 1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
Which shows that the first three elements belong in the xbin 1, and the
fourth into xbin 2; and so on for y.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
      * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each two-dimensional bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
    Bin edges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std','min','max']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
if(statistic != 'count' and Vlen != Dlen):
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
smin = np.zeros(Ndim)
smax = np.zeros(Ndim)
for i in xrange(Ndim):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in xrange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in xrange(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into, in each dimension
sampBin = [
np.digitize(sample[:, i], edges[i])
for i in xrange(Ndim)
]
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in xrange(Ndim):
# Find the rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
binnumbers = np.ravel_multi_index(sampBin, nbin)
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
(flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.median(values[vv, binnumbers == i])
elif statistic == 'min':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.min(values[vv, binnumbers == i])
elif statistic == 'max':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.max(values[vv, binnumbers == i])
elif callable(statistic):
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
sup.filter(RuntimeWarning)
try:
null = statistic([])
except:
null = np.nan
result.fill(null)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = statistic(values[vv, binnumbers == i])
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, nbin))
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = [slice(None)] + Ndim * [slice(1, -1)]
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
if(expand_binnumbers and Ndim > 1):
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
    # Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
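# Illustrative example for the D-dimensional entry point (comment only, not
# executed; the data below are assumptions chosen for the sketch):
#
#     >>> import numpy as np
#     >>> from scipy.stats import binned_statistic_dd
#     >>> sample = np.random.rand(100, 2)     # 100 points in 2 dimensions
#     >>> values = np.random.rand(100)
#     >>> stat, edges, binnumber = binned_statistic_dd(
#     ...     sample, values, statistic='mean', bins=[4, 4])
#     >>> stat.shape
#     (4, 4)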
| mit | 7,746,193,893,508,509,000 | -3,275,516,658,641,505,000 | 40.861066 | 79 | 0.610374 | false |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/idlelib/IdleHistory.py | 122 | 4052 | "Implement Idle Shell history mechanism with History class"
from idlelib.configHandler import idleConf
class History:
''' Implement Idle Shell history mechanism.
store - Store source statement (called from PyShell.resetoutput).
fetch - Fetch stored statement matching prefix already entered.
history_next - Bound to <<history-next>> event (default Alt-N).
history_prev - Bound to <<history-prev>> event (default Alt-P).
'''
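    # Illustrative wiring (an assumption about the caller, not part of this
    # module): the shell window creates one History per Text widget and feeds it
    # each executed statement, roughly:
    #
    #     self.history = History(self.text)    # at shell-window construction
    #     ...
    #     self.history.store(source)           # from PyShell.resetoutput()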
def __init__(self, text):
'''Initialize data attributes and bind event methods.
.text - Idle wrapper of tk Text widget, with .bell().
.history - source statements, possibly with multiple lines.
.prefix - source already entered at prompt; filters history list.
.pointer - index into history.
.cyclic - wrap around history list (or not).
'''
self.text = text
self.history = []
self.prefix = None
self.pointer = None
self.cyclic = idleConf.GetOption("main", "History", "cyclic", 1, "bool")
text.bind("<<history-previous>>", self.history_prev)
text.bind("<<history-next>>", self.history_next)
def history_next(self, event):
"Fetch later statement; start with ealiest if cyclic."
self.fetch(reverse=False)
return "break"
def history_prev(self, event):
"Fetch earlier statement; start with most recent."
self.fetch(reverse=True)
return "break"
def fetch(self, reverse):
        '''Fetch statement and replace current line in text widget.
Set prefix and pointer as needed for successive fetches.
Reset them to None, None when returning to the start line.
        Sound the bell when returning to the start line or when a line
        cannot be left because cyclic is False.
'''
nhist = len(self.history)
pointer = self.pointer
prefix = self.prefix
if pointer is not None and prefix is not None:
if self.text.compare("insert", "!=", "end-1c") or \
self.text.get("iomark", "end-1c") != self.history[pointer]:
pointer = prefix = None
self.text.mark_set("insert", "end-1c") # != after cursor move
if pointer is None or prefix is None:
prefix = self.text.get("iomark", "end-1c")
if reverse:
pointer = nhist # will be decremented
else:
if self.cyclic:
pointer = -1 # will be incremented
else: # abort history_next
self.text.bell()
return
nprefix = len(prefix)
while 1:
pointer += -1 if reverse else 1
if pointer < 0 or pointer >= nhist:
self.text.bell()
if not self.cyclic and pointer < 0: # abort history_prev
return
else:
if self.text.get("iomark", "end-1c") != prefix:
self.text.delete("iomark", "end-1c")
self.text.insert("iomark", prefix)
pointer = prefix = None
break
item = self.history[pointer]
if item[:nprefix] == prefix and len(item) > nprefix:
self.text.delete("iomark", "end-1c")
self.text.insert("iomark", item)
break
self.text.see("insert")
self.text.tag_remove("sel", "1.0", "end")
self.pointer = pointer
self.prefix = prefix
def store(self, source):
"Store Shell input statement into history list."
source = source.strip()
if len(source) > 2:
# avoid duplicates
try:
self.history.remove(source)
except ValueError:
pass
self.history.append(source)
self.pointer = None
self.prefix = None
if __name__ == "__main__":
from unittest import main
main('idlelib.idle_test.test_idlehistory', verbosity=2, exit=False)
| gpl-3.0 | 8,497,906,845,334,149,000 | -3,723,569,653,722,902,000 | 37.961538 | 80 | 0.560217 | false |
cloudbase/nova-virtualbox | nova/virt/ironic/patcher.py | 7 | 7408 | # coding=utf-8
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Helper classes for Ironic HTTP PATCH creation.
"""
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
CONF = cfg.CONF
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
def create(node):
"""Create an instance of the appropriate DriverFields class.
:param node: a node object returned from ironicclient
:returns: GenericDriverFields or a subclass thereof, as appropriate
for the supplied node.
"""
if 'pxe' in node.driver:
return PXEDriverFields(node)
else:
return GenericDriverFields(node)
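# Illustrative usage (comment only; ``node``, ``instance``, ``image_meta`` and
# ``flavor`` are assumed to come from the surrounding Ironic driver code):
#
#     patch = create(node).get_deploy_patch(instance, image_meta, flavor)
#     # -> a list of JSON-patch dicts such as
#     #    {'path': '/instance_info/image_source', 'op': 'add', 'value': image_meta['id']}
#     # which the driver then sends to Ironic's node update API.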
class GenericDriverFields(object):
def __init__(self, node):
self.node = node
def get_deploy_patch(self, instance, image_meta, flavor,
preserve_ephemeral=None):
"""Build a patch to add the required fields to deploy a node.
:param instance: the instance object.
:param image_meta: the metadata associated with the instance
image.
:param flavor: the flavor object.
:param preserve_ephemeral: preserve_ephemeral status (bool) to be
specified during rebuild.
:returns: a json-patch with the fields that needs to be updated.
"""
patch = []
patch.append({'path': '/instance_info/image_source', 'op': 'add',
'value': image_meta['id']})
patch.append({'path': '/instance_info/root_gb', 'op': 'add',
'value': str(instance.root_gb)})
patch.append({'path': '/instance_info/swap_mb', 'op': 'add',
'value': str(flavor['swap'])})
if instance.ephemeral_gb:
patch.append({'path': '/instance_info/ephemeral_gb',
'op': 'add',
'value': str(instance.ephemeral_gb)})
if CONF.default_ephemeral_format:
patch.append({'path': '/instance_info/ephemeral_format',
'op': 'add',
'value': CONF.default_ephemeral_format})
if preserve_ephemeral is not None:
patch.append({'path': '/instance_info/preserve_ephemeral',
'op': 'add', 'value': str(preserve_ephemeral)})
capabilities = {}
# read the flavor and get the extra_specs value.
extra_specs = flavor.get('extra_specs')
# scan through the extra_specs values and ignore the keys
# not starting with keyword 'capabilities'.
for key, val in six.iteritems(extra_specs):
if not key.startswith('capabilities:'):
continue
# split the extra_spec key to remove the keyword
# 'capabilities' and get the actual key.
capabilities_string, capabilities_key = key.split(':', 1)
if capabilities_key:
capabilities[capabilities_key] = val
if capabilities:
patch.append({'path': '/instance_info/capabilities',
'op': 'add', 'value': jsonutils.dumps(capabilities)})
return patch
def get_cleanup_patch(self, instance, network_info, flavor):
"""Build a patch to clean up the fields.
:param instance: the instance object.
:param network_info: the instance network information.
:param flavor: the flavor object.
:returns: a json-patch with the fields that needs to be updated.
"""
return []
class PXEDriverFields(GenericDriverFields):
def _get_kernel_ramdisk_dict(self, flavor):
"""Get the deploy ramdisk and kernel IDs from the flavor.
:param flavor: the flavor object.
:returns: a dict with the pxe options for the deploy ramdisk and
kernel if the IDs were found in the flavor, otherwise an empty
dict is returned.
"""
extra_specs = flavor['extra_specs']
deploy_kernel = extra_specs.get('baremetal:deploy_kernel_id')
deploy_ramdisk = extra_specs.get('baremetal:deploy_ramdisk_id')
deploy_ids = {}
if deploy_kernel and deploy_ramdisk:
deploy_ids['pxe_deploy_kernel'] = deploy_kernel
deploy_ids['pxe_deploy_ramdisk'] = deploy_ramdisk
return deploy_ids
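    # Example (flavor contents assumed for illustration): extra_specs of
    # {'baremetal:deploy_kernel_id': 'uuid-k',
    #  'baremetal:deploy_ramdisk_id': 'uuid-r'}
    # yields {'pxe_deploy_kernel': 'uuid-k', 'pxe_deploy_ramdisk': 'uuid-r'};
    # if either ID is missing, an empty dict is returned.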
def get_deploy_patch(self, instance, image_meta, flavor,
preserve_ephemeral=None):
"""Build a patch to add the required fields to deploy a node.
Build a json-patch to add the required fields to deploy a node
using the PXE driver.
:param instance: the instance object.
:param image_meta: the metadata associated with the instance
image.
:param flavor: the flavor object.
:param preserve_ephemeral: preserve_ephemeral status (bool) to be
specified during rebuild.
        :returns: a json-patch with the fields that need to be updated.
"""
patch = super(PXEDriverFields, self).get_deploy_patch(
instance, image_meta, flavor, preserve_ephemeral)
# TODO(lucasagomes): Remove it in Kilo. This is for backwards
# compatibility with Icehouse. If flavor contains both ramdisk
# and kernel ids, use them.
for key, value in self._get_kernel_ramdisk_dict(flavor).items():
patch.append({'path': '/driver_info/%s' % key,
'op': 'add', 'value': value})
return patch
def get_cleanup_patch(self, instance, network_info, flavor):
"""Build a patch to clean up the fields.
Build a json-patch to remove the fields used to deploy a node
using the PXE driver. Note that the fields added to the Node's
instance_info don't need to be removed because they are purged
during the Node's tear down.
:param instance: the instance object.
:param network_info: the instance network information.
:param flavor: the flavor object.
        :returns: a json-patch with the fields that need to be updated.
"""
patch = super(PXEDriverFields, self).get_cleanup_patch(
instance, network_info, flavor)
# TODO(lucasagomes): Remove it in Kilo. This is for backwards
# compatibility with Icehouse. If flavor contains a ramdisk and
# kernel id remove it from nodes as part of the tear down process
for key in self._get_kernel_ramdisk_dict(flavor):
if key in self.node.driver_info:
patch.append({'op': 'remove',
'path': '/driver_info/%s' % key})
return patch
| apache-2.0 | 5,113,681,668,116,818,000 | 5,831,671,679,446,513,000 | 37.78534 | 79 | 0.606371 | false |
KyleJamesWalker/ansible | lib/ansible/modules/cloud/openstack/_quantum_subnet.py | 12 | 10250 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: quantum_subnet
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use M(os_subnet) instead.
version_added: "1.2"
short_description: Add/remove subnet from a network
description:
- Add/remove subnet from a network
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: True
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: True
auth_url:
description:
- The keystone URL for authentication
required: false
default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
network_name:
description:
- Name of the network to which the subnet should be attached
required: true
default: None
name:
description:
- The name of the subnet that should be created
required: true
default: None
cidr:
description:
      - The CIDR representation of the subnet that should be created
required: true
default: None
tenant_name:
description:
- The name of the tenant for whom the subnet should be created
required: false
default: None
ip_version:
description:
- The IP version of the subnet 4 or 6
required: false
default: 4
enable_dhcp:
description:
- Whether DHCP should be enabled for this subnet.
required: false
default: true
gateway_ip:
description:
- The ip that would be assigned to the gateway for this subnet
required: false
default: None
dns_nameservers:
description:
- DNS nameservers for this subnet, comma-separated
required: false
default: None
version_added: "1.4"
allocation_pool_start:
description:
- From the subnet pool the starting address from which the IP should be allocated
required: false
default: None
allocation_pool_end:
description:
- From the subnet pool the last IP that should be assigned to the virtual machines
required: false
default: None
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
- name: Create a subnet for a tenant with the specified subnet
quantum_subnet:
state: present
login_username: admin
login_password: admin
login_tenant_name: admin
tenant_name: tenant1
network_name: network1
name: net1subnet
cidr: 192.168.0.0/24
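# A minimal additional sketch (not part of the original documentation):
# removing the same subnet again by setting state to absent.
- name: Remove the subnet from the tenant
  quantum_subnet:
    state: absent
    login_username: admin
    login_password: admin
    login_tenant_name: admin
    tenant_name: tenant1
    network_name: network1
    name: net1subnet
    cidr: 192.168.0.0/24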
'''
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
_os_keystone = None
_os_tenant_id = None
_os_network_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception as e:
module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception as e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception as e:
module.fail_json(msg = " Error in connecting to neutron: %s" % e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
_os_tenant_id = _os_keystone.tenant_id
else:
tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_net_id(neutron, module):
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['network_name'],
}
try:
networks = neutron.list_networks(**kwargs)
except Exception as e:
module.fail_json(msg="Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _get_subnet_id(module, neutron):
global _os_network_id
subnet_id = None
_os_network_id = _get_net_id(neutron, module)
if not _os_network_id:
module.fail_json(msg = "network id of network not found.")
else:
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['name'],
}
try:
subnets = neutron.list_subnets(**kwargs)
except Exception as e:
module.fail_json( msg = " Error in getting the subnet list:%s " % e.message)
if not subnets['subnets']:
return None
return subnets['subnets'][0]['id']
def _create_subnet(module, neutron):
neutron.format = 'json'
subnet = {
'name': module.params['name'],
'ip_version': module.params['ip_version'],
'enable_dhcp': module.params['enable_dhcp'],
'tenant_id': _os_tenant_id,
'gateway_ip': module.params['gateway_ip'],
'dns_nameservers': module.params['dns_nameservers'],
'network_id': _os_network_id,
'cidr': module.params['cidr'],
}
if module.params['allocation_pool_start'] and module.params['allocation_pool_end']:
allocation_pools = [
{
'start' : module.params['allocation_pool_start'],
'end' : module.params['allocation_pool_end']
}
]
subnet.update({'allocation_pools': allocation_pools})
if not module.params['gateway_ip']:
subnet.pop('gateway_ip')
if module.params['dns_nameservers']:
subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',')
else:
subnet.pop('dns_nameservers')
try:
new_subnet = neutron.create_subnet(dict(subnet=subnet))
except Exception as e:
module.fail_json(msg = "Failure in creating subnet: %s" % e.message)
return new_subnet['subnet']['id']
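# For illustration only (UUIDs assumed): with the playbook example above, the
# request body handed to neutron.create_subnet() looks roughly like
#   {'subnet': {'name': 'net1subnet', 'ip_version': '4', 'enable_dhcp': True,
#               'tenant_id': '<tenant-uuid>', 'network_id': '<network-uuid>',
#               'cidr': '192.168.0.0/24'}}
# because gateway_ip and dns_nameservers are popped when they are not given.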
def _delete_subnet(module, neutron, subnet_id):
try:
neutron.delete_subnet(subnet_id)
except Exception as e:
module.fail_json( msg = "Error in deleting subnet: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
network_name = dict(required=True),
cidr = dict(required=True),
tenant_name = dict(default=None),
state = dict(default='present', choices=['absent', 'present']),
ip_version = dict(default='4', choices=['4', '6']),
enable_dhcp = dict(default='true', type='bool'),
gateway_ip = dict(default=None),
dns_nameservers = dict(default=None),
allocation_pool_start = dict(default=None),
allocation_pool_end = dict(default=None),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
subnet_id = _create_subnet(module, neutron)
module.exit_json(changed = True, result = "Created" , id = subnet_id)
else:
module.exit_json(changed = False, result = "success" , id = subnet_id)
else:
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
module.exit_json(changed = False, result = "success")
else:
_delete_subnet(module, neutron, subnet_id)
module.exit_json(changed = True, result = "deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 | -7,079,613,863,702,320,000 | -1,690,484,992,644,797,700 | 31.747604 | 122 | 0.614829 | false |
IronLanguages/ironpython3 | Src/StdLib/Lib/email/encoders.py | 146 | 1786 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Encodings and related functions."""
__all__ = [
'encode_7or8bit',
'encode_base64',
'encode_noop',
'encode_quopri',
]
from base64 import encodebytes as _bencode
from quopri import encodestring as _encodestring
def _qencode(s):
enc = _encodestring(s, quotetabs=True)
# Must encode spaces, which quopri.encodestring() doesn't do
return enc.replace(b' ', b'=20')
def encode_base64(msg):
"""Encode the message's payload in Base64.
Also, add an appropriate Content-Transfer-Encoding header.
"""
orig = msg.get_payload(decode=True)
encdata = str(_bencode(orig), 'ascii')
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'base64'
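# Minimal usage sketch (illustrative, not part of the stdlib module):
#   from email.message import Message
#   msg = Message()
#   msg.set_payload(b'hello world')
#   encode_base64(msg)
#   # the payload is now base64 text and the
#   # Content-Transfer-Encoding header is set to 'base64'.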
def encode_quopri(msg):
"""Encode the message's payload in quoted-printable.
Also, add an appropriate Content-Transfer-Encoding header.
"""
orig = msg.get_payload(decode=True)
encdata = _qencode(orig)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'quoted-printable'
def encode_7or8bit(msg):
"""Set the Content-Transfer-Encoding header to 7bit or 8bit."""
orig = msg.get_payload(decode=True)
if orig is None:
# There's no payload. For backwards compatibility we use 7bit
msg['Content-Transfer-Encoding'] = '7bit'
return
# We play a trick to make this go fast. If decoding from ASCII succeeds,
# we know the data must be 7bit, otherwise treat it as 8bit.
try:
orig.decode('ascii')
except UnicodeError:
msg['Content-Transfer-Encoding'] = '8bit'
else:
msg['Content-Transfer-Encoding'] = '7bit'
def encode_noop(msg):
"""Do nothing."""
| apache-2.0 | 8,629,456,593,499,399,000 | 1,420,077,150,234,992,400 | 23.465753 | 77 | 0.660134 | false |
YehudaItkin/virt-test | virttest/staging/backports/__init__.py | 23 | 3497 | """
This module contains backported functions that are not present in Python 2.4
but are standard in more recent versions.
"""
import re
import sys
# Import backported modules
import simplejson
import collections
import itertools
if not hasattr(itertools, 'product'):
import _itertools
itertools.product = _itertools.product
# pylint: disable=I0011,W0622
# noinspection PyShadowingBuiltins
def _next(*args):
"""
Retrieve the next item from the iterator by calling its next() method.
If default is given, it is returned if the iterator is exhausted,
otherwise StopIteration is raised.
New in version 2.6.
:param iterator: the iterator
:type iterator: iterator
:param default: the value to return if the iterator raises StopIteration
:type default: object
:return: The object returned by iterator.next()
:rtype: object
"""
if len(args) == 2:
try:
return args[0].next()
except StopIteration:
return args[1]
elif len(args) > 2:
raise TypeError("next expected at most 2 arguments, %s" % len(args))
else:
return args[0].next()
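# Illustrative behaviour of the backport (assuming a Python 2 interpreter
# where iterators expose .next()):
#   it = iter([1])
#   _next(it)         # -> 1
#   _next(it, 'end')  # -> 'end' once the iterator is exhausted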
# pylint: disable=W0622
# noinspection PyShadowingBuiltins
def _any(iterable):
"""
From http://stackoverflow.com/questions/3785433/python-backports-for-some-methods
:codeauthor: Tim Pietzcker http://stackoverflow.com/users/20670/tim-pietzcker
licensed under cc-wiki with attribution required
"""
for element in iterable:
if element:
return True
return False
# pylint: disable=W0622
# noinspection PyShadowingBuiltins
def _all(iterable):
"""
From http://stackoverflow.com/questions/3785433/python-backports-for-some-methods
:codeauthor: Tim Pietzcker http://stackoverflow.com/users/20670/tim-pietzcker
licensed under cc-wiki with attribution required
"""
for element in iterable:
if not element:
return False
return True
# Adapted from http://code.activestate.com/recipes/576847/
# :codeauthor: Vishal Sapre
# :license: MIT
BIN_HEX_DICT = {
'0': '0000', '1': '0001', '2': '0010', '3': '0011', '4': '0100',
'5': '0101', '6': '0110', '7': '0111', '8': '1000', '9': '1001',
'a': '1010', 'b': '1011', 'c': '1100', 'd': '1101', 'e': '1110',
'f': '1111', 'L': ''}
# match left leading zeroes, but don't match a single 0 for the case of
# bin(0) == '0b0'
BIN_ZSTRIP = re.compile(r'^0*(?=[01])')
# pylint: disable=W0622
# noinspection PyShadowingBuiltins
def _bin(number):
"""
Adapted from http://code.activestate.com/recipes/576847/
:codeauthor: Vishal Sapre
:license: MIT
A foolishly simple look-up method of getting binary string from an integer
This happens to be faster than all other ways!!!
"""
# =========================================================
# create hex of int, remove '0x'. now for each hex char,
# look up binary string, append in list and join at the end.
# =========================================================
# replace leading left zeroes with '0b'
tmp = [BIN_HEX_DICT[hstr] for hstr in hex(number)[2:]]
return BIN_ZSTRIP.sub('0b', ''.join(tmp))
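# Quick sanity check (illustrative): _bin(10) returns '0b1010', matching the
# builtin bin() available on Python >= 2.6.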
if not hasattr(__builtins__, 'next'):
next = _next
else:
next = next
if not hasattr(__builtins__, 'any'):
any = _any
else:
any = any
if not hasattr(__builtins__, 'all'):
all = _all
else:
all = all
if not hasattr(__builtins__, 'bin'):
bin = _bin
else:
bin = bin
| gpl-2.0 | 6,464,835,136,731,482,000 | -5,951,961,987,662,817,000 | 26.753968 | 85 | 0.626537 | false |
EndyKaufman/django-postgres-angularjs-blog | app/manager/migrations/0006_properties.py | 1 | 1170 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-24 14:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('manager', '0005_add_fields_and_set_defaults'),
]
operations = [
migrations.CreateModel(
name='Properties',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=512, unique=True)),
('value', models.TextField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='date created')),
('updated', models.DateTimeField(auto_now=True, null=True, verbose_name='date updated')),
('created_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| mit | -7,454,474,897,194,405,000 | 7,267,026,965,673,940,000 | 39.344828 | 150 | 0.62906 | false |
robinro/ansible-modules-extras | network/f5/bigip_sys_db.py | 23 | 5861 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_sys_db
short_description: Manage BIG-IP system database variables
description:
- Manage BIG-IP system database variables
version_added: "2.2"
options:
key:
description:
- The database variable to manipulate.
required: true
state:
description:
- The state of the variable on the system. When C(present), guarantees
that an existing variable is set to C(value). When C(reset) sets the
variable back to the default value. At least one of value and state
C(reset) are required.
required: false
default: present
choices:
- present
- reset
value:
description:
- The value to set the key to. At least one of value and state C(reset)
        is required.
required: false
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires BIG-IP version 12.0.0 or greater
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Set the boot.quiet DB variable on the BIG-IP
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "boot.quiet"
value: "disable"
delegate_to: localhost
- name: Disable the initial setup screen
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "setup.run"
value: "false"
delegate_to: localhost
- name: Reset the initial setup screen
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "setup.run"
state: "reset"
delegate_to: localhost
'''
RETURN = '''
name:
description: The key in the system database that was specified
returned: changed and success
type: string
sample: "setup.run"
default_value:
description: The default value of the key
returned: changed and success
type: string
sample: "true"
value:
description: The value that you set the key to
returned: changed and success
type: string
sample: "false"
'''
try:
from f5.bigip import ManagementRoot
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
class BigIpSysDb(object):
def __init__(self, *args, **kwargs):
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
self.params = kwargs
self.api = ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def flush(self):
result = dict()
state = self.params['state']
value = self.params['value']
if not state == 'reset' and not value:
raise F5ModuleError(
"When setting a key, a value must be supplied"
)
current = self.read()
if self.params['check_mode']:
if value == current:
changed = False
else:
changed = True
else:
if state == "present":
changed = self.present()
elif state == "reset":
changed = self.reset()
current = self.read()
result.update(
name=current.name,
default_value=current.defaultValue,
value=current.value
)
result.update(dict(changed=changed))
return result
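    # Shape of the returned dict (illustrative values, matching the RETURN
    # documentation above):
    #   {'name': 'setup.run', 'default_value': 'true', 'value': 'false',
    #    'changed': True}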
def read(self):
dbs = self.api.tm.sys.dbs.db.load(
name=self.params['key']
)
return dbs
def present(self):
current = self.read()
if current.value == self.params['value']:
return False
current.update(value=self.params['value'])
current.refresh()
if current.value != self.params['value']:
raise F5ModuleError(
"Failed to set the DB variable"
)
return True
def reset(self):
current = self.read()
default = current.defaultValue
if current.value == default:
return False
current.update(value=default)
current.refresh()
if current.value != current.defaultValue:
raise F5ModuleError(
"Failed to reset the DB variable"
)
return True
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
key=dict(required=True),
state=dict(default='present', choices=['present', 'reset']),
value=dict(required=False, default=None)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
try:
obj = BigIpSysDb(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
| gpl-3.0 | 237,915,758,964,182,560 | -2,990,151,952,655,723,500 | 25.282511 | 77 | 0.601092 | false |
bert9bert/statsmodels | statsmodels/tsa/statespace/kalman_filter.py | 2 | 86079 | """
State Space Representation and Kalman Filter
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
import numpy as np
from .representation import OptionWrapper, Representation, FrozenRepresentation
from .tools import (validate_vector_shape, validate_matrix_shape,
reorder_missing_matrix, reorder_missing_vector)
from . import tools
from statsmodels.tools.sm_exceptions import ValueWarning
# Define constants
FILTER_CONVENTIONAL = 0x01 # Durbin and Koopman (2012), Chapter 4
FILTER_EXACT_INITIAL = 0x02 # ibid., Chapter 5.6
FILTER_AUGMENTED = 0x04 # ibid., Chapter 5.7
FILTER_SQUARE_ROOT = 0x08 # ibid., Chapter 6.3
FILTER_UNIVARIATE = 0x10 # ibid., Chapter 6.4
FILTER_COLLAPSED = 0x20 # ibid., Chapter 6.5
FILTER_EXTENDED = 0x40 # ibid., Chapter 10.2
FILTER_UNSCENTED = 0x80 # ibid., Chapter 10.3
INVERT_UNIVARIATE = 0x01
SOLVE_LU = 0x02
INVERT_LU = 0x04
SOLVE_CHOLESKY = 0x08
INVERT_CHOLESKY = 0x10
STABILITY_FORCE_SYMMETRY = 0x01
MEMORY_STORE_ALL = 0
MEMORY_NO_FORECAST = 0x01
MEMORY_NO_PREDICTED = 0x02
MEMORY_NO_FILTERED = 0x04
MEMORY_NO_LIKELIHOOD = 0x08
MEMORY_NO_GAIN = 0x10
MEMORY_NO_SMOOTHING = 0x20
MEMORY_NO_STD_FORECAST = 0x40
MEMORY_CONSERVE = (
MEMORY_NO_FORECAST | MEMORY_NO_PREDICTED | MEMORY_NO_FILTERED |
MEMORY_NO_LIKELIHOOD | MEMORY_NO_GAIN | MEMORY_NO_SMOOTHING |
MEMORY_NO_STD_FORECAST
)
TIMING_INIT_PREDICTED = 0
TIMING_INIT_FILTERED = 1
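# Illustration of how the bitmask constants above combine (not part of the
# original source): selecting univariate plus collapsed filtering gives
#   FILTER_UNIVARIATE | FILTER_COLLAPSED  # == 0x30
# and an individual flag is tested with a bitwise and, e.g.
#   bool(filter_method & FILTER_COLLAPSED)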
class KalmanFilter(Representation):
r"""
State space representation of a time series process, with Kalman filter
Parameters
----------
k_endog : array_like or integer
The observed time-series process :math:`y` if array like or the
number of variables in the process if an integer.
k_states : int
The dimension of the unobserved state process.
k_posdef : int, optional
The dimension of a guaranteed positive definite covariance matrix
describing the shocks in the measurement equation. Must be less than
or equal to `k_states`. Default is `k_states`.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
results_class : class, optional
Default results class to use to save filtering output. Default is
`FilterResults`. If specified, class must extend from `FilterResults`.
**kwargs
Keyword arguments may be used to provide values for the filter,
inversion, and stability methods. See `set_filter_method`,
`set_inversion_method`, and `set_stability_method`.
Keyword arguments may be used to provide default values for state space
matrices. See `Representation` for more details.
Notes
-----
There are several types of options available for controlling the Kalman
filter operation. All options are internally held as bitmasks, but can be
manipulated by setting class attributes, which act like boolean flags. For
more information, see the `set_*` class method documentation. The options
are:
filter_method
The filtering method controls aspects of which
Kalman filtering approach will be used.
inversion_method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
stability_method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
conserve_memory
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
filter_timing
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
The `filter_method` and `inversion_method` options intentionally allow
the possibility that multiple methods will be indicated. In the case that
multiple methods are selected, the underlying Kalman filter will attempt to
    select the optimal method given the input data.
For example, it may be that INVERT_UNIVARIATE and SOLVE_CHOLESKY are
indicated (this is in fact the default case). In this case, if the
endogenous vector is 1-dimensional (`k_endog` = 1), then INVERT_UNIVARIATE
is used and inversion reduces to simple division, and if it has a larger
dimension, the Cholesky decomposition along with linear solving (rather
than explicit matrix inversion) is used. If only SOLVE_CHOLESKY had been
set, then the Cholesky decomposition method would *always* be used, even in
the case of 1-dimensional data.
See Also
--------
FilterResults
statsmodels.tsa.statespace.representation.Representation
"""
filter_methods = [
'filter_conventional', 'filter_exact_initial', 'filter_augmented',
'filter_square_root', 'filter_univariate', 'filter_collapsed',
'filter_extended', 'filter_unscented'
]
filter_conventional = OptionWrapper('filter_method', FILTER_CONVENTIONAL)
"""
(bool) Flag for conventional Kalman filtering.
"""
filter_exact_initial = OptionWrapper('filter_method', FILTER_EXACT_INITIAL)
"""
(bool) Flag for exact initial Kalman filtering. Not implemented.
"""
filter_augmented = OptionWrapper('filter_method', FILTER_AUGMENTED)
"""
(bool) Flag for augmented Kalman filtering. Not implemented.
"""
filter_square_root = OptionWrapper('filter_method', FILTER_SQUARE_ROOT)
"""
(bool) Flag for square-root Kalman filtering. Not implemented.
"""
filter_univariate = OptionWrapper('filter_method', FILTER_UNIVARIATE)
"""
(bool) Flag for univariate filtering of multivariate observation vector.
"""
filter_collapsed = OptionWrapper('filter_method', FILTER_COLLAPSED)
"""
(bool) Flag for Kalman filtering with collapsed observation vector.
"""
filter_extended = OptionWrapper('filter_method', FILTER_EXTENDED)
"""
(bool) Flag for extended Kalman filtering. Not implemented.
"""
filter_unscented = OptionWrapper('filter_method', FILTER_UNSCENTED)
"""
(bool) Flag for unscented Kalman filtering. Not implemented.
"""
inversion_methods = [
'invert_univariate', 'solve_lu', 'invert_lu', 'solve_cholesky',
'invert_cholesky'
]
invert_univariate = OptionWrapper('inversion_method', INVERT_UNIVARIATE)
"""
(bool) Flag for univariate inversion method (recommended).
"""
solve_lu = OptionWrapper('inversion_method', SOLVE_LU)
"""
(bool) Flag for LU and linear solver inversion method.
"""
invert_lu = OptionWrapper('inversion_method', INVERT_LU)
"""
(bool) Flag for LU inversion method.
"""
solve_cholesky = OptionWrapper('inversion_method', SOLVE_CHOLESKY)
"""
(bool) Flag for Cholesky and linear solver inversion method (recommended).
"""
invert_cholesky = OptionWrapper('inversion_method', INVERT_CHOLESKY)
"""
(bool) Flag for Cholesky inversion method.
"""
stability_methods = ['stability_force_symmetry']
stability_force_symmetry = (
OptionWrapper('stability_method', STABILITY_FORCE_SYMMETRY)
)
"""
(bool) Flag for enforcing covariance matrix symmetry
"""
memory_options = [
'memory_store_all', 'memory_no_forecast', 'memory_no_predicted',
'memory_no_filtered', 'memory_no_likelihood', 'memory_no_gain',
'memory_no_smoothing', 'memory_no_std_forecast', 'memory_conserve'
]
memory_store_all = OptionWrapper('conserve_memory', MEMORY_STORE_ALL)
"""
(bool) Flag for storing all intermediate results in memory (default).
"""
memory_no_forecast = OptionWrapper('conserve_memory', MEMORY_NO_FORECAST)
"""
(bool) Flag to prevent storing forecasts.
"""
memory_no_predicted = OptionWrapper('conserve_memory', MEMORY_NO_PREDICTED)
"""
(bool) Flag to prevent storing predicted state and covariance matrices.
"""
memory_no_filtered = OptionWrapper('conserve_memory', MEMORY_NO_FILTERED)
"""
(bool) Flag to prevent storing filtered state and covariance matrices.
"""
memory_no_likelihood = (
OptionWrapper('conserve_memory', MEMORY_NO_LIKELIHOOD)
)
"""
(bool) Flag to prevent storing likelihood values for each observation.
"""
memory_no_gain = OptionWrapper('conserve_memory', MEMORY_NO_GAIN)
"""
(bool) Flag to prevent storing the Kalman gain matrices.
"""
memory_no_smoothing = OptionWrapper('conserve_memory', MEMORY_NO_SMOOTHING)
"""
(bool) Flag to prevent storing likelihood values for each observation.
"""
memory_no_std_forecast = (
OptionWrapper('conserve_memory', MEMORY_NO_STD_FORECAST))
"""
(bool) Flag to prevent storing standardized forecast errors.
"""
memory_conserve = OptionWrapper('conserve_memory', MEMORY_CONSERVE)
"""
(bool) Flag to conserve the maximum amount of memory.
"""
timing_options = [
'timing_init_predicted', 'timing_init_filtered'
]
timing_init_predicted = OptionWrapper('filter_timing',
TIMING_INIT_PREDICTED)
"""
(bool) Flag for the default timing convention (Durbin and Koopman, 2012).
"""
timing_init_filtered = OptionWrapper('filter_timing', TIMING_INIT_FILTERED)
"""
    (bool) Flag for the alternate timing convention (Kim and Nelson, 1999).
"""
# Default filter options
filter_method = FILTER_CONVENTIONAL
"""
(int) Filtering method bitmask.
"""
inversion_method = INVERT_UNIVARIATE | SOLVE_CHOLESKY
"""
(int) Inversion method bitmask.
"""
stability_method = STABILITY_FORCE_SYMMETRY
"""
(int) Stability method bitmask.
"""
conserve_memory = MEMORY_STORE_ALL
"""
(int) Memory conservation bitmask.
"""
filter_timing = TIMING_INIT_PREDICTED
"""
(int) Filter timing.
"""
def __init__(self, k_endog, k_states, k_posdef=None,
loglikelihood_burn=0, tolerance=1e-19, results_class=None,
kalman_filter_classes=None, **kwargs):
super(KalmanFilter, self).__init__(
k_endog, k_states, k_posdef, **kwargs
)
# Setup the underlying Kalman filter storage
self._kalman_filters = {}
# Filter options
self.loglikelihood_burn = loglikelihood_burn
self.results_class = (
results_class if results_class is not None else FilterResults
)
# Options
self.prefix_kalman_filter_map = (
kalman_filter_classes
if kalman_filter_classes is not None
else tools.prefix_kalman_filter_map.copy())
self.set_filter_method(**kwargs)
self.set_inversion_method(**kwargs)
self.set_stability_method(**kwargs)
self.set_conserve_memory(**kwargs)
self.set_filter_timing(**kwargs)
self.tolerance = tolerance
@property
def _kalman_filter(self):
prefix = self.prefix
if prefix in self._kalman_filters:
return self._kalman_filters[prefix]
return None
def _initialize_filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
tolerance=None, filter_timing=None,
loglikelihood_burn=None):
if filter_method is None:
filter_method = self.filter_method
if inversion_method is None:
inversion_method = self.inversion_method
if stability_method is None:
stability_method = self.stability_method
if conserve_memory is None:
conserve_memory = self.conserve_memory
if loglikelihood_burn is None:
loglikelihood_burn = self.loglikelihood_burn
if filter_timing is None:
filter_timing = self.filter_timing
if tolerance is None:
tolerance = self.tolerance
# Make sure we have endog
if self.endog is None:
raise RuntimeError('Must bind a dataset to the model before'
' filtering or smoothing.')
# Initialize the representation matrices
prefix, dtype, create_statespace = self._initialize_representation()
# Determine if we need to (re-)create the filter
# (definitely need to recreate if we recreated the _statespace object)
create_filter = create_statespace or prefix not in self._kalman_filters
if not create_filter:
kalman_filter = self._kalman_filters[prefix]
create_filter = (
not kalman_filter.conserve_memory == conserve_memory or
not kalman_filter.loglikelihood_burn == loglikelihood_burn
)
# If the dtype-specific _kalman_filter does not exist (or if we need
# to re-create it), create it
if create_filter:
if prefix in self._kalman_filters:
# Delete the old filter
del self._kalman_filters[prefix]
# Setup the filter
cls = self.prefix_kalman_filter_map[prefix]
self._kalman_filters[prefix] = cls(
self._statespaces[prefix], filter_method, inversion_method,
stability_method, conserve_memory, filter_timing, tolerance,
loglikelihood_burn
)
# Otherwise, update the filter parameters
else:
kalman_filter = self._kalman_filters[prefix]
kalman_filter.set_filter_method(filter_method, False)
kalman_filter.inversion_method = inversion_method
kalman_filter.stability_method = stability_method
kalman_filter.filter_timing = filter_timing
kalman_filter.tolerance = tolerance
# conserve_memory and loglikelihood_burn changes always lead to
# re-created filters
return prefix, dtype, create_filter, create_statespace
def set_filter_method(self, filter_method=None, **kwargs):
r"""
Set the filtering method
The filtering method controls aspects of which Kalman filtering
approach will be used.
Parameters
----------
filter_method : integer, optional
Bitmask value to set the filter method to. See notes for details.
**kwargs
Keyword arguments may be used to influence the filter method by
setting individual boolean flags. See notes for details.
Notes
-----
The filtering method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
FILTER_CONVENTIONAL = 0x01
Conventional Kalman filter.
FILTER_UNIVARIATE = 0x10
Univariate approach to Kalman filtering. Overrides conventional
method if both are specified.
FILTER_COLLAPSED = 0x20
Collapsed approach to Kalman filtering. Will be used *in addition*
to conventional or univariate filtering.
Note that only the first method is available if using a Scipy version
older than 0.16.
If the bitmask is set directly via the `filter_method` argument, then
the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the filter method may also be specified by directly modifying
the class attributes which are defined similarly to the keyword
arguments.
The default filtering method is FILTER_CONVENTIONAL.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.filter_method
1
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
>>> mod.ssm.set_filter_method(filter_univariate=False,
... filter_collapsed=True)
>>> mod.ssm.filter_method
33
>>> mod.ssm.set_filter_method(filter_method=1)
>>> mod.ssm.filter_conventional
True
>>> mod.ssm.filter_univariate
False
>>> mod.ssm.filter_collapsed
False
>>> mod.ssm.filter_univariate = True
>>> mod.ssm.filter_method
17
"""
if filter_method is not None:
self.filter_method = filter_method
for name in KalmanFilter.filter_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
if self._compatibility_mode and not self.filter_method == 1:
raise NotImplementedError('Only conventional Kalman filtering'
' is available. Consider updating'
' dependencies for more options.')
def set_inversion_method(self, inversion_method=None, **kwargs):
r"""
Set the inversion method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
Parameters
----------
inversion_method : integer, optional
Bitmask value to set the inversion method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the inversion method by
setting individual boolean flags. See notes for details.
Notes
-----
The inversion method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
INVERT_UNIVARIATE = 0x01
If the endogenous time series is univariate, then inversion can be
performed by simple division. If this flag is set and the time
series is univariate, then division will always be used even if
other flags are also set.
SOLVE_LU = 0x02
Use an LU decomposition along with a linear solver (rather than
ever actually inverting the matrix).
INVERT_LU = 0x04
Use an LU decomposition along with typical matrix inversion.
SOLVE_CHOLESKY = 0x08
Use a Cholesky decomposition along with a linear solver.
INVERT_CHOLESKY = 0x10
Use an Cholesky decomposition along with typical matrix inversion.
If the bitmask is set directly via the `inversion_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the inversion method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default inversion method is `INVERT_UNIVARIATE | SOLVE_CHOLESKY`
Several things to keep in mind are:
- If the filtering method is specified to be univariate, then simple
division is always used regardless of the dimension of the endogenous
time series.
- Cholesky decomposition is about twice as fast as LU decomposition,
but it requires that the matrix be positive definite. While this
should generally be true, it may not be in every case.
- Using a linear solver rather than true matrix inversion is generally
faster and is numerically more stable.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.inversion_method
        9
>>> mod.ssm.solve_cholesky
True
>>> mod.ssm.invert_univariate
True
>>> mod.ssm.invert_lu
False
>>> mod.ssm.invert_univariate = False
>>> mod.ssm.inversion_method
8
>>> mod.ssm.set_inversion_method(solve_cholesky=False,
... invert_cholesky=True)
>>> mod.ssm.inversion_method
16
"""
if inversion_method is not None:
self.inversion_method = inversion_method
for name in KalmanFilter.inversion_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_stability_method(self, stability_method=None, **kwargs):
r"""
Set the numerical stability method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
Parameters
----------
stability_method : integer, optional
Bitmask value to set the stability method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the stability method by
setting individual boolean flags. See notes for details.
Notes
-----
The stability method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
STABILITY_FORCE_SYMMETRY = 0x01
If this flag is set, symmetry of the predicted state covariance
matrix is enforced at each iteration of the filter, where each
element is set to the average of the corresponding elements in the
upper and lower triangle.
If the bitmask is set directly via the `stability_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the stability method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default stability method is `STABILITY_FORCE_SYMMETRY`
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.ssm.stability_method
1
>>> mod.ssm.stability_force_symmetry
True
>>> mod.ssm.stability_force_symmetry = False
>>> mod.ssm.stability_method
0
"""
if stability_method is not None:
self.stability_method = stability_method
for name in KalmanFilter.stability_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_conserve_memory(self, conserve_memory=None, **kwargs):
r"""
Set the memory conservation method
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
Parameters
----------
conserve_memory : integer, optional
Bitmask value to set the memory conservation method to. See notes
for details.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
Notes
-----
The memory conservation method is defined by a collection of boolean
flags, and is internally stored as a bitmask. The methods available
are:
MEMORY_STORE_ALL = 0
Store all intermediate matrices. This is the default value.
MEMORY_NO_FORECAST = 0x01
Do not store the forecast, forecast error, or forecast error
covariance matrices. If this option is used, the `predict` method
from the results class is unavailable.
MEMORY_NO_PREDICTED = 0x02
Do not store the predicted state or predicted state covariance
matrices.
MEMORY_NO_FILTERED = 0x04
Do not store the filtered state or filtered state covariance
matrices.
MEMORY_NO_LIKELIHOOD = 0x08
Do not store the vector of loglikelihood values for each
observation. Only the sum of the loglikelihood values is stored.
MEMORY_NO_GAIN = 0x10
Do not store the Kalman gain matrices.
MEMORY_NO_SMOOTHING = 0x20
            Do not store temporary variables related to Kalman smoothing. If
this option is used, smoothing is unavailable.
        MEMORY_NO_STD_FORECAST = 0x40
Do not store standardized forecast errors.
MEMORY_CONSERVE
Do not store any intermediate matrices.
Note that if using a Scipy version less than 0.16, the options
MEMORY_NO_GAIN, MEMORY_NO_SMOOTHING, and MEMORY_NO_STD_FORECAST
have no effect.
If the bitmask is set directly via the `conserve_memory` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the memory conservation method may also be specified by
directly modifying the class attributes which are defined similarly to
the keyword arguments.
The default memory conservation method is `MEMORY_STORE_ALL`, so that
all intermediate matrices are stored.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> mod.ssm.conserve_memory
0
>>> mod.ssm.memory_no_predicted
False
>>> mod.ssm.memory_no_predicted = True
>>> mod.ssm.conserve_memory
2
>>> mod.ssm.set_conserve_memory(memory_no_filtered=True,
... memory_no_forecast=True)
>>> mod.ssm.conserve_memory
7
"""
if conserve_memory is not None:
self.conserve_memory = conserve_memory
for name in KalmanFilter.memory_options:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_filter_timing(self, alternate_timing=None, **kwargs):
r"""
Set the filter timing convention
By default, the Kalman filter follows Durbin and Koopman, 2012, in
initializing the filter with predicted values. Kim and Nelson, 1999,
instead initialize the filter with filtered values, which is
essentially just a different timing convention.
Parameters
----------
alternate_timing : integer, optional
Whether or not to use the alternate timing convention. Default is
unspecified.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
"""
if alternate_timing is not None:
self.filter_timing = int(alternate_timing)
if 'timing_init_predicted' in kwargs:
self.filter_timing = int(not kwargs['timing_init_predicted'])
if 'timing_init_filtered' in kwargs:
self.filter_timing = int(kwargs['timing_init_filtered'])
if (self._compatibility_mode and
self.filter_timing == TIMING_INIT_FILTERED):
raise NotImplementedError('Only "predicted" Kalman filter'
' timing is available. Consider'
' updating dependencies for more'
' options.')
def _filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
filter_timing=None, tolerance=None, loglikelihood_burn=None,
complex_step=False):
# Initialize the filter
prefix, dtype, create_filter, create_statespace = (
self._initialize_filter(
filter_method, inversion_method, stability_method,
conserve_memory, filter_timing, tolerance, loglikelihood_burn
)
)
kfilter = self._kalman_filters[prefix]
# Initialize the state
self._initialize_state(prefix=prefix, complex_step=complex_step)
# Run the filter
kfilter()
tmp = np.array(kfilter.loglikelihood)
tmp2 = np.array(kfilter.predicted_state)
return kfilter
def filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None, filter_timing=None,
tolerance=None, loglikelihood_burn=None, complex_step=False):
r"""
Apply the Kalman filter to the statespace model.
Parameters
----------
filter_method : int, optional
Determines which Kalman filter to use. Default is conventional.
inversion_method : int, optional
Determines which inversion technique to use. Default is by Cholesky
decomposition.
stability_method : int, optional
Determines which numerical stability techniques to use. Default is
to enforce symmetry of the predicted state covariance matrix.
conserve_memory : int, optional
Determines what output from the filter to store. Default is to
store everything.
filter_timing : int, optional
Determines the timing convention of the filter. Default is that
from Durbin and Koopman (2012), in which the filter is initialized
with predicted values.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
Notes
-----
This function by default does not compute variables required for
smoothing.
"""
if conserve_memory is None:
conserve_memory = self.conserve_memory | MEMORY_NO_SMOOTHING
# Run the filter
kfilter = self._filter(
filter_method, inversion_method, stability_method, conserve_memory,
filter_timing, tolerance, loglikelihood_burn, complex_step)
tmp = np.array(kfilter.loglikelihood)
# Create the results object
results = self.results_class(self)
results.update_representation(self)
results.update_filter(kfilter)
return results
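    # Typical call pattern (illustrative; assumes data and system matrices are
    # already bound to the model):
    #   res = kf.filter()
    #   res.filtered_state   # (k_states x nobs) filtered state means
    #   res.forecasts        # (k_endog x nobs) one-step-ahead forecasts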
def loglike(self, **kwargs):
r"""
Calculate the loglikelihood associated with the statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Returns
-------
loglike : float
The joint loglikelihood.
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD
kfilter = self._filter(**kwargs)
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
return np.sum(kfilter.loglikelihood[loglikelihood_burn:])
def loglikeobs(self, **kwargs):
r"""
Calculate the loglikelihood for each observation associated with the
statespace model.
Parameters
----------
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Notes
-----
If `loglikelihood_burn` is positive, then the entries in the returned
loglikelihood vector are set to be zero for those initial time periods.
Returns
-------
loglike : array of float
Array of loglikelihood values for each observation.
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD
kfilter = self._filter(**kwargs)
llf_obs = np.array(kfilter.loglikelihood, copy=True)
# Set any burned observations to have zero likelihood
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
llf_obs[:loglikelihood_burn] = 0
return llf_obs
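    # Note (illustrative): with loglikelihood_burn == 0 the two methods agree,
    # i.e. kf.loglike() == kf.loglikeobs().sum()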
def simulate(self, nsimulations, measurement_shocks=None,
state_shocks=None, initial_state=None):
r"""
Simulate a new time series following the state space model
Parameters
----------
nsimulations : int
The number of observations to simulate. If the model is
time-invariant this can be any number. If the model is
time-varying, then this number must be less than or equal to the
number
measurement_shocks : array_like, optional
If specified, these are the shocks to the measurement equation,
:math:`\varepsilon_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
same as in the state space model.
state_shocks : array_like, optional
If specified, these are the shocks to the state equation,
:math:`\eta_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
same as in the state space model.
initial_state : array_like, optional
If specified, this is the state vector at time zero, which should
be shaped (`k_states` x 1), where `k_states` is the same as in the
state space model. If unspecified, but the model has been
initialized, then that initialization is used. If unspecified and
the model has not been initialized, then a vector of zeros is used.
Note that this is not included in the returned `simulated_states`
array.
Returns
-------
simulated_obs : array
An (nsimulations x k_endog) array of simulated observations.
simulated_states : array
An (nsimulations x k_states) array of simulated states.
"""
time_invariant = self.time_invariant
# Check for valid number of simulations
if not time_invariant and nsimulations > self.nobs:
raise ValueError('In a time-varying model, cannot create more'
' simulations than there are observations.')
# Check / generate measurement shocks
if measurement_shocks is not None:
measurement_shocks = np.array(measurement_shocks)
if measurement_shocks.ndim == 0:
measurement_shocks = measurement_shocks[np.newaxis, np.newaxis]
elif measurement_shocks.ndim == 1:
measurement_shocks = measurement_shocks[:, np.newaxis]
if not measurement_shocks.shape == (nsimulations, self.k_endog):
raise ValueError('Invalid shape of provided measurement'
' shocks. Required (%d, %d)'
% (nsimulations, self.k_endog))
elif self.shapes['obs_cov'][-1] == 1:
measurement_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov'],
size=nsimulations)
# Check / generate state shocks
if state_shocks is not None:
state_shocks = np.array(state_shocks)
if state_shocks.ndim == 0:
state_shocks = state_shocks[np.newaxis, np.newaxis]
elif state_shocks.ndim == 1:
state_shocks = state_shocks[:, np.newaxis]
if not state_shocks.shape == (nsimulations, self.k_posdef):
raise ValueError('Invalid shape of provided state shocks.'
' Required (%d, %d).'
% (nsimulations, self.k_posdef))
elif self.shapes['state_cov'][-1] == 1:
state_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef), cov=self['state_cov'],
size=nsimulations)
# Get the initial states
if initial_state is not None:
initial_state = np.array(initial_state)
if initial_state.ndim == 0:
initial_state = initial_state[np.newaxis]
elif (initial_state.ndim > 1 and
not initial_state.shape == (self.k_states, 1)):
raise ValueError('Invalid shape of provided initial state'
' vector. Required (%d, 1)' % self.k_states)
elif self.initialization == 'known':
initial_state = np.random.multivariate_normal(
self._initial_state, self._initial_state_cov)
elif self.initialization == 'stationary':
from scipy.linalg import solve_discrete_lyapunov
# (I - T)^{-1} c = x => (I - T) x = c
initial_state_mean = np.linalg.solve(
np.eye(self.k_states) - self['transition', :, :, 0],
self['state_intercept', :, 0])
R = self['selection', :, :, 0]
Q = self['state_cov', :, :, 0]
selected_state_cov = R.dot(Q).dot(R.T)
initial_state_cov = solve_discrete_lyapunov(
self['transition', :, :, 0], selected_state_cov)
initial_state = np.random.multivariate_normal(
initial_state_mean, initial_state_cov)
elif self.initialization == 'approximate_diffuse':
initial_state = np.zeros(self.k_states)
else:
initial_state = np.zeros(self.k_states)
return self._simulate(nsimulations, measurement_shocks, state_shocks,
initial_state)
def _simulate(self, nsimulations, measurement_shocks, state_shocks,
initial_state):
time_invariant = self.time_invariant
# Holding variables for the simulations
simulated_obs = np.zeros((nsimulations, self.k_endog),
dtype=self.dtype)
simulated_states = np.zeros((nsimulations+1, self.k_states),
dtype=self.dtype)
simulated_states[0] = initial_state
# Perform iterations to create the new time series
obs_intercept_t = 0
design_t = 0
state_intercept_t = 0
transition_t = 0
selection_t = 0
for t in range(nsimulations):
            # Get the current shocks (this accommodates time-varying matrices)
if measurement_shocks is None:
measurement_shock = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov', :, :, t])
else:
measurement_shock = measurement_shocks[t]
if state_shocks is None:
state_shock = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef),
cov=self['state_cov', :, :, t])
else:
state_shock = state_shocks[t]
# Get current-iteration matrices
if not time_invariant:
obs_intercept_t = 0 if self.obs_intercept.shape[-1] == 1 else t
design_t = 0 if self.design.shape[-1] == 1 else t
state_intercept_t = (
0 if self.state_intercept.shape[-1] == 1 else t)
transition_t = 0 if self.transition.shape[-1] == 1 else t
selection_t = 0 if self.selection.shape[-1] == 1 else t
obs_intercept = self['obs_intercept', :, obs_intercept_t]
design = self['design', :, :, design_t]
state_intercept = self['state_intercept', :, state_intercept_t]
transition = self['transition', :, :, transition_t]
selection = self['selection', :, :, selection_t]
# Iterate the measurement equation
simulated_obs[t] = (
obs_intercept + np.dot(design, simulated_states[t]) +
measurement_shock)
# Iterate the state equation
simulated_states[t+1] = (
state_intercept + np.dot(transition, simulated_states[t]) +
np.dot(selection, state_shock))
return simulated_obs, simulated_states[:-1]
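    # Illustrative call (assumes the state space matrices are bound and an
    # initialization has been set if needed):
    #   obs, states = kf.simulate(100)
    # where obs has shape (100, k_endog) and states has shape (100, k_states).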
def impulse_responses(self, steps=10, impulse=0, orthogonalized=False,
cumulative=False, **kwargs):
r"""
Impulse response function
Parameters
----------
steps : int, optional
The number of steps for which impulse responses are calculated.
Default is 10. Note that the initial impulse is not counted as a
step, so if `steps=1`, the output will have 2 entries.
impulse : int or array_like
If an integer, the state innovation to pulse; must be between 0
and `k_posdef-1` where `k_posdef` is the same as in the state
space model. Alternatively, a custom impulse vector may be
provided; must be a column vector with shape `(k_posdef, 1)`.
orthogonalized : boolean, optional
Whether or not to perform impulse using orthogonalized innovations.
            Note that this will also affect custom `impulse` vectors. Default
is False.
cumulative : boolean, optional
Whether or not to return cumulative impulse responses. Default is
False.
**kwargs
If the model is time-varying and `steps` is greater than the number
of observations, any of the state space representation matrices
that are time-varying must have updated values provided for the
out-of-sample steps.
For example, if `design` is a time-varying component, `nobs` is 10,
and `steps` is 15, a (`k_endog` x `k_states` x 5) matrix must be
provided with the new design matrix values.
Returns
-------
impulse_responses : array
Responses for each endogenous variable due to the impulse
given by the `impulse` argument. A (steps + 1 x k_endog) array.
Notes
-----
Intercepts in the measurement and state equation are ignored when
calculating impulse responses.
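        Examples
        --------
        Illustrative sketch only (not a tested doctest); the AR(1)-style
        specification below is an assumption, not taken from this module:
        >>> import numpy as np
        >>> mod = KalmanFilter(k_endog=1, k_states=1, k_posdef=1)
        >>> mod['design', 0, 0] = 1.
        >>> mod['transition', 0, 0] = 0.5
        >>> mod['selection', 0, 0] = 1.
        >>> mod['state_cov', 0, 0] = 1.
        >>> irf = mod.impulse_responses(steps=4)  # shape (5, 1): 1, 0.5, 0.25, ...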
"""
# Since the first step is the impulse itself, we actually want steps+1
steps += 1
# Check for what kind of impulse we want
if type(impulse) == int:
if impulse >= self.k_posdef or impulse < 0:
raise ValueError('Invalid value for `impulse`. Must be the'
' index of one of the state innovations.')
# Create the (non-orthogonalized) impulse vector
idx = impulse
impulse = np.zeros(self.k_posdef)
impulse[idx] = 1
else:
impulse = np.array(impulse)
if impulse.ndim > 1:
impulse = np.squeeze(impulse)
if not impulse.shape == (self.k_posdef,):
raise ValueError('Invalid impulse vector. Must be shaped'
' (%d,)' % self.k_posdef)
# Orthogonalize the impulses, if requested, using Cholesky on the
# first state covariance matrix
if orthogonalized:
state_chol = np.linalg.cholesky(self.state_cov[:, :, 0])
impulse = np.dot(state_chol, impulse)
# If we have a time-invariant system, we can solve for the IRF directly
if self.time_invariant:
# Get the state space matrices
design = self.design[:, :, 0]
transition = self.transition[:, :, 0]
selection = self.selection[:, :, 0]
# Holding arrays
irf = np.zeros((steps, self.k_endog), dtype=self.dtype)
states = np.zeros((steps, self.k_states), dtype=self.dtype)
# First iteration
states[0] = np.dot(selection, impulse)
irf[0] = np.dot(design, states[0])
# Iterations
for t in range(1, steps):
states[t] = np.dot(transition, states[t-1])
irf[t] = np.dot(design, states[t])
# Otherwise, create a new model
else:
# Get the basic model components
representation = {}
for name, shape in self.shapes.items():
if name in ['obs', 'obs_intercept', 'state_intercept']:
continue
representation[name] = getattr(self, name)
# Allow additional specification
warning = ('Model has time-invariant %s matrix, so the %s'
' argument to `irf` has been ignored.')
exception = ('Impulse response functions for models with'
' time-varying %s matrix requires an updated'
' time-varying matrix for any periods beyond those in'
' the original model.')
for name, shape in self.shapes.items():
if name in ['obs', 'obs_intercept', 'state_intercept']:
continue
if representation[name].shape[-1] == 1:
if name in kwargs:
warn(warning % (name, name), ValueWarning)
elif name not in kwargs:
raise ValueError(exception % name)
else:
mat = np.asarray(kwargs[name])
validate_matrix_shape(name, mat.shape, shape[0],
shape[1], steps)
if mat.ndim < 3 or not mat.shape[2] == steps:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
# Setup the new statespace representation
model_kwargs = {
'filter_method': self.filter_method,
'inversion_method': self.inversion_method,
'stability_method': self.stability_method,
'conserve_memory': self.conserve_memory,
'tolerance': self.tolerance,
'loglikelihood_burn': self.loglikelihood_burn
}
model_kwargs.update(representation)
model = KalmanFilter(np.zeros(self.endog.T.shape), self.k_states,
self.k_posdef, **model_kwargs)
model.initialize_approximate_diffuse()
model._initialize_filter()
model._initialize_state()
# Get the impulse response function via simulation of the state
# space model, but with other shocks set to zero
# Since simulate returns the zero-th period, we need to simulate
# steps + 1 periods and exclude the zero-th observation.
steps += 1
measurement_shocks = np.zeros((steps, self.k_endog))
state_shocks = np.zeros((steps, self.k_posdef))
state_shocks[0] = impulse
irf, _ = model.simulate(
steps, measurement_shocks=measurement_shocks,
state_shocks=state_shocks)
irf = irf[1:]
# Get the cumulative response if requested
if cumulative:
irf = np.cumsum(irf, axis=0)
return irf
class FilterResults(FrozenRepresentation):
"""
Results from applying the Kalman filter to a state space model.
Parameters
----------
model : Representation
A Statespace representation
Attributes
----------
nobs : int
Number of observations.
k_endog : int
The dimension of the observation series.
k_states : int
The dimension of the unobserved state process.
k_posdef : int
The dimension of a guaranteed positive definite
covariance matrix describing the shocks in the
        transition equation.
dtype : dtype
Datatype of representation matrices
prefix : str
BLAS prefix of representation matrices
shapes : dictionary of name,tuple
A dictionary recording the shapes of each of the
representation matrices as tuples.
endog : array
The observation vector.
design : array
The design matrix, :math:`Z`.
obs_intercept : array
The intercept for the observation equation, :math:`d`.
obs_cov : array
The covariance matrix for the observation equation :math:`H`.
transition : array
The transition matrix, :math:`T`.
state_intercept : array
The intercept for the transition equation, :math:`c`.
selection : array
The selection matrix, :math:`R`.
state_cov : array
The covariance matrix for the state equation :math:`Q`.
missing : array of bool
An array of the same size as `endog`, filled
with boolean values that are True if the
corresponding entry in `endog` is NaN and False
otherwise.
nmissing : array of int
An array of size `nobs`, where the ith entry
is the number (between 0 and `k_endog`) of NaNs in
the ith row of the `endog` array.
time_invariant : bool
Whether or not the representation matrices are time-invariant
initialization : str
Kalman filter initialization method.
initial_state : array_like
        The state vector used to initialize the Kalman filter.
initial_state_cov : array_like
        The state covariance matrix used to initialize the Kalman filter.
filter_method : int
Bitmask representing the Kalman filtering method
inversion_method : int
Bitmask representing the method used to
invert the forecast error covariance matrix.
stability_method : int
Bitmask representing the methods used to promote
numerical stability in the Kalman filter
recursions.
conserve_memory : int
Bitmask representing the selected memory conservation method.
filter_timing : int
Whether or not to use the alternate timing convention.
tolerance : float
The tolerance at which the Kalman filter
determines convergence to steady-state.
loglikelihood_burn : int
The number of initial periods during which
the loglikelihood is not recorded.
converged : bool
Whether or not the Kalman filter converged.
period_converged : int
The time period in which the Kalman filter converged.
filtered_state : array
The filtered state vector at each time period.
filtered_state_cov : array
The filtered state covariance matrix at each time period.
predicted_state : array
The predicted state vector at each time period.
predicted_state_cov : array
The predicted state covariance matrix at each time period.
kalman_gain : array
The Kalman gain at each time period.
forecasts : array
The one-step-ahead forecasts of observations at each time period.
forecasts_error : array
The forecast errors at each time period.
forecasts_error_cov : array
The forecast error covariance matrices at each time period.
llf_obs : array
The loglikelihood values at each time period.
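    Examples
    --------
    Illustrative sketch only (assumes `mod` is a fully specified KalmanFilter
    instance with data bound; not a tested doctest):
    >>> res = mod.filter()
    >>> res.llf_obs.sum()              # joint loglikelihood of the sample
    >>> res.forecasts_error_cov.shape  # (k_endog, k_endog, nobs)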
"""
_filter_attributes = [
'filter_method', 'inversion_method', 'stability_method',
'conserve_memory', 'filter_timing', 'tolerance', 'loglikelihood_burn',
'converged', 'period_converged', 'filtered_state',
'filtered_state_cov', 'predicted_state', 'predicted_state_cov',
'tmp1', 'tmp2', 'tmp3', 'tmp4', 'forecasts',
'forecasts_error', 'forecasts_error_cov', 'llf_obs',
'collapsed_forecasts', 'collapsed_forecasts_error',
'collapsed_forecasts_error_cov',
]
_filter_options = (
KalmanFilter.filter_methods + KalmanFilter.stability_methods +
KalmanFilter.inversion_methods + KalmanFilter.memory_options
)
_attributes = FrozenRepresentation._model_attributes + _filter_attributes
def __init__(self, model):
super(FilterResults, self).__init__(model)
# Setup caches for uninitialized objects
self._kalman_gain = None
self._standardized_forecasts_error = None
def update_representation(self, model, only_options=False):
"""
Update the results to match a given model
Parameters
----------
model : Representation
The model object from which to take the updated values.
only_options : boolean, optional
If set to true, only the filter options are updated, and the state
space representation is not updated. Default is False.
Notes
-----
This method is rarely required except for internal usage.
"""
if not only_options:
super(FilterResults, self).update_representation(model)
# Save the options as boolean variables
for name in self._filter_options:
setattr(self, name, getattr(model, name, None))
def update_filter(self, kalman_filter):
"""
Update the filter results
Parameters
----------
kalman_filter : KalmanFilter
The model object from which to take the updated values.
Notes
-----
This method is rarely required except for internal usage.
"""
# State initialization
self.initial_state = np.array(
kalman_filter.model.initial_state, copy=True
)
self.initial_state_cov = np.array(
kalman_filter.model.initial_state_cov, copy=True
)
# Save Kalman filter parameters
self.filter_method = kalman_filter.filter_method
self.inversion_method = kalman_filter.inversion_method
self.stability_method = kalman_filter.stability_method
self.conserve_memory = kalman_filter.conserve_memory
self.filter_timing = kalman_filter.filter_timing
self.tolerance = kalman_filter.tolerance
self.loglikelihood_burn = kalman_filter.loglikelihood_burn
# Save Kalman filter output
self.converged = bool(kalman_filter.converged)
self.period_converged = kalman_filter.period_converged
self.filtered_state = np.array(kalman_filter.filtered_state, copy=True)
self.filtered_state_cov = np.array(
kalman_filter.filtered_state_cov, copy=True
)
self.predicted_state = np.array(
kalman_filter.predicted_state, copy=True
)
self.predicted_state_cov = np.array(
kalman_filter.predicted_state_cov, copy=True
)
# Reset caches
has_missing = np.sum(self.nmissing) > 0
if not self._compatibility_mode and not (self.memory_no_std_forecast or
self.invert_lu or
self.solve_lu or
self.filter_collapsed):
if has_missing:
self._standardized_forecasts_error = np.array(
reorder_missing_vector(
kalman_filter.standardized_forecast_error,
self.missing, prefix=self.prefix))
else:
self._standardized_forecasts_error = np.array(
kalman_filter.standardized_forecast_error, copy=True)
else:
self._standardized_forecasts_error = None
if not self._compatibility_mode:
# In the partially missing data case, all entries will
# be in the upper left submatrix rather than the correct placement
# Re-ordering does not make sense in the collapsed case.
if has_missing and (not self.memory_no_gain and
not self.filter_collapsed):
self._kalman_gain = np.array(reorder_missing_matrix(
kalman_filter.kalman_gain, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp1 = np.array(reorder_missing_matrix(
kalman_filter.tmp1, self.missing, reorder_cols=True,
prefix=self.prefix))
self.tmp2 = np.array(reorder_missing_vector(
kalman_filter.tmp2, self.missing, prefix=self.prefix))
self.tmp3 = np.array(reorder_missing_matrix(
kalman_filter.tmp3, self.missing, reorder_rows=True,
prefix=self.prefix))
self.tmp4 = np.array(reorder_missing_matrix(
kalman_filter.tmp4, self.missing, reorder_cols=True,
reorder_rows=True, prefix=self.prefix))
else:
self._kalman_gain = np.array(
kalman_filter.kalman_gain, copy=True)
self.tmp1 = np.array(kalman_filter.tmp1, copy=True)
self.tmp2 = np.array(kalman_filter.tmp2, copy=True)
self.tmp3 = np.array(kalman_filter.tmp3, copy=True)
self.tmp4 = np.array(kalman_filter.tmp4, copy=True)
else:
self._kalman_gain = None
        # Note: use forecasts rather than forecast, so as not to interfere
# with the `forecast` methods in subclasses
self.forecasts = np.array(kalman_filter.forecast, copy=True)
self.forecasts_error = np.array(
kalman_filter.forecast_error, copy=True
)
self.forecasts_error_cov = np.array(
kalman_filter.forecast_error_cov, copy=True
)
self.llf_obs = np.array(kalman_filter.loglikelihood, copy=True)
# If there was missing data, save the original values from the Kalman
# filter output, since below will set the values corresponding to
# the missing observations to nans.
self.missing_forecasts = None
self.missing_forecasts_error = None
self.missing_forecasts_error_cov = None
if np.sum(self.nmissing) > 0:
            # Copy the provided arrays (which are from the Kalman filter dataset)
# into new variables
self.missing_forecasts = np.copy(self.forecasts)
self.missing_forecasts_error = np.copy(self.forecasts_error)
self.missing_forecasts_error_cov = (
np.copy(self.forecasts_error_cov)
)
# Save the collapsed values
self.collapsed_forecasts = None
self.collapsed_forecasts_error = None
self.collapsed_forecasts_error_cov = None
if self.filter_collapsed:
# Copy the provided arrays (which are from the collapsed dataset)
# into new variables
self.collapsed_forecasts = self.forecasts[:self.k_states, :]
self.collapsed_forecasts_error = (
self.forecasts_error[:self.k_states, :]
)
self.collapsed_forecasts_error_cov = (
self.forecasts_error_cov[:self.k_states, :self.k_states, :]
)
# Recreate the original arrays (which should be from the original
# dataset) in the appropriate dimension
self.forecasts = np.zeros((self.k_endog, self.nobs))
self.forecasts_error = np.zeros((self.k_endog, self.nobs))
self.forecasts_error_cov = (
np.zeros((self.k_endog, self.k_endog, self.nobs))
)
# Fill in missing values in the forecast, forecast error, and
# forecast error covariance matrix (this is required due to how the
# Kalman filter implements observations that are either partly or
# completely missing)
# Construct the predictions, forecasts
if not (self.memory_no_forecast or self.memory_no_predicted):
for t in range(self.nobs):
design_t = 0 if self.design.shape[2] == 1 else t
obs_cov_t = 0 if self.obs_cov.shape[2] == 1 else t
obs_intercept_t = 0 if self.obs_intercept.shape[1] == 1 else t
# For completely missing observations, the Kalman filter will
# produce forecasts, but forecast errors and the forecast
# error covariance matrix will be zeros - make them nan to
# improve clarity of results.
if self.nmissing[t] > 0:
mask = ~self.missing[:, t].astype(bool)
# We can recover forecasts
# For partially missing observations, the Kalman filter
# will produce all elements (forecasts, forecast errors,
# forecast error covariance matrices) as usual, but their
# dimension will only be equal to the number of non-missing
# elements, and their location in memory will be in the
# first blocks (e.g. for the forecasts_error, the first
# k_endog - nmissing[t] columns will be filled in),
# regardless of which endogenous variables they refer to
                    # (i.e. the non-missing endogenous variables for that
# observation). Furthermore, the forecast error covariance
# matrix is only valid for those elements. What is done is
# to set all elements to nan for these observations so that
# they are flagged as missing. The variables
# missing_forecasts, etc. then provide the forecasts, etc.
# provided by the Kalman filter, from which the data can be
# retrieved if desired.
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t], self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = np.nan
self.forecasts_error[mask, t] = (
self.endog[mask, t] - self.forecasts[mask, t])
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
# In the collapsed case, everything just needs to be rebuilt
# for the original observed data, since the Kalman filter
# produced these values for the collapsed data.
elif self.filter_collapsed:
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t], self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = (
self.endog[:, t] - self.forecasts[:, t]
)
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
@property
def kalman_gain(self):
"""
Kalman gain matrices
"""
if self._kalman_gain is None:
# k x n
self._kalman_gain = np.zeros(
(self.k_states, self.k_endog, self.nobs), dtype=self.dtype)
for t in range(self.nobs):
# In the case of entirely missing observations, let the Kalman
# gain be zeros.
if self.nmissing[t] == self.k_endog:
continue
design_t = 0 if self.design.shape[2] == 1 else t
transition_t = 0 if self.transition.shape[2] == 1 else t
if self.nmissing[t] == 0:
self._kalman_gain[:, :, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[:, :, design_t]),
np.linalg.inv(self.forecasts_error_cov[:, :, t])
)
)
else:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
self._kalman_gain[:, mask, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[mask, :, design_t]),
np.linalg.inv(F[:, :, 0])
)
)
return self._kalman_gain
@property
def standardized_forecasts_error(self):
"""
Standardized forecast errors
Notes
-----
The forecast errors produced by the Kalman filter are
.. math::
v_t \sim N(0, F_t)
Hypothesis tests are usually applied to the standardized residuals
.. math::
v_t^s = B_t v_t \sim N(0, I)
where :math:`B_t = L_t^{-1}` and :math:`F_t = L_t L_t'`; then
:math:`F_t^{-1} = (L_t')^{-1} L_t^{-1} = B_t' B_t`; :math:`B_t`
and :math:`L_t` are lower triangular. Finally,
:math:`B_t v_t \sim N(0, B_t F_t B_t')` and
:math:`B_t F_t B_t' = L_t^{-1} L_t L_t' (L_t')^{-1} = I`.
Thus we can rewrite :math:`v_t^s = L_t^{-1} v_t` or
:math:`L_t v_t^s = v_t`; the latter equation is the form required to
use a linear solver to recover :math:`v_t^s`. Since :math:`L_t` is
lower triangular, we can use a triangular solver (?TRTRS).
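        Examples
        --------
        The per-period standardization can be sketched directly with SciPy
        (illustrative only; ``F`` and ``v`` stand in for
        ``forecasts_error_cov[:, :, t]`` and ``forecasts_error[:, t]``):
        >>> import numpy as np
        >>> from scipy import linalg
        >>> F = np.array([[2.0, 0.5], [0.5, 1.0]])
        >>> v = np.array([1.0, -0.5])
        >>> L = np.linalg.cholesky(F)                          # F = L L'
        >>> v_std = linalg.solve_triangular(L, v, lower=True)  # solves L v^s = v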
"""
if self._standardized_forecasts_error is None:
if self.k_endog == 1:
self._standardized_forecasts_error = (
self.forecasts_error /
self.forecasts_error_cov[0, 0, :]**0.5)
else:
from scipy import linalg
self._standardized_forecasts_error = np.zeros(
self.forecasts_error.shape, dtype=self.dtype)
for t in range(self.forecasts_error_cov.shape[2]):
if self.nmissing[t] > 0:
self._standardized_forecasts_error[:, t] = np.nan
if self.nmissing[t] < self.k_endog:
mask = ~self.missing[:, t].astype(bool)
F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
upper, _ = linalg.cho_factor(F[:, :, 0])
self._standardized_forecasts_error[mask, t] = (
linalg.solve_triangular(
upper, self.forecasts_error[mask, t], trans=1))
return self._standardized_forecasts_error
def predict(self, start=None, end=None, dynamic=None, **kwargs):
r"""
In-sample and out-of-sample prediction for state space models generally
Parameters
----------
start : int, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
dynamic : int, optional
Offset relative to `start` at which to begin dynamic prediction.
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
**kwargs
If the prediction range is outside of the sample range, any
of the state space representation matrices that are time-varying
must have updated values provided for the out-of-sample range.
            For example, if `obs_intercept` is a time-varying component and
the prediction range extends 10 periods beyond the end of the
sample, a (`k_endog` x 10) matrix must be provided with the new
intercept values.
Returns
-------
results : PredictionResults
A PredictionResults object.
Notes
-----
All prediction is performed by applying the deterministic part of the
measurement equation using the predicted state variables.
Out-of-sample prediction first applies the Kalman filter to missing
data for the number of periods desired to obtain the predicted states.
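        Examples
        --------
        Illustrative sketch only (not a tested doctest); the local-level
        specification below is an assumption for demonstration:
        >>> import numpy as np
        >>> mod = KalmanFilter(np.array([[0.3, 0.5, 0.1, 0.2, 0.4]]),
        ...                    k_states=1, k_posdef=1)
        >>> mod['design', 0, 0] = 1.
        >>> mod['obs_cov', 0, 0] = 1.
        >>> mod['transition', 0, 0] = 1.
        >>> mod['selection', 0, 0] = 1.
        >>> mod['state_cov', 0, 0] = 1.
        >>> mod.initialize_approximate_diffuse()
        >>> res = mod.filter()
        >>> pred = res.predict(start=0, end=8)  # 5 in-sample + 3 forecasts
        >>> pred.forecasts.shape                # (1, 8)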
"""
# Cannot predict if we do not have appropriate arrays
if self.memory_no_forecast or self.memory_no_predicted:
raise ValueError('Predict is not possible if memory conservation'
' has been used to avoid storing forecasts or'
' predicted values.')
# Get the start and the end of the entire prediction range
if start is None:
start = 0
elif start < 0:
raise ValueError('Cannot predict values previous to the sample.')
if end is None:
end = self.nobs
        # Prediction and forecasting are performed by iterating the Kalman
        # filter through the entire range [0, end].
# Then, everything is returned corresponding to the range [start, end].
# In order to perform the calculations, the range is separately split
# up into the following categories:
# - static: (in-sample) the Kalman filter is run as usual
# - dynamic: (in-sample) the Kalman filter is run, but on missing data
# - forecast: (out-of-sample) the Kalman filter is run, but on missing
# data
# Short-circuit if end is before start
if end <= start:
raise ValueError('End of prediction must be after start.')
# Get the number of forecasts to make after the end of the sample
nforecast = max(0, end - self.nobs)
# Get the number of dynamic prediction periods
# If `dynamic=True`, then assume that we want to begin dynamic
# prediction at the start of the sample prediction.
if dynamic is True:
dynamic = 0
# If `dynamic=False`, then assume we want no dynamic prediction
if dynamic is False:
dynamic = None
ndynamic = 0
if dynamic is not None:
# Replace the relative dynamic offset with an absolute offset
dynamic = start + dynamic
# Validate the `dynamic` parameter
if dynamic < 0:
raise ValueError('Dynamic prediction cannot begin prior to the'
' first observation in the sample.')
elif dynamic > end:
warn('Dynamic prediction specified to begin after the end of'
' prediction, and so has no effect.', ValueWarning)
dynamic = None
elif dynamic > self.nobs:
warn('Dynamic prediction specified to begin during'
' out-of-sample forecasting period, and so has no'
' effect.', ValueWarning)
dynamic = None
# Get the total size of the desired dynamic forecasting component
# Note: the first `dynamic` periods of prediction are actually
# *not* dynamic, because dynamic prediction begins at observation
# `dynamic`.
if dynamic is not None:
ndynamic = max(0, min(end, self.nobs) - dynamic)
# Get the number of in-sample static predictions
nstatic = min(end, self.nobs) if dynamic is None else dynamic
# Construct the design and observation intercept and covariance
# matrices for start-npadded:end. If not time-varying in the original
# model, then they will be copied over if none are provided in
# `kwargs`. Otherwise additional matrices must be provided in `kwargs`.
representation = {}
for name, shape in self.shapes.items():
if name == 'obs':
continue
representation[name] = getattr(self, name)
# Update the matrices from kwargs for forecasts
warning = ('Model has time-invariant %s matrix, so the %s'
' argument to `predict` has been ignored.')
exception = ('Forecasting for models with time-varying %s matrix'
' requires an updated time-varying matrix for the'
' period to be forecasted.')
if nforecast > 0:
for name, shape in self.shapes.items():
if name == 'obs':
continue
if representation[name].shape[-1] == 1:
if name in kwargs:
warn(warning % (name, name), ValueWarning)
elif name not in kwargs:
raise ValueError(exception % name)
else:
mat = np.asarray(kwargs[name])
if len(shape) == 2:
validate_vector_shape(name, mat.shape,
shape[0], nforecast)
if mat.ndim < 2 or not mat.shape[1] == nforecast:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
else:
validate_matrix_shape(name, mat.shape, shape[0],
shape[1], nforecast)
if mat.ndim < 3 or not mat.shape[2] == nforecast:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
# Update the matrices from kwargs for dynamic prediction in the case
# that `end` is less than `nobs` and `dynamic` is less than `end`. In
# this case, any time-varying matrices in the default `representation`
# will be too long, causing an error to be thrown below in the
# KalmanFilter(...) construction call, because the endog has length
# nstatic + ndynamic + nforecast, whereas the time-varying matrices
# from `representation` have length nobs.
if ndynamic > 0 and end < self.nobs:
for name, shape in self.shapes.items():
if not name == 'obs' and representation[name].shape[-1] > 1:
representation[name] = representation[name][..., :end]
# Construct the predicted state and covariance matrix for each time
# period depending on whether that time period corresponds to
# one-step-ahead prediction, dynamic prediction, or out-of-sample
# forecasting.
# If we only have simple prediction, then we can use the already saved
# Kalman filter output
if ndynamic == 0 and nforecast == 0:
results = self
else:
# Construct the new endogenous array.
endog = np.empty((self.k_endog, ndynamic + nforecast))
endog.fill(np.nan)
endog = np.asfortranarray(np.c_[self.endog[:, :nstatic], endog])
# Setup the new statespace representation
model_kwargs = {
'filter_method': self.filter_method,
'inversion_method': self.inversion_method,
'stability_method': self.stability_method,
'conserve_memory': self.conserve_memory,
'filter_timing': self.filter_timing,
'tolerance': self.tolerance,
'loglikelihood_burn': self.loglikelihood_burn
}
model_kwargs.update(representation)
model = KalmanFilter(
endog, self.k_states, self.k_posdef, **model_kwargs
)
model.initialize_known(
self.initial_state,
self.initial_state_cov
)
model._initialize_filter()
model._initialize_state()
results = self._predict(nstatic, ndynamic, nforecast, model)
return PredictionResults(results, start, end, nstatic, ndynamic,
nforecast)
def _predict(self, nstatic, ndynamic, nforecast, model):
# Note: this doesn't use self, and can either be a static method or
# moved outside the class altogether.
# Get the underlying filter
kfilter = model._kalman_filter
# Save this (which shares memory with the memoryview on which the
# Kalman filter will be operating) so that we can replace actual data
# with predicted data during dynamic forecasting
endog = model._representations[model.prefix]['obs']
for t in range(kfilter.model.nobs):
# Run the Kalman filter for the first `nstatic` periods (for
# which dynamic computation will not be performed)
if t < nstatic:
next(kfilter)
# Perform dynamic prediction
elif t < nstatic + ndynamic:
design_t = 0 if model.design.shape[2] == 1 else t
obs_intercept_t = 0 if model.obs_intercept.shape[1] == 1 else t
# Unconditional value is the intercept (often zeros)
endog[:, t] = model.obs_intercept[:, obs_intercept_t]
# If t > 0, then we can condition the forecast on the state
if t > 0:
# Predict endog[:, t] given `predicted_state` calculated in
# previous iteration (i.e. t-1)
endog[:, t] += np.dot(
model.design[:, :, design_t],
kfilter.predicted_state[:, t]
)
# Advance Kalman filter
next(kfilter)
# Perform any (one-step-ahead) forecasting
else:
next(kfilter)
# Return the predicted state and predicted state covariance matrices
results = FilterResults(model)
results.update_representation(model)
results.update_filter(kfilter)
return results
class PredictionResults(FilterResults):
r"""
Results of in-sample and out-of-sample prediction for state space models
generally
Parameters
----------
results : FilterResults
Output from filtering, corresponding to the prediction desired
start : int
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
nstatic : int
Number of in-sample static predictions (these are always the first
elements of the prediction output).
ndynamic : int
Number of in-sample dynamic predictions (these always follow the static
predictions directly, and are directly followed by the forecasts).
nforecast : int
        Number of out-of-sample forecasts (these always follow the dynamic
predictions directly).
Attributes
----------
npredictions : int
Number of observations in the predicted series; this is not necessarily
the same as the number of observations in the original model from which
prediction was performed.
start : int
Zero-indexed observation number at which to start prediction,
i.e., the first predict will be at `start`; this is relative to the
original model from which prediction was performed.
end : int
Zero-indexed observation number at which to end prediction,
i.e., the last predict will be at `end`; this is relative to the
original model from which prediction was performed.
nstatic : int
Number of in-sample static predictions.
ndynamic : int
Number of in-sample dynamic predictions.
nforecast : int
        Number of out-of-sample forecasts.
endog : array
The observation vector.
design : array
The design matrix, :math:`Z`.
obs_intercept : array
The intercept for the observation equation, :math:`d`.
obs_cov : array
The covariance matrix for the observation equation :math:`H`.
transition : array
The transition matrix, :math:`T`.
state_intercept : array
The intercept for the transition equation, :math:`c`.
selection : array
The selection matrix, :math:`R`.
state_cov : array
The covariance matrix for the state equation :math:`Q`.
filtered_state : array
The filtered state vector at each time period.
filtered_state_cov : array
The filtered state covariance matrix at each time period.
predicted_state : array
The predicted state vector at each time period.
predicted_state_cov : array
The predicted state covariance matrix at each time period.
forecasts : array
The one-step-ahead forecasts of observations at each time period.
forecasts_error : array
The forecast errors at each time period.
forecasts_error_cov : array
The forecast error covariance matrices at each time period.
Notes
-----
    The provided ranges must be conformable, meaning that
    `end - start == nstatic + ndynamic + nforecast` must hold.
This class is essentially a view to the FilterResults object, but
returning the appropriate ranges for everything.
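    Examples
    --------
    Illustrative sketch only (assumes `res` is an existing FilterResults
    instance for a model with `nobs` observations):
    >>> pred = res.predict(start=0, end=res.nobs + 5)
    >>> pred.forecasts[:, -5:]   # the out-of-sample portion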
"""
representation_attributes = [
        'endog', 'design', 'obs_intercept',
'obs_cov', 'transition', 'state_intercept', 'selection',
'state_cov'
]
filter_attributes = [
'filtered_state', 'filtered_state_cov',
'predicted_state', 'predicted_state_cov',
'forecasts', 'forecasts_error', 'forecasts_error_cov'
]
def __init__(self, results, start, end, nstatic, ndynamic, nforecast):
# Save the filter results object
self.results = results
# Save prediction ranges
        self.npredictions = end - start
self.start = start
self.end = end
self.nstatic = nstatic
self.ndynamic = ndynamic
self.nforecast = nforecast
def __getattr__(self, attr):
"""
Provide access to the representation and filtered output in the
appropriate range (`start` - `end`).
"""
# Prevent infinite recursive lookups
if attr[0] == '_':
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
_attr = '_' + attr
# Cache the attribute
if not hasattr(self, _attr):
if attr == 'endog' or attr in self.filter_attributes:
# Get a copy
value = getattr(self.results, attr).copy()
# Subset to the correct time frame
value = value[..., self.start:self.end]
elif attr in self.representation_attributes:
value = getattr(self.results, attr).copy()
# If a time-invariant matrix, return it. Otherwise, subset to
# the correct period.
if value.shape[-1] == 1:
value = value[..., 0]
else:
value = value[..., self.start:self.end]
else:
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
setattr(self, _attr, value)
return getattr(self, _attr)
| bsd-3-clause | -6,798,554,260,335,441,000 | -1,945,769,810,443,826,700 | 41.340876 | 79 | 0.594628 | false |
rjw57/cubbie | migrations/versions/316bb58e84f_add_user_identities.py | 1 | 1110 | """add user_identities
Revision ID: 316bb58e84f
Revises: 38c8ec357e0
Create Date: 2015-03-11 01:40:12.157458
"""
# revision identifiers, used by Alembic.
revision = '316bb58e84f'
down_revision = '38c8ec357e0'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user_identities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('provider', sa.Text(), nullable=False),
sa.Column('provider_user_id', sa.Text(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('idx_user_identities_provider_provider_id', 'user_identities', ['provider', 'provider_user_id'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('idx_user_identities_provider_provider_id', table_name='user_identities')
op.drop_table('user_identities')
### end Alembic commands ###
| mit | 1,623,178,689,735,911,200 | 1,812,499,397,922,364,700 | 30.714286 | 130 | 0.684685 | false |
njl/pycon | symposion/schedule/tests/factories.py | 3 | 1750 | import datetime
import factory
import factory.fuzzy
from pycon.tests.factories import PyConTutorialProposalFactory
from symposion.conference.models import Section
from symposion.conference.tests.factories import SectionFactory
from symposion.speakers.tests.factories import SpeakerFactory
from ..models import Presentation, Slot, SlotKind, Day, Schedule
class ScheduleFactory(factory.DjangoModelFactory):
class Meta:
model = Schedule
section = factory.SubFactory(SectionFactory)
class DayFactory(factory.DjangoModelFactory):
class Meta:
model = Day
schedule = factory.SubFactory(ScheduleFactory)
date = factory.fuzzy.FuzzyDate(start_date=datetime.date(1900, 1, 1))
class SlotKindFactory(factory.DjangoModelFactory):
class Meta:
model = SlotKind
schedule = factory.SubFactory(ScheduleFactory)
label = factory.fuzzy.FuzzyText()
class SlotFactory(factory.DjangoModelFactory):
class Meta:
model = Slot
day = factory.SubFactory(DayFactory)
# .kind and .day both need to point at the same schedule
kind = factory.SubFactory(
SlotKindFactory,
schedule=factory.LazyAttribute(lambda kind: kind.factory_parent.day.schedule)
)
start = factory.LazyAttribute(lambda n: datetime.time())
end = factory.LazyAttribute(lambda n: datetime.time())
class PresentationFactory(factory.DjangoModelFactory):
class Meta:
model = Presentation
title = 'Presentation'
description = 'Description'
abstract = 'Abstract'
speaker = factory.SubFactory(SpeakerFactory)
proposal_base = factory.SubFactory(PyConTutorialProposalFactory)
section = Section.objects.get(slug='tutorials')
slot = factory.SubFactory(SlotFactory)
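# Illustrative usage sketch (an assumption, not part of the original module;
# requires a configured Django test database):
#
#     presentation = PresentationFactory()
#     slot = SlotFactory(day__date=datetime.date(2016, 5, 28))
#
# A single PresentationFactory() call builds the speaker, proposal, schedule,
# day, slot kind and slot it depends on via the SubFactory declarations above.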
| bsd-3-clause | 2,644,576,952,924,643,300 | -8,091,949,764,702,611,000 | 28.661017 | 85 | 0.746857 | false |
cwgreene/Nanostructure-Simulator | utils/plot_trajectories.py | 1 | 1140 | import os
import sys
import re
import pylab
def parse_trajectory_line(line):
trajectory = []
for x,y in re.findall("\(([0-9.]+), ([0-9.]+)\)",line):
trajectory.append((float(x),float(y)))
return trajectory
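# Illustrative example (not part of the original file): given a line such as
#   "(1.0, 2.0) (3.5, 4.25)"
# parse_trajectory_line returns [(1.0, 2.0), (3.5, 4.25)].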
def generate_trajectories(file):
	# get rid of the first two lines
file.readline()
file.readline()
#parse each line
for line in file:
yield parse_trajectory_line(line)
def open_trajectory_file(n):
for filename in os.listdir("results"):
if re.match(str(n)+"traj",filename):
return open("results/"+filename)
raise "File not found"
def display_trajectories(n):
input =""
file = open_trajectory_file(n)
trajectory_gen = generate_trajectories(file)
trajectory = trajectory_gen.next()
interactive = True
i = 0
while input != 'q':
first = map(lambda x: x[0],trajectory)
second = map(lambda x: x[1],trajectory)
pylab.plot(first,second)
if interactive:
input = raw_input()
if input == "go":
i += 1
interactive=False
if i %100 == 0:
print i
raw_input()
try:
trajectory=trajectory_gen.next()
		except StopIteration:
print "Done"
break
if __name__=="__main__":
display_trajectories(sys.argv[1])
| mit | 8,428,924,565,387,101,000 | -1,776,970,422,128,848,600 | 20.923077 | 56 | 0.669298 | false |
danieljaouen/ansible | lib/ansible/plugins/inventory/auto.py | 25 | 2196 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: auto
plugin_type: inventory
author:
- Matt Davis <@nitzmahone>
short_description: Loads and executes an inventory plugin specified in a YAML config
description:
- By whitelisting C(auto) as the final inventory plugin, any YAML inventory config file with a
C(plugin) key at its root will automatically cause the named plugin to be loaded and executed with that
config. This effectively provides automatic whitelisting of all installed/accessible inventory plugins.
- To disable this behavior, remove C(auto) from the C(INVENTORY_ENABLED) config element.
'''
EXAMPLES = '''
# This plugin is not intended for direct use; it is a fallback mechanism for automatic whitelisting of
# all installed inventory plugins.
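# Illustrative sketch (an assumption, not from the upstream docs): with 'auto'
# enabled, a YAML inventory source whose root contains a 'plugin' key, e.g.
#
# # demo.inventory.yml
# plugin: constructed
#
# is dispatched to the named plugin ('constructed' here is just an example).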
'''
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.plugins.loader import inventory_loader
class InventoryModule(BaseInventoryPlugin):
NAME = 'auto'
def verify_file(self, path):
if not path.endswith('.yml') and not path.endswith('.yaml'):
return False
return super(InventoryModule, self).verify_file(path)
def parse(self, inventory, loader, path, cache=True):
config_data = loader.load_from_file(path, cache=False)
plugin_name = config_data.get('plugin')
if not plugin_name:
raise AnsibleParserError("no root 'plugin' key found, '{0}' is not a valid YAML inventory plugin config file".format(path))
plugin = inventory_loader.get(plugin_name)
if not plugin:
raise AnsibleParserError("inventory config '{0}' specifies unknown plugin '{1}'".format(path, plugin_name))
if not plugin.verify_file(path):
raise AnsibleParserError("inventory config '{0}' could not be verified by plugin '{1}'".format(path, plugin_name))
plugin.parse(inventory, loader, path, cache=cache)
| gpl-3.0 | 9,201,741,196,603,140,000 | 2,400,689,529,161,310,000 | 38.927273 | 135 | 0.703552 | false |
jonathansick/androcmd | scripts/phat_baseline_test.py | 1 | 3612 | #!/usr/bin/env python
# encoding: utf-8
"""
Grid computation of dust attenuation for old vs. young stellar populations.
2015-05-12 - Created by Jonathan Sick
"""
import argparse
from androcmd.phatpipeline import PhatCatalog
from androcmd.baselineexp import SolarZPipeline, ThreeZPipeline
def main():
args = parse_args()
if args.pipeline == 'solarz':
# Use the single-Z solar pipeline
Pipeline = SolarZPipeline
elif args.pipeline == 'threez':
# Use the three-metallicity track pipeline
Pipeline = ThreeZPipeline
isoc = dict(isoc_kind='parsec_CAF09_v1.2S',
photsys_version='yang')
pipeline = Pipeline(brick=23,
root_dir=args.model_name,
isoc_args=isoc)
if args.fit is not None:
dataset = PhatCatalog(args.brick)
pipeline.fit(args.fit, [args.fit], dataset)
if args.plot_hess is not None:
from androcmd.baselineexp import plot_fit_hess_grid
dataset = PhatCatalog(args.brick)
plot_fit_hess_grid(args.plot_hess, pipeline, dataset)
if args.plot_diff is not None:
from androcmd.baselineexp import plot_diff_hess_grid
dataset = PhatCatalog(args.brick)
plot_diff_hess_grid(args.plot_diff, pipeline, dataset)
if args.plot_sfh is not None:
from androcmd.baselineexp import sfh_comparison_plot
dataset = PhatCatalog(args.brick)
sfh_comparison_plot(args.plot_sfh, pipeline, dataset)
if args.plot_zsfh is not None:
from androcmd.baselineexp import plot_sfh_metallicity_trends
dataset = PhatCatalog(args.brick)
for fit_key in args.plot_zsfh:
plot_path = "{model}_b{brick:d}_zsfh_{key}".format(
model=args.model_name, brick=args.brick, key=fit_key)
plot_sfh_metallicity_trends(plot_path, pipeline, dataset, fit_key)
if args.chi_table is not None:
from androcmd.baselineexp import tabulate_fit_chi
dataset = PhatCatalog(args.brick)
tabulate_fit_chi(args.chi_table, pipeline, dataset)
if args.plot_isoc is not None:
from androcmd.baselineexp import plot_isocs, plot_isocs_lewis
dataset = PhatCatalog(args.brick)
plot_isocs(args.plot_isoc, pipeline, dataset)
plot_isocs_lewis(args.plot_isoc + '_lewis', pipeline, dataset)
if args.plot_lock is not None:
from androcmd.baselineexp import plot_lockfile
plot_lockfile(args.plot_lock, pipeline)
def parse_args():
parser = argparse.ArgumentParser(
description="Model a brick with differential old/young dust.")
parser.add_argument('model_name')
parser.add_argument('brick', type=int)
parser.add_argument('--fit',
choices=['lewis', 'acs_rgb', 'acs_all',
'oir_all', 'ir_rgb'],
default=None)
parser.add_argument('--pipeline',
choices=['solarz', 'threez'],
default='solarz')
parser.add_argument('--plot-hess', default=None)
parser.add_argument('--plot-diff', default=None)
parser.add_argument('--plot-sfh', default=None)
parser.add_argument('--chi-table', default=None)
parser.add_argument('--plot-zsfh', nargs='*', default=None,
choices=['lewis', 'acs_rgb', 'acs_all',
'oir_all', 'ir_rgb'])
parser.add_argument('--plot-isoc', default=None)
parser.add_argument('--plot-lock', default=None)
return parser.parse_args()
if __name__ == '__main__':
main()
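# Illustrative invocation sketch (model name and output paths are assumptions,
# not part of the original script):
#
#     python phat_baseline_test.py my_model 23 --pipeline threez --fit lewis
#     python phat_baseline_test.py my_model 23 --plot-hess hess_grid
#
# The first call fits the 'lewis' plane with the three-metallicity pipeline;
# the second renders the Hess-diagram grid for an existing run.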
| mit | -8,070,963,336,533,866,000 | 177,485,913,798,454,820 | 35.484848 | 78 | 0.623477 | false |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/scipy/stats/_stats_mstats_common.py | 12 | 8157 | from collections import namedtuple
import numpy as np
from . import distributions
__all__ = ['_find_repeats', 'linregress', 'theilslopes']
def linregress(x, y=None):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length.
If only x is given (and y=None), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension.
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
rvalue : float
correlation coefficient
pvalue : float
two-sided p-value for a hypothesis test whose null hypothesis is
that the slope is zero.
stderr : float
Standard error of the estimated gradient.
See also
--------
optimize.curve_fit : Use non-linear least squares to fit a function to data.
optimize.leastsq : Minimize the sum of squares of a set of equations.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = np.random.random(10)
>>> y = np.random.random(10)
>>> slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
# To get coefficient of determination (r_squared)
>>> print("r-squared:", r_value**2)
('r-squared:', 0.080402268539028335)
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
slope = r_num / ssxm
intercept = ymean - slope*xmean
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
return LinregressResult(slope, intercept, r, prob, sterrest)
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
| mit | -3,471,896,539,351,905,000 | -5,758,386,771,509,563,000 | 33.129707 | 86 | 0.578031 | false |
lhellebr/spacewalk | backend/server/rhnLib.py | 1 | 8211 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import hashlib
import string
import base64
import posixpath
from spacewalk.common.rhnLib import parseRPMName
from spacewalk.common.rhnLog import log_debug
from spacewalk.common.rhnException import rhnFault
# architecture work
from rhnMapping import check_package_arch
def computeSignature(*fields):
# Init the hash
m = hashlib.new('sha256')
for i in fields:
# use str(i) since some of the fields may be non-string
m.update(str(i))
return base64.encodestring(m.digest()).rstrip()
# 'n_n-n-v.v.v-r_r.r:e.ARCH.rpm' ---> [n,v,r,e,a]
def parseRPMFilename(pkgFilename):
"""
IN: Package Name: xxx-yyy-ver.ver.ver-rel.rel_rel:e.ARCH.rpm (string)
Understood rules:
        o Name can have nearly any char, but end in a - (well separated by).
Any character; may include - as well.
o Version cannot have a -, but ends in one.
o Release should be an actual number, and can't have any -'s.
o Release can include the Epoch, e.g.: 2:4 (4 is the epoch)
        o Epoch: Can include anything except a - and the : separator???
XXX: Is epoch info above correct?
OUT: [n,e,v,r, arch].
"""
if type(pkgFilename) != type(''):
raise rhnFault(21, str(pkgFilename)) # Invalid arg.
pkgFilename = os.path.basename(pkgFilename)
# Check that this is a package NAME (with arch.rpm) and strip
# that crap off.
pkg = string.split(pkgFilename, '.')
# 'rpm' at end?
if string.lower(pkg[-1]) not in ['rpm', 'deb']:
raise rhnFault(21, 'neither an rpm nor a deb package name: %s' % pkgFilename)
# Valid architecture next?
if check_package_arch(pkg[-2]) is None:
raise rhnFault(21, 'Incompatible architecture found: %s' % pkg[-2])
_arch = pkg[-2]
# Nuke that arch.rpm.
pkg = string.join(pkg[:-2], '.')
ret = list(parseRPMName(pkg))
if ret:
ret.append(_arch)
return ret
# XXX TBD where to place this function - it has to be accessible from several
# places
def normalize_server_arch(arch):
log_debug(4, 'server arch', arch)
if arch is None:
return ''
arch = str(arch)
if '-' in arch:
# Already normalized
return arch
# Fix the arch if need be
suffix = '-redhat-linux'
arch = arch + suffix
return arch
class InvalidAction(Exception):
""" An error class to signal when we can not handle an action """
pass
class EmptyAction(Exception):
""" An error class that signals that we encountered an internal error
trying to handle an action through no fault of the client
"""
pass
class ShadowAction(Exception):
""" An error class for actions that should not get to the client """
pass
def transpose_to_hash(arr, column_names):
""" Handy function to transpose an array from row-based to column-based,
with named columns.
"""
result = []
for c in column_names:
result.append([])
colnum = len(column_names)
for r in arr:
if len(r) != colnum:
raise Exception(
"Mismatching number of columns: expected %s, got %s; %s" % (
colnum, len(r), r))
for i in range(len(r)):
result[i].append(r[i])
# Now build the hash labeled with the column names
rh = {}
for i in range(len(column_names)):
rh[column_names[i]] = result[i]
return rh
def get_package_path(nevra, org_id, source=0, prepend="", omit_epoch=None,
package_type='rpm', checksum_type=None, checksum=None):
""" Computes a package path, optionally prepending a prefix
The path will look like
<prefix>/<org_id>/checksum[:3]/n/e:v-r/a/checksum/n-v-r.a.rpm if not omit_epoch
<prefix>/<org_id>/checksum[:3]/n/v-r/a/checksum/n-v-r.a.rpm if omit_epoch
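        Example (illustrative, with made-up checksum data):
        >>> nevra = ('foo', '0', '1.2', '3', 'noarch')
        >>> get_package_path(nevra, 1, checksum_type='md5', checksum='abcdef0123')
        '/1/abc/foo/0:1.2-3/noarch/abcdef0123/foo-1.2-3.noarch.rpm'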
"""
name, epoch, version, release, pkgarch = nevra
# dirarch and pkgarch are special-cased for source rpms
if source:
dirarch = 'SRPMS'
else:
dirarch = pkgarch
if org_id in ['', None]:
org = "NULL"
else:
org = org_id
if not omit_epoch and epoch not in [None, '']:
version = str(epoch) + ':' + version
# normpath sanitizes the path (removing duplicated / and such)
template = os.path.normpath(prepend +
"/%s/%s/%s/%s-%s/%s/%s/%s-%s-%s.%s.%s")
return template % (org, checksum[:3], name, version, release, dirarch, checksum,
name, nevra[2], release, pkgarch, package_type)
# bug #161989
# It seems that our software was written specifically for rpms in far too many
# ways. Here's a little bit of a hack function that will return the package path
# (as in from get_package_path) but without the filename appended.
# This enables us to append an arbitrary file name that is not restricted to the
# form: name-version-release.arch.type
def get_package_path_without_package_name(nevra, org_id, prepend="",
checksum_type=None, checksum=None):
"""return a package path without the package name appended"""
return os.path.dirname(get_package_path(nevra, org_id, prepend=prepend,
checksum_type=checksum_type, checksum=checksum))
class CallableObj:
""" Generic callable object """
def __init__(self, name, func):
self.func = func
self.name = name
def __call__(self, *args, **kwargs):
return self.func(self.name, *args, **kwargs)
def make_evr(nvre, source=False):
""" IN: 'e:name-version-release' or 'name-version-release:e'
OUT: {'name':name, 'version':version, 'release':release, 'epoch':epoch }
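        Example (illustrative): both accepted epoch positions parse identically:
        >>> make_evr('2:bar-1.0-3') == make_evr('bar-1.0-3:2')
        True
        >>> make_evr('2:bar-1.0-3')['epoch']
        '2'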
"""
if ":" in nvre:
nvr, epoch = nvre.rsplit(":", 1)
if "-" in epoch:
nvr, epoch = epoch, nvr
else:
nvr, epoch = nvre, ""
nvr_parts = nvr.rsplit("-", 2)
if len(nvr_parts) != 3:
raise rhnFault(err_code=21, err_text="NVRE is missing name, version, or release.")
result = dict(zip(["name", "version", "release"], nvr_parts))
result["epoch"] = epoch
if source and result["release"].endswith(".src"):
result["release"] = result["release"][:-4]
return result
def _is_secure_path(path):
path = posixpath.normpath(path)
return not (path.startswith('/') or path.startswith('../'))
def get_crash_path(org_id, system_id, crash):
"""For a given org_id, system_id and crash, return relative path to a crash directory."""
path = os.path.join('systems', org_id, system_id, 'crashes', crash)
if _is_secure_path(path):
return path
else:
return None
def get_crashfile_path(org_id, system_id, crash, filename):
"""For a given org_id, system_id, crash and filename, return relative path to a crash file."""
path = os.path.join(get_crash_path(org_id, system_id, crash), filename)
if _is_secure_path(path):
return path
else:
return None
def get_action_path(org_id, system_id, action_id):
"""For a given org_id, system_id, and action_id, return relative path to a store directory."""
path = os.path.join('systems', str(org_id), str(system_id), 'actions', str(action_id))
if _is_secure_path(path):
return path
def get_actionfile_path(org_id, system_id, action_id, filename):
"""For a given org_id, system_id, action_id, and file, return relative path to a file."""
path = os.path.join(get_action_path(org_id, system_id, action_id), str(filename))
if _is_secure_path(path):
return path
| gpl-2.0 | -2,852,621,470,295,296,500 | -7,001,253,282,598,133,000 | 30.580769 | 98 | 0.629765 | false |
h4ck3rm1k3/MapNickAutotools | scons/scons-local-1.2.0/SCons/Tool/suncc.py | 12 | 1857 | """SCons.Tool.suncc
Tool-specific initialization for Sun Solaris (Forte) CC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/suncc.py 3842 2008/12/20 22:59:52 scons"
import SCons.Util
import cc
def generate(env):
"""
Add Builders and construction variables for Forte C and C++ compilers
to an Environment.
"""
cc.generate(env)
env['CXX'] = 'CC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -KPIC')
env['SHOBJPREFIX'] = 'so_'
env['SHOBJSUFFIX'] = '.o'
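# Typical use (illustrative, not part of this tool file): an SConstruct can
# request the tool explicitly, e.g.
#   env = Environment(tools=['default', 'suncc'])
# after which shared-object builds pick up the '-KPIC' flag and the 'so_'
# object prefix configured above.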
def exists(env):
return env.Detect('CC')
| lgpl-2.1 | 7,729,526,750,858,599,000 | -864,985,073,822,736,000 | 34.711538 | 83 | 0.731287 | false |
SabunMacTavish/CTF-Platform | api/autogenerators/rtfm.py | 2 | 1943 | __author__ = "Collin Petty"
__copyright__ = "Carnegie Mellon University"
__license__ = "MIT"
__maintainer__ = ["Collin Petty", "Peter Chapman"]
__credits__ = ["David Brumely", "Collin Petty", "Peter Chapman"]
__email__ = ["[email protected]", "[email protected]"]
__status__ = "Production"
import tempfile
import os
import random
import string
template_file = "rtfm.txt"
templates = "autogenerators/templates/"
def validate_dependencies():
print "DEPENDENCY CHECK - rtfm.py (autogen)"
if not os.path.exists(_template_path()):
print "ERROR - Read the Manual - Could not find the template file (%s)" % template_file
return False
return True
def generate():
template = open(_template_path(), 'r').read()
key = ''.join(random.choice(string.ascii_lowercase) for _ in range(12))
template = template.replace('###KEY###', key)
shift = random.randint(1, 26)
out_text = _caesar(template, shift)
output = tempfile.NamedTemporaryFile(delete=False, suffix=".txt")
output.write(out_text)
output.close()
return [os.path.abspath(output.name)], key, """<p>On the back of the broken panel you see a recovery\
<a href='###file_1_url###' target='_blank'>manual</a>. You need to find the emergency repair key in\
order to put the robot into <code>autoboot</code> mode, but it appears to be ciphered using a Caesar cipher.</p>"""
def _template_path():
return templates + template_file
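# Rough sketch of the cipher behaviour (comment only, added for clarity):
# _caesar() below shifts letters and digits by `shift` positions with
# wrap-around, e.g. _caesar('abc123', 2) -> 'cde345'; letters wrap modulo 26
# and digits modulo 10, everything else is passed through unchanged.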
def _caesar(text, shift):
ret = list()
for t in text:
t = ord(t)
if t in range(ord('a'), ord('z')+1):
ret.append(((t - ord('a') + shift) % 26) + ord('a'))
elif t in range(ord('A'), ord('Z')+1):
ret.append(((t - ord('A') + shift) % 26) + ord('A'))
elif t in range(ord('0'), ord('9')+1):
ret.append(((t - ord('0') + shift) % 10) + ord('0'))
else:
ret.append(t)
    return string.joinfields(map(chr, ret), "")
 | mit | -3,637,362,875,687,387,600 | -1,112,045,032,608,212,700 | 34.345455 | 119 | 0.607823 | false |
esthermm/odoomrp-wip | stock_quant_valuation/models/stock_quant.py | 8 | 1040 | # -*- coding: utf-8 -*-
# (c) 2016 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import fields, api, models
from openerp.addons import decimal_precision as dp
class StockQuant(models.Model):
_inherit = 'stock.quant'
@api.multi
@api.depends("product_id", "product_id.manual_standard_cost", "qty")
def _compute_manual_value(self):
for record in self:
record.manual_value = (record.product_id.manual_standard_cost *
record.qty)
@api.multi
@api.depends('cost', 'qty')
def _compute_real_value(self):
for record in self:
record.real_value = record.cost * record.qty
manual_value = fields.Float(
string="Manual Value", store=True, compute="_compute_manual_value",
digits=dp.get_precision('Product Price'))
real_value = fields.Float(
string="Real Value", store=True, compute="_compute_real_value",
digits=dp.get_precision('Product Price'))
| agpl-3.0 | 946,208,045,718,555,600 | -453,815,907,046,304,500 | 32.548387 | 75 | 0.631731 | false |
jy723/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/names.py | 215 | 5223 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
PRN_SEPARATOR = '/'
import re
def normalize_package_context(package_context):
package_context = package_context.strip()
while package_context.endswith(PRN_SEPARATOR):
package_context = package_context[:-1]
return package_context
#######################################################################
# RESOURCE NAMES
# resource names refer to entities in a file system
def resource_name(res_pkg_name, name, my_pkg=None):
"""
Convert package name + resource into a fully qualified resource name
@param res_pkg_name: name of package resource is located in
@type res_pkg_name: str
@param name: resource base name
@type name: str
@param my_pkg: name of package resource is being referred to
in. If specified, name will be returned in local form if
res_pkg_name is my_pkg
@type my_pkg: str
@return: name for resource
@rtype: str
"""
if res_pkg_name != my_pkg:
return res_pkg_name+PRN_SEPARATOR+name
return name
def resource_name_base(name):
"""
pkg/typeName -> typeName, typeName -> typeName
Convert fully qualified resource name into the package-less resource name
@param name: package resource name, e.g. 'std_msgs/String'
@type name: str
@return: resource name sans package-name scope
@rtype: str
"""
return name[name.rfind(PRN_SEPARATOR)+1:]
def resource_name_package(name):
"""
pkg/typeName -> pkg, typeName -> None
@param name: package resource name, e.g. 'std_msgs/String'
@type name: str
@return: package name of resource
@rtype: str
"""
if not PRN_SEPARATOR in name:
return None
return name[:name.find(PRN_SEPARATOR)]
def package_resource_name(name):
"""
Split a name into its package and resource name parts, e.g. 'std_msgs/String -> std_msgs, String'
@param name: package resource name, e.g. 'std_msgs/String'
@type name: str
@return: package name, resource name
@rtype: str
@raise ValueError: if name is invalid
"""
if PRN_SEPARATOR in name:
val = tuple(name.split(PRN_SEPARATOR))
if len(val) != 2:
raise ValueError("invalid name [%s]"%name)
else:
return val
else:
return '', name
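# Small usage sketch (added comment, not in the upstream file):
#   package_resource_name('std_msgs/String') -> ('std_msgs', 'String')
#   package_resource_name('String')          -> ('', 'String')
#   package_resource_name('a/b/c')           raises ValueError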
################################################################################
# NAME VALIDATORS
#ascii char followed by (alphanumeric, _, /)
RESOURCE_NAME_LEGAL_CHARS_P = re.compile('^[A-Za-z][\w_\/]*$')
def is_legal_resource_name(name):
"""
Check if name is a legal ROS name for filesystem resources
(alphabetical character followed by alphanumeric, underscore, or
forward slashes). This constraint is currently not being enforced,
but may start getting enforced in later versions of ROS.
@param name: Name
@type name: str
"""
# resource names can be unicode due to filesystem
if name is None:
return False
m = RESOURCE_NAME_LEGAL_CHARS_P.match(name)
# '//' check makes sure there isn't double-slashes
return m is not None and m.group(0) == name and not '//' in name
BASE_RESOURCE_NAME_LEGAL_CHARS_P = re.compile('^[A-Za-z][\w_]*$') #ascii char followed by (alphanumeric, _)
def is_legal_resource_base_name(name):
"""
Validates that name is a legal resource base name. A base name has
no package context, e.g. "String".
"""
# resource names can be unicode due to filesystem
if name is None:
return False
m = BASE_RESOURCE_NAME_LEGAL_CHARS_P.match(name)
return m is not None and m.group(0) == name
| gpl-3.0 | 238,071,106,978,954,560 | 4,239,515,901,800,762,000 | 35.02069 | 107 | 0.668773 | false |
INM-6/nest-git-migration | topology/examples/conncomp.py | 13 | 4213 | # -*- coding: utf-8 -*-
#
# conncomp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example
Create two 30x30 layers with nodes composed of one pyramidal cell
and one interneuron. Connect with two projections, one pyr->pyr, one
pyr->in, and visualize.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''
import pylab
pylab.ion()
import nest
import nest.topology as topo
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
# create two test layers
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
a = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
'elements': ['pyr', 'in']})
b = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
'elements': ['pyr', 'in']})
topo.ConnectLayers(a, b, {'connection_type': 'divergent',
'sources': {'model': 'pyr'},
'targets': {'model': 'pyr'},
'mask': {'circular': {'radius': 0.5}},
'kernel': 0.5,
'weights': 1.0,
'delays': 1.0})
topo.ConnectLayers(a, b, {'connection_type': 'divergent',
'sources': {'model': 'pyr'},
'targets': {'model': 'in'},
'mask': {'circular': {'radius': 1.0}},
'kernel': 0.2,
'weights': 1.0,
'delays': 1.0})
pylab.clf()
# plot targets of neurons in different grid locations
for ctr in [[15,15]]:
# obtain node id for center: pick first node of composite
ctr_id = topo.GetElement(a, ctr)
# get all projection targets of center neuron
tgts = [ci[1] for ci in nest.GetConnections(ctr_id)]
# get positions of targets
tpyr = pylab.array(tuple(zip(*[topo.GetPosition([n])[0] for n in tgts
if nest.GetStatus([n],'model')[0]=='pyr'])))
tin = pylab.array(tuple(zip(*[topo.GetPosition([n])[0] for n in tgts
if nest.GetStatus([n],'model')[0]=='in'])))
# scatter-plot
pylab.scatter(tpyr[0]-0.02, tpyr[1]-0.02, 20, 'b', zorder = 10)
pylab.scatter(tin[0] +0.02, tin[1] +0.02, 20, 'r', zorder = 10)
# mark locations with background grey circle
pylab.plot(tpyr[0],tpyr[1],'o',markerfacecolor=(0.7,0.7,0.7),
markersize=10,markeredgewidth=0,zorder=1,label='_nolegend_')
pylab.plot(tin[0], tin[1] ,'o',markerfacecolor=(0.7,0.7,0.7),
markersize=10,markeredgewidth=0,zorder=1,label='_nolegend_')
# mark sender position with transparent red circle
ctrpos = topo.GetPosition(ctr_id)[0]
pylab.gca().add_patch(pylab.Circle(ctrpos, radius=0.15, zorder = 99,
fc = 'r', alpha = 0.4, ec = 'none'))
# mark mask positions with open red/blue circles
pylab.gca().add_patch(pylab.Circle(ctrpos, radius=0.5, zorder = 2,
fc = 'none', ec = 'b', lw=3))
pylab.gca().add_patch(pylab.Circle(ctrpos, radius=1.0, zorder = 2,
fc = 'none', ec = 'r', lw=3))
# mark layer edge
pylab.gca().add_patch(pylab.Rectangle((-1.5,-1.5), 3.0, 3.0, zorder = 1,
fc = 'none', ec = 'k', lw=3))
# beautify
pylab.axes().set_xticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.axes().set_yticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.grid(True)
pylab.axis([-1.6, 1.6, -1.6, 1.6])
pylab.axes().set_aspect('equal', 'box')
| gpl-2.0 | 5,425,869,644,391,549,000 | -5,195,337,097,612,061,000 | 36.616071 | 75 | 0.569665 | false |
geomagpy/magpy | magpy/lib/format_dtu.py | 3 | 5567 | """
MagPy
Auxiliary input filter - WIC/WIK
Written by Roman Leonhardt June 2012
- contains test and read function, toDo: write function
"""
from __future__ import print_function
from magpy.stream import *
def isDTU1(filename):
"""
Checks whether a file is ASCII DTU (type1) format used within the DTU's FGE network
Characteristic features are:
"""
try:
temp = open(filename, 'rt').readline()
except:
return False
try:
if not temp.startswith('FILENAME: '):
elem = temp.split()
if len(elem) == 6:
try:
testtime = datetime.strptime(elem[0],"%H:%M:%S")
except:
return False
else:
return False
except:
return False
return True
def readDTU1(filename, headonly=False, **kwargs):
"""
Reading DTU1 format data.
Looks like:
FILENAME: GDH4_20091215.sec
INST. TYPE: Primary magnetometer
INSTRUMENT: FGE S0120 E0192
FILTER: Electronic lowpass
ADC: ICP 7017 vers. B2.3
SOFTWARE: FG_ComData vers. 3.04
CHANNELS: 6 Time,x,y,z,T1,T2
TIME 1 hh:mm:ss PC clock, UT, timeserver
x 400 nT/V variation horizontal magnetic north in nT
y 400 nT/V variation horizontal magnetic east in nT
z 400 nT/V variation vertical in nT
T1 0 Kelvin/v no temp sensor on pendulum
T2 320 Kelvin/V electronic temp in Kelvin, sensor: AD592
DATA:
00:00:01 124.04 134.08 -17.68 0.00 291.90
00:00:02 124.00 134.00 -17.68 0.00 291.90
00:00:03 124.08 134.00 -17.64 0.00 291.90
"""
fh = open(filename, 'rt')
# read file and split text into channels
data = []
getfile = True
key = None
stream = DataStream()
# Check whether header infromation is already present
headers = {}
# get day from filename (platform independent)
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
splitpath = os.path.split(filename)
daystring = splitpath[1].split('.')
daystring = daystring[0].split('_')
print(daystring[1])
try:
day = datetime.strftime(datetime.strptime(daystring[1] , "%Y%m%d"),"%Y-%m-%d")
except:
logging.warning("Wrong dateformat in Filename %s" % daystring[0])
return []
# Select only files within eventually defined time range
if starttime:
if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
if endtime:
if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
if getfile:
for line in fh:
elem = line.split()
if line.isspace():
# blank line
pass
elif line.startswith('FILENAME:'):
pass
elif line.startswith('INST. TYPE:'):
tmp = line.split(':')[1]
headers['InstrumentType'] = tmp.lstrip()
elif line.startswith('INSTRUMENT:'):
tmp = line.split(':')[1]
headers['Instrument'] = tmp.lstrip()
elif line.startswith('FILTER:'):
tmp = line.split(':')[1]
headers['Filter'] = tmp.lstrip()
elif line.startswith('ADC:'):
tmp = line.split(':')[1]
headers['ADC'] = tmp.lstrip()
elif line.startswith('SOFTWARE:'):
tmp = line.split(':')[1]
headers['Software'] = tmp.lstrip()
elif line.startswith('CHANNELS:'):
tmp = line.split(':')[1]
headers['Channels'] = tmp.lstrip()
elif line.startswith('TIME'):
pass
elif line.startswith('x'):
pass
elif line.startswith('y'):
pass
elif line.startswith('z'):
pass
elif line.startswith('T1'):
pass
elif line.startswith('T2'):
pass
elif line.startswith('DATA:'):
pass
elif headonly:
# skip data for option headonly
continue
else:
row = LineStruct()
try:
row.time=date2num(datetime.strptime(day+'T'+elem[0],"%Y-%m-%dT%H:%M:%S"))
try:
row.x = float(elem[1])
except:
row.x = float('nan')
try:
row.y = float(elem[2])
except:
row.y = float('nan')
try:
row.z = float(elem[3])
except:
row.z = float('nan')
try:
row.t1 = float(elem[4])
except:
row.t1 = float('nan')
try:
row.t2 = float(elem[5])
except:
row.t2 = float('nan')
except:
#raise ValueError, "Wrong date format in %s" % filename
pass
stream.add(row)
fh.close()
else:
headers = stream.header
stream =[]
return DataStream(stream, headers)
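# Minimal usage sketch (illustrative; the file name follows the
# '<station>_<YYYYMMDD>.sec' pattern shown in the docstring):
#   stream = readDTU1('/data/GDH4_20091215.sec')
# returns a DataStream whose header dict carries the INSTRUMENT/FILTER/...
# lines and whose rows hold time, x, y, z, t1 and t2 values; starttime and
# endtime keyword arguments filter whole files by the date in the name.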
| bsd-3-clause | 6,606,087,506,888,581,000 | -6,244,753,752,108,859,000 | 32.536145 | 140 | 0.491827 | false |
m-r-hunt/invaders | enemies.py | 1 | 6646 | # Invaders
# Copyright (C) 2013 Maximilian Hunt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os, random, pygame, projectiles, score_counter
class EnemySprite(pygame.sprite.Sprite):
# Class for one enemy invader.
def __init__(self, image, position, bullet_group):
# image: relative path to an image pygame can load
# position: (x, y) coordinates on screen
# bullet_group: pygame.sprite.Group to put fired bullets in
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(image)
self.position = position
self.rect = self.image.get_rect()
self.rect.center = position
self.bullet_group = bullet_group
def update(self, dv, score, collisions):
# Update this enemy. Should be called once per frame.
# dv: (x, y) vector for movement this frame
# score: a Score to increment on death
# collisions: a dictionary of collisions, possibly containing this object
# Handle any collisions given
if self in collisions:
death = False
for bullet in collisions[self]:
if (bullet.origin != self):
bullet.kill()
death = True
if (death == True):
score.increment()
self.kill()
# Update position
self.position = (self.position[0] + dv[0], self.position[1] + dv[1])
self.rect.center = self.position
def y(self):
# Return height (y coordinate).
return self.position[1]
def fire(self):
# (Possibly) fire a bullet down.
if (random.randrange(100) < 2):
bounds = (0-100, 800+100, 0-100, 600+100)
bullet = projectiles.Bullet(os.path.join("Resources", "Enemy Bullet.png"), self.position, (0, 5), bounds, self)
self.bullet_group.add(bullet)
class EnemyColumn(pygame.sprite.Group):
# Class for one column in a formation of enemies.
# Exists so we can easily fire only the lowest enemy in each column
# Remembers its own x coordinate, everything else happens inside the actual enemies
def __init__(self, x_position):
# x_position: integer x coordinate
pygame.sprite.Group.__init__(self)
self.x_position = x_position
def update(self, dv, score, collisions):
# Update this column. Should be called once per frame.
# dv: (x, y) vector for movement this frame
# score: a Score to pass to contained EnemySprites
# collisions: a dictionary of collisions to pass to contained EnemySprites
# Return (x, y), x of this column and y of lowest contained Sprite.
self.x_position += dv[0]
# Update contained sprites
for i in self.sprites():
i.update(dv, score, collisions)
# Compute biggest y, ask that EnemySprite to fire.
max_y = 0
if (len(self) != 0):
for i in self.sprites():
if (i.y() > max_y):
max_y = i.y()
bottom_enemy = i
bottom_enemy.fire()
return self.x_position, max_y
class EnemyFormation(pygame.sprite.Group):
# Class for a whole formation of enemies.
# Contains both EnemyColumns and EnemySprites
# Magic numbers: Base speed stepped horizontally or vertically each frame.
H_STEP = 2
V_STEP = 10
def __init__(self, topleft, layout, bounds, bullet_group):
pygame.sprite.Group.__init__(self)
self.columns = []
columns, rows = layout
# Generate all the enemies and columns.
for i in range(0, columns):
column_x = topleft[0] + i*64
enemy_column = EnemyColumn(topleft[0] + i*64)
for j in range(0, rows):
new_enemy = EnemySprite(os.path.join("resources", "Enemy.png"), (column_x, topleft[1] + j*64), bullet_group)
enemy_column.add(new_enemy)
self.add(new_enemy)
self.columns.append(enemy_column)
# Direction: +1 for right, -1 for left (i.e. +-ve x direction)
self.current_direction = +1
self.left_bound, self.right_bound, self.bottom_bound = bounds
self.total = columns * rows
def update(self, score, collisions):
# Update this formation. Should be called once per frame.
# score: a Score to pass to contained EnemyColumns
# collisions: a dictionary of collisions to pass to contained EnemyColumns
# Returns (bool, bool). First is True if this formation is still in a good state, False if it needs resetting.
# Second is True if this is because it's now empty, False if it has reached the bottom of the screen.
direction_change = too_low = False
# Compute factor to move faster when we have fewer remaining members.
scale = int(float(self.total)/float(len(self)))
# Update columns
for i in self.columns:
x, y = i.update((scale*self.current_direction*self.H_STEP, 0), score, collisions)
# Remove empty columns
if (len(i.sprites()) == 0):
self.columns.remove(i)
# Notice if we've gone too low
elif (y > self.bottom_bound):
too_low = True
# Remember to change direction when we reach screen edges
elif (x < self.left_bound or x > self.right_bound):
direction_change = True
# Indicate we're empty
if (len(self.columns) == 0):
return False, True
# Indicate we reached the bottom of the screen.
elif too_low:
return False, False
# Drop down and change direction
elif direction_change:
self.current_direction *= -1
for i in self.columns:
i.update((scale*self.current_direction*self.H_STEP, self.V_STEP), score, [])
# If we made it here, everything's fine.
        return True, True
 | gpl-2.0 | 800,489,110,435,444,500 | 3,835,152,158,573,891,000 | 41.06962 | 124 | 0.614354 | false |
MERegistro/meregistro | django/contrib/admin/templatetags/admin_list.py | 43 | 12835 | import datetime
from django.conf import settings
from django.contrib.admin.util import lookup_field, display_for_field, label_for_field
from django.contrib.admin.views.main import ALL_VAR, EMPTY_CHANGELIST_VALUE
from django.contrib.admin.views.main import ORDER_VAR, ORDER_TYPE_VAR, PAGE_VAR, SEARCH_VAR
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode, force_unicode
from django.template import Library
register = Library()
DOT = '.'
def paginator_number(cl,i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return u'... '
elif i == cl.page_num:
return mark_safe(u'<span class="this-page">%d</span> ' % (i+1))
else:
return mark_safe(u'<a href="%s"%s>%d</a> ' % (escape(cl.get_query_string({PAGE_VAR: i})), (i == cl.paginator.num_pages-1 and ' class="end"' or ''), i+1))
paginator_number = register.simple_tag(paginator_number)
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
# Otherwise, do some fancy
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_EACH_SIDE - 1))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
pagination = register.inclusion_tag('admin/pagination.html')(pagination)
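# Worked example of the "smart" page_range logic above (comment added for
# clarity): with paginator.num_pages == 50 and page_num == 25 the branches
# produce
#   [0, 1, '.', 22, 23, 24, 25, 26, 27, 28, '.', 48, 49]
# i.e. a couple of links at each end, ON_EACH_SIDE links around the current
# page, and DOT markers standing in for the skipped ranges.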
def result_headers(cl):
"""
Generates the list column headers.
"""
lookup_opts = cl.lookup_opts
for i, field_name in enumerate(cl.list_display):
header, attr = label_for_field(field_name, cl.model,
model_admin = cl.model_admin,
return_attr = True
)
if attr:
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": header,
"class_attrib": mark_safe(' class="action-checkbox-column"')
}
continue
# It is a non-field, but perhaps one that is sortable
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
yield {"text": header}
continue
# So this _is_ a sortable non-field. Go to the yield
# after the else clause.
else:
admin_order_field = None
th_classes = []
new_order_type = 'asc'
if field_name == cl.order_field or admin_order_field == cl.order_field:
th_classes.append('sorted %sending' % cl.order_type.lower())
new_order_type = {'asc': 'desc', 'desc': 'asc'}[cl.order_type.lower()]
yield {
"text": header,
"sortable": True,
"url": cl.get_query_string({ORDER_VAR: i, ORDER_TYPE_VAR: new_order_type}),
"class_attrib": mark_safe(th_classes and ' class="%s"' % ' '.join(th_classes) or '')
}
def _boolean_icon(field_val):
BOOLEAN_MAPPING = {True: 'yes', False: 'no', None: 'unknown'}
return mark_safe(u'<img src="%simg/admin/icon-%s.gif" alt="%s" />' % (settings.ADMIN_MEDIA_PREFIX, BOOLEAN_MAPPING[field_val], field_val))
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_class = ''
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except (AttributeError, ObjectDoesNotExist):
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = _boolean_icon(value)
else:
result_repr = smart_unicode(value)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if not allow_tags:
result_repr = escape(result_repr)
else:
result_repr = mark_safe(result_repr)
else:
if value is None:
result_repr = EMPTY_CHANGELIST_VALUE
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = escape(field_val)
else:
result_repr = display_for_field(value, f)
if isinstance(f, models.DateField) or isinstance(f, models.TimeField):
row_class = ' class="nowrap"'
if force_unicode(result_repr) == '':
result_repr = mark_safe(' ')
# If list_display_links not defined, add the link tag to the first field
if (first and not cl.list_display_links) or field_name in cl.list_display_links:
table_tag = {True:'th', False:'td'}[first]
first = False
url = cl.url_for_result(result)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = repr(force_unicode(value))[1:]
yield mark_safe(u'<%s%s><a href="%s"%s>%s</a></%s>' % \
(table_tag, row_class, url, (cl.is_popup and ' onclick="opener.dismissRelatedLookupPopup(window, %s); return false;"' % result_id or ''), conditional_escape(result_repr), table_tag))
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if form and field_name in form.fields:
bf = form[field_name]
result_repr = mark_safe(force_unicode(bf.errors) + force_unicode(bf))
else:
result_repr = conditional_escape(result_repr)
yield mark_safe(u'<td%s>%s</td>' % (row_class, result_repr))
if form and not form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(u'<td>%s</td>' % force_unicode(form[cl.model._meta.pk.name]))
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield list(items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield list(items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_unicode(form[cl.model._meta.pk.name]))
def result_list(cl):
"""
Displays the headers and data list together
"""
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': list(result_headers(cl)),
'results': list(results(cl))}
result_list = register.inclusion_tag("admin/change_list_results.html")(result_list)
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda d: cl.get_query_string(d, [field_generic])
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.query_set.filter(**{year_field: year_lookup, month_field: month_lookup}).dates(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.query_set.filter(**{year_field: year_lookup}).dates(field_name, 'month')
return {
'show' : True,
'back': {
'link' : link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = cl.query_set.dates(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
date_hierarchy = register.inclusion_tag('admin/date_hierarchy.html')(date_hierarchy)
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
search_form = register.inclusion_tag('admin/search_form.html')(search_form)
def admin_list_filter(cl, spec):
return {'title': spec.title(), 'choices' : list(spec.choices(cl))}
admin_list_filter = register.inclusion_tag('admin/filter.html')(admin_list_filter)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
admin_actions = register.inclusion_tag("admin/actions.html", takes_context=True)(admin_actions)
| bsd-3-clause | 3,904,319,418,195,696,000 | -4,800,956,014,185,393,000 | 40.672078 | 198 | 0.564316 | false |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/datasets/tests/test_samples_generator.py | 3 | 7262 | import numpy as np
from numpy.testing import assert_equal, assert_approx_equal, \
assert_array_almost_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_less
from .. import make_classification
from .. import make_multilabel_classification
from .. import make_hastie_10_2
from .. import make_regression
from .. import make_blobs
from .. import make_friedman1
from .. import make_friedman2
from .. import make_friedman3
from .. import make_low_rank_matrix
from .. import make_sparse_coded_signal
from .. import make_sparse_uncorrelated
from .. import make_spd_matrix
from .. import make_swiss_roll
from .. import make_s_curve
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_multilabel_classification():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_approx_equal(np.std(y - np.dot(X, c)), 1.0, significant=2)
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_approx_equal(np.std(y - np.dot(X, c)), 1.0, significant=2)
def test_make_blobs():
X, y = make_blobs(n_samples=50, n_features=2,
centers=[[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
random_state=0)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, 10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_equal(X[:, 0], t * np.cos(t))
assert_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_equal(X[:, 0], np.sin(t))
assert_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
| agpl-3.0 | -7,246,612,269,710,789,000 | 4,919,047,317,279,834,000 | 37.62766 | 79 | 0.569127 | false |
CalvinHsu1223/LinuxCNC-EtherCAT-HAL-Driver | configs/sim/gscreen_custom/gscreen_handler.py | 25 | 4194 | # This is a handler file for using Gscreen's infrastructure
# to load a completely custom glade screen
# The only thing that really matters is that it's saved as a GTK builder project,
# the toplevel window is called window1 (the default name) and you connect a destroy
# window signal, else you can't close down linuxcnc
class HandlerClass:
# this will be pretty standard to gain access to everything
# emc is for control and status of linuxcnc
# data is important data from gscreen and linuxcnc
# widgets is all the widgets from the glade files
# gscreen is for access to gscreens methods
#
# we added setting the gremlin DRO on from the startup,
# a global variable for the number of key presses,
# and make only the active axis buttons visible
def __init__(self, halcomp,builder,useropts,gscreen):
self.emc = gscreen.emc
self.data = gscreen.data
self.widgets = gscreen.widgets
self.gscreen = gscreen
self.nhits = 0
self.widgets.gremlin.set_property('enable_dro',True)
for i in ("x","y","z","a","b","c","u","v","w","s"):
if i in self.data.axis_list:
self.widgets["axis_%s"%i].set_visible(True)
self.widgets.offsetpage1.set_row_visible("1",False)
# This is a new method for a couple of widgets we added callbacks to.
# The argument 'widget' is a reference to the actual widget that called.
# In this way we can use this method on a bunch of widgets without knowing
# their name ahead of time.
def on_button_press(self,widget,data=None):
global nhits
self.nhits += 1
widget.set_label("hits: %d" % self.nhits)
# This method is overriden from gscreen
# We selected this method name in the glade file as a callback.
# Since this method name is the same as one in gscreen,
# gscreen won't connect a callback to it's method.
# Meaning this is the only one called.
def on_estop_clicked(self,*args):
print "estop"
if self.data.estopped:
self.emc.estop_reset(1)
else:
self.emc.machine_off(1)
self.emc.estop(1)
self.widgets.on_label.set_text("Machine Off")
return True
# This is a new method for our new button
# we selected this method name in the glade file as a callback
def on_machine_state_clicked(self,*args):
if self.data.estopped:
return
elif not self.data.machine_on:
self.emc.machine_on(1)
self.widgets.on_label.set_text("Machine On")
else:
self.emc.machine_off(1)
self.widgets.on_label.set_text("Machine Off")
# here we override gscreen's method of hiding the cursor
# by writing a method with the same name that gscreen connects a signal to.
# and our new method in fact calls a sound method and then the hide cursor method
# that are both in gscreen
# So now we get a sound when we hide and show the pointer
def on_hide_cursor(self,widget):
self.gscreen.audio.set_sound(self.data.alert_sound)
self.gscreen.audio.run()
self.gscreen.on_hide_cursor(None)
# every 100 milli seconds this gets called
# we add calls to the regular functions for the widgets we are using.
# and add any extra calls/code
def periodic(self):
self.gscreen.update_mdi_spindle_button()
self.gscreen.update_spindle_bar()
self.gscreen.update_active_gcodes()
self.gscreen.update_active_mcodes()
self.gscreen.update_aux_coolant_pins()
self.gscreen.update_feed_speed_label()
self.gscreen.update_tool_label()
self.gscreen.update_coolant_leds()
self.gscreen.update_estop_led()
self.gscreen.update_machine_on_led()
self.gscreen.update_limit_override()
self.gscreen.update_override_label()
self.gscreen.update_jog_rate_label()
self.gscreen.update_mode_label()
self.gscreen.update_units_button_label()
def get_handlers(halcomp,builder,useropts,gscreen):
return [HandlerClass(halcomp,builder,useropts,gscreen)]
| gpl-2.0 | 7,664,636,657,313,096,000 | 4,838,285,967,001,660,000 | 42.237113 | 85 | 0.653791 | false |
thaihungle/deepexp | rare-mann/mimic_gen.py | 1 | 5981 | import numpy as np
import os
import random
import pickle
class MimicDataLoader(object):
def __init__(self, data_folder, batch_size=1, max_sequence=10, max_iter=None, split = 0.75, train_keep=1):
super(MimicDataLoader, self).__init__()
self.data_folder = data_folder
self.batch_size = batch_size
self.num_step = max_sequence
self.max_iter = max_iter
self.num_iter = 0
self.input_map=pickle.load(open(data_folder+'/dig_map.pkl','rb'))
self.ouput_map = pickle.load(open(data_folder + '/proc_map.pkl', 'rb'))
self.all_input = pickle.load(open(data_folder+'/dig_input.pkl','rb'))
self.all_output = pickle.load(open(data_folder + '/proc_output.pkl', 'rb'))
self.output_size = self.all_output.shape[1]
if len(np.shape(self.all_output))>1:
self.all_output = np.argmax(self.all_output, axis=1)
print(self.all_output[:10])
print(self.all_output.shape)
self.num_samples=self.all_input.shape[0]
print('num samples {}'.format(self.num_samples))
lindex=list(range(self.num_samples))
# random.shuffle(lindex)
self.train_data_indexes = lindex[:int(self.num_samples*split*train_keep)]
self.test_data_indexes = lindex[int(self.num_samples*split):]
self.is_training=True
self.data_offset=0
self.input_size=self.all_input.shape[1]
print('num train samples: {}'.format(len(self.train_data_indexes)))
print('train index: {} ...'.format(self.train_data_indexes[:10]))
print('num test samples: {}'.format(len(self.test_data_indexes)))
print('test index: {} ...'.format(self.test_data_indexes[:10]))
print('num classes: {}'.format(self.output_size))
print('num steps per episode: {}'.format(self.num_step))
print('batch size: {}'.format(self.batch_size))
def fetch_all(self):
train_x=[]
train_y=[]
test_x=[]
test_y=[]
for ind in self.train_data_indexes:
train_x.append(self.all_input[ind])
train_y.append(self.all_output[ind])
for ind in self.test_data_indexes:
test_x.append(self.all_input[ind])
test_y.append(self.all_output[ind])
return np.asarray(train_x), np.asarray(train_y), np.asarray(test_x), np.asarray(test_y)
def fetch_batch(self, is_training=True):
if is_training:
list_index=self.train_data_indexes
else:
list_index=self.test_data_indexes
indexes = np.zeros((self.batch_size, self.num_step), dtype=np.int32)
for i in range(self.batch_size):
indexes[i, :] = np.random.choice(len(list_index), self.num_step, replace=False)
# print('-------------')
# print(indexes[:10])
all_inputs=[]
all_outputs=[]
for s in range(self.num_step):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
for b in range(self.batch_size):
                example_inputs[b,:]=self.all_input[list_index[indexes[b,s]]]
                example_outputs[b] = self.all_output[list_index[indexes[b, s]]]
            # fill the whole batch for this step, then append once per step
            all_inputs.append(example_inputs.astype('float32'))
            all_outputs.append(example_outputs.astype('int32'))
return all_inputs, all_outputs
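    # Illustrative episode fetch (comment only; the data folder is assumed to
    # contain the pickled dig/proc files named in __init__):
    #   loader = MimicDataLoader('mimic-data', batch_size=16, max_sequence=50)
    #   inputs, outputs = loader.fetch_batch(is_training=True)
    # returns two lists of length num_step holding (batch_size, input_size)
    # float32 arrays and (batch_size,) int32 label arrays respectively.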
def fetch_batch_full(self, is_training, is_rand=True):
if is_training:
list_index=self.train_data_indexes
else:
list_index=self.test_data_indexes
num_t = len(list_index)
indexes = np.zeros((self.batch_size, num_t),dtype=np.int32)
for i in range(self.batch_size):
if is_rand:
indexes[i, :] = np.random.choice(len(list_index), num_t, replace=False)
else:
indexes[i, :] = np.asarray(list(range(len(list_index))))
# indexes = np.zeros((self.batch_size, num_t), dtype=np.int32)
# for i in range(self.batch_size):
# indexes[i,:]=np.arange(num_t)
all_inputs=[]
all_outputs=[]
for s in range(num_t):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
for b in range(self.batch_size):
                example_inputs[b,:]=self.all_input[list_index[indexes[b,s]]]
                example_outputs[b] = self.all_output[list_index[indexes[b, s]]]
            all_inputs.append(example_inputs.astype('float32'))
            all_outputs.append(example_outputs.astype('int32'))
return all_inputs, all_outputs
# indexes just have shape (batch,)
def predict_index2data(self, list_index, indexes):
all_inputs = []
all_outputs = []
for s in range(self.num_step):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
for b in range(self.batch_size):
                example_inputs[b, :] = self.all_input[list_index[indexes[b]]]
                example_outputs[b] = self.all_output[list_index[indexes[b]]]
            all_inputs.append(example_inputs)
            all_outputs.append(example_outputs)
return all_inputs, all_outputs
def predict_online_index2data(self, list_index, indexes):
all_inputs = []
all_outputs = []
for s in range(self.num_step):
example_inputs = np.zeros((self.batch_size, self.input_size))
example_outputs = np.zeros(self.batch_size)
for b in range(self.batch_size):
                example_inputs[b, :] = self.all_input[list_index[indexes[b]]]
                example_outputs[b] = self.all_output[list_index[indexes[b]]]
            all_inputs.append(example_inputs)
            all_outputs.append(example_outputs)
return all_inputs, all_outputs
| mit | -8,288,277,385,317,826,000 | 1,344,896,336,166,073,900 | 38.609272 | 110 | 0.585521 | false |
persandstrom/home-assistant | homeassistant/components/device_tracker/luci.py | 4 | 5240 | """
Support for OpenWRT (luci) routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.luci/
"""
import json
import logging
import re
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.exceptions import HomeAssistantError
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import (
CONF_HOST, CONF_USERNAME, CONF_PASSWORD, CONF_SSL)
_LOGGER = logging.getLogger(__name__)
DEFAULT_SSL = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean
})
class InvalidLuciTokenError(HomeAssistantError):
"""When an invalid token is detected."""
pass
def get_scanner(hass, config):
"""Validate the configuration and return a Luci scanner."""
scanner = LuciDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class LuciDeviceScanner(DeviceScanner):
"""This class queries a wireless router running OpenWrt firmware."""
def __init__(self, config):
"""Initialize the scanner."""
host = config[CONF_HOST]
protocol = 'http' if not config[CONF_SSL] else 'https'
self.origin = '{}://{}'.format(protocol, host)
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
self.last_results = {}
self.refresh_token()
self.mac2name = None
self.success_init = self.token is not None
def refresh_token(self):
"""Get a new token."""
self.token = _get_token(self.origin, self.username, self.password)
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if self.mac2name is None:
url = '{}/cgi-bin/luci/rpc/uci'.format(self.origin)
result = _req_json_rpc(
url, 'get_all', 'dhcp', params={'auth': self.token})
if result:
hosts = [x for x in result.values()
if x['.type'] == 'host' and
'mac' in x and 'name' in x]
mac2name_list = [
(x['mac'].upper(), x['name']) for x in hosts]
self.mac2name = dict(mac2name_list)
else:
# Error, handled in the _req_json_rpc
return
return self.mac2name.get(device.upper(), None)
def _update_info(self):
"""Ensure the information from the Luci router is up to date.
Returns boolean if scanning successful.
"""
if not self.success_init:
return False
_LOGGER.info("Checking ARP")
url = '{}/cgi-bin/luci/rpc/sys'.format(self.origin)
try:
result = _req_json_rpc(
url, 'net.arptable', params={'auth': self.token})
except InvalidLuciTokenError:
_LOGGER.info("Refreshing token")
self.refresh_token()
return False
if result:
self.last_results = []
for device_entry in result:
# Check if the Flags for each device contain
# NUD_REACHABLE and if so, add it to last_results
if int(device_entry['Flags'], 16) & 0x2:
self.last_results.append(device_entry['HW address'])
return True
return False
def _req_json_rpc(url, method, *args, **kwargs):
"""Perform one JSON RPC operation."""
data = json.dumps({'method': method, 'params': args})
try:
res = requests.post(url, data=data, timeout=5, **kwargs)
except requests.exceptions.Timeout:
_LOGGER.exception("Connection to the router timed out")
return
if res.status_code == 200:
try:
result = res.json()
except ValueError:
# If json decoder could not parse the response
_LOGGER.exception("Failed to parse response from luci")
return
try:
return result['result']
except KeyError:
_LOGGER.exception("No result in response from luci")
return
elif res.status_code == 401:
# Authentication error
_LOGGER.exception(
"Failed to authenticate, check your username and password")
return
elif res.status_code == 403:
_LOGGER.error("Luci responded with a 403 Invalid token")
raise InvalidLuciTokenError
else:
_LOGGER.error("Invalid response from luci: %s", res)
def _get_token(origin, username, password):
"""Get authentication token for the given configuration."""
url = '{}/cgi-bin/luci/rpc/auth'.format(origin)
return _req_json_rpc(url, 'login', username, password)
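# Rough usage sketch (host and credentials are placeholders):
#   token = _get_token('https://192.168.1.1', 'root', 'secret')
#   arp = _req_json_rpc('https://192.168.1.1/cgi-bin/luci/rpc/sys',
#                       'net.arptable', params={'auth': token})
# which mirrors what LuciDeviceScanner does in refresh_token() and
# _update_info().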
| apache-2.0 | 3,111,002,906,260,314,000 | -3,889,482,701,693,561,000 | 31.546584 | 79 | 0.605916 | false |
abadger/ansible-modules-core | network/nxos/nxos_vxlan_vtep_vni.py | 19 | 19617 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_vxlan_vtep_vni
version_added: "2.2"
short_description: Creates a Virtual Network Identifier member (VNI)
description:
- Creates a Virtual Network Identifier member (VNI) for an NVE
overlay interface.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
- default, where supported, restores params default value.
options:
interface:
description:
- Interface name for the VXLAN Network Virtualization Endpoint.
required: true
vni:
description:
- ID of the Virtual Network Identifier.
required: true
assoc_vrf:
description:
- This attribute is used to identify and separate processing VNIs
that are associated with a VRF and used for routing. The VRF
and VNI specified with this command must match the configuration
of the VNI under the VRF.
required: false
choices: ['true','false']
default: null
ingress_replication:
description:
- Specifies mechanism for host reachability advertisement.
required: false
choices: ['bgp','static']
default: null
multicast_group:
description:
- The multicast group (range) of the VNI. Valid values are
string and keyword 'default'.
required: false
default: null
peer_list:
description:
- Set the ingress-replication static peer list. Valid values
are an array, a space-separated string of ip addresses,
or the keyword 'default'.
required: false
default: null
suppress_arp:
description:
- Suppress arp under layer 2 VNI.
required: false
choices: ['true','false']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
include_defaults:
description:
- Specify to use or not the complete running configuration
for module operations.
required: false
default: true
    choices: ['true','false']
config:
description:
- Configuration string to be used for module operations. If not
specified, the module will use the current running configuration.
required: false
default: null
save:
description:
- Specify to save the running configuration after
module operations.
required: false
default: false
choices: ['true','false']
'''
EXAMPLES = '''
- nxos_vxlan_vtep_vni:
interface: nve1
vni: 6000
ingress_replication: default
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"ingress_replication": "default", "interface": "nve1", "vni": "6000"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"assoc_vrf": false, "ingress_replication": "", "interface": "nve1",
"multicast_group": "", "peer_list": [],
"suppress_arp": false, "vni": "6000"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface nve1", "member vni 6000"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
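# Diff the candidate config against the running config, push any missing
# commands, and optionally save the running configuration.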
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
BOOL_PARAMS = ['suppress_arp']
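# Map module argument names to the NX-OS CLI keywords used to render and parse them.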
PARAM_TO_COMMAND_KEYMAP = {
'assoc_vrf': 'associate-vrf',
'interface': 'interface',
'vni': 'member vni',
'ingress_replication': 'ingress-replication protocol',
'multicast_group': 'mcast-group',
'peer_list': 'peer-ip',
'suppress_arp': 'suppress-arp'
}
PARAM_TO_DEFAULT_KEYMAP = {}
WARNINGS = []
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
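# Parse the current value of `arg` out of the device configuration text.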
def get_value(arg, config, module):
if arg in BOOL_PARAMS:
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
else:
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')
return value
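# Return the name of the existing NVE interface (e.g. 'nve1'), or '' if none is configured.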
def check_interface(module, netcfg):
config = str(netcfg)
REGEX = re.compile(r'(?:interface nve)(?P<value>.*)$', re.M)
value = ''
if 'interface nve' in config:
value = 'nve{0}'.format(REGEX.search(config).group('value'))
return value
def get_custom_value(arg, config, module):
splitted_config = config.splitlines()
if arg == 'assoc_vrf':
value = False
if 'associate-vrf' in config:
value = True
elif arg == 'peer_list':
value = []
REGEX = re.compile(r'(?:peer-ip\s)(?P<peer_value>.*)$', re.M)
for line in splitted_config:
peer_value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in line:
peer_value = REGEX.search(line).group('peer_value')
if peer_value:
value.append(peer_value)
return value
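# Collect the existing member-vni settings for the requested VNI from the device config.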
def get_existing(module, args):
existing = {}
netcfg = get_config(module)
custom = [
'assoc_vrf',
'peer_list'
]
interface_exist = check_interface(module, netcfg)
if interface_exist:
parents = ['interface {0}'.format(interface_exist)]
temp_config = netcfg.get_section(parents)
if 'associate-vrf' in temp_config:
parents.append('member vni {0} associate-vrf'.format(
module.params['vni']))
config = netcfg.get_section(parents)
elif 'member vni' in temp_config:
parents.append('member vni {0}'.format(module.params['vni']))
config = netcfg.get_section(parents)
else:
config = {}
if config:
for arg in args:
if arg not in ['interface', 'vni']:
if arg in custom:
existing[arg] = get_custom_value(arg, config, module)
else:
existing[arg] = get_value(arg, config, module)
existing['interface'] = interface_exist
existing['vni'] = module.params['vni']
return existing, interface_exist
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
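# Build the CLI commands needed to move the device from the existing state to the proposed one.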
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.iteritems():
if key == 'associate-vrf':
command = 'member vni {0} {1}'.format(module.params['vni'], key)
if value:
commands.append(command)
else:
commands.append('no {0}'.format(command))
elif key == 'peer-ip' and value != 'default':
for peer in value:
commands.append('{0} {1}'.format(key, peer))
elif value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
if key == 'peer-ip':
for peer in existing_value:
commands.append('no {0} {1}'.format(key, peer))
else:
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
commands.append('no {0}'.format(key.lower()))
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
vni_command = 'member vni {0}'.format(module.params['vni'])
ingress_replication_command = 'ingress-replication protocol static'
interface_command = 'interface {0}'.format(module.params['interface'])
if ingress_replication_command in commands:
static_level_cmds = [cmd for cmd in commands if 'peer' in cmd]
parents = [interface_command, vni_command, ingress_replication_command]
candidate.add(static_level_cmds, parents=parents)
commands = [cmd for cmd in commands if 'peer' not in cmd]
if vni_command in commands:
parents = [interface_command]
commands.remove(vni_command)
if module.params['assoc_vrf'] is None:
parents.append(vni_command)
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
if existing['assoc_vrf']:
commands = ['no member vni {0} associate-vrf'.format(
module.params['vni'])]
else:
commands = ['no member vni {0}'.format(module.params['vni'])]
parents = ['interface {0}'.format(module.params['interface'])]
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
interface=dict(required=True, type='str'),
vni=dict(required=True, type='str'),
assoc_vrf=dict(required=False, type='bool'),
multicast_group=dict(required=False, type='str'),
peer_list=dict(required=False, type='list'),
suppress_arp=dict(required=False, type='bool'),
ingress_replication=dict(required=False, type='str',
choices=['bgp', 'static', 'default']),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
if module.params['assoc_vrf']:
mutually_exclusive_params = ['multicast_group',
'suppress_arp',
'ingress_replication']
for param in mutually_exclusive_params:
if module.params[param]:
module.fail_json(msg='assoc_vrf cannot be used with '
'{0} param'.format(param))
if module.params['peer_list']:
if module.params['ingress_replication'] != 'static':
module.fail_json(msg='ingress_replication=static is required '
'when using peer_list param')
else:
peer_list = module.params['peer_list']
if peer_list[0] == 'default':
module.params['peer_list'] = 'default'
else:
stripped_peer_list = map(str.strip, peer_list)
module.params['peer_list'] = stripped_peer_list
state = module.params['state']
args = [
'assoc_vrf',
'interface',
'vni',
'ingress_replication',
'multicast_group',
'peer_list',
'suppress_arp'
]
existing, interface_exist = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.iteritems()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.iteritems():
if key != 'interface':
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if state == 'present' or (state == 'absent' and existing):
if not interface_exist:
WARNINGS.append("The proposed NVE interface does not exist. "
"Use nxos_interface to create it first.")
elif interface_exist != module.params['interface']:
module.fail_json(msg='Only 1 NVE interface is allowed on '
'the switch.')
elif (existing and state == 'absent' and
existing['vni'] != module.params['vni']):
module.fail_json(msg="ERROR: VNI delete failed: Could not find"
" vni node for {0}".format(
module.params['vni']),
existing_vni=existing['vni'])
else:
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
try:
response = load_config(module, candidate)
result.update(response)
except ShellError:
exc = get_exception()
module.fail_json(msg=str(exc))
else:
result['updates'] = []
result['connected'] = module.connected
if module._verbosity > 0:
end_state, interface_exist = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 568,410,750,692,418,300 | -6,755,775,550,954,640,000 | 32.533333 | 97 | 0.558903 | false |
openprocurement/restkit | restkit/filters.py | 2 | 3801 | # -*- coding: utf-8 -
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.
import base64
import re
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from urlparse import urlunparse
from restkit.oauth2 import Request, SignatureMethod_HMAC_SHA1
class BasicAuth(object):
""" Simple filter to manage basic authentification"""
def __init__(self, username, password):
self.credentials = (username, password)
def on_request(self, request):
encode = base64.b64encode("%s:%s" % self.credentials)
request.headers['Authorization'] = 'Basic %s' % encode
def validate_consumer(consumer):
""" validate a consumer agains oauth2.Consumer object """
if not hasattr(consumer, "key"):
raise ValueError("Invalid consumer.")
return consumer
def validate_token(token):
""" validate a token agains oauth2.Token object """
if token is not None and not hasattr(token, "key"):
raise ValueError("Invalid token.")
return token
class OAuthFilter(object):
""" oauth filter """
def __init__(self, path, consumer, token=None, method=None,
realm=""):
""" Init OAuthFilter
        :param path: path or regexp. '*' means all paths on which oauth can
        be applied.
        :param consumer: oauth consumer, instance of oauth2.Consumer
        :param token: oauth token, instance of oauth2.Token
        :param method: oauth signature method
        token and signature method are optional. Consumer should be an
        instance of `oauth2.Consumer`, token an instance of `oauth2.Token`
        and signature method an instance of `oauth2.SignatureMethod`.
"""
if path.endswith('*'):
self.match = re.compile("%s.*" % path.rsplit('*', 1)[0])
else:
self.match = re.compile("%s$" % path)
self.consumer = validate_consumer(consumer)
self.token = validate_token(token)
self.method = method or SignatureMethod_HMAC_SHA1()
self.realm = realm
def on_path(self, request):
path = request.parsed_url.path or "/"
return (self.match.match(path) is not None)
def on_request(self, request):
if not self.on_path(request):
return
params = {}
form = False
parsed_url = request.parsed_url
if request.body and request.body is not None:
ctype = request.headers.iget('content-type')
if ctype is not None and \
ctype.startswith('application/x-www-form-urlencoded'):
# we are in a form try to get oauth params from here
form = True
params = dict(parse_qsl(request.body))
        # update params from query parameters
params.update(parse_qsl(parsed_url.query))
raw_url = urlunparse((parsed_url.scheme, parsed_url.netloc,
parsed_url.path, '', '', ''))
oauth_req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=request.method,
http_url=raw_url, parameters=params,
is_form_encoded=form)
oauth_req.sign_request(self.method, self.consumer, self.token)
if form:
request.body = oauth_req.to_postdata()
request.headers['Content-Length'] = len(request.body)
elif request.method in ('GET', 'HEAD'):
request.original_url = request.url
request.url = oauth_req.to_url()
else:
oauth_headers = oauth_req.to_header(realm=self.realm)
request.headers.update(oauth_headers)
| apache-2.0 | -5,817,007,473,251,960,000 | -56,288,830,965,964,940 | 33.87156 | 75 | 0.598527 | false |
DxCx/nzbToMedia | libs/beets/ui/commands.py | 4 | 50834 | # This file is part of beets.
# Copyright 2014, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module provides the default commands for beets' command-line
interface.
"""
from __future__ import print_function
import logging
import os
import time
import itertools
import codecs
import platform
import beets
from beets import ui
from beets.ui import print_, input_, decargs
from beets import autotag
from beets.autotag import recommendation
from beets.autotag import hooks
from beets import plugins
from beets import importer
from beets import util
from beets.util import syspath, normpath, ancestry, displayable_path
from beets.util.functemplate import Template
from beets import library
from beets import config
from beets.util.confit import _package_path
# Global logger.
log = logging.getLogger('beets')
# The list of default subcommands. This is populated with Subcommand
# objects that can be fed to a SubcommandsOptionParser.
default_commands = []
# Utilities.
def _do_query(lib, query, album, also_items=True):
"""For commands that operate on matched items, performs a query
and returns a list of matching items and a list of matching
albums. (The latter is only nonempty when album is True.) Raises
a UserError if no items match. also_items controls whether, when
fetching albums, the associated items should be fetched also.
"""
if album:
albums = list(lib.albums(query))
items = []
if also_items:
for al in albums:
items += al.items()
else:
albums = []
items = list(lib.items(query))
if album and not albums:
raise ui.UserError('No matching albums found.')
elif not album and not items:
raise ui.UserError('No matching items found.')
return items, albums
# fields: Shows a list of available fields for queries and format strings.
fields_cmd = ui.Subcommand('fields',
help='show fields available for queries and format strings')
def fields_func(lib, opts, args):
def _print_rows(names):
print(" " + "\n ".join(names))
def _show_plugin_fields(album):
plugin_fields = []
for plugin in plugins.find_plugins():
if album:
fdict = plugin.album_template_fields
else:
fdict = plugin.template_fields
plugin_fields += fdict.keys()
if plugin_fields:
print("Template fields from plugins:")
_print_rows(plugin_fields)
print("Item fields:")
_print_rows(library.ITEM_KEYS)
_show_plugin_fields(False)
print("\nAlbum fields:")
_print_rows(library.ALBUM_KEYS)
_show_plugin_fields(True)
fields_cmd.func = fields_func
default_commands.append(fields_cmd)
# import: Autotagger and importer.
VARIOUS_ARTISTS = u'Various Artists'
# Importer utilities and support.
def disambig_string(info):
"""Generate a string for an AlbumInfo or TrackInfo object that
provides context that helps disambiguate similar-looking albums and
tracks.
"""
disambig = []
if info.data_source and info.data_source != 'MusicBrainz':
disambig.append(info.data_source)
if isinstance(info, hooks.AlbumInfo):
if info.media:
if info.mediums > 1:
disambig.append(u'{0}x{1}'.format(
info.mediums, info.media
))
else:
disambig.append(info.media)
if info.year:
disambig.append(unicode(info.year))
if info.country:
disambig.append(info.country)
if info.label:
disambig.append(info.label)
if info.albumdisambig:
disambig.append(info.albumdisambig)
if disambig:
return u', '.join(disambig)
def dist_string(dist):
"""Formats a distance (a float) as a colorized similarity percentage
string.
"""
out = '%.1f%%' % ((1 - dist) * 100)
if dist <= config['match']['strong_rec_thresh'].as_number():
out = ui.colorize('green', out)
elif dist <= config['match']['medium_rec_thresh'].as_number():
out = ui.colorize('yellow', out)
else:
out = ui.colorize('red', out)
return out
def penalty_string(distance, limit=None):
"""Returns a colorized string that indicates all the penalties
applied to a distance object.
"""
penalties = []
for key in distance.keys():
key = key.replace('album_', '')
key = key.replace('track_', '')
key = key.replace('_', ' ')
penalties.append(key)
if penalties:
if limit and len(penalties) > limit:
penalties = penalties[:limit] + ['...']
return ui.colorize('yellow', '(%s)' % ', '.join(penalties))
def show_change(cur_artist, cur_album, match):
"""Print out a representation of the changes that will be made if an
album's tags are changed according to `match`, which must be an AlbumMatch
object.
"""
def show_album(artist, album):
if artist:
album_description = u' %s - %s' % (artist, album)
elif album:
album_description = u' %s' % album
else:
album_description = u' (unknown album)'
print_(album_description)
def format_index(track_info):
"""Return a string representing the track index of the given
TrackInfo or Item object.
"""
if isinstance(track_info, hooks.TrackInfo):
index = track_info.index
medium_index = track_info.medium_index
medium = track_info.medium
mediums = match.info.mediums
else:
index = medium_index = track_info.track
medium = track_info.disc
mediums = track_info.disctotal
if config['per_disc_numbering']:
if mediums > 1:
return u'{0}-{1}'.format(medium, medium_index)
else:
return unicode(medium_index)
else:
return unicode(index)
# Identify the album in question.
if cur_artist != match.info.artist or \
(cur_album != match.info.album and
match.info.album != VARIOUS_ARTISTS):
artist_l, artist_r = cur_artist or '', match.info.artist
album_l, album_r = cur_album or '', match.info.album
if artist_r == VARIOUS_ARTISTS:
# Hide artists for VA releases.
artist_l, artist_r = u'', u''
artist_l, artist_r = ui.colordiff(artist_l, artist_r)
album_l, album_r = ui.colordiff(album_l, album_r)
print_("Correcting tags from:")
show_album(artist_l, album_l)
print_("To:")
show_album(artist_r, album_r)
else:
print_(u"Tagging:\n {0.artist} - {0.album}".format(match.info))
# Data URL.
if match.info.data_url:
print_('URL:\n %s' % match.info.data_url)
# Info line.
info = []
# Similarity.
info.append('(Similarity: %s)' % dist_string(match.distance))
# Penalties.
penalties = penalty_string(match.distance)
if penalties:
info.append(penalties)
# Disambiguation.
disambig = disambig_string(match.info)
if disambig:
info.append(ui.colorize('lightgray', '(%s)' % disambig))
print_(' '.join(info))
# Tracks.
pairs = match.mapping.items()
pairs.sort(key=lambda (_, track_info): track_info.index)
# Build up LHS and RHS for track difference display. The `lines` list
# contains ``(lhs, rhs, width)`` tuples where `width` is the length (in
# characters) of the uncolorized LHS.
lines = []
medium = disctitle = None
for item, track_info in pairs:
# Medium number and title.
if medium != track_info.medium or disctitle != track_info.disctitle:
media = match.info.media or 'Media'
if match.info.mediums > 1 and track_info.disctitle:
lhs = '%s %s: %s' % (media, track_info.medium,
track_info.disctitle)
elif match.info.mediums > 1:
lhs = '%s %s' % (media, track_info.medium)
elif track_info.disctitle:
lhs = '%s: %s' % (media, track_info.disctitle)
else:
lhs = None
if lhs:
lines.append((lhs, '', 0))
medium, disctitle = track_info.medium, track_info.disctitle
# Titles.
new_title = track_info.title
if not item.title.strip():
# If there's no title, we use the filename.
cur_title = displayable_path(os.path.basename(item.path))
lhs, rhs = cur_title, new_title
else:
cur_title = item.title.strip()
lhs, rhs = ui.colordiff(cur_title, new_title)
lhs_width = len(cur_title)
# Track number change.
cur_track, new_track = format_index(item), format_index(track_info)
if cur_track != new_track:
if item.track in (track_info.index, track_info.medium_index):
color = 'lightgray'
else:
color = 'red'
if (cur_track + new_track).count('-') == 1:
lhs_track, rhs_track = ui.colorize(color, cur_track), \
ui.colorize(color, new_track)
else:
color = 'red'
lhs_track, rhs_track = ui.color_diff_suffix(cur_track,
new_track)
templ = ui.colorize(color, u' (#') + u'{0}' + \
ui.colorize(color, u')')
lhs += templ.format(lhs_track)
rhs += templ.format(rhs_track)
lhs_width += len(cur_track) + 4
# Length change.
if item.length and track_info.length and \
abs(item.length - track_info.length) > \
config['ui']['length_diff_thresh'].as_number():
cur_length = ui.human_seconds_short(item.length)
new_length = ui.human_seconds_short(track_info.length)
lhs_length, rhs_length = ui.color_diff_suffix(cur_length,
new_length)
templ = ui.colorize('red', u' (') + u'{0}' + \
ui.colorize('red', u')')
lhs += templ.format(lhs_length)
rhs += templ.format(rhs_length)
lhs_width += len(cur_length) + 3
# Penalties.
penalties = penalty_string(match.distance.tracks[track_info])
if penalties:
rhs += ' %s' % penalties
if lhs != rhs:
lines.append((' * %s' % lhs, rhs, lhs_width))
elif config['import']['detail']:
lines.append((' * %s' % lhs, '', lhs_width))
# Print each track in two columns, or across two lines.
col_width = (ui.term_width() - len(''.join([' * ', ' -> ']))) // 2
if lines:
max_width = max(w for _, _, w in lines)
for lhs, rhs, lhs_width in lines:
if not rhs:
print_(lhs)
elif max_width > col_width:
print_(u'%s ->\n %s' % (lhs, rhs))
else:
pad = max_width - lhs_width
print_(u'%s%s -> %s' % (lhs, ' ' * pad, rhs))
# Missing and unmatched tracks.
if match.extra_tracks:
print_('Missing tracks:')
for track_info in match.extra_tracks:
line = ' ! %s (#%s)' % (track_info.title, format_index(track_info))
if track_info.length:
line += ' (%s)' % ui.human_seconds_short(track_info.length)
print_(ui.colorize('yellow', line))
if match.extra_items:
print_('Unmatched tracks:')
for item in match.extra_items:
line = ' ! %s (#%s)' % (item.title, format_index(item))
if item.length:
line += ' (%s)' % ui.human_seconds_short(item.length)
print_(ui.colorize('yellow', line))
def show_item_change(item, match):
"""Print out the change that would occur by tagging `item` with the
metadata from `match`, a TrackMatch object.
"""
cur_artist, new_artist = item.artist, match.info.artist
cur_title, new_title = item.title, match.info.title
if cur_artist != new_artist or cur_title != new_title:
cur_artist, new_artist = ui.colordiff(cur_artist, new_artist)
cur_title, new_title = ui.colordiff(cur_title, new_title)
print_("Correcting track tags from:")
print_(" %s - %s" % (cur_artist, cur_title))
print_("To:")
print_(" %s - %s" % (new_artist, new_title))
else:
print_("Tagging track: %s - %s" % (cur_artist, cur_title))
# Data URL.
if match.info.data_url:
print_('URL:\n %s' % match.info.data_url)
# Info line.
info = []
# Similarity.
info.append('(Similarity: %s)' % dist_string(match.distance))
# Penalties.
penalties = penalty_string(match.distance)
if penalties:
info.append(penalties)
# Disambiguation.
disambig = disambig_string(match.info)
if disambig:
info.append(ui.colorize('lightgray', '(%s)' % disambig))
print_(' '.join(info))
def _summary_judment(rec):
"""Determines whether a decision should be made without even asking
the user. This occurs in quiet mode and when an action is chosen for
NONE recommendations. Return an action or None if the user should be
queried. May also print to the console if a summary judgment is
made.
"""
if config['import']['quiet']:
if rec == recommendation.strong:
return importer.action.APPLY
else:
action = config['import']['quiet_fallback'].as_choice({
'skip': importer.action.SKIP,
'asis': importer.action.ASIS,
})
elif rec == recommendation.none:
action = config['import']['none_rec_action'].as_choice({
'skip': importer.action.SKIP,
'asis': importer.action.ASIS,
'ask': None,
})
else:
return None
if action == importer.action.SKIP:
print_('Skipping.')
elif action == importer.action.ASIS:
print_('Importing as-is.')
return action
def choose_candidate(candidates, singleton, rec, cur_artist=None,
cur_album=None, item=None, itemcount=None):
"""Given a sorted list of candidates, ask the user for a selection
of which candidate to use. Applies to both full albums and
singletons (tracks). Candidates are either AlbumMatch or TrackMatch
objects depending on `singleton`. for albums, `cur_artist`,
`cur_album`, and `itemcount` must be provided. For singletons,
`item` must be provided.
Returns the result of the choice, which may SKIP, ASIS, TRACKS, or
MANUAL or a candidate (an AlbumMatch/TrackMatch object).
"""
# Sanity check.
if singleton:
assert item is not None
else:
assert cur_artist is not None
assert cur_album is not None
# Zero candidates.
if not candidates:
if singleton:
print_("No matching recordings found.")
opts = ('Use as-is', 'Skip', 'Enter search', 'enter Id',
'aBort')
else:
print_("No matching release found for {0} tracks."
.format(itemcount))
print_('For help, see: '
'http://beets.readthedocs.org/en/latest/faq.html#nomatch')
opts = ('Use as-is', 'as Tracks', 'Group albums', 'Skip',
'Enter search', 'enter Id', 'aBort')
sel = ui.input_options(opts)
if sel == 'u':
return importer.action.ASIS
elif sel == 't':
assert not singleton
return importer.action.TRACKS
elif sel == 'e':
return importer.action.MANUAL
elif sel == 's':
return importer.action.SKIP
elif sel == 'b':
raise importer.ImportAbort()
elif sel == 'i':
return importer.action.MANUAL_ID
elif sel == 'g':
return importer.action.ALBUMS
else:
assert False
# Is the change good enough?
bypass_candidates = False
if rec != recommendation.none:
match = candidates[0]
bypass_candidates = True
while True:
# Display and choose from candidates.
require = rec <= recommendation.low
if not bypass_candidates:
# Display list of candidates.
print_(u'Finding tags for {0} "{1} - {2}".'.format(
u'track' if singleton else u'album',
item.artist if singleton else cur_artist,
item.title if singleton else cur_album,
))
print_(u'Candidates:')
for i, match in enumerate(candidates):
# Index, metadata, and distance.
line = [
u'{0}.'.format(i + 1),
u'{0} - {1}'.format(
match.info.artist,
match.info.title if singleton else match.info.album,
),
u'({0})'.format(dist_string(match.distance)),
]
# Penalties.
penalties = penalty_string(match.distance, 3)
if penalties:
line.append(penalties)
# Disambiguation
disambig = disambig_string(match.info)
if disambig:
line.append(ui.colorize('lightgray', '(%s)' % disambig))
print_(' '.join(line))
# Ask the user for a choice.
if singleton:
opts = ('Skip', 'Use as-is', 'Enter search', 'enter Id',
'aBort')
else:
opts = ('Skip', 'Use as-is', 'as Tracks', 'Group albums',
'Enter search', 'enter Id', 'aBort')
sel = ui.input_options(opts, numrange=(1, len(candidates)))
if sel == 's':
return importer.action.SKIP
elif sel == 'u':
return importer.action.ASIS
elif sel == 'm':
pass
elif sel == 'e':
return importer.action.MANUAL
elif sel == 't':
assert not singleton
return importer.action.TRACKS
elif sel == 'b':
raise importer.ImportAbort()
elif sel == 'i':
return importer.action.MANUAL_ID
elif sel == 'g':
return importer.action.ALBUMS
else: # Numerical selection.
match = candidates[sel - 1]
if sel != 1:
# When choosing anything but the first match,
# disable the default action.
require = True
bypass_candidates = False
# Show what we're about to do.
if singleton:
show_item_change(item, match)
else:
show_change(cur_artist, cur_album, match)
# Exact match => tag automatically if we're not in timid mode.
if rec == recommendation.strong and not config['import']['timid']:
return match
# Ask for confirmation.
if singleton:
opts = ('Apply', 'More candidates', 'Skip', 'Use as-is',
'Enter search', 'enter Id', 'aBort')
else:
opts = ('Apply', 'More candidates', 'Skip', 'Use as-is',
'as Tracks', 'Group albums', 'Enter search', 'enter Id',
'aBort')
default = config['import']['default_action'].as_choice({
'apply': 'a',
'skip': 's',
'asis': 'u',
'none': None,
})
if default is None:
require = True
sel = ui.input_options(opts, require=require, default=default)
if sel == 'a':
return match
elif sel == 'g':
return importer.action.ALBUMS
elif sel == 's':
return importer.action.SKIP
elif sel == 'u':
return importer.action.ASIS
elif sel == 't':
assert not singleton
return importer.action.TRACKS
elif sel == 'e':
return importer.action.MANUAL
elif sel == 'b':
raise importer.ImportAbort()
elif sel == 'i':
return importer.action.MANUAL_ID
def manual_search(singleton):
"""Input either an artist and album (for full albums) or artist and
track name (for singletons) for manual search.
"""
artist = input_('Artist:')
name = input_('Track:' if singleton else 'Album:')
return artist.strip(), name.strip()
def manual_id(singleton):
"""Input an ID, either for an album ("release") or a track ("recording").
"""
prompt = u'Enter {0} ID:'.format('recording' if singleton else 'release')
return input_(prompt).strip()
class TerminalImportSession(importer.ImportSession):
"""An import session that runs in a terminal.
"""
def choose_match(self, task):
"""Given an initial autotagging of items, go through an interactive
dance with the user to ask for a choice of metadata. Returns an
AlbumMatch object, ASIS, or SKIP.
"""
# Show what we're tagging.
print_()
print_(displayable_path(task.paths, u'\n') +
u' ({0} items)'.format(len(task.items)))
# Take immediate action if appropriate.
action = _summary_judment(task.rec)
if action == importer.action.APPLY:
match = task.candidates[0]
show_change(task.cur_artist, task.cur_album, match)
return match
elif action is not None:
return action
# Loop until we have a choice.
candidates, rec = task.candidates, task.rec
while True:
# Ask for a choice from the user.
choice = choose_candidate(candidates, False, rec, task.cur_artist,
task.cur_album, itemcount=len(task.items))
# Choose which tags to use.
if choice in (importer.action.SKIP, importer.action.ASIS,
importer.action.TRACKS, importer.action.ALBUMS):
# Pass selection to main control flow.
return choice
elif choice is importer.action.MANUAL:
# Try again with manual search terms.
search_artist, search_album = manual_search(False)
_, _, candidates, rec = autotag.tag_album(
task.items, search_artist, search_album
)
elif choice is importer.action.MANUAL_ID:
# Try a manually-entered ID.
search_id = manual_id(False)
if search_id:
_, _, candidates, rec = autotag.tag_album(
task.items, search_id=search_id
)
else:
# We have a candidate! Finish tagging. Here, choice is an
# AlbumMatch object.
assert isinstance(choice, autotag.AlbumMatch)
return choice
def choose_item(self, task):
"""Ask the user for a choice about tagging a single item. Returns
either an action constant or a TrackMatch object.
"""
print_()
print_(task.item.path)
candidates, rec = task.candidates, task.rec
# Take immediate action if appropriate.
action = _summary_judment(task.rec)
if action == importer.action.APPLY:
match = candidates[0]
show_item_change(task.item, match)
return match
elif action is not None:
return action
while True:
# Ask for a choice.
choice = choose_candidate(candidates, True, rec, item=task.item)
if choice in (importer.action.SKIP, importer.action.ASIS):
return choice
elif choice == importer.action.TRACKS:
assert False # TRACKS is only legal for albums.
elif choice == importer.action.MANUAL:
# Continue in the loop with a new set of candidates.
search_artist, search_title = manual_search(True)
candidates, rec = autotag.tag_item(task.item, search_artist,
search_title)
elif choice == importer.action.MANUAL_ID:
# Ask for a track ID.
search_id = manual_id(True)
if search_id:
candidates, rec = autotag.tag_item(task.item,
search_id=search_id)
else:
# Chose a candidate.
assert isinstance(choice, autotag.TrackMatch)
return choice
def resolve_duplicate(self, task):
"""Decide what to do when a new album or item seems similar to one
that's already in the library.
"""
log.warn("This %s is already in the library!" %
("album" if task.is_album else "item"))
if config['import']['quiet']:
# In quiet mode, don't prompt -- just skip.
log.info('Skipping.')
sel = 's'
else:
sel = ui.input_options(
('Skip new', 'Keep both', 'Remove old')
)
if sel == 's':
# Skip new.
task.set_choice(importer.action.SKIP)
elif sel == 'k':
# Keep both. Do nothing; leave the choice intact.
pass
elif sel == 'r':
# Remove old.
task.remove_duplicates = True
else:
assert False
def should_resume(self, path):
return ui.input_yn(u"Import of the directory:\n{0}\n"
"was interrupted. Resume (Y/n)?"
.format(displayable_path(path)))
# The import command.
def import_files(lib, paths, query):
"""Import the files in the given list of paths or matching the
query.
"""
# Check the user-specified directories.
for path in paths:
fullpath = syspath(normpath(path))
if not config['import']['singletons'] and not os.path.isdir(fullpath):
raise ui.UserError(u'not a directory: {0}'.format(
displayable_path(path)))
elif config['import']['singletons'] and not os.path.exists(fullpath):
raise ui.UserError(u'no such file: {0}'.format(
displayable_path(path)))
# Check parameter consistency.
if config['import']['quiet'] and config['import']['timid']:
raise ui.UserError("can't be both quiet and timid")
# Open the log.
if config['import']['log'].get() is not None:
logpath = config['import']['log'].as_filename()
try:
logfile = codecs.open(syspath(logpath), 'a', 'utf8')
except IOError:
raise ui.UserError(u"could not open log file for writing: %s" %
displayable_path(logpath))
print(u'import started', time.asctime(), file=logfile)
else:
logfile = None
# Never ask for input in quiet mode.
if config['import']['resume'].get() == 'ask' and \
config['import']['quiet']:
config['import']['resume'] = False
session = TerminalImportSession(lib, logfile, paths, query)
try:
session.run()
finally:
# If we were logging, close the file.
if logfile:
print(u'', file=logfile)
logfile.close()
# Emit event.
plugins.send('import', lib=lib, paths=paths)
import_cmd = ui.Subcommand('import', help='import new music',
aliases=('imp', 'im'))
import_cmd.parser.add_option('-c', '--copy', action='store_true',
default=None, help="copy tracks into library directory (default)")
import_cmd.parser.add_option('-C', '--nocopy', action='store_false',
dest='copy', help="don't copy tracks (opposite of -c)")
import_cmd.parser.add_option('-w', '--write', action='store_true',
default=None, help="write new metadata to files' tags (default)")
import_cmd.parser.add_option('-W', '--nowrite', action='store_false',
dest='write', help="don't write metadata (opposite of -w)")
import_cmd.parser.add_option('-a', '--autotag', action='store_true',
dest='autotag', help="infer tags for imported files (default)")
import_cmd.parser.add_option('-A', '--noautotag', action='store_false',
dest='autotag',
help="don't infer tags for imported files (opposite of -a)")
import_cmd.parser.add_option('-p', '--resume', action='store_true',
default=None, help="resume importing if interrupted")
import_cmd.parser.add_option('-P', '--noresume', action='store_false',
dest='resume', help="do not try to resume importing")
import_cmd.parser.add_option('-q', '--quiet', action='store_true',
dest='quiet', help="never prompt for input: skip albums instead")
import_cmd.parser.add_option('-l', '--log', dest='log',
help='file to log untaggable albums for later review')
import_cmd.parser.add_option('-s', '--singletons', action='store_true',
help='import individual tracks instead of full albums')
import_cmd.parser.add_option('-t', '--timid', dest='timid',
action='store_true', help='always confirm all actions')
import_cmd.parser.add_option('-L', '--library', dest='library',
action='store_true', help='retag items matching a query')
import_cmd.parser.add_option('-i', '--incremental', dest='incremental',
action='store_true', help='skip already-imported directories')
import_cmd.parser.add_option('-I', '--noincremental', dest='incremental',
action='store_false', help='do not skip already-imported directories')
import_cmd.parser.add_option('--flat', dest='flat',
action='store_true', help='import an entire tree as a single album')
import_cmd.parser.add_option('-g', '--group-albums', dest='group_albums',
    action='store_true', help='group tracks in a folder into separate albums')
def import_func(lib, opts, args):
config['import'].set_args(opts)
# Special case: --copy flag suppresses import_move (which would
# otherwise take precedence).
if opts.copy:
config['import']['move'] = False
if opts.library:
query = decargs(args)
paths = []
else:
query = None
paths = args
if not paths:
raise ui.UserError('no path specified')
import_files(lib, paths, query)
import_cmd.func = import_func
default_commands.append(import_cmd)
# list: Query and show library contents.
def list_items(lib, query, album, fmt):
"""Print out items in lib matching query. If album, then search for
albums instead of single items.
"""
tmpl = Template(ui._pick_format(album, fmt))
if album:
for album in lib.albums(query):
ui.print_obj(album, lib, tmpl)
else:
for item in lib.items(query):
ui.print_obj(item, lib, tmpl)
list_cmd = ui.Subcommand('list', help='query the library', aliases=('ls',))
list_cmd.parser.add_option('-a', '--album', action='store_true',
help='show matching albums instead of tracks')
list_cmd.parser.add_option('-p', '--path', action='store_true',
help='print paths for matched items or albums')
list_cmd.parser.add_option('-f', '--format', action='store',
help='print with custom format', default=None)
def list_func(lib, opts, args):
if opts.path:
fmt = '$path'
else:
fmt = opts.format
list_items(lib, decargs(args), opts.album, fmt)
list_cmd.func = list_func
default_commands.append(list_cmd)
# update: Update library contents according to on-disk tags.
def update_items(lib, query, album, move, pretend):
"""For all the items matched by the query, update the library to
reflect the item's embedded tags.
"""
with lib.transaction():
items, _ = _do_query(lib, query, album)
# Walk through the items and pick up their changes.
affected_albums = set()
for item in items:
# Item deleted?
if not os.path.exists(syspath(item.path)):
ui.print_obj(item, lib)
ui.print_(ui.colorize('red', u' deleted'))
if not pretend:
item.remove(True)
affected_albums.add(item.album_id)
continue
# Did the item change since last checked?
if item.current_mtime() <= item.mtime:
log.debug(u'skipping %s because mtime is up to date (%i)' %
(displayable_path(item.path), item.mtime))
continue
# Read new data.
try:
item.read()
except Exception as exc:
log.error(u'error reading {0}: {1}'.format(
displayable_path(item.path), exc))
continue
# Special-case album artist when it matches track artist. (Hacky
# but necessary for preserving album-level metadata for non-
# autotagged imports.)
if not item.albumartist:
old_item = lib.get_item(item.id)
if old_item.albumartist == old_item.artist == item.artist:
item.albumartist = old_item.albumartist
item._dirty.discard('albumartist')
# Check for and display changes.
changed = ui.show_model_changes(item,
fields=library.ITEM_KEYS_META)
# Save changes.
if not pretend:
if changed:
# Move the item if it's in the library.
if move and lib.directory in ancestry(item.path):
item.move()
item.store()
affected_albums.add(item.album_id)
else:
# The file's mtime was different, but there were no
# changes to the metadata. Store the new mtime,
# which is set in the call to read(), so we don't
# check this again in the future.
item.store()
# Skip album changes while pretending.
if pretend:
return
# Modify affected albums to reflect changes in their items.
for album_id in affected_albums:
if album_id is None: # Singletons.
continue
album = lib.get_album(album_id)
if not album: # Empty albums have already been removed.
log.debug('emptied album %i' % album_id)
continue
first_item = album.items().get()
# Update album structure to reflect an item in it.
for key in library.ALBUM_KEYS_ITEM:
album[key] = first_item[key]
album.store()
# Move album art (and any inconsistent items).
if move and lib.directory in ancestry(first_item.path):
log.debug('moving album %i' % album_id)
album.move()
update_cmd = ui.Subcommand('update',
help='update the library', aliases=('upd','up',))
update_cmd.parser.add_option('-a', '--album', action='store_true',
help='match albums instead of tracks')
update_cmd.parser.add_option('-M', '--nomove', action='store_false',
default=True, dest='move', help="don't move files in library")
update_cmd.parser.add_option('-p', '--pretend', action='store_true',
help="show all changes but do nothing")
update_cmd.parser.add_option('-f', '--format', action='store',
help='print with custom format', default=None)
def update_func(lib, opts, args):
update_items(lib, decargs(args), opts.album, opts.move, opts.pretend)
update_cmd.func = update_func
default_commands.append(update_cmd)
# remove: Remove items from library, delete files.
def remove_items(lib, query, album, delete):
"""Remove items matching query from lib. If album, then match and
remove whole albums. If delete, also remove files from disk.
"""
# Get the matching items.
items, albums = _do_query(lib, query, album)
# Show all the items.
for item in items:
ui.print_obj(item, lib)
# Confirm with user.
print_()
if delete:
prompt = 'Really DELETE %i files (y/n)?' % len(items)
else:
prompt = 'Really remove %i items from the library (y/n)?' % \
len(items)
if not ui.input_yn(prompt, True):
return
# Remove (and possibly delete) items.
with lib.transaction():
for obj in (albums if album else items):
obj.remove(delete)
remove_cmd = ui.Subcommand('remove',
help='remove matching items from the library', aliases=('rm',))
remove_cmd.parser.add_option("-d", "--delete", action="store_true",
help="also remove files from disk")
remove_cmd.parser.add_option('-a', '--album', action='store_true',
help='match albums instead of tracks')
def remove_func(lib, opts, args):
remove_items(lib, decargs(args), opts.album, opts.delete)
remove_cmd.func = remove_func
default_commands.append(remove_cmd)
# stats: Show library/query statistics.
def show_stats(lib, query, exact):
"""Shows some statistics about the matched items."""
items = lib.items(query)
total_size = 0
total_time = 0.0
total_items = 0
artists = set()
albums = set()
for item in items:
if exact:
total_size += os.path.getsize(item.path)
else:
total_size += int(item.length * item.bitrate / 8)
total_time += item.length
total_items += 1
artists.add(item.artist)
albums.add(item.album)
size_str = '' + ui.human_bytes(total_size)
if exact:
size_str += ' ({0} bytes)'.format(total_size)
print_("""Tracks: {0}
Total time: {1} ({2:.2f} seconds)
Total size: {3}
Artists: {4}
Albums: {5}""".format(total_items, ui.human_seconds(total_time), total_time,
size_str, len(artists), len(albums)))
stats_cmd = ui.Subcommand('stats',
help='show statistics about the library or a query')
stats_cmd.parser.add_option('-e', '--exact', action='store_true',
help='get exact file sizes')
def stats_func(lib, opts, args):
show_stats(lib, decargs(args), opts.exact)
stats_cmd.func = stats_func
default_commands.append(stats_cmd)
# version: Show current beets version.
def show_version(lib, opts, args):
print_('beets version %s' % beets.__version__)
# Show plugins.
names = [p.name for p in plugins.find_plugins()]
if names:
print_('plugins:', ', '.join(names))
else:
print_('no plugins loaded')
version_cmd = ui.Subcommand('version',
help='output version information')
version_cmd.func = show_version
default_commands.append(version_cmd)
# modify: Declaratively change metadata.
def modify_items(lib, mods, dels, query, write, move, album, confirm):
"""Modifies matching items according to key=value assignments."""
# Parse key=value specifications into a dictionary.
model_cls = library.Album if album else library.Item
fsets = {}
for mod in mods:
key, value = mod.split('=', 1)
fsets[key] = model_cls._parse(key, value)
# Get the items to modify.
items, albums = _do_query(lib, query, album, False)
objs = albums if album else items
# Apply changes *temporarily*, preview them, and collect modified
# objects.
print_('Modifying %i %ss.' % (len(objs), 'album' if album else 'item'))
changed = set()
for obj in objs:
for field, value in fsets.iteritems():
obj[field] = value
for field in dels:
del obj[field]
if ui.show_model_changes(obj):
changed.add(obj)
# Still something to do?
if not changed:
print_('No changes to make.')
return
# Confirm action.
if confirm:
extra = ' and write tags' if write else ''
if not ui.input_yn('Really modify%s (Y/n)?' % extra):
return
# Apply changes to database.
with lib.transaction():
for obj in changed:
if move:
cur_path = obj.path
if lib.directory in ancestry(cur_path): # In library?
log.debug('moving object %s' % cur_path)
obj.move()
obj.store()
# Apply tags if requested.
if write:
if album:
changed_items = itertools.chain(*(a.items() for a in changed))
else:
changed_items = changed
for item in changed_items:
try:
item.write()
except library.FileOperationError as exc:
log.error(exc)
modify_cmd = ui.Subcommand('modify',
help='change metadata fields', aliases=('mod',))
modify_cmd.parser.add_option('-M', '--nomove', action='store_false',
default=True, dest='move', help="don't move files in library")
modify_cmd.parser.add_option('-w', '--write', action='store_true',
default=None, help="write new metadata to files' tags (default)")
modify_cmd.parser.add_option('-W', '--nowrite', action='store_false',
dest='write', help="don't write metadata (opposite of -w)")
modify_cmd.parser.add_option('-a', '--album', action='store_true',
help='modify whole albums instead of tracks')
modify_cmd.parser.add_option('-y', '--yes', action='store_true',
help='skip confirmation')
modify_cmd.parser.add_option('-f', '--format', action='store',
help='print with custom format', default=None)
def modify_func(lib, opts, args):
args = decargs(args)
mods = []
dels = []
query = []
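    # Argument classification (illustrative): 'artist=Beatles' is an assignment,
    # 'genre!' is a deletion, and anything else (e.g. an 'albumartist:Beatles'
    # query term) is treated as part of the query.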
for arg in args:
if arg.endswith('!') and '=' not in arg and ':' not in arg:
dels.append(arg[:-1])
elif '=' in arg:
mods.append(arg)
else:
query.append(arg)
if not mods and not dels:
raise ui.UserError('no modifications specified')
write = opts.write if opts.write is not None else \
config['import']['write'].get(bool)
modify_items(lib, mods, dels, query, write, opts.move, opts.album,
not opts.yes)
modify_cmd.func = modify_func
default_commands.append(modify_cmd)
# move: Move/copy files to the library or a new base directory.
def move_items(lib, dest, query, copy, album):
"""Moves or copies items to a new base directory, given by dest. If
dest is None, then the library's base directory is used, making the
command "consolidate" files.
"""
items, albums = _do_query(lib, query, album, False)
objs = albums if album else items
action = 'Copying' if copy else 'Moving'
entity = 'album' if album else 'item'
log.info('%s %i %ss.' % (action, len(objs), entity))
for obj in objs:
log.debug('moving: %s' % obj.path)
obj.move(copy, basedir=dest)
obj.store()
move_cmd = ui.Subcommand('move',
help='move or copy items', aliases=('mv',))
move_cmd.parser.add_option('-d', '--dest', metavar='DIR', dest='dest',
help='destination directory')
move_cmd.parser.add_option('-c', '--copy', default=False, action='store_true',
help='copy instead of moving')
move_cmd.parser.add_option('-a', '--album', default=False, action='store_true',
help='match whole albums instead of tracks')
def move_func(lib, opts, args):
dest = opts.dest
if dest is not None:
dest = normpath(dest)
if not os.path.isdir(dest):
raise ui.UserError('no such directory: %s' % dest)
move_items(lib, dest, decargs(args), opts.copy, opts.album)
move_cmd.func = move_func
default_commands.append(move_cmd)
# write: Write tags into files.
def write_items(lib, query, pretend):
"""Write tag information from the database to the respective files
in the filesystem.
"""
items, albums = _do_query(lib, query, False, False)
for item in items:
# Item deleted?
if not os.path.exists(syspath(item.path)):
log.info(u'missing file: {0}'.format(
util.displayable_path(item.path)
))
continue
# Get an Item object reflecting the "clean" (on-disk) state.
try:
clean_item = library.Item.from_path(item.path)
except Exception as exc:
log.error(u'error reading {0}: {1}'.format(
displayable_path(item.path), exc
))
continue
# Check for and display changes.
changed = ui.show_model_changes(item, clean_item,
library.ITEM_KEYS_WRITABLE, always=True)
if changed and not pretend:
try:
item.write()
except library.FileOperationError as exc:
log.error(exc)
write_cmd = ui.Subcommand('write', help='write tag information to files')
write_cmd.parser.add_option('-p', '--pretend', action='store_true',
help="show all changes but do nothing")
def write_func(lib, opts, args):
write_items(lib, decargs(args), opts.pretend)
write_cmd.func = write_func
default_commands.append(write_cmd)
# config: Show and edit user configuration.
config_cmd = ui.Subcommand('config',
help='show or edit the user configuration')
config_cmd.parser.add_option('-p', '--paths', action='store_true',
help='show files that configuration was loaded from')
config_cmd.parser.add_option('-e', '--edit', action='store_true',
help='edit user configuration with $EDITOR')
config_cmd.parser.add_option('-d', '--defaults', action='store_true',
help='include the default configuration')
def config_func(lib, opts, args):
# Make sure lazy configuration is loaded
config.resolve()
# Print paths.
if opts.paths:
filenames = []
for source in config.sources:
if not opts.defaults and source.default:
continue
if source.filename:
filenames.append(source.filename)
# In case the user config file does not exist, prepend it to the
# list.
user_path = config.user_config_path()
if user_path not in filenames:
filenames.insert(0, user_path)
for filename in filenames:
print(filename)
# Open in editor.
elif opts.edit:
path = config.user_config_path()
if 'EDITOR' in os.environ:
editor = os.environ['EDITOR']
args = [editor, editor, path]
elif platform.system() == 'Darwin':
args = ['open', 'open', '-n', path]
elif platform.system() == 'Windows':
# On windows we can execute arbitrary files. The os will
# take care of starting an appropriate application
args = [path, path]
else:
# Assume Unix
args = ['xdg-open', 'xdg-open', path]
try:
os.execlp(*args)
except OSError:
raise ui.UserError("Could not edit configuration. Please"
"set the EDITOR environment variable.")
# Dump configuration.
else:
print(config.dump(full=opts.defaults))
config_cmd.func = config_func
default_commands.append(config_cmd)
# completion: print completion script
completion_cmd = ui.Subcommand('completion',
help='print shell script that provides command line completion')
def print_completion(*args):
for line in completion_script(default_commands + plugins.commands()):
print(line, end='')
if not (os.path.isfile(u'/etc/bash_completion') or
os.path.isfile(u'/usr/share/bash-completion/bash_completion') or
os.path.isfile(u'/usr/share/local/bash-completion/bash_completion')):
log.warn(u'Warning: Unable to find the bash-completion package. '
u'Command line completion might not work.')
def completion_script(commands):
"""Yield the full completion shell script as strings.
    ``commands`` is a list of ``ui.Subcommand`` instances to generate
completion data for.
"""
base_script = os.path.join(_package_path('beets.ui'), 'completion_base.sh')
with open(base_script, 'r') as base_script:
yield base_script.read()
options = {}
aliases = {}
command_names = []
# Collect subcommands
for cmd in commands:
name = cmd.name
command_names.append(name)
for alias in cmd.aliases:
aliases[alias] = name
options[name] = {'flags': [], 'opts': []}
for opts in cmd.parser._get_all_options()[1:]:
if opts.action in ('store_true', 'store_false'):
option_type = 'flags'
else:
option_type = 'opts'
options[name][option_type].extend(
opts._short_opts + opts._long_opts
)
# Add global options
options['_global'] = {
'flags': ['-v', '--verbose'],
'opts': '-l --library -c --config -d --directory -h --help'.split(' ')
}
# Help subcommand
command_names.append('help')
# Add flags common to all commands
options['_common'] = {
'flags': ['-h', '--help']
}
# Start generating the script
yield "_beet() {\n"
# Command names
yield " local commands='%s'\n" % ' '.join(command_names)
yield "\n"
# Command aliases
yield " local aliases='%s'\n" % ' '.join(aliases.keys())
for alias, cmd in aliases.items():
yield " local alias__%s=%s\n" % (alias, cmd)
yield '\n'
# Fields
yield " fields='%s'\n" % ' '.join(
set(library.ITEM_KEYS + library.ALBUM_KEYS))
# Command options
for cmd, opts in options.items():
for option_type, option_list in opts.items():
if option_list:
option_list = ' '.join(option_list)
yield " local %s__%s='%s'\n" % (option_type, cmd, option_list)
yield ' _beet_dispatch\n'
yield '}\n'
completion_cmd.func = print_completion
completion_cmd.hide = True
default_commands.append(completion_cmd)
| gpl-3.0 | -5,471,255,060,609,797,000 | -7,511,111,609,609,885,000 | 34.925088 | 80 | 0.576189 | false |
arunhotra/tensorflow | tensorflow/python/ops/sparse_ops_test.py | 5 | 7592 | """Tests for Python ops defined in sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import types
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
class SparseToIndicatorTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self, dtype):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, types.int64))
def _SparseTensor_2x3x4(self, dtype):
ind = np.array([
[0, 0, 1],
[0, 1, 0], [0, 1, 2],
[1, 0, 3],
[1, 1, 1], [1, 1, 3],
[1, 2, 2]])
val = np.array([1, 10, 12, 103, 111, 113, 122])
shape = np.array([2, 3, 4])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, types.int64))
def testInt32(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6(types.int32)
output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = ((0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33))
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testInt64(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6(types.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testHigherRank(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_2x3x4(types.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 200).eval()
expected_output = np.zeros((2, 3, 200), dtype=np.bool)
expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12),
(1, 0, 103), (1, 1, 111), (1, 1, 113), (1, 2, 122)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
class SparseRetainTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, types.int32),
constant_op.constant(shape, types.int64))
def testBasic(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
to_retain = np.array([1, 0, 0, 1, 1, 0], dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, [[0, 0], [1, 4], [3, 2]])
self.assertAllEqual(output.values, [0, 14, 32])
self.assertAllEqual(output.shape, [5, 6])
def testRetainNone(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
to_retain = np.zeros((6,), dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, np.array([]).reshape((0, 2)))
self.assertAllEqual(output.values, [])
self.assertAllEqual(output.shape, [5, 6])
def testMismatchedRetainShape(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6()
to_retain = np.array([1, 0, 0, 1, 0], dtype=np.bool)
with self.assertRaises(ValueError):
sparse_ops.sparse_retain(sp_input, to_retain)
class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, types.int32),
constant_op.constant(shape, types.int64))
def _SparseTensor_String5x6(self):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array(["a", "b", "c", "d", "e", "f"])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, types.string),
constant_op.constant(shape, types.int64))
def _SparseTensor_2x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
val = np.array([0, 10, 13, 14])
shape = np.array([2, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, types.int32),
constant_op.constant(shape, types.int64))
def testFillNumber(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
self.assertAllEqual(output.shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testFillString(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_String5x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, ""))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values, ["a", "b", "c", "d", "", "e", "f", ""])
self.assertAllEqual(output.shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testNoEmptyRows(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
self.assertAllEqual(output.values, [0, 10, 13, 14])
self.assertAllEqual(output.shape, [2, 6])
self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(np.bool))
if __name__ == "__main__":
googletest.main()
| apache-2.0 | -9,150,306,850,168,802,000 | -597,715,569,460,079,700 | 34.148148 | 80 | 0.597998 | false |
boppreh/keyboard | setup.py | 1 | 1333 | """
Usage instructions:
- If you are installing: `python setup.py install`
- If you are developing: `python setup.py sdist --format=zip bdist_wheel --universal bdist_wininst && twine check dist/*`
"""
import keyboard
from setuptools import setup
setup(
name='keyboard',
version=keyboard.version,
author='BoppreH',
author_email='[email protected]',
packages=['keyboard'],
url='https://github.com/boppreh/keyboard',
license='MIT',
description='Hook and simulate keyboard events on Windows and Linux',
keywords = 'keyboard hook simulate hotkey',
# Wheel creation breaks with Windows newlines.
# https://github.com/pypa/setuptools/issues/1126
long_description=keyboard.__doc__.replace('\r\n', '\n'),
long_description_content_type='text/markdown',
install_requires=["pyobjc; sys_platform=='darwin'"], # OSX-specific dependency
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
| mit | 8,663,199,252,955,981,000 | -4,824,785,461,456,697,000 | 34.078947 | 121 | 0.651913 | false |
2Minutes/davos-dev | davos/core/utils.py | 1 | 7692 |
import re
import sys
import os
import os.path as osp
from fnmatch import fnmatch
from pytd.gui.dialogs import promptDialog
from pytd.util.logutils import logMsg
from pytd.util.sysutils import importModule, toStr, inDevMode, getCaller
from pytd.util.fsutils import pathSplitDirs, pathResolve, pathNorm, pathJoin
from pytd.util.fsutils import jsonRead, jsonWrite, isDirStat, parseDirContent
from pytd.util.strutils import padded
_VERS_SPLIT_REXP = re.compile(r'-(v[0-9]+)')
def getConfigModule(sProjectName):
try:
sConfPkg = os.environ.get("DAVOS_CONF_PACKAGE", "davos.config")
sConfigModule = sConfPkg + '.' + sProjectName
modobj = importModule(sConfigModule)
except ImportError:
raise ImportError("No config module named '{}'".format(sConfigModule))
return modobj
def versionFromName(sFileName):
vers = _VERS_SPLIT_REXP.findall(sFileName)
return int(vers[-1].strip('v')) if vers else None
def mkVersionSuffix(v):
if not isinstance(v, int):
raise TypeError("argument must be of type <int>. Got {}.".format(type(v)))
return "".join(('-v', padded(v)))
def findVersionFields(s):
return _VERS_SPLIT_REXP.findall(s)
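# Illustrative note (not part of the original module): the helpers above key
# off the "-vNNN" version-suffix convention, e.g.
#
#     versionFromName("chair_model-v012.ma")   # -> 12
#     versionFromName("chair_model.ma")        # -> None
#     mkVersionSuffix(12)                      # -> "-v" + padded(12); zero-padding width is set by padded()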
def promptForComment(**kwargs):
sComment = ""
bOk = False
result = promptDialog(title='Please...',
message='Leave a comment: ',
button=['OK', 'Cancel'],
defaultButton='OK',
cancelButton='Cancel',
dismissString='Cancel',
scrollableField=True,
**kwargs)
if result == 'Cancel':
logMsg("Cancelled !" , warning=True)
elif result == 'OK':
sComment = promptDialog(query=True, text=True)
bOk = True
return sComment, bOk
def projectNameFromPath(p):
sConfPkg = os.environ.get("DAVOS_CONF_PACKAGE", "davos.config")
pkg = importModule(sConfPkg)
sPkgDirPath = os.path.dirname(pkg.__file__)
sDirList = pathSplitDirs(p)
for sFilename in os.listdir(sPkgDirPath):
bIgnored = False
for sPatrn in ("__*", ".*", "*.pyc"):
if fnmatch(sFilename, sPatrn):
bIgnored = True
break
if bIgnored:
continue
sModName = os.path.splitext(sFilename)[0]
m = importModule(sConfPkg + '.' + sModName)
sProjDir = m.project.dir_name
if sProjDir in sDirList:
return sModName
return ""
def splitStep(sTaskName):
return sTaskName.rsplit("|", 1) if ("|" in sTaskName) else ("", sTaskName)
def damasServerPort():
return os.getenv("DAMAS_DEV_PORT", "8443") if inDevMode() else "8443"
def loadPrefs():
global DAVOS_PREFS
try:
p = pathResolve(r"%USERPROFILE%\davos_prefs.json")
DAVOS_PREFS = jsonRead(p)
except EnvironmentError:
DAVOS_PREFS = {}
return DAVOS_PREFS
def savePrefs():
global DAVOS_PREFS
if DAVOS_PREFS:
p = pathResolve(r"%USERPROFILE%\davos_prefs.json")
jsonWrite(p, DAVOS_PREFS)
def setPref(in_sKey, value):
global DAVOS_PREFS
if "|" not in in_sKey:
DAVOS_PREFS[in_sKey] = value
return
sKeyList = in_sKey.split("|")
iLastKey = len(sKeyList) - 1
currPrefs = DAVOS_PREFS
sPrevKey = ""
prevPrefs = None
for i, sKey in enumerate(sKeyList):
if not isinstance(currPrefs, dict):
prevPrefs[sPrevKey] = {}
currPrefs = prevPrefs[sPrevKey]
if i == iLastKey:
currPrefs[sKey] = value
return
if sKey not in currPrefs:
currPrefs[sKey] = {}
prevPrefs = currPrefs
sPrevKey = sKey
currPrefs = currPrefs[sKey]
def getPref(in_sKey, default=None):
global DAVOS_PREFS
if "|" not in in_sKey:
return DAVOS_PREFS.get(in_sKey, default)
sKeyList = in_sKey.split("|")
iLastKey = len(sKeyList) - 1
currPrefs = DAVOS_PREFS
for i, sKey in enumerate(sKeyList):
if not isinstance(currPrefs, dict):
k = "|".join(sKeyList[:(i + 1)])
logMsg("Not a pref dictionary: '{}'.".format(k), warning=True)
return default
if i == iLastKey:
return currPrefs.get(sKey, default)
if sKey in currPrefs:
currPrefs = currPrefs[sKey]
else:
logMsg("No such pref: '{}'.".format(in_sKey), warning=True)
return default
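# Usage sketch (assumed calling code, not in the original file): nested
# preference keys are addressed with "|" separators, and intermediate dicts
# are created on demand, e.g.
#
#     loadPrefs()
#     setPref("browser|lastProject|myProject", "/path/to/project")
#     getPref("browser|lastProject|myProject")    # -> "/path/to/project"
#     getPref("browser|unknownKey", default="")   # -> ""
#     savePrefs()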
_ICON_DIR_PATH = ""
def mkIconPath(sRelPath):
global _ICON_DIR_PATH
if (not _ICON_DIR_PATH) or (not osp.exists(_ICON_DIR_PATH)):
p = sys.modules["davos"].__file__
p = osp.abspath(osp.join(osp.dirname(p), "..", "resources", "icon"))
_ICON_DIR_PATH = p
return pathJoin(_ICON_DIR_PATH, sRelPath)
def writePackContent(sPackDirPath, dirStat=None):
sPackDirPath = pathNorm(sPackDirPath)
if not dirStat:
dirStat = os.stat(sPackDirPath)
sJsonPath = mkPackFilePath(sPackDirPath)
iMtime = 0
if not osp.exists(sJsonPath):
iMtime = dirStat.st_mtime
iAtime = dirStat.st_atime
try:
open(sJsonPath, 'a+b').close() # create json file so it is listed by parseDirContent()
dirContent = parseDirContent(sPackDirPath)
jsonWrite(sJsonPath, dirContent, sort_keys=True)
finally:
if iMtime:
os.utime(sPackDirPath, (iAtime, iMtime))
return dirContent
def readPackContent(sPackDirPath, fail=True):
try:
dirContent = jsonRead(mkPackFilePath(sPackDirPath))
except EnvironmentError as e:
if fail:
raise
logMsg(toStr(e), warning=True)
dirContent = parseDirContent(sPackDirPath)
return dirContent
def mkPackFilePath(sPackDirPath):
return pathJoin(sPackDirPath, "_package.json")
_ISPACK_REXP = re.compile(r".+_pkg[^/\w].+", re.I)
def assertPack(p, dirStat=None):
if not dirStat:
dirStat = os.stat(pathNorm(p))
if isPack(p, fail=True, dirStat=dirStat):
return dirStat
return None
def belowPack(p):
p = pathNorm(p)
if os.environ["IN_SEB_MODE"]:
return True if _belowPack(p) else _belowOldPack(p)
else:
return _belowPack(p)
def isPack(p, fail=False, dirStat=None):
p = pathNorm(p)
if os.environ["IN_SEB_MODE"]:
bPackPath = True if _isPack(p) else _isOldPack(p)
else:
bPackPath = _isPack(p)
if not bPackPath:
if fail:
sMsg = ("Directory NOT a package (should start with 'pkg_' or 'lyr_'): '{}'."
.format(osp.basename(p)))
raise EnvironmentError(sMsg)
else:
return False
if dirStat and not isDirStat(dirStat):
if fail:
raise EnvironmentError("Package path NOT a directory: '{}'".format(p))
else:
return False
return True
def _belowPack(p):
p = osp.dirname(p)
for sDirName in pathSplitDirs(p):
if _isPack(sDirName):
return True
return False
def _isPack(p):
sBaseName = osp.basename(p) if "/" in p else p
if "_" not in sBaseName:
return False
sPrefix = sBaseName.split("_", 1)[0]
if not sPrefix:
return False
return (sPrefix.lower() + "_") in ("pkg_", "lyr_")
def _belowOldPack(p):
p = osp.dirname(p)
if "_pkg/" in p.lower():
return True
if _ISPACK_REXP.match(p):
return True
return False
def _isOldPack(p):
sName = osp.basename(p)
if sName.lower().endswith("_pkg"):
return True
if _ISPACK_REXP.match(sName):
return True
return False
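# Examples of the package-path convention enforced above (illustrative only;
# isPack()/belowPack() also read the IN_SEB_MODE environment variable, which
# is assumed to be set here):
#
#     isPack("/proj/assets/pkg_textures")                 # True  ("pkg_" prefix)
#     isPack("/proj/assets/lyr_background")               # True  ("lyr_" prefix)
#     isPack("/proj/assets/textures")                     # False
#     belowPack("/proj/assets/pkg_textures/diffuse.tif")  # True  (a parent dir is a package)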
| gpl-3.0 | -2,793,248,667,859,270,000 | -8,782,846,375,319,866,000 | 23.341772 | 94 | 0.597634 | false |
openstack/poppy | poppy/metrics/blueflood/services.py | 2 | 4570 | # Copyright (c) 2016 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_context import context as context_utils
from oslo_log import log
from poppy.metrics import base
from poppy.metrics.blueflood.utils import client
from poppy.metrics.blueflood.utils import errors
from poppy.metrics.blueflood.utils import helper
LOG = log.getLogger(__name__)
class ServicesController(base.ServicesController):
def __init__(self, driver):
super(ServicesController, self).__init__(driver)
self.driver = driver
def _result_formatter(self, response):
resp_dict = []
if not response.ok:
LOG.warning("BlueFlood Metrics Response status Code:{0} "
"Response Text: {1} "
"Request URL: {2}".format(response.status_code,
response.text,
response.url))
return resp_dict
else:
serialized_response = response.json()
try:
values = serialized_response['values']
for val in values:
m = {}
m['timestamp'] = helper.datetime_from_epoch(
int(val['timestamp']))
m['count'] = val['sum']
resp_dict.append(m)
except KeyError:
msg = 'content from {0} not conforming ' \
'to API contracts'.format(response.url)
LOG.warning(msg)
raise errors.BlueFloodApiSchemaError(msg)
# sort the resp_dict by timestamp ascending
resp_dict = sorted(resp_dict, key=lambda x: x['timestamp'])
return resp_dict
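    # For reference, _result_formatter() above assumes a Blueflood rollup
    # payload of the following shape (illustrative sample, not taken from
    # this repo):
    #
    #     {"values": [{"timestamp": 1458412800, "sum": 42, ...}, ...]}
    #
    # and returns [{'timestamp': <datetime>, 'count': 42}, ...] sorted by timestamp.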
def read(self, metric_names, from_timestamp, to_timestamp, resolution):
"""read metrics from metrics driver.
"""
curr_resolution = \
helper.resolution_converter_seconds_to_enum(resolution)
context_dict = context_utils.get_current().to_dict()
project_id = context_dict['tenant']
auth_token = None
if self.driver.metrics_conf.use_keystone_auth:
auth_token = context_dict['auth_token']
tenanted_blueflood_url = \
self.driver.metrics_conf.blueflood_url.format(
project_id=project_id
)
from_timestamp = int(helper.datetime_to_epoch(from_timestamp))
to_timestamp = int(helper.datetime_to_epoch(to_timestamp))
urls = []
params = {
'to': to_timestamp,
'from': from_timestamp,
'resolution': curr_resolution
}
for metric_name in metric_names:
tenanted_blueflood_url_with_metric = helper.join_url(
tenanted_blueflood_url, metric_name.strip().replace(" ", ""))
LOG.info("Querying BlueFlood Metric: {0}".format(
tenanted_blueflood_url_with_metric))
urls.append(helper.set_qs_on_url(
tenanted_blueflood_url_with_metric,
**params))
executors = self.driver.metrics_conf.no_of_executors
blueflood_client = client.BlueFloodMetricsClient(token=auth_token,
project_id=project_id,
executors=executors)
results = blueflood_client.async_requests(urls)
reordered_metric_names = []
for result in results:
metric_name = helper.retrieve_last_relative_url(result.url)
reordered_metric_names.append(metric_name)
formatted_results = []
for metric_name, result in zip(reordered_metric_names, results):
formatted_result = self._result_formatter(result)
# NOTE(TheSriram): Tuple to pass the associated metric name, along
# with the formatted result
formatted_results.append((metric_name, formatted_result))
return formatted_results
| apache-2.0 | -2,023,375,627,693,189 | -6,840,494,412,789,371,000 | 37.728814 | 79 | 0.581838 | false |
solarsail/aerosol-tools | clustatlib/clucsv.py | 1 | 3752 | import numpy as np
import os
import os.path
class csvbuilder:
def __init__(self, cs):
self.cs = cs
if not os.path.isdir('csv'):
os.mkdir('csv')
def month_type_csv(self, site = None):
label = 'all' if site == None else site
values, percentages = self.cs.month_type_stat(site)
header = ",".join(["type{},%".format(t) for t in range(1, len(values)+1)])
header = "month," + header
all = []
for i in range(len(values)):
all.append(values[i])
all.append(percentages[i])
mat = np.matrix(all)
mat = mat.transpose().tolist()
content = []
for i in range(12):
content.append("%d,%s" % (i+1, ','.join([str(field) for field in mat[i]])))
content = '\n'.join(content)
with open("csv/month_type_%s.csv" % label, 'w') as outfile:
outfile.write('\n'.join((header, content)))
def year_type_csv(self, start_year, end_year, site = None):
label = 'all' if site == None else site
values, percentages = self.cs.year_type_stat(start_year, end_year, site)
header = ",".join(["type{},%".format(t) for t in range(1, len(values)+1)])
header = "year," + header
all = []
for i in range(len(values)):
all.append(values[i])
all.append(percentages[i])
mat = np.matrix(all)
mat = mat.transpose().tolist()
content = []
for i in range(start_year, end_year+1):
content.append("%d,%s" % (i, ','.join([str(field) for field in mat[i-start_year]])))
content = '\n'.join(content)
with open("csv/year_type_%s.csv" % label, 'w') as outfile:
outfile.write('\n'.join((header, content)))
def type_csv(self):
header = "type,count,percentage%"
all = self.cs.type_stat()
content = '\n'.join([','.join([str(field) for field in row]) for row in all])
with open("csv/type_count.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
def site_type_csv(self):
all, types = self.cs.site_type_stat()
header = ",".join(["type{},%".format(t) for t in range(1, types+1)])
header = "site," + header
content = '\n'.join([','.join([str(field) for field in row]) for row in all])
with open("csv/site_type_count.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
def type_stat_csv(self):
header = "type,refr440,refr675,refr870,refr1020,refi440,refi675,refi870,refi1020,volmedianradf,stddevf,volconf,volmedianradc,stddevc,volconc,ssa675,ssa870,ssa1020,asy440,asy675,asy870,sphericity"
list1 = self.cs.type_means()
list2 = self.cs.type_stddev()
l = []
for i in range(len(list1)):
l.append(list1[i])
stddevline = list(list2[i])
stddevline[0] = "stddev"
l.append(stddevline)
content = '\n'.join([','.join([str(field) for field in row]) for row in l])
with open("csv/type_stat.csv", 'w') as outfile:
outfile.write('\n'.join((header, content)))
def distances_csv(self):
clus, dist_mat = self.cs.all_distances()
header = "," + ",".join([str(cid) for cid in clus])
lines = []
first = 1
cur = 0
for clu in clus:
lines.append(str(clu) + ',' * first + ','.join(str(d) for d in dist_mat[cur:cur+len(clus)-first+1]))
cur += len(clus) - first + 1
first += 1
content = '\n'.join(lines)
with open("csv/distance_stat.csv", 'w') as outfile:
outfile.write('\n'.join((header, content))) | gpl-3.0 | 7,627,666,650,547,217,000 | -1,950,193,853,722,707,700 | 41.647727 | 203 | 0.539179 | false |
Benster900/mhn | server/mhn/api/views.py | 9 | 12576 | import json
from StringIO import StringIO
import csv
from uuid import uuid1
from sqlalchemy import func
from sqlalchemy.exc import IntegrityError
from flask import Blueprint, request, jsonify, make_response
from bson.errors import InvalidId
from mhn import db, csrf
from mhn.api import errors
from mhn.api.models import (
Sensor, Rule, DeployScript as Script,
DeployScript, RuleSource)
from mhn.api.decorators import deploy_auth, sensor_auth, token_auth
from mhn.common.utils import error_response
from mhn.common.clio import Clio
from mhn.auth import current_user, login_required
api = Blueprint('api', __name__, url_prefix='/api')
# Endpoints for the Sensor resource.
@api.route('/sensor/', methods=['POST'])
@csrf.exempt
@deploy_auth
def create_sensor():
missing = Sensor.check_required(request.json)
if missing:
return error_response(
errors.API_FIELDS_MISSING.format(missing), 400)
else:
sensor = Sensor(**request.json)
sensor.uuid = str(uuid1())
sensor.ip = request.remote_addr
Clio().authkey.new(**sensor.new_auth_dict()).post()
try:
db.session.add(sensor)
db.session.commit()
except IntegrityError:
return error_response(
errors.API_SENSOR_EXISTS.format(request.json['name']), 400)
else:
return jsonify(sensor.to_dict())
@api.route('/sensor/', methods=['GET'])
@token_auth
def get_sensors():
req = request.args.to_dict()
if 'api_key' in req:
del req['api_key']
resp = make_response(json.dumps([s.to_dict() for s in Sensor.query.filter_by(**req)]))
resp.headers['Content-Type'] = "application/json"
return resp
@api.route('/sensor/<uuid>/', methods=['PUT'])
@csrf.exempt
def update_sensor(uuid):
sensor = Sensor.query.filter_by(uuid=uuid).first_or_404()
for field in request.json.keys():
if field in Sensor.editable_fields():
setattr(sensor, field, request.json[field])
elif field in Sensor.fields():
return error_response(
errors.API_FIELD_NOT_EDITABLE.format(field), 400)
else:
return error_response(
errors.API_FIELD_INVALID.format(field), 400)
else:
try:
db.session.commit()
except IntegrityError:
return error_response(
errors.API_SENSOR_EXISTS.format(request.json['name']), 400)
return jsonify(sensor.to_dict())
@api.route('/sensor/<uuid>/', methods=['DELETE'])
@login_required
def delete_sensor(uuid):
sensor = Sensor.query.filter_by(uuid=uuid).first_or_404()
Clio().authkey.delete(identifier=uuid)
db.session.delete(sensor)
db.session.commit()
return jsonify({})
@api.route('/sensor/<uuid>/connect/', methods=['POST'])
@csrf.exempt
@sensor_auth
def connect_sensor(uuid):
sensor = Sensor.query.filter_by(uuid=uuid).first_or_404()
sensor.ip = request.remote_addr
db.session.commit()
return jsonify(sensor.to_dict())
# Utility functions that generalize the GET
# requests of resources from Mnemosyne.
def _get_one_resource(resource, res_id):
try:
res = resource.get(_id=res_id)
except InvalidId:
res = None
if not res:
return error_response(errors.API_RESOURCE_NOT_FOUND, 404)
else:
return jsonify(res.to_dict())
def _get_query_resource(resource, query):
options = {}
if 'limit' in query:
options['limit'] = int(query['limit'])
results = list(resource.get(options, **query))
return jsonify(
data=[r.to_dict() for r in results],
meta={
'size': len(results),
'query': query,
'options': options
}
)
# Now let's make use these methods in the views.
@api.route('/feed/<feed_id>/', methods=['GET'])
@token_auth
def get_feed(feed_id):
return _get_one_resource(Clio().hpfeed, feed_id)
@api.route('/session/<session_id>/', methods=['GET'])
@token_auth
def get_session(session_id):
return _get_one_resource(Clio().session, session_id)
@api.route('/url/<url_id>/', methods=['GET'])
@token_auth
def get_url(url_id):
return _get_one_resource(Clio().url, url_id)
@api.route('/file/<file_id>/', methods=['GET'])
@token_auth
def get_file(file_id):
return _get_one_resource(Clio().file, file_id)
@api.route('/dork/<dork_id>/', methods=['GET'])
@token_auth
def get_dork(dork_id):
return _get_one_resource(Clio().dork, dork_id)
@api.route('/metadata/<metadata_id>/', methods=['GET'])
@token_auth
def get_metadatum(metadata_id):
return _get_one_resource(Clio().metadata, metadata_id)
@api.route('/feed/', methods=['GET'])
@token_auth
def get_feeds():
return _get_query_resource(Clio().hpfeed, request.args.to_dict())
@api.route('/session/', methods=['GET'])
@token_auth
def get_sessions():
return _get_query_resource(Clio().session, request.args.to_dict())
@api.route('/url/', methods=['GET'])
@token_auth
def get_urls():
return _get_query_resource(Clio().url, request.args.to_dict())
@api.route('/file/', methods=['GET'])
@token_auth
def get_files():
return _get_query_resource(Clio().file, request.args.to_dict())
@api.route('/dork/', methods=['GET'])
@token_auth
def get_dorks():
return _get_query_resource(Clio().dork, request.args.to_dict())
@api.route('/metadata/', methods=['GET'])
@token_auth
def get_metadata():
return _get_query_resource(Clio().metadata, request.args.to_dict())
@api.route('/top_attackers/', methods=['GET'])
@token_auth
def top_attackers():
options = request.args.to_dict()
limit = int(options.get('limit', '1000'))
hours_ago = int(options.get('hours_ago', '4'))
extra = dict(options)
for name in ('hours_ago', 'limit', 'api_key',):
if name in extra:
del extra[name]
for name in options.keys():
if name not in ('hours_ago', 'limit',):
del options[name]
results = Clio().session._tops(['source_ip', 'honeypot'], top=limit, hours_ago=hours_ago, **extra)
return jsonify(
data=results,
meta={
'size': len(results),
'query': 'top_attackers',
'options': options
}
)
@api.route('/attacker_stats/<ip>/', methods=['GET'])
@token_auth
def attacker_stats(ip):
options = request.args.to_dict()
hours_ago = int(options.get('hours_ago', '720')) # 30 days
for name in options.keys():
if name not in ('hours_ago', 'limit',):
del options[name]
results = Clio().session.attacker_stats(ip, hours_ago=hours_ago)
return jsonify(
data=results,
meta={
'query': 'attacker_stats',
'options': options
}
)
def get_tags(rec):
tags = [rec['honeypot'], rec['protocol'], 'port-{}'.format(rec['destination_port']),]
meta = rec['meta']
if len(meta) > 0:
meta = meta[0]
else:
meta = {}
for meta_key in ['app', 'os', 'link',]:
value = meta.get(meta_key)
if value:
tags.append(value.replace(',', '').replace('\t', ' '))
return tags
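# Illustrative example of get_tags() with a hypothetical session record:
#
#     rec = {'honeypot': 'dionaea', 'protocol': 'smbd', 'destination_port': 445,
#            'meta': [{'app': None, 'os': 'Windows 7 or 8', 'link': 'ethernet/modem'}]}
#     get_tags(rec)  # -> ['dionaea', 'smbd', 'port-445', 'Windows 7 or 8', 'ethernet/modem']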
@api.route('/intel_feed.csv/', methods=['GET'])
@token_auth
def intel_feed_csv():
fieldnames = ['source_ip', 'count', 'tags', ]
results = get_intel_feed()
outf = StringIO()
wr = csv.DictWriter(outf, fieldnames=fieldnames, delimiter='\t', lineterminator='\n')
wr.writeheader()
for rec in results['data']:
wr.writerow({
'count': rec['count'],
'source_ip': rec['source_ip'],
'tags': ','.join(get_tags(rec)),
})
response_data = outf.getvalue()
outf.close()
response = make_response(response_data)
response.headers['Content-type'] = 'text/plain'
return response
@api.route('/intel_feed/', methods=['GET'])
@token_auth
def intel_feed():
results = get_intel_feed()
return jsonify(**results)
def get_intel_feed():
options = request.args.to_dict()
limit = int(options.get('limit', '1000'))
hours_ago = int(options.get('hours_ago', '4'))
extra = dict(options)
for name in ('hours_ago', 'limit', 'api_key',):
if name in extra:
del extra[name]
for name in options.keys():
if name not in ('hours_ago', 'limit',):
del options[name]
extra['ne__protocol'] = 'pcap'
results = Clio().session._tops(['source_ip', 'honeypot', 'protocol', 'destination_port'], top=limit, hours_ago=hours_ago, **extra)
results = [r for r in results if r['protocol'] != 'ftpdatalisten']
cache = {}
for r in results:
source_ip = r['source_ip']
if source_ip not in cache:
# TODO: may want to make one big query to mongo here...
cache[source_ip] = [m.to_dict() for m in Clio().metadata.get(ip=r['source_ip'], honeypot='p0f')]
r['meta'] = cache[source_ip]
return {
'data':results,
'meta':{
'size': len(results),
'query': 'intel_feed',
'options': options
}
}
@api.route('/rule/<rule_id>/', methods=['PUT'])
@token_auth
def update_rule(rule_id):
rule = Rule.query.filter_by(id=rule_id).first_or_404()
for field in request.json.keys():
if field in Rule.editable_fields():
setattr(rule, field, request.json[field])
elif field in Rule.fields():
return error_response(
errors.API_FIELD_NOT_EDITABLE.format(field), 400)
else:
return error_response(
errors.API_FIELD_INVALID.format(field), 400)
else:
db.session.commit()
return jsonify(rule.to_dict())
@api.route('/rule/', methods=['GET'])
@sensor_auth
def get_rules():
# Getting active rules.
if request.args.get('plaintext') in ['1', 'true']:
# Requested rendered rules in plaintext.
resp = make_response(Rule.renderall())
resp.headers['Content-Disposition'] = "attachment; filename=mhn.rules"
return resp
else:
# Responding with active rules.
rules = Rule.query.filter_by(is_active=True).\
group_by(Rule.sid).\
having(func.max(Rule.rev))
resp = make_response(json.dumps([ru.to_dict() for ru in rules]))
resp.headers['Content-Type'] = "application/json"
return resp
@api.route('/rulesources/', methods=['POST'])
@login_required
def create_rule_source():
missing = RuleSource.check_required(request.json)
if missing:
return error_response(
errors.API_FIELDS_MISSING.format(missing), 400)
else:
rsource = RuleSource(**request.json)
try:
db.session.add(rsource)
db.session.commit()
except IntegrityError:
return error_response(
errors.API_SOURCE_EXISTS.format(request.json['uri']), 400)
else:
return jsonify(rsource.to_dict())
@api.route('/rulesources/<rs_id>/', methods=['DELETE'])
@login_required
def delete_rule_source(rs_id):
source = RuleSource.query.filter_by(id=rs_id).first_or_404()
db.session.delete(source)
db.session.commit()
return jsonify({})
@api.route('/script/', methods=['POST'])
@login_required
def create_script():
missing = Script.check_required(request.json)
if missing:
return error_response(
errors.API_FIELDS_MISSING.format(missing), 400)
else:
script = Script(**request.json)
script.user = current_user
db.session.add(script)
db.session.commit()
return jsonify(script.to_dict())
@api.route('/script/', methods=['PUT', 'PATCH'])
@login_required
def update_script():
script = Script.query.get(request.json.get('id'))
script.user = current_user
for editable in Script.editable_fields():
if editable in request.json:
setattr(script, editable, request.json[editable])
db.session.add(script)
db.session.commit()
return jsonify(script.to_dict())
@api.route('/script/', methods=['GET'])
def get_script():
if request.args.get('script_id'):
script = DeployScript.query.get(request.args.get('script_id'))
else:
script = DeployScript.query.order_by(DeployScript.date.desc()).first()
if request.args.get('text') in ['1', 'true']:
resp = make_response(script.script)
resp.headers['Content-Disposition'] = "attachment; filename=deploy.sh"
return resp
else:
return jsonify(script.to_dict())
| lgpl-2.1 | -8,902,985,821,809,908,000 | -6,350,399,336,479,338,000 | 28.730496 | 134 | 0.607904 | false |
HazyResearch/metal | metal/logging/writer.py | 1 | 4223 | import copy
import json
import os
from collections import defaultdict
from subprocess import check_output
from time import strftime
from metal.utils import recursive_transform
class LogWriter(object):
"""Class for writing simple JSON logs at end of runs, with interface for
storing per-iter data as well.
Config contains:
log_dir: (str) The path to the base log directory, or defaults to
current working directory.
run_dir: (str) The name of the sub-directory, or defaults to the date,
strftime("%Y_%m_%d").
run_name: (str) The name of the run + the time, or defaults to the time,
strftime("%H_%M_%S).
writer_metrics: (list) An optional whitelist of metrics to write,
ignoring all others. (If None, write all available metrics).
Log is saved to 'log_dir/run_dir/{run_name}_H_M_S.json'
"""
def __init__(
self,
log_dir=None,
run_dir=None,
run_name=None,
writer_metrics=[],
verbose=True,
**kwargs,
):
start_date = strftime("%Y_%m_%d")
start_time = strftime("%H_%M_%S")
# Set logging subdirectory + make sure exists
log_dir = log_dir or os.getcwd()
run_dir = run_dir or start_date
if run_name is not None:
run_name = f"{run_name}_{start_time}"
else:
run_name = start_time
self.log_subdir = os.path.join(log_dir, run_dir, run_name)
if not os.path.exists(self.log_subdir):
os.makedirs(self.log_subdir)
# Save other settings
self.writer_metrics = writer_metrics
self.verbose = verbose
# Initialize log
# Note we have a separate section for during-run metrics
commit = check_output(["git", "rev-parse", "--short", "HEAD"]).strip()
self.log_dict = {
"start_date": start_date,
"start_time": start_time,
"commit": str(commit),
"config": None,
"run_log": defaultdict(list),
}
def add_scalar(self, name, val, i):
# Note: Does not handle deduplication of (name, val) entries w same i
        if not self.writer_metrics or name in self.writer_metrics:
if val is not None:
val = float(val)
self.log_dict["run_log"][name].append((i, val))
return True
else:
return False
def write(self, config=None, metrics=None):
        self.write_log()
if config is not None:
self.write_config(config)
if metrics is not None:
self.write_metrics(metrics)
def write_log(self):
"""Dump log output to file"""
log_path = os.path.join(self.log_subdir, "log.json")
if self.verbose:
print(f"Writing log to {log_path}")
with open(log_path, "w") as f:
json.dump(self.log_dict, f, indent=1)
def write_config(self, config, config_name="config"):
"""Dump config dict to file"""
config_path = os.path.join(self.log_subdir, f"{config_name}.json")
if self.verbose:
print(f"Writing config to {config_path}")
with open(config_path, "w") as f:
config = self._sanitize_config(config)
json.dump(config, f, indent=1)
def write_metrics(self, metrics):
metrics_path = os.path.join(self.log_subdir, "metrics.json")
if self.verbose:
print(f"Writing metrics to {metrics_path}")
with open(metrics_path, "w") as f:
json.dump(metrics, f, indent=1)
def close(self):
pass
def _sanitize_config(self, config):
config = copy.deepcopy(config)
# Replace individual functions
is_func = lambda x: callable(x)
replace_with_name = lambda f: str(f)
config = recursive_transform(config, is_func, replace_with_name)
# Replace lists of functions
is_func_list = lambda x: isinstance(x, list) and all(is_func(f) for f in x)
replace_with_names = lambda x: [replace_with_name(f) for f in x]
config = recursive_transform(config, is_func_list, replace_with_names)
return config
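# Minimal usage sketch (not part of the original module; names and paths are
# illustrative, and __init__ assumes it runs inside a git checkout):
#
#     writer = LogWriter(log_dir="logs", run_dir="mnist", run_name="baseline")
#     for step, loss in enumerate(losses):            # `losses` is hypothetical
#         writer.add_scalar("train/loss", loss, step)
#     writer.write(config={"lr": 0.01}, metrics={"accuracy": 0.93})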
| apache-2.0 | -6,572,768,036,373,461,000 | 1,134,269,211,625,161,200 | 34.191667 | 83 | 0.582998 | false |
hawkeyexp/plugin.video.netflix | resources/lib/services/nfsession/session/base.py | 1 | 2055 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Copyright (C) 2019 Stefano Gottardo - @CastagnaIT
Initialize the netflix session
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import resources.lib.common as common
from resources.lib.database.db_utils import TABLE_SESSION
from resources.lib.globals import G
from resources.lib.utils.logging import LOG
class SessionBase(object):
"""Initialize the netflix session"""
session = None
"""The requests.session object to handle communication to Netflix"""
verify_ssl = True
"""Use SSL verification when performing requests"""
# Functions from derived classes to allow perform particular operations in parent classes
external_func_activate_profile = None # (set by nfsession_op.py)
def __init__(self):
self.verify_ssl = bool(G.ADDON.getSettingBool('ssl_verification'))
self._init_session()
def _init_session(self):
"""Initialize the session to use for all future connections"""
try:
self.session.close()
LOG.info('Session closed')
except AttributeError:
pass
from requests import session
self.session = session()
self.session.max_redirects = 10 # Too much redirects should means some problem
self.session.headers.update({
'User-Agent': common.get_user_agent(enable_android_mediaflag_fix=True),
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'www.netflix.com'
})
LOG.info('Initialized new session')
@property
def auth_url(self):
"""Access rights to make HTTP requests on an endpoint"""
return G.LOCAL_DB.get_value('auth_url', table=TABLE_SESSION)
@auth_url.setter
def auth_url(self, value):
G.LOCAL_DB.set_value('auth_url', value, TABLE_SESSION)
| mit | -279,848,735,146,529,800 | 5,353,606,421,403,422,000 | 33.830508 | 93 | 0.66618 | false |
Wuguanping/Server_Manage_Plugin | Openstack_Plugin/ironic-plugin-pike/ironic/tests/unit/drivers/modules/irmc/test_common.py | 3 | 10969 | # Copyright 2015 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for common methods used by iRMC modules.
"""
import mock
from oslo_config import cfg
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.irmc import common as irmc_common
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers import third_party_driver_mock_specs \
as mock_specs
from ironic.tests.unit.objects import utils as obj_utils
class IRMCValidateParametersTestCase(db_base.DbTestCase):
def setUp(self):
super(IRMCValidateParametersTestCase, self).setUp()
self.node = obj_utils.create_test_node(
self.context,
driver='fake_irmc',
driver_info=db_utils.get_test_irmc_info())
def test_parse_driver_info(self):
info = irmc_common.parse_driver_info(self.node)
self.assertEqual('1.2.3.4', info['irmc_address'])
self.assertEqual('admin0', info['irmc_username'])
self.assertEqual('fake0', info['irmc_password'])
self.assertEqual(60, info['irmc_client_timeout'])
self.assertEqual(80, info['irmc_port'])
self.assertEqual('digest', info['irmc_auth_method'])
self.assertEqual('ipmitool', info['irmc_sensor_method'])
self.assertEqual('v2c', info['irmc_snmp_version'])
self.assertEqual(161, info['irmc_snmp_port'])
self.assertEqual('public', info['irmc_snmp_community'])
self.assertFalse(info['irmc_snmp_security'])
def test_parse_driver_option_default(self):
self.node.driver_info = {
"irmc_address": "1.2.3.4",
"irmc_username": "admin0",
"irmc_password": "fake0",
}
info = irmc_common.parse_driver_info(self.node)
self.assertEqual('basic', info['irmc_auth_method'])
self.assertEqual(443, info['irmc_port'])
self.assertEqual(60, info['irmc_client_timeout'])
self.assertEqual('ipmitool', info['irmc_sensor_method'])
def test_parse_driver_info_missing_address(self):
del self.node.driver_info['irmc_address']
self.assertRaises(exception.MissingParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_missing_username(self):
del self.node.driver_info['irmc_username']
self.assertRaises(exception.MissingParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_missing_password(self):
del self.node.driver_info['irmc_password']
self.assertRaises(exception.MissingParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_timeout(self):
self.node.driver_info['irmc_client_timeout'] = 'qwe'
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_port(self):
self.node.driver_info['irmc_port'] = 'qwe'
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_auth_method(self):
self.node.driver_info['irmc_auth_method'] = 'qwe'
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_sensor_method(self):
self.node.driver_info['irmc_sensor_method'] = 'qwe'
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_missing_multiple_params(self):
del self.node.driver_info['irmc_password']
del self.node.driver_info['irmc_address']
e = self.assertRaises(exception.MissingParameterValue,
irmc_common.parse_driver_info, self.node)
self.assertIn('irmc_password', str(e))
self.assertIn('irmc_address', str(e))
def test_parse_driver_info_invalid_snmp_version(self):
self.node.driver_info['irmc_snmp_version'] = 'v3x'
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_snmp_port(self):
self.node.driver_info['irmc_snmp_port'] = '161'
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_snmp_community(self):
self.node.driver_info['irmc_snmp_version'] = 'v2c'
self.node.driver_info['irmc_snmp_community'] = 100
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_invalid_snmp_security(self):
self.node.driver_info['irmc_snmp_version'] = 'v3'
self.node.driver_info['irmc_snmp_security'] = 100
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
def test_parse_driver_info_empty_snmp_security(self):
self.node.driver_info['irmc_snmp_version'] = 'v3'
self.node.driver_info['irmc_snmp_security'] = ''
self.assertRaises(exception.InvalidParameterValue,
irmc_common.parse_driver_info, self.node)
class IRMCCommonMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IRMCCommonMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_irmc")
self.info = db_utils.get_test_irmc_info()
self.node = obj_utils.create_test_node(
self.context,
driver='fake_irmc',
driver_info=self.info)
@mock.patch.object(irmc_common, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
def test_get_irmc_client(self, mock_scci):
self.info['irmc_port'] = 80
self.info['irmc_auth_method'] = 'digest'
self.info['irmc_client_timeout'] = 60
mock_scci.get_client.return_value = 'get_client'
returned_mock_scci_get_client = irmc_common.get_irmc_client(self.node)
mock_scci.get_client.assert_called_with(
self.info['irmc_address'],
self.info['irmc_username'],
self.info['irmc_password'],
port=self.info['irmc_port'],
auth_method=self.info['irmc_auth_method'],
client_timeout=self.info['irmc_client_timeout'])
self.assertEqual('get_client', returned_mock_scci_get_client)
def test_update_ipmi_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ipmi_info = {
"ipmi_address": "1.2.3.4",
"ipmi_username": "admin0",
"ipmi_password": "fake0",
}
task.node.driver_info = self.info
irmc_common.update_ipmi_properties(task)
actual_info = task.node.driver_info
expected_info = dict(self.info, **ipmi_info)
self.assertEqual(expected_info, actual_info)
@mock.patch.object(irmc_common, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
def test_get_irmc_report(self, mock_scci):
self.info['irmc_port'] = 80
self.info['irmc_auth_method'] = 'digest'
self.info['irmc_client_timeout'] = 60
mock_scci.get_report.return_value = 'get_report'
returned_mock_scci_get_report = irmc_common.get_irmc_report(self.node)
mock_scci.get_report.assert_called_with(
self.info['irmc_address'],
self.info['irmc_username'],
self.info['irmc_password'],
port=self.info['irmc_port'],
auth_method=self.info['irmc_auth_method'],
client_timeout=self.info['irmc_client_timeout'])
self.assertEqual('get_report', returned_mock_scci_get_report)
def test_out_range_port(self):
self.assertRaises(ValueError, cfg.CONF.set_override,
'port', 60, 'irmc')
def test_out_range_auth_method(self):
self.assertRaises(ValueError, cfg.CONF.set_override,
'auth_method', 'fake', 'irmc')
def test_out_range_sensor_method(self):
self.assertRaises(ValueError, cfg.CONF.set_override,
'sensor_method', 'fake', 'irmc')
@mock.patch.object(irmc_common, 'elcm',
spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC)
def test_set_secure_boot_mode_enable(self, mock_elcm):
mock_elcm.set_secure_boot_mode.return_value = 'set_secure_boot_mode'
info = irmc_common.parse_driver_info(self.node)
irmc_common.set_secure_boot_mode(self.node, True)
mock_elcm.set_secure_boot_mode.assert_called_once_with(
info, True)
@mock.patch.object(irmc_common, 'elcm',
spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC)
def test_set_secure_boot_mode_disable(self, mock_elcm):
mock_elcm.set_secure_boot_mode.return_value = 'set_secure_boot_mode'
info = irmc_common.parse_driver_info(self.node)
irmc_common.set_secure_boot_mode(self.node, False)
mock_elcm.set_secure_boot_mode.assert_called_once_with(
info, False)
@mock.patch.object(irmc_common, 'elcm',
spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC)
@mock.patch.object(irmc_common, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
def test_set_secure_boot_mode_fail(self, mock_scci, mock_elcm):
irmc_common.scci.SCCIError = Exception
mock_elcm.set_secure_boot_mode.side_effect = Exception
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.IRMCOperationError,
irmc_common.set_secure_boot_mode,
task.node, True)
info = irmc_common.parse_driver_info(task.node)
mock_elcm.set_secure_boot_mode.assert_called_once_with(
info, True)
| apache-2.0 | -1,146,286,623,663,316,000 | 2,740,196,831,515,939,000 | 43.589431 | 78 | 0.631872 | false |
erickt/hue | apps/beeswax/src/beeswax/design.py | 26 | 9530 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The HQLdesign class can (de)serialize a design to/from a QueryDict.
"""
import json
import logging
import os
import re
import urlparse
import django.http
from django import forms
from desktop.lib.django_forms import BaseSimpleFormSet, MultiForm
from desktop.lib.django_mako import render_to_string
from hadoop.cluster import get_hdfs
LOG = logging.getLogger(__name__)
SERIALIZATION_VERSION = '0.4.1'
def hql_query(hql, database='default', query_type=None):
data_dict = json.loads('{"query": {"email_notify": false, "query": null, "type": 0, "is_parameterized": true, "database": "default"}, '
'"functions": [], "VERSION": "0.4.1", "file_resources": [], "settings": []}')
if not (isinstance(hql, str) or isinstance(hql, unicode)):
raise Exception('Requires a SQL text query of type <str>, <unicode> and not %s' % type(hql))
data_dict['query']['query'] = strip_trailing_semicolon(hql)
data_dict['query']['database'] = database
if query_type:
data_dict['query']['type'] = query_type
hql_design = HQLdesign()
hql_design._data_dict = data_dict
return hql_design
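# Usage sketch (illustrative, not part of the original module):
#
#   design = hql_query("SELECT * FROM sample_07; SELECT * FROM sample_08;")
#   design.statements            # -> ['SELECT * FROM sample_07', 'SELECT * FROM sample_08']
#   serialized = design.dumps()  # JSON string, reloadable via HQLdesign.loads(serialized)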
class HQLdesign(object):
"""
Represents an HQL design, with methods to perform (de)serialization.
We support queries that aren't parameterized, in case users
want to use "$" natively, but we leave that as an advanced
option to turn off.
"""
_QUERY_ATTRS = [ 'query', 'type', 'is_parameterized', 'email_notify', 'database' ]
_SETTINGS_ATTRS = [ 'key', 'value' ]
_FILE_RES_ATTRS = [ 'type', 'path' ]
_FUNCTIONS_ATTRS = [ 'name', 'class_name' ]
def __init__(self, form=None, query_type=None):
"""Initialize the design from a valid form data."""
if form is not None:
assert isinstance(form, MultiForm)
self._data_dict = {
'query': normalize_form_dict(form.query, HQLdesign._QUERY_ATTRS),
'settings': normalize_formset_dict(form.settings, HQLdesign._SETTINGS_ATTRS),
'file_resources': normalize_formset_dict(form.file_resources, HQLdesign._FILE_RES_ATTRS),
'functions': normalize_formset_dict(form.functions, HQLdesign._FUNCTIONS_ATTRS)
}
if query_type is not None:
self._data_dict['query']['type'] = query_type
def dumps(self):
"""Returns the serialized form of the design in a string"""
dic = self._data_dict.copy()
dic['VERSION'] = SERIALIZATION_VERSION
return json.dumps(dic)
@property
def hql_query(self):
return self._data_dict['query']['query']
@hql_query.setter
def hql_query(self, query):
self._data_dict['query']['query'] = query
@property
def query(self):
return self._data_dict['query'].copy()
@property
def settings(self):
return list(self._data_dict['settings'])
@property
def file_resources(self):
return list(self._data_dict['file_resources'])
@property
def functions(self):
return list(self._data_dict['functions'])
def get_configuration_statements(self):
configuration = []
for f in self.file_resources:
if not urlparse.urlsplit(f['path']).scheme:
scheme = get_hdfs().fs_defaultfs
else:
scheme = ''
configuration.append(render_to_string("hql_resource.mako", dict(type=f['type'], path=f['path'], scheme=scheme)))
for f in self.functions:
configuration.append(render_to_string("hql_function.mako", f))
return configuration
def get_query_dict(self):
# We construct the mform to use its structure and prefix. We don't actually bind data to the forms.
from beeswax.forms import QueryForm
mform = QueryForm()
mform.bind()
res = django.http.QueryDict('', mutable=True)
res.update(denormalize_form_dict(
self._data_dict['query'], mform.query, HQLdesign._QUERY_ATTRS))
res.update(denormalize_formset_dict(
self._data_dict['settings'], mform.settings, HQLdesign._SETTINGS_ATTRS))
res.update(denormalize_formset_dict(
self._data_dict['file_resources'], mform.file_resources, HQLdesign._FILE_RES_ATTRS))
res.update(denormalize_formset_dict(
self._data_dict['functions'], mform.functions, HQLdesign._FUNCTIONS_ATTRS))
return res
@staticmethod
def loads(data):
"""Returns an HQLdesign from the serialized form"""
dic = json.loads(data)
dic = dict(map(lambda k: (str(k), dic.get(k)), dic.keys()))
if dic['VERSION'] != SERIALIZATION_VERSION:
LOG.error('Design version mismatch. Found %s; expect %s' % (dic['VERSION'], SERIALIZATION_VERSION))
# Convert to latest version
del dic['VERSION']
if 'type' not in dic['query'] or dic['query']['type'] is None:
dic['query']['type'] = 0
if 'database' not in dic['query']:
dic['query']['database'] = 'default'
design = HQLdesign()
design._data_dict = dic
return design
def get_query(self):
return self._data_dict["query"]
@property
def statement_count(self):
return len(self.statements)
def get_query_statement(self, n=0):
return self.statements[n]
@property
def statements(self):
hql_query = strip_trailing_semicolon(self.hql_query)
return [strip_trailing_semicolon(statement.strip()) for statement in split_statements(hql_query)]
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def split_statements(hql):
"""
  Split statements at semicolons, ignoring the ones inside
quotes and comments. The comment symbols that come
inside quotes should be ignored.
"""
statements = []
current = ''
prev = ''
between_quotes = None
is_comment = None
lines = hql.splitlines()
for line in lines:
for c in line:
current += c
if c in ('"', "'") and prev != '\\' and is_comment is None:
if between_quotes == c:
between_quotes = None
elif between_quotes is None:
between_quotes = c
elif c == '-' and prev == '-' and between_quotes is None and is_comment is None:
is_comment = True
elif c == ';':
if between_quotes is None and is_comment is None:
current = current.strip()
# Strip off the trailing semicolon
current = current[:-1]
if len(current) > 1:
statements.append(current)
current = ''
# This character holds no significance if it was escaped within a string
if prev == '\\' and between_quotes is not None:
c = ''
prev = c
is_comment = None
prev = os.linesep
if current != '':
current += os.linesep
if current and current != ';':
current = current.strip()
statements.append(current)
return statements
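# Worked example (illustrative) of the splitting rules above: a semicolon
# inside quotes does not end a statement, and trailing semicolons are dropped.
#
#   split_statements("USE default;\nSELECT 'a;b';")
#   # -> ['USE default', "SELECT 'a;b'"]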
def normalize_form_dict(form, attr_list):
"""
normalize_form_dict(form, attr_list) -> A dictionary of (attr, value)
Each attr is a field name. And the value is obtained by looking up the form's data dict.
"""
assert isinstance(form, forms.Form)
res = { }
for attr in attr_list:
res[attr] = form.cleaned_data.get(attr)
return res
def normalize_formset_dict(formset, attr_list):
"""
normalize_formset_dict(formset, attr_list) -> A list of dictionary of (attr, value)
"""
assert isinstance(formset, BaseSimpleFormSet)
res = [ ]
for form in formset.forms:
res.append(normalize_form_dict(form, attr_list))
return res
def denormalize_form_dict(data_dict, form, attr_list):
"""
denormalize_form_dict(data_dict, form, attr_list) -> A QueryDict with the attributes set
"""
assert isinstance(form, forms.Form)
res = django.http.QueryDict('', mutable=True)
for attr in attr_list:
try:
res[str(form.add_prefix(attr))] = data_dict[attr]
except KeyError:
pass
return res
def denormalize_formset_dict(data_dict_list, formset, attr_list):
"""
denormalize_formset_dict(data_dict, form, attr_list) -> A QueryDict with the attributes set
"""
assert isinstance(formset, BaseSimpleFormSet)
res = django.http.QueryDict('', mutable=True)
for i, data_dict in enumerate(data_dict_list):
prefix = formset.make_prefix(i)
form = formset.form(prefix=prefix)
res.update(denormalize_form_dict(data_dict, form, attr_list))
res[prefix + '-_exists'] = 'True'
res[str(formset.management_form.add_prefix('next_form_id'))] = str(len(data_dict_list))
return res
def __str__(self):
return '%s: %s' % (self.__class__, self.query)
_SEMICOLON_WHITESPACE = re.compile(";\s*$")
def strip_trailing_semicolon(query):
"""As a convenience, we remove trailing semicolons from queries."""
s = _SEMICOLON_WHITESPACE.split(query, 2)
if len(s) > 1:
assert len(s) == 2
assert s[1] == ''
return s[0]
| apache-2.0 | 7,667,168,688,507,626,000 | 2,147,314,313,449,840,600 | 30.766667 | 137 | 0.659286 | false |
playerNaN/NaNPyGameEngine | engine.py | 1 | 5921 | import pygame
import sys
import os
from collections import namedtuple
import time
import resourcemanager
ColorList = namedtuple("ColorList", "black white red green blue")
colors = ColorList((0,0,0),(0xFF,0xFF,0xFF),(0xFF,0,0),(0,0xFF,0),(0,0,0xFF))
PyListener = namedtuple("PyListener", "condition effect")
PyEventListener = namedtuple("PyEventListener","events condition effect")
class Pyengine:
def __init__(self,size):
pygame.init()
self.__size = size
self.__fps = 60
self.__bg = colors.white
self.__fg = colors.black
self.__on_update = []
self.__on_draw = []
self.__keys_down = {}
self.__listeners = []
self.__event_handlers = []
self.__mouse_down = {}
self.__display = None
self.__screen_centerX = size[0]/2
self.__scaleX = 1.0
self.__scaleY = 1.0
self.__screen_centerY = size[1]/2
self.__clock = pygame.time.Clock()
self.__buffer_surface = None
self.__resource_manager = resourcemanager.ResourceManager()
self.__animators = {}
def add_animator(self,name,animator):
self.__animators[name] = animator
def remove_animator(self,name):
del self.__animators[name]
def get_animator(self,name):
return self.__animators[name]
def set_scale_x(self,x):
self.__scaleX = x
def get_scale_x(self):
return self.__scaleX
def set_scale_y(self,y):
self.__scaleY = y
def get_scale_y(self):
return self.__scaleY
def set_scale(self,s):
self.__scaleX = s[0]
self.__scaleY = s[1]
def get_scale(self):
return (self.__scaleX,self.__scaleY)
def set_fg(self,fg):
self.__fg = fg
def get_fg(self):
return self.__fg
def set_bg(self,bg):
self.__bg = bg
def get_bg(self):
return self.__bg
def get_display(self):
        return self.__display
def set_screen_center_x(self,x):
self.__screen_centerX = x
def get_screen_center_x(self):
return self.__screen_centerX
def set_screen_center_y(self,y):
self.__screen_centerY = y
def get_screen_center_y(self):
return self.__screen_centerY
def set_screen_center(self,pos):
self.__screen_centerX = pos[0]
self.__screen_centerY = pos[1]
def get_screen_center(self):
return (self.__screen_centerX,self.__screen_centerY)
def get_buffer_surface(self):
return self.__buffer_surface
def get_resource_manager(self):
return self.__resource_manager
def update_all_animators(self):
ms = self.__clock.get_time()
for i in self.__animators:
self.__animators[i].update(ms)
def draw_all_animators(self):
for i in self.__animators:
self.draw_image(self.__animators[i].get_current_image(),self.__animators[i].get_position())
def handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.exit()
elif event.type == pygame.KEYDOWN:
self.__keys_down[event.key] = True
elif event.type == pygame.KEYUP:
self.__keys_down[event.key] = False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                # track per-button state so is_mouse_button_down() can look it up
                self.__mouse_down[event.button] = True
            elif event.type == pygame.MOUSEBUTTONUP:
                self.__mouse_down[event.button] = False
for handler in self.__event_handlers:
if event.type in handler.events and handler.condition(self,event):
handler.effect(self,event)
def draw_image(self,name,pos):
self.__buffer_surface.blit(self.__resource_manager.get_image(name),pos)
def is_key_down(self,key):
if not key in self.__keys_down:
return False
return self.__keys_down[key]
def is_mouse_button_down(self,button):
if not button in self.__mouse_down:
return False
return self.__mouse_down[button]
def run(self):
screen = pygame.display.set_mode(self.__size)
self.__display = screen
oldTime = time.time()
while True:
spf = 1.0 / self.__fps
self.handle_events()
self.update()
self.draw(screen)
self.__clock.tick(self.__fps)
def exit(self):
pygame.display.quit()
pygame.quit()
sys.exit()
def update(self):
self.update_all_animators()
for l in self.__listeners:
if l.condition(self):
l.effect(self)
def draw(self,display):
self.__buffer_surface = pygame.Surface(display.get_size())
display.fill(colors.red)
self.__buffer_surface.fill(self.__bg)
for od in self.__on_draw:
od(self,self.__buffer_surface)
self.draw_all_animators()
src_size = (self.__size[0]/self.__scaleX,self.__size[1]/self.__scaleY)
top = self.__screen_centerY - src_size[1] / 2
left = self.__screen_centerX - src_size[0] / 2
cropped = pygame.Surface(src_size)
cropped.blit(self.__buffer_surface,(0,0),(left,top,src_size[0],src_size[1]))
cropped = pygame.transform.scale(cropped,self.__size)
display.blit(cropped,(0,0))
pygame.display.update((0,0,self.__size[0],self.__size[1]))
def add_draw_listener(self,f):
self.__on_draw.append(f)
def add_listener(self,condition,effect):
self.__listeners.append(PyListener(condition,effect))
def add_on_update(self,effect):
        self.add_listener(lambda s: True, effect)
def add_event_listener(self,events,condition,effect):
self.__event_handlers.append(PyEventListener(events,condition,effect))
def set_fps(self,fps):
self.__fps = fps
def get_fps(self):
return self.__fps
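# A minimal usage sketch (illustrative only, not part of the original module):
# the engine is driven by registering listeners and then calling run(). The key
# constant and the circle drawing below are ordinary pygame calls chosen for the
# example, not APIs defined in this file.
#
#     engine = Pyengine((640, 480))
#     engine.add_event_listener(
#         [pygame.KEYDOWN],
#         lambda eng, ev: ev.key == pygame.K_ESCAPE,
#         lambda eng, ev: eng.exit())
#     engine.add_draw_listener(
#         lambda eng, surface: pygame.draw.circle(surface, colors.red, (320, 240), 20))
#     engine.run()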
| unlicense | 8,999,448,359,060,182,000 | 4,162,961,597,106,827,300 | 32.647727 | 103 | 0.575578 | false |
pinnamur/titanium_mobile | support/iphone/provisioner.py | 34 | 3613 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Install a provisioning profile
#
import os, sys, subprocess, re, time, poorjson, types
from xml.dom.minidom import parseString
import codecs
from OpenSSL import crypto
def dequote(s):
if s[0:1] == '"':
return s[1:-1]
return s
def getText(nodelist):
rc = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
def make_map(dict):
props = {}
curkey = None
for i in dict.childNodes:
if i.nodeType == 1:
if i.nodeName == 'key':
curkey = str(getText(i.childNodes)).strip()
elif i.nodeName == 'dict':
props[curkey] = make_map(i)
curkey = None
elif i.nodeName == 'array':
s = i.getElementsByTagName('string')
if len(s):
txt = ''
for t in s:
txt+=getText(t.childNodes)
props[curkey]=txt
else:
props[curkey]=None
curkey = None
else:
props[curkey] = getText(i.childNodes)
curkey = None
return props
def find_dict_element(dict,name):
found = False
for i in dict.childNodes:
if i.nodeType == 1:
if i.nodeName == 'key':
if str(getText(i.childNodes)).strip() == name:
found = True
elif found:
return i
return None
def get_cert(dict):
certs_array = find_dict_element(dict, 'DeveloperCertificates')
if certs_array:
certs_array = certs_array.getElementsByTagName('data')
if not certs_array or not len(certs_array):
return None
cert_text = str(getText(certs_array[0].childNodes)).strip()
cert_text = "-----BEGIN CERTIFICATE-----\n" + cert_text + "\n-----END CERTIFICATE-----\n"
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_text)
return cert
def main(args):
if len(args)!=2:
print "%s <provisioning_file>" % os.path.basename(args[0])
sys.exit(1)
try:
xml = os.path.abspath(os.path.expanduser(dequote(args[1].decode("utf-8"))))
f = open(xml,'rb').read()
b = f.index('<?xml')
e = f.index('</plist>')
xml_content = f[b:e+8]
dom = parseString(xml_content)
dict = dom.getElementsByTagName('dict')[0]
props = make_map(dict)
profile_type = 'unknown'
if len(re.findall('ProvisionedDevices',xml_content)) > 0:
profile_type = 'development'
try:
cert = get_cert(dict)
if cert and re.search('Distribution:', cert.get_subject().commonName):
profile_type = 'adhoc'
except Exception, e:
sys.stderr.write('ERROR: %s\n' % str(e))
else:
profile_type = 'distribution'
name = props['Name']
name = name.decode('string_escape').decode('utf-8')
entitlements = props['Entitlements']
appid = entitlements['application-identifier']
appid_prefix = props['ApplicationIdentifierPrefix']
uuid = props['UUID']
bundle_id = appid.replace(appid_prefix+'.','')
# check to see if xcode is already running
output = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE).communicate()[0]
is_xcode = re.findall(r'Xcode.app',output)
xcode = len(is_xcode) > 0
# now we need to install the cert
# we essentially open xcode causing the cert to be installed
# automagically (but -g tells it to stay in the background)
cmd = "open -g \"%s\"" % xml
os.system(cmd)
# only kill Xcode if it wasn't already running
if xcode == False:
# give it a sec to install before killing it
time.sleep(1.5)
cmd = "killall Xcode"
os.system(cmd)
print poorjson.PoorJSON().dump({'type':profile_type,'appid':bundle_id, 'prefix':appid_prefix, 'name':name, 'uuid': uuid})
sys.exit(0)
except Exception, e:
print e
sys.exit(10)
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 | 6,335,425,962,775,939,000 | -7,901,828,593,215,580,000 | 24.624113 | 123 | 0.638804 | false |
gsehub/edx-platform | lms/djangoapps/lti_provider/tests/test_tasks.py | 12 | 4312 | """
Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py
"""
import ddt
from django.test import TestCase
from mock import MagicMock, patch
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
import lti_provider.tasks as tasks
from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService
from student.tests.factories import UserFactory
class BaseOutcomeTest(TestCase):
"""
Super type for tests of both the leaf and composite outcome celery tasks.
"""
def setUp(self):
super(BaseOutcomeTest, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.usage_key = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id'
)
self.user = UserFactory.create()
self.consumer = LtiConsumer(
consumer_name='Lti Consumer Name',
consumer_key='consumer_key',
consumer_secret='consumer_secret',
instance_guid='tool_instance_guid'
)
self.consumer.save()
outcome = OutcomeService(
lis_outcome_service_url='http://example.com/service_url',
lti_consumer=self.consumer
)
outcome.save()
self.assignment = GradedAssignment(
user=self.user,
course_key=self.course_key,
usage_key=self.usage_key,
outcome_service=outcome,
lis_result_sourcedid='sourcedid',
version_number=1,
)
self.assignment.save()
self.send_score_update_mock = self.setup_patch(
'lti_provider.outcomes.send_score_update', None
)
def setup_patch(self, function_name, return_value):
"""
Patch a method with a given return value, and return the mock
"""
mock = MagicMock(return_value=return_value)
new_patch = patch(function_name, new=mock)
new_patch.start()
self.addCleanup(new_patch.stop)
return mock
@ddt.ddt
class SendLeafOutcomeTest(BaseOutcomeTest):
"""
Tests for the send_leaf_outcome method in tasks.py
"""
shard = 4
@ddt.data(
(2.0, 2.0, 1.0),
(2.0, 0.0, 0.0),
(1, 2, 0.5),
)
@ddt.unpack
def test_outcome_with_score(self, earned, possible, expected):
tasks.send_leaf_outcome(
self.assignment.id,
earned,
possible
)
self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
@ddt.ddt
class SendCompositeOutcomeTest(BaseOutcomeTest):
"""
Tests for the send_composite_outcome method in tasks.py
"""
shard = 4
def setUp(self):
super(SendCompositeOutcomeTest, self).setUp()
self.descriptor = MagicMock()
self.descriptor.location = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='problem',
)
self.course_grade = MagicMock()
self.course_grade_mock = self.setup_patch(
'lti_provider.tasks.CourseGradeFactory.read', self.course_grade
)
self.module_store = MagicMock()
self.module_store.get_item = MagicMock(return_value=self.descriptor)
self.check_result_mock = self.setup_patch(
'lti_provider.tasks.modulestore',
self.module_store
)
@ddt.data(
(2.0, 2.0, 1.0),
(2.0, 0.0, 0.0),
(1, 2, 0.5),
)
@ddt.unpack
def test_outcome_with_score_score(self, earned, possible, expected):
self.course_grade.score_for_module = MagicMock(return_value=(earned, possible))
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1
)
self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
def test_outcome_with_outdated_version(self):
self.assignment.version_number = 2
self.assignment.save()
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1
)
self.assertEqual(self.course_grade_mock.call_count, 0)
| agpl-3.0 | 5,132,770,016,000,830,000 | -5,600,013,795,663,929,000 | 30.705882 | 87 | 0.606679 | false |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/aio/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py | 1 | 21363 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineScaleSetRollingUpgradesOperations:
"""VirtualMachineScaleSetRollingUpgradesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _cancel_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._cancel_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_cancel_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel'} # type: ignore
async def begin_cancel(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Cancels the current virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._cancel_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel'} # type: ignore
async def _start_os_upgrade_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._start_os_upgrade_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_os_upgrade_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade'} # type: ignore
async def begin_start_os_upgrade(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Starts a rolling upgrade to move all virtual machine scale set instances to the latest
available Platform Image OS version. Instances which are already running the latest available
OS version are not affected.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_os_upgrade_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_os_upgrade.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade'} # type: ignore
async def _start_extension_upgrade_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._start_extension_upgrade_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_extension_upgrade_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade'} # type: ignore
async def begin_start_extension_upgrade(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Starts a rolling upgrade to move all extensions for all virtual machine scale set instances to
the latest available extension version. Instances which are already running the latest
extension versions are not affected.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_extension_upgrade_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_extension_upgrade.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade'} # type: ignore
async def get_latest(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> "_models.RollingUpgradeStatusInfo":
"""Gets the status of the latest virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RollingUpgradeStatusInfo, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_06_01.models.RollingUpgradeStatusInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RollingUpgradeStatusInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get_latest.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RollingUpgradeStatusInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_latest.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest'} # type: ignore
| mit | 4,136,591,692,467,757,000 | -6,836,173,717,841,698,000 | 49.503546 | 234 | 0.654356 | false |
Affix/CouchPotatoServer | libs/pyutil/repeatable_random.py | 106 | 3622 | """
If you execute force_repeatability() then the following things are changed in the runtime:
1. random.random() and its sibling functions, and random.Random.seed() in the random module are seeded with a known seed so that they will return the same sequence on each run.
2. os.urandom() is replaced by a fake urandom that returns a pseudorandom sequence.
3. time.time() is replaced by a fake time that returns an incrementing number. (Original time.time is available as time.realtime.)
Which seed will be used?
If the environment variable REPEATABLE_RANDOMNESS_SEED is set, then it will use that. Else, it will use the current real time. In either case it logs the seed that it used.
Caveats:
1. If some code has acquired a random.Random object before force_repeatability() is executed, then that Random object will produce non-reproducible results. For example, the tempfile module in the Python Standard Library does this.
2. Likewise if some code called time.time() before force_repeatability() was called, then it will have gotten a real time stamp. For example, trial does this. (Then it later subtracts that real timestamp from a faketime timestamp to calculate elapsed time, resulting in a large negative elapsed time.)
3. Fake urandom has an added constraint for performance reasons -- you can't ask it for more than 64 bytes of randomness at a time. (I couldn't figure out how to generate large fake random strings efficiently.)
"""
import os, random, time
if not hasattr(time, "realtime"):
time.realtime = time.time
if not hasattr(os, "realurandom"):
os.realurandom = os.urandom
if not hasattr(random, "realseed"):
random.realseed = random.seed
tdelta = 0
seeded = False
def force_repeatability():
now = 1043659734.0
def faketime():
global tdelta
tdelta += 1
return now + tdelta
time.faketime = faketime
time.time = faketime
from idlib import i2b
def fakeurandom(n):
if n > 64:
raise ("Can't produce more than 64 bytes of pseudorandomness efficiently.")
elif n == 0:
return ''
else:
z = i2b(random.getrandbits(n*8))
x = z + "0" * (n-len(z))
assert len(x) == n
return x
os.fakeurandom = fakeurandom
os.urandom = fakeurandom
global seeded
if not seeded:
SEED = os.environ.get('REPEATABLE_RANDOMNESS_SEED', None)
if SEED is None:
# Generate a seed which is integral and fairly short (to ease cut-and-paste, writing it down, etc.).
t = time.realtime()
subsec = t % 1
t += (subsec * 1000000)
t %= 1000000
SEED = long(t)
import sys
sys.stdout.write("REPEATABLE_RANDOMNESS_SEED: %s\n" % SEED) ; sys.stdout.flush()
sys.stdout.write("In order to reproduce this run of the code, set the environment variable \"REPEATABLE_RANDOMNESS_SEED\" to %s before executing.\n" % SEED) ; sys.stdout.flush()
random.seed(SEED)
def seed_which_refuses(a):
sys.stdout.write("I refuse to reseed to %s. Go away!\n" % (a,)) ; sys.stdout.flush()
return
random.realseed = random.seed
random.seed = seed_which_refuses
seeded = True
import setutil
setutil.RandomSet.DETERMINISTIC = True
def restore_real_clock():
time.time = time.realtime
def restore_real_urandom():
os.urandom = os.realurandom
def restore_real_seed():
random.seed = random.realseed
def restore_non_repeatability():
restore_real_seed()
restore_real_urandom()
restore_real_clock()
| gpl-3.0 | 7,420,245,268,879,277,000 | 5,597,151,340,628,942,000 | 39.244444 | 304 | 0.674213 | false |
chouseknecht/ansible | lib/ansible/module_utils/network/junos/facts/vlans/vlans.py | 21 | 3564 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The junos vlans fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from copy import deepcopy
from ansible.module_utils._text import to_bytes
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.junos.argspec.vlans.vlans import VlansArgs
from ansible.module_utils.network.junos.utils.utils import get_resource_config
from ansible.module_utils.six import string_types
try:
from lxml import etree
HAS_LXML = True
except ImportError:
HAS_LXML = False
class VlansFacts(object):
""" The junos vlans fact class
"""
def __init__(self, module, subspec='config', options='options'):
self._module = module
self.argument_spec = VlansArgs.argument_spec
spec = deepcopy(self.argument_spec)
if subspec:
if options:
facts_argument_spec = spec[subspec][options]
else:
facts_argument_spec = spec[subspec]
else:
facts_argument_spec = spec
self.generated_spec = utils.generate_dict(facts_argument_spec)
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for vlans
:param connection: the device connection
:param ansible_facts: Facts dictionary
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
if not HAS_LXML:
self._module.fail_json(msg='lxml is not installed.')
if not data:
config_filter = """
<configuration>
<vlans>
</vlans>
</configuration>
"""
data = get_resource_config(connection, config_filter=config_filter)
if isinstance(data, string_types):
data = etree.fromstring(to_bytes(data,
errors='surrogate_then_replace'))
resources = data.xpath('configuration/vlans/vlan')
objs = []
for resource in resources:
if resource is not None:
obj = self.render_config(self.generated_spec, resource)
if obj:
objs.append(obj)
facts = {}
if objs:
facts['vlans'] = []
params = utils.validate_config(self.argument_spec,
{'config': objs})
for cfg in params['config']:
facts['vlans'].append(utils.remove_empties(cfg))
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
def render_config(self, spec, conf):
"""
Render config as dictionary structure and delete keys
from spec for null values
:param spec: The facts tree, generated from the argspec
:param conf: The configuration
:rtype: dictionary
:returns: The generated config
"""
config = deepcopy(spec)
config['name'] = utils.get_xml_conf_arg(conf, 'name')
config['vlan_id'] = utils.get_xml_conf_arg(conf, 'vlan-id')
config['description'] = utils.get_xml_conf_arg(conf, 'description')
return utils.remove_empties(config)
| gpl-3.0 | -8,937,951,808,587,705,000 | -8,602,511,696,660,687,000 | 33.601942 | 79 | 0.601571 | false |
caseydavenport/calico-containers | tests/st/calicoctl/test_autodetection.py | 2 | 4864 | # Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.plugins.attrib import attr
from tests.st.test_base import TestBase
from tests.st.utils.docker_host import DockerHost
from tests.st.utils.utils import ETCD_CA, ETCD_CERT, \
ETCD_KEY, ETCD_HOSTNAME_SSL, ETCD_SCHEME, get_ip
from tests.st.utils.exceptions import CommandExecError
if ETCD_SCHEME == "https":
ADDITIONAL_DOCKER_OPTIONS = "--cluster-store=etcd://%s:2379 " \
"--cluster-store-opt kv.cacertfile=%s " \
"--cluster-store-opt kv.certfile=%s " \
"--cluster-store-opt kv.keyfile=%s " % \
(ETCD_HOSTNAME_SSL, ETCD_CA, ETCD_CERT,
ETCD_KEY)
else:
ADDITIONAL_DOCKER_OPTIONS = "--cluster-store=etcd://%s:2379 " % \
get_ip()
class TestAutodetection(TestBase):
@attr('slow')
def test_autodetection(self):
"""
Test using different IP autodetection methods.
We run a multi-host test for this to test explicit selection of
"first-found" and also "interface" and "can-reach" detection methods.
"""
with DockerHost('host1',
additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
start_calico=False) as host1, \
DockerHost('host2',
additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
start_calico=False) as host2, \
DockerHost('host3',
additional_docker_options=ADDITIONAL_DOCKER_OPTIONS,
start_calico=False) as host3:
# Start the node on host1 using first-found auto-detection
# method.
host1.start_calico_node(
"--ip=autodetect --ip-autodetection-method=first-found")
# Attempt to start the node on host2 using can-reach auto-detection
# method using a bogus DNS name. This should fail.
try:
host2.start_calico_node(
"--ip=autodetect --ip-autodetection-method=can-reach=XXX.YYY.ZZZ.XXX")
except CommandExecError:
pass
else:
raise AssertionError("Command expected to fail but did not")
# Start the node on host2 using can-reach auto-detection method
# using the IP address of host1. This should succeed.
host2.start_calico_node(
"--ip=autodetect --ip-autodetection-method=can-reach=" + host1.ip)
# Attempt to start the node on host3 using interface auto-detection
# method using a bogus interface name. This should fail.
try:
host3.start_calico_node(
"--ip=autodetect --ip-autodetection-method=interface=BogusInterface")
except CommandExecError:
pass
else:
raise AssertionError("Command expected to fail but did not")
            # Start the node on host3 using interface auto-detection method
            # using the eth0 interface. This should succeed.
host3.start_calico_node(
"--ip=autodetect --ip-autodetection-method=interface=eth0")
# Create a network and a workload on each host.
network1 = host1.create_network("subnet1")
workload_host1 = host1.create_workload("workload1", network=network1)
workload_host2 = host2.create_workload("workload2", network=network1)
workload_host3 = host3.create_workload("workload3", network=network1)
# Allow network to converge
self.assert_true(workload_host1.check_can_ping(workload_host3.ip, retries=10))
# Check connectivity in both directions
self.assert_ip_connectivity(workload_list=[workload_host1,
workload_host2,
workload_host3],
ip_pass_list=[workload_host1.ip,
workload_host2.ip,
workload_host3.ip])
| apache-2.0 | 4,546,490,483,899,076,600 | 4,522,587,197,821,102,600 | 46.223301 | 90 | 0.576891 | false |
hyperized/ansible | lib/ansible/modules/network/f5/bigip_ucs_fetch.py | 38 | 18824 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_ucs_fetch
short_description: Fetches a UCS file from remote nodes
description:
- This module is used for fetching UCS files from remote machines and
storing them locally in a file tree, organized by hostname. Note that
this module is written to transfer UCS files that might not be present,
so a missing remote UCS won't be an error unless fail_on_missing is
set to 'yes'.
version_added: 2.5
options:
backup:
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
create_on_missing:
description:
- Creates the UCS based on the value of C(src) if the file does not already
exist on the remote system.
type: bool
default: yes
dest:
description:
- A directory to save the UCS file into.
type: path
required: True
encryption_password:
description:
- Password to use to encrypt the UCS file if desired.
type: str
fail_on_missing:
description:
- Make the module fail if the UCS file on the remote system is missing.
type: bool
default: no
force:
description:
- If C(no), the file will only be transferred if the destination does not
exist.
type: bool
default: yes
src:
description:
- The name of the UCS file to create on the remote server for downloading
type: str
notes:
- BIG-IP provides no way to get a checksum of the UCS files on the system
via any interface except, perhaps, logging in directly to the box (which
would not support appliance mode). Therefore, the best this module can
do is check for the existence of the file on disk; no check-summing.
- If you are using this module with either Ansible Tower or Ansible AWX, you
should be aware of how these Ansible products execute jobs in restricted
environments. More information can be found here
https://clouddocs.f5.com/products/orchestration/ansible/devel/usage/module-usage-with-tower.html
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Download a new UCS
bigip_ucs_fetch:
src: cs_backup.ucs
dest: /tmp/cs_backup.ucs
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
checksum:
description: The SHA1 checksum of the downloaded file
returned: success or changed
type: str
sample: 7b46bbe4f8ebfee64761b5313855618f64c64109
dest:
description: Location on the ansible host that the UCS was saved to
returned: success
type: str
sample: /path/to/file.txt
src:
description:
- Name of the UCS file on the remote BIG-IP to download. If not
specified, then this will be a randomly generated filename
returned: changed
type: str
sample: cs_backup.ucs
backup_file:
description: Name of backup file created
returned: changed and if backup=yes
type: str
sample: /path/to/file.txt.2015-02-12@22:09~
gid:
description: Group id of the UCS file, after execution
returned: success
type: int
sample: 100
group:
description: Group of the UCS file, after execution
returned: success
type: str
sample: httpd
owner:
description: Owner of the UCS file, after execution
returned: success
type: str
sample: httpd
uid:
description: Owner id of the UCS file, after execution
returned: success
type: int
sample: 100
md5sum:
description: The MD5 checksum of the downloaded file
returned: changed or success
type: str
sample: 96cacab4c259c4598727d7cf2ceb3b45
mode:
description: Permissions of the target UCS, after execution
returned: success
type: str
sample: 0644
size:
description: Size of the target UCS, after execution
returned: success
type: int
sample: 1220
'''
import os
import re
import tempfile
from ansible.module_utils.basic import AnsibleModule
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.icontrol import download_file
from library.module_utils.network.f5.icontrol import tmos_version
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.icontrol import download_file
from ansible.module_utils.network.f5.icontrol import tmos_version
class Parameters(AnsibleF5Parameters):
updatables = []
returnables = [
'dest',
'src',
'md5sum',
'checksum',
'backup_file']
api_attributes = []
api_map = {}
@property
def options(self):
result = []
if self.passphrase:
result.append(dict(
passphrase=self.want.passphrase
))
return result
@property
def src(self):
if self._values['src'] is not None:
return self._values['src']
result = next(tempfile._get_candidate_names()) + '.ucs'
self._values['src'] = result
return result
@property
def fulldest(self):
result = None
if os.path.isdir(self.dest):
result = os.path.join(self.dest, self.src)
else:
if os.path.exists(os.path.dirname(self.dest)):
result = self.dest
else:
try:
# os.path.exists() can return false in some
# circumstances where the directory does not have
# the execute bit for the current user set, in
# which case the stat() call will raise an OSError
os.stat(os.path.dirname(result))
except OSError as e:
if "permission denied" in str(e).lower():
raise F5ModuleError(
"Destination directory {0} is not accessible".format(os.path.dirname(result))
)
raise F5ModuleError(
"Destination directory {0} does not exist".format(os.path.dirname(result))
)
if not os.access(os.path.dirname(result), os.W_OK):
raise F5ModuleError(
"Destination {0} not writable".format(os.path.dirname(result))
)
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.kwargs = kwargs
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
def exec_module(self):
if self.is_version_v1():
manager = self.get_manager('v1')
else:
manager = self.get_manager('v2')
return manager.exec_module()
def get_manager(self, type):
if type == 'v1':
return V1Manager(**self.kwargs)
elif type == 'v2':
return V2Manager(**self.kwargs)
def is_version_v1(self):
"""Checks to see if the TMOS version is less than 12.1.0
Versions prior to 12.1.0 have a bug which prevents the REST
API from properly listing any UCS files when you query the
/mgmt/tm/sys/ucs endpoint. Therefore you need to do everything
through tmsh over REST.
:return: bool
"""
version = tmos_version(self.client)
if LooseVersion(version) < LooseVersion('12.1.0'):
return True
else:
return False
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = Parameters(params=self.module.params)
self.changes = UsableChanges()
def exec_module(self):
result = dict()
self.present()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=True))
return result
def present(self):
if self.exists():
self.update()
else:
self.create()
def update(self):
if os.path.exists(self.want.fulldest):
if not self.want.force:
raise F5ModuleError(
"File '{0}' already exists".format(self.want.fulldest)
)
self.execute()
def _get_backup_file(self):
return self.module.backup_local(self.want.fulldest)
def execute(self):
try:
if self.want.backup:
if os.path.exists(self.want.fulldest):
backup_file = self._get_backup_file()
self.changes.update({'backup_file': backup_file})
self.download()
except IOError:
raise F5ModuleError(
"Failed to copy: {0} to {1}".format(self.want.src, self.want.fulldest)
)
self._set_checksum()
self._set_md5sum()
file_args = self.module.load_file_common_arguments(self.module.params)
return self.module.set_fs_attributes_if_different(file_args, True)
def _set_checksum(self):
try:
result = self.module.sha1(self.want.fulldest)
self.want.update({'checksum': result})
except ValueError:
pass
def _set_md5sum(self):
try:
result = self.module.md5(self.want.fulldest)
self.want.update({'md5sum': result})
except ValueError:
pass
def create(self):
if self.want.fail_on_missing:
raise F5ModuleError(
"UCS '{0}' was not found".format(self.want.src)
)
if not self.want.create_on_missing:
raise F5ModuleError(
"UCS '{0}' was not found".format(self.want.src)
)
if self.module.check_mode:
return True
if self.want.create_on_missing:
self.create_on_device()
self.execute()
return True
def create_on_device(self):
if self.want.passphrase:
params = dict(
command='save',
name=self.want.src,
options=[{'passphrase': self.want.encryption_password}]
)
else:
params = dict(
command='save',
name=self.want.src,
)
uri = "https://{0}:{1}/mgmt/tm/sys/ucs".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def download(self):
self.download_from_device(self.want.dest)
if os.path.exists(self.want.dest):
return True
raise F5ModuleError(
"Failed to download the remote file"
)
class V1Manager(BaseManager):
def __init__(self, *args, **kwargs):
super(V1Manager, self).__init__(**kwargs)
self.remote_dir = '/var/config/rest/madm'
def read_current(self):
result = None
output = self.read_current_from_device()
if 'commandResult' in output:
result = self._read_ucs_files_from_output(output['commandResult'])
return result
def read_current_from_device(self):
params = dict(
command='run',
utilCmdArgs='-c "tmsh list sys ucs"'
)
uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response
def _read_ucs_files_from_output(self, output):
search = re.compile(r'filename\s+(.*)').search
lines = output.split("\n")
result = [m.group(1) for m in map(search, lines) if m]
return result
def exists(self):
collection = self.read_current()
base = os.path.basename(self.want.src)
if any(base == os.path.basename(x) for x in collection):
return True
return False
def download_from_device(self, dest):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/madm/{2}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.filename
)
try:
download_file(self.client, url, dest)
except F5ModuleError:
raise F5ModuleError(
"Failed to download the file."
)
if os.path.exists(self.want.dest):
return True
return False
def _move_to_download(self):
move_path = '/var/local/ucs/{0} {1}/{0}'.format(
self.want.filename, self.remote_dir
)
params = dict(
command='run',
utilCmdArgs=move_path
)
uri = "https://{0}:{1}/mgmt/tm/util/unix-mv/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
if 'commandResult' in response:
if 'cannot stat' in response['commandResult']:
raise F5ModuleError(response['commandResult'])
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
class V2Manager(BaseManager):
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/ucs".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response
def read_current(self):
collection = self.read_current_from_device()
if 'items' not in collection:
return []
resources = collection['items']
result = [x['apiRawValues']['filename'] for x in resources]
return result
def exists(self):
collection = self.read_current()
base = os.path.basename(self.want.src)
if any(base == os.path.basename(x) for x in collection):
return True
return False
def download_from_device(self, dest):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/ucs-downloads/{2}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.src
)
try:
download_file(self.client, url, dest)
except F5ModuleError:
raise F5ModuleError(
"Failed to download the file."
)
if os.path.exists(self.want.dest):
return True
return False
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
backup=dict(
default='no',
type='bool'
),
create_on_missing=dict(
default='yes',
type='bool'
),
encryption_password=dict(no_log=True),
dest=dict(
required=True,
type='path'
),
force=dict(
default='yes',
type='bool'
),
fail_on_missing=dict(
default='no',
type='bool'
),
src=dict()
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.add_file_common_args = True
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
add_file_common_args=spec.add_file_common_args
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 | -1,572,810,725,976,748,500 | 3,150,790,362,532,721,000 | 29.75817 | 105 | 0.590629 | false |
muminoff/savollar | savollar/pipelines.py | 1 | 2093 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don"t forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from scrapy.conf import settings
from scrapy import log
from elasticsearch import Elasticsearch
from uuid import uuid1
from savollar.models import SavolModel
class ElasticSearchIndexPipeline(object):
def process_item(self, item, spider):
es = Elasticsearch([
{"host": settings["ELASTICSEARCH_HOST"]},
])
valid = True
for data in item:
if not data:
raise DropItem("Missing %s of item from %s" %(data, item["link"]))
if valid:
es.index(
index=settings["ELASTICSEARCH_INDEX"],
doc_type="info",
id=str(uuid1()),
body=dict(item)
)
log.msg("Item indexed to ElasticSearch database %s:%s" %
(settings["ELASTICSEARCH_HOST"], settings["ELASTICSEARCH_PORT"]),
level=log.DEBUG, spider=spider)
return item
class CassandraExportPipleline(object):
def process_item(self, item, spider):
valid = True
for data in item:
if not data:
raise DropItem("Missing %s of item from %s" %(data, item["link"]))
if valid:
model = SavolModel()
model.title = item["title"]
model.question = item["question"]
model.answer = item["answer"]
model.author = item["author"]
model.permalink = item["permalink"]
model.year = int(item["year"])
model.month = int(item["month"])
model.date = int(item["date"])
model.tags = item["title"].split()
model.save()
log.msg("Item exported to Cassandra database %s/%s" %
(settings["CASSANDRA_HOST"], settings["CASSANDRA_KEYSPACE"]),
level=log.DEBUG, spider=spider)
return item
| apache-2.0 | -645,415,126,622,304,100 | -4,105,855,176,324,265,000 | 33.883333 | 85 | 0.565695 | false |
thnee/ansible | lib/ansible/modules/cloud/vmware/vmware_guest_custom_attributes.py | 31 | 8326 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright, (c) 2018, Ansible Project
# Copyright, (c) 2018, Abhijeet Kasurde <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_guest_custom_attributes
short_description: Manage custom attributes from VMware for the given virtual machine
description:
- This module can be used to add, remove and update custom attributes for the given virtual machine.
version_added: 2.7
author:
- Jimmy Conner (@cigamit)
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the virtual machine to work with.
- This is required parameter, if C(uuid) or C(moid) is not supplied.
type: str
state:
description:
- The action to take.
- If set to C(present), then custom attribute is added or updated.
- If set to C(absent), then custom attribute is removed.
default: 'present'
choices: ['present', 'absent']
type: str
uuid:
description:
- UUID of the virtual machine to manage if known. This is VMware's unique identifier.
- This is required parameter, if C(name) or C(moid) is not supplied.
type: str
moid:
description:
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
- This is required if C(name) or C(uuid) is not supplied.
version_added: '2.9'
type: str
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: no
type: bool
version_added: '2.8'
folder:
description:
- Absolute path to find an existing guest.
- This is required parameter, if C(name) is supplied and multiple virtual machines with same name are found.
type: str
datacenter:
description:
- Datacenter name where the virtual machine is located in.
required: True
type: str
attributes:
description:
- A list of name and value of custom attributes that needs to be manage.
- Value of custom attribute is not required and will be ignored, if C(state) is set to C(absent).
default: []
type: list
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Add virtual machine custom attributes
  vmware_guest_custom_attributes:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    uuid: 421e4592-c069-924d-ce20-7e7533fab926
    state: present
    attributes:
      - name: MyAttribute
        value: MyValue
  delegate_to: localhost
  register: attributes

- name: Add multiple virtual machine custom attributes
  vmware_guest_custom_attributes:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    uuid: 421e4592-c069-924d-ce20-7e7533fab926
    state: present
    attributes:
      - name: MyAttribute
        value: MyValue
      - name: MyAttribute2
        value: MyValue2
  delegate_to: localhost
  register: attributes

- name: Remove virtual machine Attribute
  vmware_guest_custom_attributes:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    uuid: 421e4592-c069-924d-ce20-7e7533fab926
    state: absent
    attributes:
      - name: MyAttribute
  delegate_to: localhost
  register: attributes

- name: Remove virtual machine Attribute using Virtual Machine MoID
  vmware_guest_custom_attributes:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    moid: vm-42
    state: absent
    attributes:
      - name: MyAttribute
  delegate_to: localhost
  register: attributes
'''

RETURN = """
custom_attributes:
    description: metadata about the virtual machine attributes
    returned: always
    type: dict
    sample: {
        "mycustom": "my_custom_value",
        "mycustom_2": "my_custom_value_2",
        "sample_1": "sample_1_value",
        "sample_2": "sample_2_value",
        "sample_3": "sample_3_value"
    }
"""
try:
    from pyVmomi import vim
except ImportError:
    pass

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
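

# Thin wrapper around the shared PyVmomi helper from Ansible's vmware module_utils;
# it uses vCenter's CustomFieldsManager so custom attribute definitions and values
# can be read and written for a single virtual machine.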
class VmAttributeManager(PyVmomi):
    def __init__(self, module):
        super(VmAttributeManager, self).__init__(module)
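
    # Reconcile the requested attributes with the values currently set on the VM:
    # keys that already exist are updated when the value differs, unknown keys are
    # created through AddFieldDefinition before being set, and an empty value (what
    # the 'absent' state passes in) effectively blanks an existing attribute.
    # Check mode skips the write calls but still reports the pending change.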
    def set_custom_field(self, vm, user_fields):
        result_fields = dict()
        change_list = list()
        changed = False

        for field in user_fields:
            field_key = self.check_exists(field['name'])
            found = False
            field_value = field.get('value', '')

            for k, v in [(x.name, v.value) for x in self.custom_field_mgr for v in vm.customValue if x.key == v.key]:
                if k == field['name']:
                    found = True
                    if v != field_value:
                        if not self.module.check_mode:
                            self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value)
                        result_fields[k] = field_value
                        change_list.append(True)

            if not found and field_value != "":
                if not field_key and not self.module.check_mode:
                    field_key = self.content.customFieldsManager.AddFieldDefinition(name=field['name'], moType=vim.VirtualMachine)
                change_list.append(True)
                if not self.module.check_mode:
                    self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value)
                result_fields[field['name']] = field_value

        if any(change_list):
            changed = True

        return {'changed': changed, 'failed': False, 'custom_attributes': result_fields}
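
    # Return the existing custom field definition matching 'field' by name, or
    # False when no definition with that name exists yet.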
    def check_exists(self, field):
        for x in self.custom_field_mgr:
            if x.name == field:
                return x
        return False
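

# Entry point: build the standard vmware argument spec, locate the target VM and
# delegate the attribute changes to VmAttributeManager.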
def main():
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(type='str'),
        name=dict(type='str'),
        folder=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        state=dict(type='str', default='present',
                   choices=['absent', 'present']),
        attributes=dict(
            type='list',
            default=[],
            options=dict(
                name=dict(type='str', required=True),
                value=dict(type='str'),
            )
        ),
    )
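
    # The module supports check mode, and exactly one of name/uuid/moid is needed
    # to identify the virtual machine.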
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
    )

    if module.params.get('folder'):
        # FindByInventoryPath() does not require an absolute path
        # so we should leave the input folder path unmodified
        module.params['folder'] = module.params['folder'].rstrip('/')

    pyv = VmAttributeManager(module)
    results = {'changed': False, 'failed': False, 'instance': dict()}

    # Check if the virtual machine exists before continuing
    vm = pyv.get_vm()

    if vm:
        # virtual machine already exists
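        # Both states are funnelled through set_custom_field(): for 'absent' the
        # task supplies no values, so matching attributes are blanked with an empty
        # string rather than having their definitions removed.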
        if module.params['state'] == "present":
            results = pyv.set_custom_field(vm, module.params['attributes'])
        elif module.params['state'] == "absent":
            results = pyv.set_custom_field(vm, module.params['attributes'])
        module.exit_json(**results)
    else:
        # virtual machine does not exist
        vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
        module.fail_json(msg="Unable to manage custom attributes for non-existing"
                             " virtual machine %s" % vm_id)

if __name__ == '__main__':
    main()
| gpl-3.0 | 1,195,509,160,894,862,000 | -5,624,405,312,372,265,000 | 31.146718 | 130 | 0.619625 | false |