| code (string, 2–1.05M) | repo_name (string, 5–104) | path (string, 4–251) | language (1 class) | license (15 values) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents Tests for Telegram
InlineQueryResultVideo"""
import sys
if sys.version_info[0:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
sys.path.append('.')
import telegram
from tests.base import BaseTest
class InlineQueryResultVideoTest(BaseTest, unittest.TestCase):
"""This object represents Tests for Telegram InlineQueryResultVideo."""
def setUp(self):
self.id = 'id'
self.type = 'video'
self.video_url = 'video url'
self.mime_type = 'mime type'
self.video_width = 10
self.video_height = 15
self.video_duration = 15
self.thumb_url = 'thumb url'
self.title = 'title'
self.caption = 'caption'
self.description = 'description'
self.input_message_content = telegram.InputTextMessageContent('input_message_content')
self.reply_markup = telegram.InlineKeyboardMarkup([[
telegram.InlineKeyboardButton('reply_markup')
]])
self.json_dict = {
'type': self.type,
'id': self.id,
'video_url': self.video_url,
'mime_type': self.mime_type,
'video_width': self.video_width,
'video_height': self.video_height,
'video_duration': self.video_duration,
'thumb_url': self.thumb_url,
'title': self.title,
'caption': self.caption,
'description': self.description,
'input_message_content': self.input_message_content.to_dict(),
'reply_markup': self.reply_markup.to_dict(),
}
def test_video_de_json(self):
video = telegram.InlineQueryResultVideo.de_json(self.json_dict)
self.assertEqual(video.type, self.type)
self.assertEqual(video.id, self.id)
self.assertEqual(video.video_url, self.video_url)
self.assertEqual(video.mime_type, self.mime_type)
self.assertEqual(video.video_width, self.video_width)
self.assertEqual(video.video_height, self.video_height)
self.assertEqual(video.video_duration, self.video_duration)
self.assertEqual(video.thumb_url, self.thumb_url)
self.assertEqual(video.title, self.title)
self.assertEqual(video.description, self.description)
self.assertEqual(video.caption, self.caption)
self.assertDictEqual(video.input_message_content.to_dict(),
self.input_message_content.to_dict())
self.assertDictEqual(video.reply_markup.to_dict(), self.reply_markup.to_dict())
def test_video_to_json(self):
video = telegram.InlineQueryResultVideo.de_json(self.json_dict)
self.assertTrue(self.is_json(video.to_json()))
def test_video_to_dict(self):
video = telegram.InlineQueryResultVideo.de_json(self.json_dict).to_dict()
self.assertTrue(self.is_dict(video))
self.assertDictEqual(self.json_dict, video)
if __name__ == '__main__':
unittest.main()
| franciscod/python-telegram-bot | tests/test_inlinequeryresultvideo.py | Python | gpl-2.0 | 3,826 |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.lines import lineStyles
Light_cnames={'mistyrose':'#FFE4E1','navajowhite':'#FFDEAD','seashell':'#FFF5EE','papayawhip':'#FFEFD5','blanchedalmond':'#FFEBCD','white':'#FFFFFF','mintcream':'#F5FFFA','antiquewhite':'#FAEBD7','moccasin':'#FFE4B5','ivory':'#FFFFF0','lightgoldenrodyellow':'#FAFAD2','lightblue':'#ADD8E6','floralwhite':'#FFFAF0','ghostwhite':'#F8F8FF','honeydew':'#F0FFF0','linen':'#FAF0E6','snow':'#FFFAFA','lightcyan':'#E0FFFF','cornsilk':'#FFF8DC','bisque':'#FFE4C4','aliceblue':'#F0F8FF','gainsboro':'#DCDCDC','lemonchiffon':'#FFFACD','lightyellow':'#FFFFE0','lavenderblush':'#FFF0F5','whitesmoke':'#F5F5F5','beige':'#F5F5DC','azure':'#F0FFFF','oldlace':'#FDF5E6'}
def plot10seperate():
mons=["201603","201604","201605","201606","201607","201608","201609","201610","201611","201612","201701","201702","201703","201704","201705","201706"]
days=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']
rootpath="F:/workspace/git/TranWeatherProject/data/mesonet_data/"
for mon in mons:
for day in days:
print mon+day
fileName=rootpath+mon+day+".txt"
day_data=[]
with open(fileName,"r") as df:
for line in df.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data.append((sta_name,mon+day,data))
X=[(i*5.0/60.0) for i in range(1,len(day_data[0][2]),1)]
fig=plt.figure(1)
fig.add_subplot(10,1,1)
plt.plot(X,day_data[0][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[0][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,2)
plt.plot(X,day_data[1][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[1][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,3)
plt.plot(X,day_data[2][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[2][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,4)
plt.plot(X,day_data[3][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[3][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,5)
plt.plot(X,day_data[4][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[4][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,6)
plt.plot(X,day_data[5][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[5][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,7)
plt.plot(X,day_data[6][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[6][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,8)
plt.plot(X,day_data[7][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[7][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,9)
plt.plot(X,day_data[8][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period From 00:00am ~23:59')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[8][0]+" Station Date: "+mon+day +"Temperature")
fig.add_subplot(10,1,10)
plt.plot(X,day_data[9][2],'b*-',linewidth='2.0', markersize=5,label='Temperature')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-20.0,60.0])
plt.xlabel('time Period')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.title(day_data[9][0]+" Station Date: "+mon+day +"Temperature")
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.show()
fig.savefig('F:/workspace/git/TranWeatherProject/outputs/mesonetPlots/'+str(mon+day)+'.png')
plt.close()
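# Input format sketch (inferred from the parsing above, values illustrative):
# each mesonet day file holds one line per station, the station identifier
# first and then one float reading per 5-minute interval, e.g.
#
#   0 31.2 31.0 30.8 ...   (288 readings for a full day)
#
# terms[0] is taken as the station name and map(float, terms[1:]) as the data.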
import os
def plotSignle():
mons=["201603","201604","201605","201606","201607","201608","201609"]
#mons=["201604"]
#mons=["201609"]
days=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']
#days=[""]
sta_names={0:"BATA",1:"SBRI",2:"WATE",3:"JORD",4:"CSQR",5:"WEST",6:"COLD",7:"SPRA",8:"COBL",9:"STEP"}
var_type="precip"
rootpath="F:/workspace/git/Graph-MP/data/mesonet_data/"+var_type+"/"
for mon in mons:
for day in days:
fileName=rootpath+mon+day+".txt"
print fileName
day_data=[]
if not os.path.exists(fileName):
continue
with open(fileName,"r") as df:
for line in df.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data.append((sta_name,mon+day,data))
X=[i for i in range(0,len(day_data[0][2]))]
label=[(str(i)+"\n"+str(i*5/60)+"h") for i in range(0,len(day_data[0][2])+1,12)]
print sta_names[int(day_data[0][0])]
fig=plt.figure(1)
plt.plot(X,day_data[0][2],'b-',linewidth='1.0', markersize=5,label=sta_names[int(day_data[0][0])]+day_data[0][0])
plt.plot(X,day_data[1][2],'r-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[1][0])])+day_data[1][0])
plt.plot(X,day_data[2][2],'k-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[2][0])])+day_data[2][0])
plt.plot(X,day_data[3][2],'g-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[3][0])])+day_data[3][0])
plt.plot(X,day_data[4][2],'y-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[4][0])])+day_data[4][0])
plt.plot(X,day_data[5][2],'c-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[5][0])])+day_data[5][0])
plt.plot(X,day_data[6][2],'m-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[6][0])])+day_data[6][0])
plt.plot(X,day_data[7][2],color ='#B47CC7',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[7][0])])+day_data[7][0])
plt.plot(X,day_data[8][2],color='#FBC15E',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[8][0])])+day_data[8][0])
plt.plot(X,day_data[9][2],color='#e5ee38',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[9][0])])+day_data[9][0])
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
if var_type=="wind":
plt.ylim([-5.0,70.0])
plt.ylabel('Avg. Wind Speed(mph)')
plt.title(mon+day +"Every 5min Avg. Wind")
elif type=="temp":
plt.ylim([-10.0,100.0])
plt.ylabel('Temperature(F)')
plt.title(mon+day +"Temperature")
else:
plt.ylim([-1.0,2.0])
plt.ylabel('Precipitation Est (Inch)')
plt.title(mon+day +"Precipitation")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
print len(X)
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,each 5min')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.grid()
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig('F:/workspace/git/Graph-MP/outputs/mesonetPlots/'+var_type+'_plots/'+str(mon+day)+'.png')
plt.close()
def expAvg(fileName):
expAvgs=[]
expMin=[]
expMax=[]
with open(fileName,"r") as oF:
for line in oF.readlines():
expAvgs.append(float(line.strip().split()[0]))
expMin.append(float(line.strip().split()[1]))
expMax.append(float(line.strip().split()[3]))
return expAvgs,expMin,expMax
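# Format sketch (inferred from the indices used above, values illustrative):
# each line of the expected-value file carries at least four whitespace-
# separated numbers, where column 0 is the expected average, column 1 the
# minimum and column 3 the maximum, e.g. "42.0 35.5 40.1 55.3".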
def plotCaseDays():
dates=["20160301","20160302","20160308","20160309","20160312","20160313","20160324","20160325","20160328","20160405","20160412","20160419","20160421","20160514","20160529","20160621","20160628","20160813","20160911","20160922"]
mons=["201603","201604","201605","201606","201607","201608","201609"]
days=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']
sta_names={0:"BATA",1:"SBRI",2:"WATE",3:"JORD",4:"CSQR",5:"WEST",6:"COLD",7:"SPRA",8:"COBL",9:"STEP"}
var_type="temp"
rootpath="F:/workspace/git/TranWeatherProject/data/mesonet_data/"+var_type+"/"
#expRoot="F:/workspace/git/TranWeatherProject/data/mesonet_data/mesonetExpData/statExpData/"
for mon in mons:
for day in days:
date=str(mon+day)
# if date not in dates:
# print "Not ",date
# continue
#expAvgs=expAvg(expRoot+mon+day+".txt")
fileName=rootpath+mon+day+".txt"
print fileName
day_data=[]
if not os.path.exists(fileName):
print "File Not Found",fileName
continue
with open(fileName,"r") as df:
for line in df.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data.append((sta_name,mon+day,data))
X=[i for i in range(0,len(day_data[0][2]))]
label=[(str(i)+"\n"+str(i*5/60)+"h") for i in range(0,len(day_data[0][2])+1,12)]
labelY=[str(i) for i in range(0,100+1,5)]
print sta_names[int(day_data[0][0])]
fig=plt.figure(1)
plt.plot(X,day_data[0][2],'b-',linewidth='2.0', markersize=5,label=sta_names[int(day_data[0][0])]+day_data[0][0])
plt.plot(X,day_data[1][2],'r-',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[1][0])])+day_data[1][0])
plt.plot(X,day_data[2][2],'k-',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[2][0])])+day_data[2][0])
plt.plot(X,day_data[3][2],'g-',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[3][0])])+day_data[3][0])
plt.plot(X,day_data[4][2],'y-',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[4][0])])+day_data[4][0])
plt.plot(X,day_data[5][2],'c-',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[5][0])])+day_data[5][0])
plt.plot(X,day_data[6][2],'m-',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[6][0])])+day_data[6][0])
plt.plot(X,day_data[7][2],color ='#B47CC7',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[7][0])])+day_data[7][0])
plt.plot(X,day_data[8][2],color='#FBC15E',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[8][0])])+day_data[8][0])
plt.plot(X,day_data[9][2],color='#e5ee38',linewidth='2.0', markersize=5,label=str(sta_names[int(day_data[9][0])])+day_data[9][0])
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
if var_type=="wind":
#plt.ylim([-5.0,70.0])
plt.ylabel('Avg. Wind Speed(mph)')
plt.title(mon+day +"Every 5min Avg. Wind")
else:
plt.ylim([-10.0,100.0])
plt.ylabel('Temperature(F)')
plt.title(mon+day +"Temperature")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
#plt.yticks(np.arange(0, 100, 5.0),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,every 5min')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.grid()
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig('F:/workspace/git/Graph-MP/outputs/mesonetPlots/'+var_type+'_CaseStudy/'+str(mon+day)+'.png', dpi=300)
plt.close()
def plotSingleDays():
fileName="F:/workspace/git/Graph-MP/data/mesonet_data/test_4.txt"
sta_names={0:"BATA",1:"SBRI",2:"WATE",3:"JORD",4:"CSQR",5:"WEST",6:"COLD",7:"SPRA",8:"COBL",9:"STEP"}
day_data=[]
with open(fileName,"r") as df:
for line in df.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:288])
day_data.append((sta_name,'201603001',data))
X=[i for i in range(0,len(day_data[0][2]))]
label=[(str(i)+"\n"+str(i*5/60)+"h") for i in range(0,len(day_data[0][2])+1,12)]
labelY=[str(i) for i in range(0,100+1,5)]
print sta_names[int(day_data[0][0])]
fig=plt.figure(1)
plt.plot(X,day_data[0][2],'b-',linewidth='1.0', markersize=5,label=sta_names[int(day_data[0][0])]+day_data[0][0])
plt.plot(X,day_data[1][2],'r-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[1][0])])+day_data[1][0])
plt.plot(X,day_data[2][2],'k-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[2][0])])+day_data[2][0])
plt.plot(X,day_data[3][2],'g-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[3][0])])+day_data[3][0])
plt.plot(X,day_data[4][2],'y-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[4][0])])+day_data[4][0])
plt.plot(X,day_data[5][2],'c-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[5][0])])+day_data[5][0])
plt.plot(X,day_data[6][2],'m-',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[6][0])])+day_data[6][0])
plt.plot(X,day_data[7][2],color ='#B47CC7',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[7][0])])+day_data[7][0])
plt.plot(X,day_data[8][2],color='#FBC15E',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[8][0])])+day_data[8][0])
plt.plot(X,day_data[9][2],color='#e5ee38',linewidth='1.0', markersize=5,label=str(sta_names[int(day_data[9][0])])+day_data[9][0])
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
# if var_type=="wind":
# #plt.ylim([-5.0,70.0])
# plt.ylabel('Avg. Wind Speed(mph)')
# plt.title(mon+day +"Every 5min Avg. Wind")
# else:
# plt.ylim([-10.0,100.0])
# plt.ylabel('Temperature(F)')
# plt.title(mon+day +"Temperature")
plt.ylim([-10.0,100.0])
plt.ylabel('Temperature(F)')
plt.title('201603001 ' +"Temperature")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
#plt.yticks(np.arange(0, 100, 5.0),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,each 5min')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
plt.grid()
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig('F:/workspace/git/Graph-MP/data/mesonet_data/201603001_4.png', dpi=300)
plt.close()
import time
def loadTop(fileName):
results=[]
with open(fileName,"r") as rF:
for i,line in enumerate(rF.readlines()):
terms=line.strip().split(" ")
results.append((int(terms[0]),map(int,terms[1].split(",")),terms[2],map(int,terms[3].split(","))))
if i>19 :
break
return results
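# Format sketch (inferred from the parsing above, values illustrative): each
# line of the top-K result file carries four space-separated fields,
#
#   <rank> <comma-separated station indices> <date> <comma-separated interval>
#
# e.g. "0 3,4,5 20160309 120,150"; only the leading ~20 lines are returned.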
def plotCaseDaysSingleStation():
#dates=["20160301","20160302","20160308","20160309","20160312","20160313","20160324","20160325","20160328","20160405","20160412","20160419","20160421","20160514","20160529","20160621","20160628","20160813","20160911","20160922"]
vars=['i0','i1','i2','i3','i4','i5','i6','i7','i8','i9']
topResults=loadTop("F:/workspace/git/Graph-MP/outputs/mesonetPlots/multi_CaseStudy/CP/2/20multi_TopK_result-CP_baseMeanDiff_20_s_2_wMax_18_filter_TIncld_0.7_Top.txt")
for result in topResults:
dates=[]
top=result[0]+1
vals=result[1]
dates.append(result[2])
for i,var in enumerate(vars):
if i in vals:
exec "%s=%s"%(vars[i], 1)
else:
exec "%s=%s"%(vars[i], 0)
print i0,i1,i2,i3,i4,i5,i6,i7,i8,i9
# i0=0
# i1=0
# i2=0
# i3=1
# i4=1
# i5=1
# i6=1
# i7=0
# i8=0
# i9=0
mons=["201603","201604","201605","201606","201607","201608","201609"]
days=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']
sta_names={0:"BATA",1:"SBRI",2:"WATE",3:"JORD",4:"CSQR",5:"WEST",6:"COLD",7:"SPRA",8:"COBL",9:"STEP"}
var_type="wind"
rootpath="F:/workspace/git/Graph-MP/data/mesonet_data/"+var_type+"/"
rootpath2="F:/workspace/git/Graph-MP/data/mesonet_data/temp/"
rootpath3="F:/workspace/git/Graph-MP/data/mesonet_data/precip/"
#expRoot="F:/workspace/git/TranWeatherProject/data/mesonet_data/mesonetExpData/statExpData/"
for mon in mons:
for day in days:
date=str(mon+day)
if date not in dates:
#print "Not ",date
continue
#expAvgs=expAvg(expRoot+mon+day+".txt")
fileName=rootpath+mon+day+".txt"
fileName2=rootpath2+mon+day+".txt"
fileName3=rootpath3+mon+day+".txt"
print fileName
if not os.path.exists(fileName):
print "File Not Found",fileName
continue
if not os.path.exists(fileName2):
print "File Not Found",fileName2
continue
if not os.path.exists(fileName3):
print "File Not Found",fileName2
continue
day_data=[]
with open(fileName,"r") as df:
for line in df.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data.append((sta_name,mon+day,data))
day_data2=[]
with open(fileName2,"r") as df2:
for line in df2.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data2.append((sta_name,mon+day,data))
day_data3=[]
with open(fileName3,"r") as df3:
for line in df3.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data3.append((sta_name,mon+day,data))
X=[i for i in range(0,len(day_data[0][2]))]
label=[(str(i)+"\n"+str(i*5/60)+"h") for i in range(0,len(day_data[0][2])+1,12)]
labelY=[str(i) for i in range(0,100+1,5)]
print sta_names[int(day_data[0][0])]
print day_data[i3][2]
fig=plt.figure(1)
if i0!=0:
plt.plot(X,day_data[0][2],'b-',linewidth='0.5', markersize=5,label='Wind '+sta_names[int(day_data[0][0])]+day_data[0][0])
if i1!=0:
plt.plot(X,day_data[1][2],'r-',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[1][0])])+day_data[1][0])
if i2!=0:
plt.plot(X,day_data[2][2],'k-',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[2][0])])+day_data[2][0])
if i3!=0:
plt.plot(X,day_data[3][2],'g-',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[3][0])])+day_data[3][0])
if i4!=0:
plt.plot(X,day_data[4][2],'y-',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[4][0])])+day_data[4][0])
if i5!=0:
plt.plot(X,day_data[5][2],'c-',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[5][0])])+day_data[5][0])
if i6!=0:
plt.plot(X,day_data[6][2],'m-',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[6][0])])+day_data[6][0])
if i7!=0:
plt.plot(X,day_data[7][2],color ='#B47CC7',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[7][0])])+day_data[7][0])
if i8!=0:
plt.plot(X,day_data[8][2],color='#FBC15E',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[8][0])])+day_data[8][0])
if i9!=0:
plt.plot(X,day_data[9][2],color='#e5ee38',linewidth='0.5', markersize=5,label=str(sta_names[int(day_data[9][0])])+day_data[9][0])
plt.axvline(x=result[3][0], ymin=-1.0, ymax=50.0,color='k',linestyle='--')
plt.axvline(x=result[3][1], ymin=-1.0, ymax=50.0,color='k',linestyle='--')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylim([-1.0,50.0])
plt.title("Top"+str(result[0]+1)+" "+mon+day +"Wind")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
plt.yticks(np.arange(-1, 50, 5.0),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,each 5min')
plt.grid()
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
# fig.subplots_adjust(bottom = 2)
# fig.subplots_adjust(top = 2)
# fig.subplots_adjust(right = 2)
# fig.subplots_adjust(left = 0)
#plt.plot(X,day_data2[i][2],'r-',linewidth='1.0', markersize=5,label='Temp '+sta_names[int(day_data2[i][0])]+day_data2[i][0])
fig.savefig('F:/workspace/git/Graph-MP/outputs/mesonetPlots/multi_CaseStudy/mvPlots/'+str(top)+'_wind_'+str(mon+day)+'.png', dpi=300)
fig.clf()
fig=plt.figure(2)
if i0!=0:
plt.plot(X,day_data2[0][2],'b-',linewidth='0.5', markersize=5)
if i1!=0:
plt.plot(X,day_data2[1][2],'r-',linewidth='0.5', markersize=5)
if i2!=0:
plt.plot(X,day_data2[2][2],'k-',linewidth='0.5', markersize=5)
if i3!=0:
plt.plot(X,day_data2[3][2],'g-',linewidth='0.5', markersize=5)
if i4!=0:
plt.plot(X,day_data2[4][2],'y-',linewidth='0.5', markersize=5)
if i5!=0:
plt.plot(X,day_data2[5][2],'c-',linewidth='0.5', markersize=5)
if i6!=0:
plt.plot(X,day_data2[6][2],'m-',linewidth='0.5', markersize=5)
if i7!=0:
plt.plot(X,day_data2[7][2],color ='#B47CC7',linewidth='0.5', markersize=5)
if i8!=0:
plt.plot(X,day_data2[8][2],color='#FBC15E',linewidth='0.5', markersize=5)
if i9!=0:
plt.plot(X,day_data2[9][2],color='#e5ee38',linewidth='0.5', markersize=5)
# if var_type=="wind":
# plt.ylim([-1.0,50.0])
# plt.ylabel('Avg. Wind Speed(mph)')
# plt.title(mon+day +"Every 5min Avg. Wind")
# else:
# plt.ylim([-10.0,100.0])
# plt.ylabel('Temperature(F)')
# plt.title(mon+day +"Temperature")
plt.axvline(x=result[3][0], ymin=-10.0, ymax=100.0,color='k',linestyle='--')
plt.axvline(x=result[3][1], ymin=-10.0, ymax=100.0,color='k',linestyle='--')
plt.ylim([-10.0,100.0])
plt.title("Top"+str(result[0]+1)+" "+mon+day +"Temperature ")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
plt.yticks(np.arange(0, 100, 5.0),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,each 5min')
plt.grid()
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
#
# fig.subplots_adjust(bottom = 0)
# fig.subplots_adjust(top = 1)
# fig.subplots_adjust(right = 1)
# fig.subplots_adjust(left = 0)
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig('F:/workspace/git/Graph-MP/outputs/mesonetPlots/multi_CaseStudy/mvPlots/'+str(top)+'_temp_'+str(mon+day)+'.png', dpi=300)
fig.clf()
fig=plt.figure(3)
if i0!=0:
plt.plot(X,day_data3[0][2],'b-',linewidth='0.5', markersize=5)
if i1!=0:
plt.plot(X,day_data3[1][2],'r-',linewidth='0.5', markersize=5)
if i2!=0:
plt.plot(X,day_data3[2][2],'k-',linewidth='0.5', markersize=5)
if i3!=0:
plt.plot(X,day_data3[3][2],'g-',linewidth='0.5', markersize=5)
if i4!=0:
plt.plot(X,day_data3[4][2],'y-',linewidth='0.5', markersize=5)
if i5!=0:
plt.plot(X,day_data3[5][2],'c-',linewidth='0.5', markersize=5)
if i6!=0:
plt.plot(X,day_data3[6][2],'m-',linewidth='0.5', markersize=5)
if i7!=0:
plt.plot(X,day_data3[7][2],color ='#B47CC7',linewidth='0.5', markersize=5)
if i8!=0:
plt.plot(X,day_data3[8][2],color='#FBC15E',linewidth='0.5', markersize=5)
if i9!=0:
plt.plot(X,day_data3[9][2],color='#e5ee38',linewidth='0.5', markersize=5)
# if var_type=="wind":
# plt.ylim([-1.0,50.0])
# plt.ylabel('Avg. Wind Speed(mph)')
# plt.title(mon+day +"Every 5min Avg. Wind")
# else:
# plt.ylim([-10.0,100.0])
# plt.ylabel('Temperature(F)')
# plt.title(mon+day +"Temperature")
plt.axvline(x=result[3][0], ymin=-0.2, ymax=2.0,color='k',linestyle='--')
plt.axvline(x=result[3][1], ymin=-0.2, ymax=2.0,color='k',linestyle='--')
plt.ylim([-0.2,2.0])
plt.title("Top"+str(result[0]+1)+" "+mon+day +"Precipitation ")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
#plt.yticks(np.arange(-0.2, 2.0, 0.5),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,each 5min')
plt.grid()
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=8)
# fig.subplots_adjust(bottom = 0)
# fig.subplots_adjust(top = 1)
# fig.subplots_adjust(right = 1)
# fig.subplots_adjust(left = 0)
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig('F:/workspace/git/Graph-MP/outputs/mesonetPlots/multi_CaseStudy/mvPlots/'+str(top)+'_precip_'+str(mon+day)+'.png', dpi=300)
fig.clf()
plt.close()
def plotAllDays():
root="F:/workspace/git/WeatherTransportationProject/"
#dates=["20160301","20160302","20160308","20160309","20160312","20160313","20160324","20160325","20160328","20160405","20160412","20160419","20160421","20160514","20160529","20160621","20160628","20160813","20160911","20160922"]
dates=[]
#"201603","201604","201605","201606","201607","201608"
mons=["201609","201610","201611","201612","201701","201702","201703","201704","201705","201706"]
days=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']
sta_names={0:"BATA",1:"SBRI",2:"WATE",3:"JORD",4:"CSQR",5:"WEST",6:"COLD",7:"SPRA",8:"COBL",9:"STEP"}
var_types=["temp","temp9","press","wind","windDir","windMax","rh","rad"]
#var_types=["wind"]
for var_type in var_types:
rootpath=root+"data/mesonet_data/"+var_type+"/"
#expRoot="F:/workspace/git/Graph-MP/data/mesonet_data/mesonetExpData/statExpData/"
for mon in mons:
for day in days:
date=str(mon+day)
# if date in dates:
# print "Not ",date
# continue
fileName=rootpath+mon+day+".txt"
print fileName
day_data=[]
if not os.path.exists(fileName):
print "File Not Found",fileName
continue
with open(fileName,"r") as df:
for line in df.readlines():
terms=line.strip().split()
sta_name=terms[0]
data=map(float,terms[1:])
day_data.append((sta_name,mon+day,data))
X=[i for i in range(0,len(day_data[0][2]))]
label=[(str(i)+"\n"+str(i*5/60)+"h") for i in range(0,len(day_data[0][2])+1,12)]
labelY=[str(i) for i in range(0,100+1,5)]
print sta_names[int(day_data[0][0])]
fig=plt.figure(1)
plt.plot(X,day_data[0][2],'b-',linewidth='1.5', markersize=5,label=sta_names[int(day_data[0][0])]+day_data[0][0])
plt.plot(X,day_data[1][2],'r-',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[1][0])])+day_data[1][0])
plt.plot(X,day_data[2][2],'k-',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[2][0])])+day_data[2][0])
plt.plot(X,day_data[3][2],'g-',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[3][0])])+day_data[3][0])
plt.plot(X,day_data[4][2],'y-',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[4][0])])+day_data[4][0])
plt.plot(X,day_data[5][2],'c-',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[5][0])])+day_data[5][0])
plt.plot(X,day_data[6][2],'m-',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[6][0])])+day_data[6][0])
plt.plot(X,day_data[7][2],color ='#B47CC7',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[7][0])])+day_data[7][0])
plt.plot(X,day_data[8][2],color='#FBC15E',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[8][0])])+day_data[8][0])
plt.plot(X,day_data[9][2],color='#e5ee38',linewidth='1.5', markersize=5,label=str(sta_names[int(day_data[9][0])])+day_data[9][0])
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=1, mode="expand", borderaxespad=0.)
plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
if var_type=="wind":
plt.ylim([-5.0,70.0])
plt.ylabel('Average Wind Speed(mph)')
plt.title(mon+day +" Every 5min Average Wind Speed")
elif var_type=="windMax":
plt.ylim([-5.0,70.0])
plt.ylabel('Max Wind Speed(mph)')
plt.title(mon+day +"Every 5min Max Wind")
elif var_type=="windDir":
#plt.ylim([-5.0,70.0])
plt.ylabel('Max Wind Speed(mph)')
plt.title(mon+day +" Wind Direction Degree")
elif var_type=="temp":
plt.ylim([-10.0,100.0])
plt.ylabel('Temperature(F)')
plt.title(mon+day +" 2m Temperature")
elif var_type=="temp9":
plt.ylim([-10.0,100.0])
plt.ylabel('Temperature(F)')
plt.title(mon+day +" 9m Temperature")
elif var_type=="press":
#plt.ylim([-10.0,100.0])
plt.ylabel('Pressure(mbar)')
plt.title(mon+day +" Pressure")
elif var_type=="rad":
#plt.ylim([-10.0,100.0])
plt.ylabel('Solar Radiation(W/m^2)')
plt.title(mon+day +" Solar Radiation")
elif var_type=="rh":
plt.ylim([0.0,100.0])
plt.ylabel('Relative Humidity %')
plt.title(mon+day +" rh")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
#plt.yticks(np.arange(0, 100, 5.0),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time from 00:00 ~23:59,every 5min')
#plt.xlim([0.2,0.0])
plt.legend(loc='best',fontsize=10)
plt.grid()
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig(root+'/outputs/mesonetPlots/'+var_type+'_plots/'+str(mon+day)+'.png')
plt.close()
def plotTravTimeAllDays():
import matplotlib
#dates=["20160301","20160302","20160308","20160309","20160312","20160313","20160324","20160325","20160328","20160405","20160412","20160419","20160421","20160514","20160529","20160621","20160628","20160813","20160911","20160922"]
dates=[]
mons=["201603","201604","201605","201606","201607","201608","201609"]
days=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']
var_types=["TravelTimeToWest","TravelTimeToWest"]
#var_types=["wind"]
colors=[]
for name, hex in matplotlib.colors.cnames.iteritems():
if name not in Light_cnames.keys():
colors.append(hex)
for var_type in var_types:
rootpath="F:/workspace/git/Graph-MP/data/trafficData/I90_TravelTime/"+var_type+"/"
#expRoot="F:/workspace/git/Graph-MP/data/mesonet_data/mesonetExpData/statExpData/"
for mon in mons:
for day in days:
date=str(mon+day)
# if date in dates:
# print "Not ",date
# continue
fileName=rootpath+mon+day+".txt"
print fileName
day_data=[]
if not os.path.exists(fileName):
print "File Not Found",fileName
continue
with open(fileName,"r") as df:
for idx,line in enumerate(df.readlines()):
terms=line.strip().split()
sta_name="TMC "+str(idx)
data=map(float,terms)
day_data.append((sta_name,mon+day,data))
X=[i for i in range(0,len(day_data[0][2]))]
label=[(str(i)+"\n"+str(i*5/60)+"h") for i in range(0,len(day_data[0][2])+1,12)]
labelY=[str(i) for i in range(0,100+1,5)]
print len(day_data)
fig=plt.figure(1)
for i in range(len(day_data)):
plt.plot(X,day_data[i][2],colors[i],linewidth='0.5', markersize=5,label=day_data[i][0])
# art = []
# lgd = plt.legend(loc=3, bbox_to_anchor=(0, -0.5), ncol=5)
# art.append(lgd)
#plt.plot([0.2,0.1,0.0],[0.5,0.5,0.5])
plt.ylabel('Traveling Time (sec)')
if var_type=="TravelTimeToWest":
plt.title(mon+day +" Travel Time I90 East To West")
else:
plt.title(mon+day +" Travel Time I90 West To East")
#plt.xticks(np.arange(min(X), max(X)+2, 12.0))
plt.xticks(np.arange(min(X), max(X)+2, 12.0),label)
#plt.yticks(np.arange(0, 100, 5.0),labelY)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.xlabel('Time: 00:00 ~ 23:59,every 5min')
#plt.xlim([0.2,0.0])
plt.ylim([0.0,3600.0])
# plt.legend(loc='best',fontsize=10)
plt.grid()
#plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
fig.savefig('F:/workspace/git/Graph-MP/outputs/trafficData/'+var_type+'_plots/'+str(mon+day)+'.png')
plt.close()
plotAllDays()
| newera912/WeatherTransportationProject | target/classes/edu/albany/cs/transWeatherPy/plotMesonetOrgData.py | Python | gpl-2.0 | 43,328 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# If this page isn't working, try executing `chmod +x app.py` in terminal.
# enable debugging
import cgitb, cgi; cgitb.enable()
from classes import Factory
fieldStorage = cgi.FieldStorage()
factory = Factory.Factory()
webApp = factory.makeWebApp(fieldStorage)
def outputHeaders():
print "Content-Type: text/html"
print # signals end of headers
outputHeaders()
print webApp.getOutput()
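# Local testing sketch (assumption, not part of the original script): because
# this file lives under htbin/, Python 2's CGI server, whose default CGI
# directories include /htbin, can serve it from the project root:
#
#   python -m CGIHTTPServer 8000   # then open http://localhost:8000/htbin/app.py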
| OuachitaHillsMinistries/OHCFS | htbin/app.py | Python | gpl-2.0 | 447 |
#
# Place a file called test.txt in the same directory as this program...
#
f = open('test.txt')
s = f.read()
print(s)
| erikdejonge/python_for_kids | chapter09/opening_a_file.py | Python | gpl-2.0 | 130 |
# GNU Enterprise Common Library - Schema support for PostgreSQL
#
# Copyright 2000-2007 Free Software Foundation
#
# This file is part of GNU Enterprise.
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# $Id: Behavior.py 9222 2007-01-08 13:02:49Z johannes $
"""
Schema support plugin for PostgreSQL backends.
"""
__all__ = ['Behavior']
import os
from gnue.common.apps import errors
from gnue.common.datasources import GSchema
from gnue.common.datasources.drivers import DBSIG2
# =============================================================================
# Behavior class
# =============================================================================
class Behavior (DBSIG2.Behavior):
"""
Behavior class for PostgreSQL backends.
"""
# ---------------------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------------------
def __init__ (self, *args, **kwargs):
DBSIG2.Behavior.__init__ (self, *args, **kwargs)
self.__RELKIND = {'v': {'type': 'view', 'name': u_("Views")},
'r': {'type': 'table', 'name': u_("Tables")}}
# Build typemap: {nativetype: (group, fieldtype)}
self.__TYPEMAP = {'date' : ('date', 'date'),
'bool' : ('boolean', 'boolean'),
'string': ('string', 'string')}
for item in ['numeric', 'float4', 'float8', 'money', 'int8',
'int2', 'int4', 'serial']:
self.__TYPEMAP [item] = ('number', 'number')
for item in ['time', 'reltime']:
self.__TYPEMAP [item] = ('date', 'time')
for item in ['timestamp', 'abstime']:
self.__TYPEMAP [item] = ('date', 'datetime')
self._maxIdLength_ = 31
self._alterMultiple_ = False
self._numbers_ = [[(4, 'smallint'), (9, 'integer'), (18, 'bigint')],
"numeric (%s,0)", "numeric (%(length)s,%(scale)s)"]
self._type2native_.update ({'boolean' : 'boolean',
'datetime': 'timestamp without time zone'})
# ---------------------------------------------------------------------------
# Create a new database
# ---------------------------------------------------------------------------
def _createDatabase_ (self):
"""
Create the requested user and database using the tools 'createuser',
'createdb' and 'dropuser'. Of course this function should better make use
of the template1 database using a connection object.
"""
dbname = self.__connection.parameters.get ('dbname')
username = self.__connection.parameters.get ('username', 'gnue')
password = self.__connection.parameters.get ('password')
host = self.__connection.parameters.get ('host')
port = self.__connection.parameters.get ('port')
owner = self.__connection.parameters.get ('owner', username)
ownerpwd = self.__connection.parameters.get ('ownerpwd')
site = ""
if host is not None:
site += " --host=%s" % host
if port is not None:
site += " --port=%s" % port
# First, let's connect to template1 using the given username and password
self.__connection.parameters ['dbname'] = 'template1'
self.__connection.manager.loginToConnection (self.__connection)
# Then have a look whether the requested owner is already available
result = self.__connection.sql ('SELECT usesysid FROM pg_user ' \
'WHERE usename = %(owner)s', {'owner': owner})
if not result:
cmd = 'CREATE USER %s' % owner
if ownerpwd:
cmd += " WITH PASSWORD '%s'" % ownerpwd
self.__connection.sql0 (cmd)
self.__connection.commit ()
# Now go and create that new database
cmd = "ABORT; CREATE DATABASE %s WITH OWNER %s ENCODING = 'UNICODE'; BEGIN"
self.__connection.sql0 (cmd % (dbname, owner))
self.__connection.commit ()
self.__connection.close ()
# Since the newly created database should be available now, connect to it
# using the given owner
self.__connection.parameters ['dbname'] = dbname
self.__connection.parameters ['username'] = owner
if ownerpwd:
self.__connection.parameters ['password'] = ownerpwd
else:
if 'password' in self.__connection.parameters:
del self.__connection.parameters ['password']
self.__connection.manager.loginToConnection (self.__connection)
# ---------------------------------------------------------------------------
# Read the current connection's schema
# ---------------------------------------------------------------------------
def _readSchema_ (self, parent):
"""
Read the connection's schema and build a GSchema object tree connected to
the given parent object (which is of type GSSchema).
"""
tables = self.__readTables (parent)
fields = self.__readFields (tables)
self.__readDefaults (fields)
self.__readKeys (tables)
self.__readConstraints (tables, fields)
# ---------------------------------------------------------------------------
# Read all table-like elements
# ---------------------------------------------------------------------------
def __readTables (self, parent):
mapping = {} # Maps OIDs to GSTable instances
tables = None
views = None
cmd = u"SELECT c.oid, c.relname, c.relkind " \
"FROM pg_class c, pg_namespace n " \
"WHERE n.nspname = 'public' AND n.oid = c.relnamespace AND " \
" c.relkind in (%s) " \
"ORDER BY c.relname" \
% ','.join (["%r" % kind for kind in self.__RELKIND.keys ()])
cursor = self.__connection.makecursor (cmd)
try:
for (oid, relname, relkind) in cursor.fetchall ():
kind = self.__RELKIND [relkind] ['type']
properties = {'id': oid, 'name': relname, 'kind': kind}
if relkind == 'v':
if views is None:
views = GSchema.GSTables (parent, **self.__RELKIND [relkind])
master = views
else:
if tables is None:
tables = GSchema.GSTables (parent, **self.__RELKIND [relkind])
master = tables
table = GSchema.GSTable (master, **properties)
# Maintain a temporary mapping from OIDs to GSTable instances so
# adding fields afterwards runs faster
mapping [oid] = table
finally:
cursor.close ()
return mapping
# ---------------------------------------------------------------------------
# Find all fields
# ---------------------------------------------------------------------------
def __readFields (self, tables):
cmd = u"SELECT attrelid, attname, t.typname, attnotnull, " \
" atthasdef, atttypmod, attnum, attlen " \
"FROM pg_attribute a " \
"LEFT OUTER JOIN pg_type t ON t.oid = a.atttypid " \
"WHERE attnum >= 0 AND attisdropped = False " \
"ORDER BY attrelid, attnum"
cursor = self.__connection.makecursor (cmd)
fields = None
result = {}
try:
for rs in cursor.fetchall ():
(relid, name, typename, notnull, hasdef, typemod, attnum, attlen) = rs
# only process attributes from tables we've listed before
if not relid in tables:
continue
attrs = {'id' : "%s.%s" % (relid, attnum),
'name' : name,
'nativetype': typename,
'nullable' : hasdef or not notnull}
if typename.lower () in self.__TYPEMAP:
(group, attrs ['type']) = self.__TYPEMAP [typename.lower ()]
else:
(group, attrs ['type']) = self.__TYPEMAP ['string']
if group == 'number':
if typemod != -1:
value = typemod - 4
attrs ['length'] = value >> 16
attrs ['precision'] = value & 0xFFFF
elif attlen > 0:
attrs ['length'] = len ("%s" % 2L ** (attlen * 8))
elif typemod != -1:
attrs ['length'] = typemod - 4
# Remove obsolete attributes
if group in ['date', 'boolean']:
for item in ['length', 'precision']:
if item in attrs:
del attrs [item]
elif group in ['string']:
if 'precision' in attrs:
del attrs ['precision']
table = tables [relid]
fields = table.findChildOfType ('GSFields')
if fields is None:
fields = GSchema.GSFields (table)
result [attrs ['id']] = GSchema.GSField (fields, **attrs)
finally:
cursor.close ()
return result
# ---------------------------------------------------------------------------
# Read defaults and apply them to the given fields
# ---------------------------------------------------------------------------
def __readDefaults (self, fields):
cmd = u"SELECT adrelid, adnum, adsrc FROM pg_attrdef ORDER BY adrelid"
cursor = self.__connection.makecursor (cmd)
try:
for (relid, fieldnum, source) in cursor.fetchall ():
field = fields.get ("%s.%s" % (relid, fieldnum))
# Skip defaults for fields that are not listed
if field is None:
continue
if source [:8] == 'nextval(':
field.defaultwith = 'serial'
elif source == 'now()':
field.defaultwith = 'timestamp'
else:
field.defaultwith = 'constant'
field.default = source.split ('::') [0].strip ("'")
finally:
cursor.close ()
# ---------------------------------------------------------------------------
# Read all indices and associate them with their table/view
# ---------------------------------------------------------------------------
def __readKeys (self, tables):
cmd = u"SELECT indrelid, indkey, indisunique, indisprimary, c.relname " \
"FROM pg_index i LEFT OUTER JOIN pg_class c ON c.oid = indexrelid"
cursor = self.__connection.makecursor (cmd)
try:
for (relid, fieldvec, isUnique, isPrimary, name) in cursor.fetchall ():
# Skip functional indices. A functional index is an index built upon a
# function applied to a field, e.g. upper(userid) instead of userid
fields = [int (i) - 1 for i in fieldvec.split ()]
if not fields:
continue
# only process keys of listed tables
table = tables.get (relid)
if table is None:
continue
if isPrimary:
index = GSchema.GSPrimaryKey (table, name = name)
fClass = GSchema.GSPKField
else:
indices = table.findChildOfType ('GSIndexes')
if indices is None:
indices = GSchema.GSIndexes (table)
index = GSchema.GSIndex (indices, unique = isUnique, name = name)
fClass = GSchema.GSIndexField
fieldList = table.findChildrenOfType ('GSField', False, True)
for find in fields:
fClass (index, name = fieldList [find].name)
finally:
cursor.close ()
# ---------------------------------------------------------------------------
# Read all constraints
# ---------------------------------------------------------------------------
def __readConstraints (self, tables, fields):
cmd = u"SELECT conname, conrelid, confrelid, conkey, confkey, contype " \
"FROM pg_constraint WHERE contype in ('f', 'u')"
cursor = self.__connection.makecursor (cmd)
try:
for (name, relid, fkrel, key, fkey, ctype) in cursor.fetchall ():
table = tables.get (relid)
if ctype == 'f':
fktable = tables.get (fkrel)
# We need both ends of a relation to be a valid constraint
if table is None or fktable is None:
continue
parent = table.findChildOfType ('GSConstraints')
if parent is None:
parent = GSchema.GSConstraints (table)
constr = GSchema.GSForeignKey (parent, name = name,
references = fktable.name)
kp = isinstance (key, basestring) and key [1:-1].split (',') or key
fkp = isinstance (fkey, basestring) and fkey [1:-1].split(',') or fkey
k = [fields ["%s.%s" % (relid, i)].name for i in kp]
f = [fields ["%s.%s" % (fkrel, i)].name for i in fkp]
for (name, refname) in zip (k, f):
GSchema.GSFKField (constr, name = name, references = refname)
# Unique-Constraint
elif ctype == 'u':
parent = table.findChildOfType ('GSConstraints') or \
GSchema.GSConstraints (table)
constr = GSchema.GSUnique (parent, name = name)
kp = isinstance (key, basestring) and key [1:-1].split (',') or key
for name in [fields ["%s.%s" % (relid, i)].name for i in kp]:
GSchema.GSUQField (constr, name = name)
# Since PostgreSQL automatically creates a unique index of the same
# name, we drop that index because it would only confuse a later diff
for ix in table.findChildrenOfType ('GSIndex', False, True):
if ix.name == constr.name:
parent = ix.getParent ()
parent._children.remove (ix)
ix.setParent (None)
finally:
cursor.close ()
# ---------------------------------------------------------------------------
# Handle special defaults
# ---------------------------------------------------------------------------
def _defaultwith_ (self, code, field):
"""
Create a sequence for 'serials' and set the default for 'timestamps'.
@param code: code-triple to get the result
@param field: GSField instance of the field having the default
"""
if field.defaultwith == 'serial':
seq = self._getSequenceName (field)
code [0].append (u"CREATE SEQUENCE %s" % seq)
field.default = "DEFAULT nextval ('%s')" % seq
elif field.defaultwith == 'timestamp':
field.default = "DEFAULT now()"
| HarmonyEnterpriseSolutions/harmony-platform | src/gnue/common/datasources/drivers/sql/postgresql/Behavior.py | Python | gpl-2.0 | 13,533 |
from miasm.core.utils import size2mask
from miasm.expression.expression import ExprInt, ExprCond, ExprCompose, \
TOK_EQUAL
def simp_ext(_, expr):
if expr.op.startswith('zeroExt_'):
arg = expr.args[0]
if expr.size == arg.size:
return arg
return ExprCompose(arg, ExprInt(0, expr.size - arg.size))
if expr.op.startswith("signExt_"):
arg = expr.args[0]
add_size = expr.size - arg.size
new_expr = ExprCompose(
arg,
ExprCond(
arg.msb(),
ExprInt(size2mask(add_size), add_size),
ExprInt(0, add_size)
)
)
return new_expr
return expr
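# Illustration (sketch): simp_ext rewrites the zeroExt_*/signExt_* operators
# into explicit composes, e.g. for an 8-bit a = ExprId('a', 8):
#
#   simp_ext(None, a.zeroExtend(32))   # -> ExprCompose(a, ExprInt(0x0, 24))
#
# i.e. the low 8 bits keep the original value and the upper 24 bits are zero.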
def simp_flags(_, expr):
args = expr.args
if expr.is_op("FLAG_EQ"):
return ExprCond(args[0], ExprInt(0, 1), ExprInt(1, 1))
elif expr.is_op("FLAG_EQ_AND"):
op1, op2 = args
return ExprCond(op1 & op2, ExprInt(0, 1), ExprInt(1, 1))
elif expr.is_op("FLAG_SIGN_SUB"):
return (args[0] - args[1]).msb()
elif expr.is_op("FLAG_EQ_CMP"):
return ExprCond(
args[0] - args[1],
ExprInt(0, 1),
ExprInt(1, 1),
)
elif expr.is_op("FLAG_ADD_CF"):
op1, op2 = args
res = op1 + op2
return (((op1 ^ op2) ^ res) ^ ((op1 ^ res) & (~(op1 ^ op2)))).msb()
elif expr.is_op("FLAG_SUB_CF"):
op1, op2 = args
res = op1 - op2
return (((op1 ^ op2) ^ res) ^ ((op1 ^ res) & (op1 ^ op2))).msb()
elif expr.is_op("FLAG_ADD_OF"):
op1, op2 = args
res = op1 + op2
return (((op1 ^ res) & (~(op1 ^ op2)))).msb()
elif expr.is_op("FLAG_SUB_OF"):
op1, op2 = args
res = op1 - op2
return (((op1 ^ res) & (op1 ^ op2))).msb()
elif expr.is_op("FLAG_EQ_ADDWC"):
op1, op2, op3 = args
return ExprCond(
op1 + op2 + op3.zeroExtend(op1.size),
ExprInt(0, 1),
ExprInt(1, 1),
)
elif expr.is_op("FLAG_ADDWC_OF"):
op1, op2, op3 = args
res = op1 + op2 + op3.zeroExtend(op1.size)
return (((op1 ^ res) & (~(op1 ^ op2)))).msb()
elif expr.is_op("FLAG_SUBWC_OF"):
op1, op2, op3 = args
res = op1 - (op2 + op3.zeroExtend(op1.size))
return (((op1 ^ res) & (op1 ^ op2))).msb()
elif expr.is_op("FLAG_ADDWC_CF"):
op1, op2, op3 = args
res = op1 + op2 + op3.zeroExtend(op1.size)
return (((op1 ^ op2) ^ res) ^ ((op1 ^ res) & (~(op1 ^ op2)))).msb()
elif expr.is_op("FLAG_SUBWC_CF"):
op1, op2, op3 = args
res = op1 - (op2 + op3.zeroExtend(op1.size))
return (((op1 ^ op2) ^ res) ^ ((op1 ^ res) & (op1 ^ op2))).msb()
elif expr.is_op("FLAG_SIGN_ADDWC"):
op1, op2, op3 = args
return (op1 + op2 + op3.zeroExtend(op1.size)).msb()
elif expr.is_op("FLAG_SIGN_SUBWC"):
op1, op2, op3 = args
return (op1 - (op2 + op3.zeroExtend(op1.size))).msb()
elif expr.is_op("FLAG_EQ_SUBWC"):
op1, op2, op3 = args
res = op1 - (op2 + op3.zeroExtend(op1.size))
return ExprCond(res, ExprInt(0, 1), ExprInt(1, 1))
elif expr.is_op("CC_U<="):
op_cf, op_zf = args
return op_cf | op_zf
elif expr.is_op("CC_U>="):
op_cf, = args
return ~op_cf
elif expr.is_op("CC_S<"):
op_nf, op_of = args
return op_nf ^ op_of
elif expr.is_op("CC_S>"):
op_nf, op_of, op_zf = args
return ~(op_zf | (op_nf ^ op_of))
elif expr.is_op("CC_S<="):
op_nf, op_of, op_zf = args
return op_zf | (op_nf ^ op_of)
elif expr.is_op("CC_S>="):
op_nf, op_of = args
return ~(op_nf ^ op_of)
elif expr.is_op("CC_U>"):
op_cf, op_zf = args
return ~(op_cf | op_zf)
elif expr.is_op("CC_U<"):
op_cf, = args
return op_cf
elif expr.is_op("CC_NEG"):
op_nf, = args
return op_nf
elif expr.is_op("CC_EQ"):
op_zf, = args
return op_zf
elif expr.is_op("CC_NE"):
op_zf, = args
return ~op_zf
elif expr.is_op("CC_POS"):
op_nf, = args
return ~op_nf
return expr
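# Illustration (sketch): simp_flags lowers the symbolic flag operators into
# ordinary expressions, e.g. an ExprOp("FLAG_EQ", x) node becomes
# ExprCond(x, ExprInt(0, 1), ExprInt(1, 1)), i.e. 1 when x is zero, else 0.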
| serpilliere/miasm | miasm/expression/simplifications_explicit.py | Python | gpl-2.0 | 4,228 |
#
# Copyright 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import logging
import time
import six
from vdsm.storage import constants as sc
from vdsm.storage import exception
# SIZE property was deprecated in metadata v5, but we still need this key to
# read and write legacy metadata. To make sure no other code use it and it's
# used only by metadata code, move it here and make it private.
_SIZE = "SIZE"
ATTRIBUTES = {
sc.DOMAIN: ("domain", str),
sc.IMAGE: ("image", str),
sc.PUUID: ("parent", str),
sc.CAPACITY: ("capacity", int),
sc.FORMAT: ("format", str),
sc.TYPE: ("type", str),
sc.VOLTYPE: ("voltype", str),
sc.DISKTYPE: ("disktype", str),
sc.DESCRIPTION: ("description", str),
sc.LEGALITY: ("legality", str),
sc.CTIME: ("ctime", int),
sc.GENERATION: ("generation", int),
sc.SEQUENCE: ("sequence", int),
}
def _lines_to_dict(lines):
md = {}
errors = []
for line in lines:
# Skip lines with invalid (undecodable) values.
try:
line = line.decode("utf-8")
except UnicodeDecodeError as e:
errors.append("Invalid line '{}': {}".format(line, e))
continue
if line.startswith("EOF"):
break
if '=' not in line:
continue
key, value = line.split('=', 1)
md[key.strip()] = value.strip()
return md, errors
def parse(lines):
md, errors = _lines_to_dict(lines)
metadata = {}
if "NONE" in md:
# Before 4.20.34-1 (ovirt 4.2.5) volume metadata could be
# cleared by writing invalid metadata when deleting a volume.
# See https://bugzilla.redhat.com/1574631.
errors.append(str(exception.MetadataCleared()))
return {}, errors
# We work internally in bytes. Old formats store the value in blocks, so
# for non-converted volumes we read SIZE instead of CAPACITY and convert
# it to bytes.
if _SIZE in md and sc.CAPACITY not in md:
try:
md[sc.CAPACITY] = int(md[_SIZE]) * sc.BLOCK_SIZE_512
except ValueError as e:
errors.append(str(e))
if sc.GENERATION not in md:
md[sc.GENERATION] = sc.DEFAULT_GENERATION
if sc.SEQUENCE not in md:
md[sc.SEQUENCE] = sc.DEFAULT_SEQUENCE
for key, (name, validate) in ATTRIBUTES.items():
try:
# FIXME: remove pylint skip when bug fixed:
# https://github.com/PyCQA/pylint/issues/5113
metadata[name] = validate(md[key]) # pylint: disable=not-callable
except KeyError:
errors.append("Required key '{}' is missing.".format(name))
except ValueError as e:
errors.append("Invalid '{}' value: {}".format(name, str(e)))
return metadata, errors
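# Usage sketch: parse() returns a (metadata, errors) pair. On success, metadata
# maps the attribute names from ATTRIBUTES ('domain', 'image', 'parent',
# 'capacity', 'format', ...) to validated values and errors is empty; missing
# or malformed keys are reported as human-readable strings in errors instead
# of raising.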
def dump(lines):
md, errors = parse(lines)
if errors:
logging.warning(
"Invalid metadata found errors=%s", errors)
md["status"] = sc.VOL_STATUS_INVALID
else:
md["status"] = sc.VOL_STATUS_OK
# Do not include domain in dump output.
md.pop("domain", None)
return md
class VolumeMetadata(object):
log = logging.getLogger('storage.volumemetadata')
def __init__(self, domain, image, parent, capacity, format, type, voltype,
disktype, description="", legality=sc.ILLEGAL_VOL, ctime=None,
generation=sc.DEFAULT_GENERATION,
sequence=sc.DEFAULT_SEQUENCE):
# Storage domain UUID
self.domain = domain
# Image UUID
self.image = image
# UUID of the parent volume or BLANK_UUID
self.parent = parent
# Volume capacity in bytes
self.capacity = capacity
# Format (RAW or COW)
self.format = format
# Allocation policy (PREALLOCATED or SPARSE)
self.type = type
# Relationship to other volumes (LEAF, INTERNAL or SHARED)
self.voltype = voltype
# Intended usage of this volume (unused)
self.disktype = disktype
# Free-form description and may be used to store extra metadata
self.description = description
# Indicates if the volume contents should be considered valid
self.legality = legality
# Volume creation time (in seconds since the epoch)
self.ctime = int(time.time()) if ctime is None else ctime
# Generation increments each time certain operations complete
self.generation = generation
# Sequence number of the volume, increased every time a new volume is
# created in an image.
self.sequence = sequence
@classmethod
def from_lines(cls, lines):
'''
Instantiate a VolumeMetadata object from metadata bytes read from storage.
Args:
lines: list of key=value entries given as bytes read from the storage
metadata section. An "EOF" entry terminates parsing.
'''
metadata, errors = parse(lines)
if errors:
raise exception.InvalidMetadata(
"lines={} errors={}".format(lines, errors))
return cls(**metadata)
@property
def description(self):
return self._description
@description.setter
def description(self, desc):
self._description = self.validate_description(desc)
@property
def capacity(self):
return self._capacity
@capacity.setter
def capacity(self, value):
self._capacity = self._validate_integer("capacity", value)
@property
def ctime(self):
return self._ctime
@ctime.setter
def ctime(self, value):
self._ctime = self._validate_integer("ctime", value)
@property
def generation(self):
return self._generation
@generation.setter
def generation(self, value):
self._generation = self._validate_integer("generation", value)
@property
def sequence(self):
return self._sequence
@sequence.setter
def sequence(self, value):
self._sequence = self._validate_integer("sequence", value)
@classmethod
def _validate_integer(cls, property, value):
if not isinstance(value, six.integer_types):
raise AssertionError(
"Invalid value for metadata property {!r}: {!r}".format(
property, value))
return value
@classmethod
def validate_description(cls, desc):
desc = str(desc)
        # We cannot fail when the description is too long, since we must
        # support older engines that may send such values, or old disks
        # with a long description.
if len(desc) > sc.DESCRIPTION_SIZE:
cls.log.warning("Description is too long, truncating to %d bytes",
sc.DESCRIPTION_SIZE)
desc = desc[:sc.DESCRIPTION_SIZE]
return desc
def storage_format(self, domain_version, **overrides):
"""
Format metadata parameters into storage format bytes.
        VolumeMetadata is quite restrictive and does not allow creating
        invalid metadata, but sometimes, for example during a format
        conversion, additional fields need to be written to storage.
        Those fields can be added using the overrides dict.
Raises MetadataOverflowError if formatted metadata is too long.
"""
info = {
sc.CTIME: str(self.ctime),
sc.DESCRIPTION: self.description,
sc.DISKTYPE: self.disktype,
sc.DOMAIN: self.domain,
sc.FORMAT: self.format,
sc.GENERATION: self.generation,
sc.IMAGE: self.image,
sc.LEGALITY: self.legality,
sc.PUUID: self.parent,
sc.TYPE: self.type,
sc.VOLTYPE: self.voltype,
}
if domain_version < 5:
            # Always zero on pre v5 domains.
            # We need to keep MTIME available on pre v5 domains, as other
            # code expects that field to exist and will fail without it.
info[sc.MTIME] = 0
# Pre v5 domains should have SIZE in blocks
# instead of CAPACITY in bytes
info[_SIZE] = self.capacity // sc.BLOCK_SIZE_512
else:
info[sc.CAPACITY] = self.capacity
info[sc.SEQUENCE] = self.sequence
info.update(overrides)
keys = sorted(info.keys())
lines = ["%s=%s\n" % (key, info[key]) for key in keys]
lines.append("EOF\n")
data = "".join(lines).encode("utf-8")
if len(data) > sc.METADATA_SIZE:
raise exception.MetadataOverflowError(data)
return data
    # The three defs below allow us to imitate a dictionary.
    # So instead of providing a method to return a dictionary with values,
    # we return self and mimic dict behaviour.
    # In _fieldmap we keep the mapping between metadata field names and
    # our internal attribute names.
    #
    # TODO: All dict specific code below should be removed when the rest of
    # VDSM is refactored to use VolumeMetadata properties instead of a dict.
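    # Example (illustrative): for a VolumeMetadata instance "md",
    # md[sc.CAPACITY] returns the capacity as a string, while md.capacity
    # returns the same value as an int (see __getitem__ below).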
_fieldmap = {
sc.FORMAT: 'format',
sc.TYPE: 'type',
sc.VOLTYPE: 'voltype',
sc.DISKTYPE: 'disktype',
sc.CAPACITY: 'capacity',
sc.CTIME: 'ctime',
sc.DOMAIN: 'domain',
sc.IMAGE: 'image',
sc.DESCRIPTION: 'description',
sc.PUUID: 'parent',
sc.LEGALITY: 'legality',
sc.GENERATION: 'generation',
sc.SEQUENCE: "sequence",
}
def __getitem__(self, item):
try:
value = getattr(self, self._fieldmap[item])
except AttributeError:
raise KeyError(item)
        # Some fields need to be converted to strings
if item in (sc.CAPACITY, sc.CTIME):
value = str(value)
return value
def __setitem__(self, item, value):
setattr(self, self._fieldmap[item], value)
def get(self, item, default=None):
try:
return self[item]
except KeyError:
return default
def dump(self):
return {
"capacity": self.capacity,
"ctime": self.ctime,
"description": self.description,
"disktype": self.disktype,
"format": self.format,
"generation": self.generation,
"sequence": self.sequence,
"image": self.image,
"legality": self.legality,
"parent": self.parent,
"type": self.type,
"voltype": self.voltype,
}
| oVirt/vdsm | lib/vdsm/storage/volumemetadata.py | Python | gpl-2.0 | 11,350 |
# Allows the creation of infix operators
# Thanks to Ferdinand Jamitzky
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/384122
class Infix(object):
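    # Wraps a two-argument function so it can be used infix-style:
    # "a |op| b" first triggers __ror__ (capturing a), then __or__ (applying
    # the function to b); "a <<op>> b" works the same way through
    # __rlshift__/__rshift__.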
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
# To create a binary operator, just write a function that takes 2 arguments,
# for example:
#     def my_add(a, b):
#         return a + b
#
# Then import this module:
#     from Infix import Infix  # lets us make binary infix-style operators
#
# Then make the operator; let's call it p:
#     p = Infix(my_add)
#
# Now to use it, just write:
#     arg1 |p| arg2
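#
# Minimal runnable sketch (added for illustration, not part of the original
# module); my_add and p below are hypothetical names used only in this demo.
if __name__ == "__main__":
    def my_add(a, b):
        return a + b
    p = Infix(my_add)
    assert (1 | p | 2) == 3    # |p| form: __ror__ then __or__
    assert (1 << p >> 2) == 3  # <<p>> form: __rlshift__ then __rshift__
    assert p(1, 2) == 3        # plain call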
| Kevin-Ray-Johnson/DM_Desk_Calc | Infix.py | Python | gpl-2.0 | 976 |
"""
Represents a group of conduits
Copyright: John Stowers, 2007
License: GPLv2
"""
import traceback
import os
import xml.dom.minidom
import gobject
import logging
log = logging.getLogger("SyncSet")
import conduit
import conduit.Conduit as Conduit
import conduit.Settings as Settings
import conduit.XMLSerialization as XMLSerialization
SETTINGS_VERSION = XMLSerialization.Settings.XML_VERSION
class SyncSet(gobject.GObject):
"""
Represents a group of conduits
"""
__gsignals__ = {
#Fired when a new instantiatable DP becomes available. It is described via
#a wrapper because we do not actually instantiate it till later - to save memory
"conduit-added" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [
gobject.TYPE_PYOBJECT]), # The ConduitModel that was added
"conduit-removed" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [
gobject.TYPE_PYOBJECT]), # The ConduitModel that was removed
}
def __init__(self, moduleManager, syncManager, xmlSettingFilePath="settings.xml"):
gobject.GObject.__init__(self)
self.moduleManager = moduleManager
self.syncManager = syncManager
self.xmlSettingFilePath = xmlSettingFilePath
self.conduits = []
self.moduleManager.connect("dataprovider-available", self.on_dataprovider_available_unavailable)
self.moduleManager.connect("dataprovider-unavailable", self.on_dataprovider_available_unavailable)
# FIXME: temporary hack - need to let factories know about this factory :-\!
self.moduleManager.emit("syncset-added", self)
def _restore_dataprovider(self, cond, wrapperKey, dpName="", dpxml="", trySourceFirst=True):
"""
        Adds the dataprovider back onto the canvas at the specified
location and configures it with the given settings
"""
log.debug("Restoring %s to (source=%s)" % (wrapperKey,trySourceFirst))
wrapper = self.moduleManager.get_module_wrapper_with_instance(wrapperKey)
if dpName:
wrapper.set_name(dpName)
if wrapper is not None:
if dpxml:
for i in dpxml.childNodes:
if i.nodeType == i.ELEMENT_NODE and i.localName == "configuration":
wrapper.set_configuration_xml(xmltext=i.toxml())
cond.add_dataprovider(wrapper, trySourceFirst)
def on_dataprovider_available_unavailable(self, loader, dpw):
"""
Removes all PendingWrappers corresponding to dpw and replaces with new dpw instances
"""
key = dpw.get_key()
for c in self.get_all_conduits():
for dp in c.get_dataproviders_by_key(key):
new = self.moduleManager.get_module_wrapper_with_instance(key)
#retain configuration information
new.set_configuration_xml(dp.get_configuration_xml())
new.set_name(dp.get_name())
c.change_dataprovider(
oldDpw=dp,
newDpw=new
)
def emit(self, *args):
"""
Override the gobject signal emission so that all signals are emitted
from the main loop on an idle handler
"""
gobject.idle_add(gobject.GObject.emit,self,*args)
def create_preconfigured_conduit(self, sourceKey, sinkKey, twoway):
cond = Conduit.Conduit(self.syncManager)
self.add_conduit(cond)
if twoway == True:
cond.enable_two_way_sync()
self._restore_dataprovider(cond, sourceKey, trySourceFirst=True)
self._restore_dataprovider(cond, sinkKey, trySourceFirst=False)
def add_conduit(self, cond):
self.conduits.append(cond)
self.emit("conduit-added", cond)
def remove_conduit(self, cond):
self.emit("conduit-removed", cond)
cond.quit()
self.conduits.remove(cond)
def get_all_conduits(self):
return self.conduits
def get_conduit(self, index):
return self.conduits[index]
def index (self, conduit):
return self.conduits.index(conduit)
def num_conduits(self):
return len(self.conduits)
def clear(self):
for c in self.conduits[:]:
self.remove_conduit(c)
def save_to_xml(self, xmlSettingFilePath=None):
"""
        Saves the synchronisation settings (including all dataproviders and how
they are connected) to an xml file so that the 'sync set' can
be restored later
"""
if xmlSettingFilePath == None:
xmlSettingFilePath = self.xmlSettingFilePath
log.info("Saving Sync Set to %s" % self.xmlSettingFilePath)
#Build the application settings xml document
doc = xml.dom.minidom.Document()
rootxml = doc.createElement("conduit-application")
rootxml.setAttribute("application-version", conduit.VERSION)
rootxml.setAttribute("settings-version", SETTINGS_VERSION)
doc.appendChild(rootxml)
#Store the conduits
for cond in self.conduits:
conduitxml = doc.createElement("conduit")
conduitxml.setAttribute("uid",cond.uid)
conduitxml.setAttribute("twoway",str(cond.is_two_way()))
conduitxml.setAttribute("autosync",str(cond.do_auto_sync()))
for policyName in Conduit.CONFLICT_POLICY_NAMES:
conduitxml.setAttribute(
"%s_policy" % policyName,
cond.get_policy(policyName)
)
rootxml.appendChild(conduitxml)
#Store the source
source = cond.datasource
if source is not None:
sourcexml = doc.createElement("datasource")
sourcexml.setAttribute("key", source.get_key())
sourcexml.setAttribute("name", source.get_name())
conduitxml.appendChild(sourcexml)
#Store source settings
configxml = xml.dom.minidom.parseString(source.get_configuration_xml())
sourcexml.appendChild(configxml.documentElement)
#Store all sinks
sinksxml = doc.createElement("datasinks")
for sink in cond.datasinks:
sinkxml = doc.createElement("datasink")
sinkxml.setAttribute("key", sink.get_key())
sinkxml.setAttribute("name", sink.get_name())
sinksxml.appendChild(sinkxml)
#Store sink settings
configxml = xml.dom.minidom.parseString(sink.get_configuration_xml())
sinkxml.appendChild(configxml.documentElement)
conduitxml.appendChild(sinksxml)
#Save to disk
try:
file_object = open(xmlSettingFilePath, "w")
file_object.write(doc.toxml())
#file_object.write(doc.toprettyxml())
file_object.close()
except IOError, err:
log.warn("Could not save settings to %s (Error: %s)" % (xmlSettingFilePath, err.strerror))
def restore_from_xml(self, xmlSettingFilePath=None):
"""
Restores sync settings from the xml file
"""
if xmlSettingFilePath == None:
xmlSettingFilePath = self.xmlSettingFilePath
log.info("Restoring Sync Set from %s" % xmlSettingFilePath)
#Check the file exists
if not os.path.isfile(xmlSettingFilePath):
log.info("%s not present" % xmlSettingFilePath)
return
try:
#Open
doc = xml.dom.minidom.parse(xmlSettingFilePath)
#check the xml file is in a version we can read.
if doc.documentElement.hasAttribute("settings-version"):
xml_version = doc.documentElement.getAttribute("settings-version")
try:
xml_version = int(xml_version)
                except (ValueError, TypeError):
log.error("%s xml file version is not valid" % xmlSettingFilePath)
os.remove(xmlSettingFilePath)
return
if int(SETTINGS_VERSION) < xml_version:
log.warning("%s xml file is incorrect version" % xmlSettingFilePath)
os.remove(xmlSettingFilePath)
return
else:
log.info("%s xml file version not found, assuming too old, removing" % xmlSettingFilePath)
os.remove(xmlSettingFilePath)
return
#Parse...
for conds in doc.getElementsByTagName("conduit"):
#create a new conduit
cond = Conduit.Conduit(self.syncManager, conds.getAttribute("uid"))
self.add_conduit(cond)
#restore conduit specific settings
twoway = Settings.string_to_bool(conds.getAttribute("twoway"))
if twoway == True:
cond.enable_two_way_sync()
auto = Settings.string_to_bool(conds.getAttribute("autosync"))
if auto == True:
cond.enable_auto_sync()
for policyName in Conduit.CONFLICT_POLICY_NAMES:
cond.set_policy(
policyName,
conds.getAttribute("%s_policy" % policyName)
)
#each dataprovider
for i in conds.childNodes:
                    #keep a ref to the dataprovider that was added so that
                    #we can apply settings to it at the end
#one datasource
if i.nodeType == i.ELEMENT_NODE and i.localName == "datasource":
key = i.getAttribute("key")
name = i.getAttribute("name")
#add to canvas
if len(key) > 0:
self._restore_dataprovider(cond, key, name, i, True)
#many datasinks
elif i.nodeType == i.ELEMENT_NODE and i.localName == "datasinks":
#each datasink
for sink in i.childNodes:
if sink.nodeType == sink.ELEMENT_NODE and sink.localName == "datasink":
key = sink.getAttribute("key")
name = sink.getAttribute("name")
#add to canvas
if len(key) > 0:
self._restore_dataprovider(cond, key, name, sink, False)
except:
log.warn("Error parsing %s. Exception:\n%s" % (xmlSettingFilePath, traceback.format_exc()))
os.remove(xmlSettingFilePath)
def quit(self):
"""
Calls unitialize on all dataproviders
"""
for c in self.conduits:
c.quit()
| GNOME/conduit | conduit/SyncSet.py | Python | gpl-2.0 | 11,163 |
#
# Copyright 2014-2020 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from __future__ import division
import six
from vdsm.common import exception
from vdsm.common import xmlutils
from vdsm.virt.vmdevices import network, hwclass
from testlib import VdsmTestCase as TestCaseBase, XMLTestCase
from testlib import permutations, expandPermutations
from monkeypatch import MonkeyClass, MonkeyPatchScope
from testValidation import skipif
from vdsm.common import hooks
from vdsm.common import hostdev
from vdsm.common import libvirtconnection
import hostdevlib
@expandPermutations
@MonkeyClass(libvirtconnection, 'get', hostdevlib.Connection)
@MonkeyClass(hostdev, '_sriov_totalvfs', hostdevlib.fake_totalvfs)
@MonkeyClass(hostdev, '_pci_header_type', lambda _: 0)
@MonkeyClass(hooks, 'after_hostdev_list_by_caps', lambda json: json)
@MonkeyClass(hostdev, '_get_udev_block_mapping',
lambda: hostdevlib.UDEV_BLOCK_MAP)
class HostdevTests(TestCaseBase):
def testProcessDeviceParams(self):
deviceXML = hostdev._process_device_params(
libvirtconnection.get().nodeDeviceLookupByName(
hostdevlib.ADDITIONAL_DEVICE).XMLDesc()
)
self.assertEqual(
hostdevlib.ADDITIONAL_DEVICE_PROCESSED,
deviceXML
)
@skipif(six.PY3, "Not relevant in Python 3 libvirt")
# libvirt in Python 3 returns strings, so we don't deal with
# invalid coding anymore.
def testProcessDeviceParamsInvalidEncoding(self):
deviceXML = hostdev._process_device_params(
libvirtconnection.get().nodeDeviceLookupByName(
hostdevlib.COMPUTER_DEVICE).XMLDesc()
)
self.assertEqual(
hostdevlib.COMPUTER_DEVICE_PROCESSED,
deviceXML
)
def testProcessSRIOV_PFDeviceParams(self):
deviceXML = hostdev._process_device_params(
libvirtconnection.get().nodeDeviceLookupByName(
hostdevlib.SRIOV_PF).XMLDesc()
)
self.assertEqual(
hostdevlib.SRIOV_PF_PROCESSED,
deviceXML
)
def testProcessSRIOV_VFDeviceParams(self):
deviceXML = hostdev._process_device_params(
libvirtconnection.get().nodeDeviceLookupByName(
hostdevlib.SRIOV_VF).XMLDesc()
)
self.assertEqual(hostdevlib.SRIOV_VF_PROCESSED, deviceXML)
def testProcessNetDeviceParams(self):
deviceXML = hostdev._process_device_params(
libvirtconnection.get().nodeDeviceLookupByName(
hostdevlib.NET_DEVICE).XMLDesc()
)
self.assertEqual(hostdevlib.NET_DEVICE_PROCESSED, deviceXML)
def testProcessMdevDeviceParams(self):
deviceXML = hostdev._process_device_params(
libvirtconnection.get().nodeDeviceLookupByName(
hostdevlib.MDEV_DEVICE).XMLDesc()
)
self.assertEqual(hostdevlib.MDEV_DEVICE_PROCESSED, deviceXML)
def testGetDevicesFromLibvirt(self):
libvirt_devices, _ = hostdev._get_devices_from_libvirt()
self.assertEqual(hostdevlib.DEVICES_PROCESSED, libvirt_devices)
self.assertEqual(len(libvirt_devices),
len(hostdevlib.PCI_DEVICES) +
len(hostdevlib.USB_DEVICES) +
len(hostdevlib.SCSI_DEVICES))
@permutations([[''], [('pci',)], [('usb_device',)],
[('pci', 'usb_device')]])
def testListByCaps(self, caps):
devices = hostdev.list_by_caps(caps)
for cap in caps:
self.assertTrue(set(hostdevlib.DEVICES_BY_CAPS[cap].keys()).
issubset(set(devices.keys())))
@permutations([
# addr_type, addr, name
('usb', {'bus': '1', 'device': '2'}, 'usb_1_1'),
('usb', {'bus': '1', 'device': '10'}, 'usb_1_1_4'),
('pci', {'slot': '26', 'bus': '0', 'domain': '0', 'function': '0'},
'pci_0000_00_1a_0'),
('scsi', {'bus': '0', 'host': '1', 'lun': '0', 'target': '0'},
'scsi_1_0_0_0'),
])
def test_device_name_from_address(self, addr_type, addr, name):
# we need to make sure we scan all the devices (hence caps=None)
hostdev.list_by_caps()
self.assertEqual(
hostdev.device_name_from_address(addr_type, addr),
name
)
@MonkeyClass(libvirtconnection, 'get', hostdevlib.Connection.get)
@MonkeyClass(hostdev, '_sriov_totalvfs', hostdevlib.fake_totalvfs)
@MonkeyClass(hostdev, '_pci_header_type', lambda _: 0)
@MonkeyClass(hooks, 'after_hostdev_list_by_caps', lambda json: json)
class HostdevPerformanceTests(TestCaseBase):
def test_3k_storage_devices(self):
with hostdevlib.Connection.use_hostdev_tree():
self.assertEqual(
len(hostdev.list_by_caps()),
len(libvirtconnection.get().listAllDevices())
)
@expandPermutations
@MonkeyClass(libvirtconnection, 'get', hostdevlib.Connection)
@MonkeyClass(hostdev, '_sriov_totalvfs', hostdevlib.fake_totalvfs)
@MonkeyClass(hostdev, '_pci_header_type', lambda _: 0)
class HostdevCreationTests(XMLTestCase):
_PCI_ADDRESS = {'slot': '0x02', 'bus': '0x01', 'domain': '0x0000',
'function': '0x0', 'type': 'pci'}
_PCI_ADDRESS_XML = '<address bus="0x01" domain="0x0000" function="0x0" \
slot="0x02" type="pci"/>'
def setUp(self):
self.conf = {
'vmName': 'testVm',
'vmId': '9ffe28b6-6134-4b1e-8804-1185f49c436f',
'smp': '8', 'maxVCpus': '160',
'memSize': '1024', 'memGuaranteedSize': '512'}
# TODO: next 2 tests should reside in their own module (interfaceTests.py)
def testCreateSRIOVVF(self):
dev_spec = {'type': hwclass.NIC, 'device': 'hostdev',
'hostdev': hostdevlib.SRIOV_VF,
'macAddr': 'ff:ff:ff:ff:ff:ff',
'specParams': {'vlanid': 3},
'bootOrder': '9'}
device = network.Interface(self.log, **dev_spec)
self.assertXMLEqual(
xmlutils.tostring(device.getXML()),
hostdevlib.DEVICE_XML[hostdevlib.SRIOV_VF] % ('',))
def testCreateSRIOVVFWithAddress(self):
dev_spec = {'type': hwclass.NIC, 'device': 'hostdev',
'hostdev': hostdevlib.SRIOV_VF,
'macAddr': 'ff:ff:ff:ff:ff:ff',
'specParams': {'vlanid': 3},
'bootOrder': '9', 'address':
{'slot': '0x02', 'bus': '0x01', 'domain': '0x0000',
'function': '0x0', 'type': 'pci'}}
device = network.Interface(self.log, **dev_spec)
self.assertXMLEqual(
xmlutils.tostring(device.getXML()),
hostdevlib.DEVICE_XML[hostdevlib.SRIOV_VF] % (
self._PCI_ADDRESS_XML
)
)
@expandPermutations
@MonkeyClass(hostdev, '_each_supported_mdev_type', hostdevlib.fake_mdev_types)
@MonkeyClass(hostdev, '_mdev_type_details', hostdevlib.fake_mdev_details)
@MonkeyClass(hostdev, '_mdev_device_vendor', hostdevlib.fake_mdev_vendor)
@MonkeyClass(hostdev, '_mdev_type_devices', hostdevlib.fake_mdev_instances)
@MonkeyClass(hostdev, 'supervdsm', hostdevlib.FakeSuperVdsm())
class TestMdev(TestCaseBase):
def setUp(self):
def make_device(name):
mdev_types = [
hostdevlib.FakeMdevType('incompatible-1', 2),
hostdevlib.FakeMdevType('8q', 1),
hostdevlib.FakeMdevType('4q', 2),
hostdevlib.FakeMdevType('incompatible-2', 2),
]
return hostdevlib.FakeMdevDevice(name=name, vendor='0x10de',
mdev_types=mdev_types)
self.devices = [make_device(name) for name in ('card-1', 'card-2',)]
@permutations([
# (mdev_type, mdev_uuid)*, mdev_placement, instances
[[('4q', '4q-1')],
hostdev.MdevPlacement.COMPACT, [['4q-1'], []]],
[[('8q', '8q-1')],
hostdev.MdevPlacement.SEPARATE, [['8q-1'], []]],
[[('4q', '4q-1'), ('4q', '4q-2')],
hostdev.MdevPlacement.COMPACT, [['4q-1', '4q-2'], []]],
[[('4q', '4q-1'), ('8q', '8q-1')],
hostdev.MdevPlacement.COMPACT, [['4q-1'], ['8q-1']]],
[[('4q', '4q-1'), ('4q', '4q-2')],
hostdev.MdevPlacement.SEPARATE, [['4q-1'], ['4q-2']]],
[[('4q', '4q-1'), ('8q', '8q-1'), ('4q', '4q-2')],
hostdev.MdevPlacement.COMPACT, [['4q-1', '4q-2'], ['8q-1']]],
[[('8q', '8q-1'), ('4q', '4q-1'), ('4q', '4q-2')],
hostdev.MdevPlacement.COMPACT, [['8q-1'], ['4q-1', '4q-2']]],
[[('4q', '4q-1'), ('4q', '4q-2'), ('8q', '8q-1')],
hostdev.MdevPlacement.COMPACT, [['4q-1', '4q-2'], ['8q-1']]],
[[('4q', '4q-1'), ('8q', '8q-1'), ('4q', '4q-2')],
hostdev.MdevPlacement.SEPARATE, [['4q-1', '4q-2'], ['8q-1']]],
])
def test_vgpu_placement(self, mdev_specs, mdev_placement, instances):
with MonkeyPatchScope([
(hostdev, '_each_mdev_device', lambda: self.devices)
]):
for mdev_type, mdev_uuid in mdev_specs:
hostdev.spawn_mdev(mdev_type, mdev_uuid, mdev_placement,
self.log)
for inst, dev in zip(instances, self.devices):
dev_inst = []
for mdev_type in dev.mdev_types:
dev_inst.extend(mdev_type.instances)
self.assertEqual(inst, dev_inst)
@permutations([
[hostdev.MdevPlacement.COMPACT],
[hostdev.MdevPlacement.SEPARATE],
])
def test_unsupported_vgpu_placement(self, placement):
with MonkeyPatchScope([
(hostdev, '_each_mdev_device', lambda: self.devices)
]):
self.assertRaises(
exception.ResourceUnavailable,
hostdev.spawn_mdev, 'unsupported', '1234', placement, self.log
)
| oVirt/vdsm | tests/hostdev_test.py | Python | gpl-2.0 | 10,768 |
from random import choice
from feedparser import parse
from errbot import botcmd, BotPlugin
class DevOpsBorat(BotPlugin):
"""
Quotes from various dev humour related twitter accounts
"""
@botcmd
def borat(self, mess, args):
"""
Random quotes from the DEVOPS_BORAT twitter account
"""
myfeed = parse('http://api.twitter.com/1/statuses/user_timeline.rss?screen_name=DEVOPS_BORAT')
items = myfeed['entries']
return choice(items).description
@botcmd
def jesus(self, mess, args):
"""
Random quotes from the devops_jesus twitter account
"""
myfeed = parse('http://api.twitter.com/1/statuses/user_timeline.rss?screen_name=devops_jesus')
items = myfeed['entries']
return choice(items).description
@botcmd
def yoda(self, mess, args):
"""
Random quotes from the UXYoda twitter account
"""
myfeed = parse('http://api.twitter.com/1/statuses/user_timeline.rss?screen_name=UXYoda')
items = myfeed['entries']
return choice(items).description
| errbotio/err-devops-borat | devops_borat.py | Python | gpl-2.0 | 1,109 |
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2013 Aron Parsons <[email protected]>
# Copyright (c) 2011--2015 Red Hat, Inc.
#
# NOTE: the 'self' variable is an instance of SpacewalkShell
# wildcard import
# pylint: disable=W0401,W0614
# unused argument
# pylint: disable=W0613
# invalid function name
# pylint: disable=C0103
import logging
import readline
import shlex
from getpass import getpass
from ConfigParser import NoOptionError
from spacecmd.utils import *
from time import sleep
import xmlrpclib
# list of system selection options for the help output
HELP_SYSTEM_OPTS = '''<SYSTEMS> can be any of the following:
name
ssm (see 'help ssm')
search:QUERY (see 'help system_search')
group:GROUP
channel:CHANNEL
'''
HELP_TIME_OPTS = '''Dates can be any of the following:
Explicit Dates:
Dates can be expressed as explicit date strings in the YYYYMMDD[HHMM]
format. The year, month and day are required, while the hours and
minutes are not; the hours and minutes will default to 0000 if no
values are provided.
Deltas:
Dates can be expressed as delta values. For example, '2h' would
mean 2 hours in the future. You can also use negative values to
express times in the past (e.g., -7d would be one week ago).
Units:
s -> seconds
m -> minutes
h -> hours
d -> days
'''
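# Illustrative sketch only (not part of spacecmd): one way the delta strings
# documented above ("2h", "-7d", ...) can be turned into an absolute
# datetime. The conversion spacecmd actually uses comes from spacecmd.utils
# and may differ in detail.
def _example_delta_to_datetime(delta):
    from datetime import datetime, timedelta
    units = {'s': 'seconds', 'm': 'minutes', 'h': 'hours', 'd': 'days'}
    # "2h" -> now + 2 hours, "-7d" -> now - 7 days
    return datetime.now() + timedelta(**{units[delta[-1]]: int(delta[:-1])})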
####################
# life of caches in seconds
SYSTEM_CACHE_TTL = 3600
PACKAGE_CACHE_TTL = 86400
ERRATA_CACHE_TTL = 86400
MINIMUM_API_VERSION = 10.8
SEPARATOR = '\n' + '#' * 30 + '\n'
####################
ENTITLEMENTS = ['enterprise_entitled',
'virtualization_host'
]
SYSTEM_SEARCH_FIELDS = ['id', 'name', 'ip', 'hostname',
'device', 'vendor', 'driver', 'uuid']
####################
def help_systems(self):
print HELP_SYSTEM_OPTS
def help_time(self):
print HELP_TIME_OPTS
####################
def help_clear(self):
print 'clear: clear the screen'
print 'usage: clear'
def do_clear(self, args):
os.system('clear')
####################
def help_clear_caches(self):
print 'clear_caches: Clear the internal caches kept for systems' + \
' and packages'
print 'usage: clear_caches'
def do_clear_caches(self, args):
self.clear_system_cache()
self.clear_package_cache()
self.clear_errata_cache()
####################
def help_get_apiversion(self):
print 'get_apiversion: Display the API version of the server'
print 'usage: get_apiversion'
def do_get_apiversion(self, args):
print self.client.api.getVersion()
####################
def help_get_serverversion(self):
print 'get_serverversion: Display the version of the server'
print 'usage: get_serverversion'
def do_get_serverversion(self, args):
print self.client.api.systemVersion()
####################
def help_get_certificateexpiration(self):
print 'get_certificateexpiration: Print the expiration date of the'
print " server's entitlement certificate"
print 'usage: get_certificateexpiration'
def do_get_certificateexpiration(self, args):
date = self.client.satellite.getCertificateExpirationDate(self.session)
print date
####################
def help_list_proxies(self):
    print 'list_proxies: List the proxies within the user\'s organization'
print 'usage: list_proxies'
def do_list_proxies(self, args):
proxies = self.client.satellite.listProxies(self.session)
print proxies
####################
def help_get_session(self):
print 'get_session: Show the current session string'
print 'usage: get_session'
def do_get_session(self, args):
if self.session:
print self.session
else:
logging.error('No session found')
####################
def help_help(self):
print 'help: Show help for the given command'
print 'usage: help COMMAND'
####################
def help_history(self):
print 'history: List your command history'
print 'usage: history'
def do_history(self, args):
for i in range(1, readline.get_current_history_length()):
print '%s %s' % (str(i).rjust(4), readline.get_history_item(i))
####################
def help_toggle_confirmations(self):
print 'toggle_confirmations: Toggle confirmation messages on/off'
print 'usage: toggle_confirmations'
def do_toggle_confirmations(self, args):
if self.options.yes:
self.options.yes = False
print 'Confirmation messages are enabled'
else:
self.options.yes = True
logging.warning('Confirmation messages are DISABLED!')
####################
def help_login(self):
print 'login: Connect to a Spacewalk server'
print 'usage: login [USERNAME] [SERVER]'
def do_login(self, args):
(args, _options) = parse_arguments(args)
# logout before logging in again
if len(self.session):
logging.warning('You are already logged in')
return True
    # an argument passed to the function gets precedence
if len(args) == 2:
server = args[1]
else:
# use the server we were already using
server = self.config['server']
    # bail out if no server was given
if not server:
logging.warning('No server specified')
return False
# load the server-specific configuration
self.load_config_section(server)
    # an argument passed to the function gets precedence
if len(args):
username = args[0]
elif self.config.has_key('username'):
# use the username from before
username = self.config['username']
elif self.options.username:
        # use the username passed on the command line
username = self.options.username
else:
username = ''
# set the protocol
if self.config.has_key('nossl') and self.config['nossl']:
proto = 'http'
else:
proto = 'https'
server_url = '%s://%s/rpc/api' % (proto, server)
# this will enable spewing out all client/server traffic
verbose_xmlrpc = False
if self.options.debug > 1:
verbose_xmlrpc = True
# connect to the server
logging.debug('Connecting to %s', server_url)
self.client = xmlrpclib.Server(server_url, verbose=verbose_xmlrpc)
# check the API to verify connectivity
try:
self.api_version = self.client.api.getVersion()
logging.debug('Server API Version = %s', self.api_version)
except xmlrpclib.Fault, e:
if self.options.debug > 0:
logging.exception(e)
logging.error('Failed to connect to %s', server_url)
self.client = None
return False
# ensure the server is recent enough
if self.api_version < self.MINIMUM_API_VERSION:
logging.error('API (%s) is too old (>= %s required)',
self.api_version, self.MINIMUM_API_VERSION)
self.client = None
return False
# store the session file in the server's own directory
session_file = os.path.join(self.conf_dir, server, 'session')
# retrieve a cached session
if os.path.isfile(session_file) and not self.options.password:
try:
sessionfile = open(session_file, 'r')
# read the session (format = username:session)
for line in sessionfile:
parts = line.split(':')
# if a username was passed, make sure it matches
if len(username):
if parts[0] == username:
self.session = parts[1]
else:
# get the username from the cache if one
# wasn't passed by the user
username = parts[0]
self.session = parts[1]
sessionfile.close()
except IOError:
logging.error('Could not read %s', session_file)
# check the cached credentials by doing an API call
if self.session:
try:
logging.debug('Using cached credentials from %s', session_file)
self.client.user.listAssignableRoles(self.session)
except xmlrpclib.Fault:
logging.warning('Cached credentials are invalid')
self.current_user = ''
self.session = ''
# attempt to login if we don't have a valid session yet
if not len(self.session):
if len(username):
logging.info('Spacewalk Username: %s', username)
else:
username = prompt_user('Spacewalk Username:', noblank=True)
if self.options.password:
password = self.options.password
# remove this from the options so that if 'login' is called
# again, the user is prompted for the information
self.options.password = None
elif self.config.has_key('password'):
password = self.config['password']
else:
password = getpass('Spacewalk Password: ')
# login to the server
try:
self.session = self.client.auth.login(username, password)
# don't keep the password around
password = None
except xmlrpclib.Fault:
logging.error('Invalid credentials')
return False
try:
# make sure ~/.spacecmd/<server> exists
conf_dir = os.path.join(self.conf_dir, server)
if not os.path.isdir(conf_dir):
os.mkdir(conf_dir, 0700)
# add the new cache to the file
line = '%s:%s\n' % (username, self.session)
# write the new cache file out
sessionfile = open(session_file, 'w')
sessionfile.write(line)
sessionfile.close()
except IOError:
logging.error('Could not write session file')
# load the system/package/errata caches
self.load_caches(server)
# keep track of who we are and who we're connected to
self.current_user = username
self.server = server
logging.info('Connected to %s as %s', server_url, username)
return True
####################
def help_logout(self):
print 'logout: Disconnect from the server'
print 'usage: logout'
def do_logout(self, args):
if self.session:
self.client.auth.logout(self.session)
self.session = ''
self.current_user = ''
self.server = ''
self.do_clear_caches('')
####################
def help_whoami(self):
print 'whoami: Print the name of the currently logged in user'
print 'usage: whoami'
def do_whoami(self, args):
if len(self.current_user):
print self.current_user
else:
logging.warning("You are not logged in")
####################
def help_whoamitalkingto(self):
print 'whoamitalkingto: Print the name of the server'
print 'usage: whoamitalkingto'
def do_whoamitalkingto(self, args):
if len(self.server):
print self.server
else:
logging.warning('Yourself')
####################
def tab_complete_errata(self, text):
options = self.do_errata_list('', True)
options.append('search:')
return tab_completer(options, text)
def tab_complete_systems(self, text):
if re.match('group:', text):
# prepend 'group' to each item for tab completion
groups = ['group:%s' % g for g in self.do_group_list('', True)]
return tab_completer(groups, text)
elif re.match('channel:', text):
# prepend 'channel' to each item for tab completion
channels = ['channel:%s' % s
for s in self.do_softwarechannel_list('', True)]
return tab_completer(channels, text)
elif re.match('search:', text):
# prepend 'search' to each item for tab completion
fields = ['search:%s:' % f for f in self.SYSTEM_SEARCH_FIELDS]
return tab_completer(fields, text)
else:
options = self.get_system_names()
# add our special search options
options.extend(['group:', 'channel:', 'search:'])
return tab_completer(options, text)
def remove_last_history_item(self):
last = readline.get_current_history_length() - 1
if last >= 0:
readline.remove_history_item(last)
def clear_errata_cache(self):
self.all_errata = {}
self.errata_cache_expire = datetime.now()
self.save_errata_cache()
def get_errata_names(self):
return sorted([e.get('advisory_name') for e in self.all_errata])
def get_erratum_id(self, name):
if name in self.all_errata:
return self.all_errata[name]['id']
def get_erratum_name(self, erratum_id):
for erratum in self.all_errata:
if self.all_errata[erratum]['id'] == erratum_id:
return erratum
def generate_errata_cache(self, force=False):
if not force and datetime.now() < self.errata_cache_expire:
return
if not self.options.quiet:
# tell the user what's going on
self.replace_line_buffer('** Generating errata cache **')
channels = self.client.channel.listSoftwareChannels(self.session)
channels = [c.get('label') for c in channels]
for c in channels:
try:
errata = \
self.client.channel.software.listErrata(self.session, c)
except xmlrpclib.Fault:
logging.debug('No access to %s', c)
continue
for erratum in errata:
if erratum.get('advisory_name') not in self.all_errata:
self.all_errata[erratum.get('advisory_name')] = \
{'id': erratum.get('id'),
'advisory_name': erratum.get('advisory_name'),
'advisory_type': erratum.get('advisory_type'),
'date': erratum.get('date'),
'advisory_synopsis': erratum.get('advisory_synopsis')}
self.errata_cache_expire = \
        datetime.now() + timedelta(seconds=self.ERRATA_CACHE_TTL)
self.save_errata_cache()
if not self.options.quiet:
# restore the original line buffer
self.replace_line_buffer()
def save_errata_cache(self):
save_cache(self.errata_cache_file,
self.all_errata,
self.errata_cache_expire)
def clear_package_cache(self):
self.all_packages_short = {}
self.all_packages = {}
self.all_packages_by_id = {}
self.package_cache_expire = datetime.now()
self.save_package_caches()
def generate_package_cache(self, force=False):
if not force and datetime.now() < self.package_cache_expire:
return
if not self.options.quiet:
# tell the user what's going on
self.replace_line_buffer('** Generating package cache **')
channels = self.client.channel.listSoftwareChannels(self.session)
channels = [c.get('label') for c in channels]
for c in channels:
try:
packages = \
self.client.channel.software.listAllPackages(self.session, c)
except xmlrpclib.Fault:
logging.debug('No access to %s', c)
continue
for p in packages:
if not p.get('name') in self.all_packages_short:
self.all_packages_short[p.get('name')] = ''
longname = build_package_names(p)
if not longname in self.all_packages:
self.all_packages[longname] = [p.get('id')]
else:
self.all_packages[longname].append(p.get('id'))
# keep a reverse dictionary so we can lookup package names by ID
self.all_packages_by_id = {}
for (k, v) in self.all_packages.iteritems():
for i in v:
self.all_packages_by_id[i] = k
self.package_cache_expire = \
datetime.now() + timedelta(seconds=self.PACKAGE_CACHE_TTL)
self.save_package_caches()
if not self.options.quiet:
# restore the original line buffer
self.replace_line_buffer()
def save_package_caches(self):
# store the cache to disk to speed things up
save_cache(self.packages_short_cache_file,
self.all_packages_short,
self.package_cache_expire)
save_cache(self.packages_long_cache_file,
self.all_packages,
self.package_cache_expire)
save_cache(self.packages_by_id_cache_file,
self.all_packages_by_id,
self.package_cache_expire)
# create a global list of all available package names
def get_package_names(self, longnames=False):
self.generate_package_cache()
if longnames:
return self.all_packages.keys()
else:
return self.all_packages_short
def get_package_id(self, name):
self.generate_package_cache()
try:
return set(self.all_packages[name])
except KeyError:
return
def get_package_name(self, package_id):
self.generate_package_cache()
try:
return self.all_packages_by_id[package_id]
except KeyError:
return
def clear_system_cache(self):
self.all_systems = {}
self.system_cache_expire = datetime.now()
self.save_system_cache()
def generate_system_cache(self, force=False, delay=0):
if not force and datetime.now() < self.system_cache_expire:
return
if not self.options.quiet:
# tell the user what's going on
self.replace_line_buffer('** Generating system cache **')
# we might need to wait for some systems to delete
if delay:
sleep(delay)
systems = self.client.system.listSystems(self.session)
self.all_systems = {}
for s in systems:
self.all_systems[s.get('id')] = s.get('name')
self.system_cache_expire = \
datetime.now() + timedelta(seconds=self.SYSTEM_CACHE_TTL)
self.save_system_cache()
if not self.options.quiet:
# restore the original line buffer
self.replace_line_buffer()
def save_system_cache(self):
save_cache(self.system_cache_file,
self.all_systems,
self.system_cache_expire)
def load_caches(self, server):
conf_dir = os.path.join(self.conf_dir, server)
try:
if not os.path.isdir(conf_dir):
os.mkdir(conf_dir, 0700)
except OSError:
logging.error('Could not create directory %s', conf_dir)
return
self.ssm_cache_file = os.path.join(conf_dir, 'ssm')
self.system_cache_file = os.path.join(conf_dir, 'systems')
self.errata_cache_file = os.path.join(conf_dir, 'errata')
self.packages_long_cache_file = os.path.join(conf_dir, 'packages_long')
self.packages_by_id_cache_file = \
os.path.join(conf_dir, 'packages_by_id')
self.packages_short_cache_file = \
os.path.join(conf_dir, 'packages_short')
# load self.ssm from disk
(self.ssm, _ignore) = load_cache(self.ssm_cache_file)
# update the prompt now that we loaded the SSM
self.postcmd(False, '')
# load self.all_systems from disk
(self.all_systems, self.system_cache_expire) = \
load_cache(self.system_cache_file)
# load self.all_errata from disk
(self.all_errata, self.errata_cache_expire) = \
load_cache(self.errata_cache_file)
# load self.all_packages_short from disk
(self.all_packages_short, self.package_cache_expire) = \
load_cache(self.packages_short_cache_file)
# load self.all_packages from disk
(self.all_packages, self.package_cache_expire) = \
load_cache(self.packages_long_cache_file)
# load self.all_packages_by_id from disk
(self.all_packages_by_id, self.package_cache_expire) = \
load_cache(self.packages_by_id_cache_file)
def get_system_names(self):
self.generate_system_cache()
return self.all_systems.values()
# check for duplicate system names and return the system ID
def get_system_id(self, name):
self.generate_system_cache()
try:
# check if we were passed a system instead of a name
system_id = int(name)
if system_id in self.all_systems:
return system_id
except ValueError:
pass
# get a set of matching systems to check for duplicate names
systems = []
for system_id in self.all_systems:
if name == self.all_systems[system_id]:
systems.append(system_id)
if len(systems) == 1:
return systems[0]
elif not len(systems):
logging.warning("Can't find system ID for %s", name)
return 0
else:
logging.warning('Duplicate system profile names found!')
logging.warning("Please reference systems by ID or resolve the")
logging.warning("underlying issue with 'system_delete' or 'system_rename'")
id_list = '%s = ' % name
for system_id in systems:
id_list = id_list + '%i, ' % system_id
logging.warning('')
logging.warning(id_list[:-2])
return 0
def get_system_name(self, system_id):
self.generate_system_cache()
try:
return self.all_systems[system_id]
except KeyError:
return
def get_org_id(self, name):
details = self.client.org.getDetails(self.session, name)
return details.get('id')
def expand_errata(self, args):
if not isinstance(args, list):
args = args.split()
self.generate_errata_cache()
if len(args) == 0:
return self.all_errata
errata = []
for item in args:
if re.match('search:', item):
item = re.sub('search:', '', item)
errata.extend(self.do_errata_search(item, True))
else:
errata.append(item)
matches = filter_results(self.all_errata, errata)
return matches
def expand_systems(self, args):
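    # Expand the <SYSTEMS> selectors documented in HELP_SYSTEM_OPTS (ssm,
    # group:, channel:, search:, plain names or numeric IDs) into a list of
    # matching system names and IDs.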
if not isinstance(args, list):
args = shlex.split(args)
systems = []
system_ids = []
for item in args:
if re.match('ssm', item, re.I):
systems.extend(self.ssm)
elif re.match('group:', item):
item = re.sub('group:', '', item)
members = self.do_group_listsystems("'%s'" % item, True)
if len(members):
systems.extend([re.escape(m) for m in members])
else:
logging.warning('No systems in group %s', item)
elif re.match('search:', item):
query = item.split(':', 1)[1]
results = self.do_system_search(query, True)
if len(results):
systems.extend([re.escape(r) for r in results])
elif re.match('channel:', item):
item = re.sub('channel:', '', item)
members = self.do_softwarechannel_listsystems(item, True)
if len(members):
systems.extend([re.escape(m) for m in members])
else:
logging.warning('No systems subscribed to %s', item)
else:
# translate system IDs that the user passes
try:
sys_id = int(item)
system_ids.append(sys_id)
except ValueError:
# just a system name
systems.append(item)
matches = filter_results(self.get_system_names(), systems)
return list(set(matches + system_ids))
def list_base_channels(self):
all_channels = self.client.channel.listSoftwareChannels(self.session)
base_channels = []
for c in all_channels:
if not c.get('parent_label'):
base_channels.append(c.get('label'))
return base_channels
def list_child_channels(self, system=None, parent=None, subscribed=False):
channels = []
if system:
system_id = self.get_system_id(system)
if not system_id:
return
if subscribed:
channels = \
self.client.system.listSubscribedChildChannels(self.session,
system_id)
else:
channels = self.client.system.listSubscribableChildChannels(
self.session, system_id)
elif parent:
all_channels = \
self.client.channel.listSoftwareChannels(self.session)
for c in all_channels:
if parent == c.get('parent_label'):
channels.append(c)
else:
# get all channels that have a parent
all_channels = \
self.client.channel.listSoftwareChannels(self.session)
for c in all_channels:
if c.get('parent_label'):
channels.append(c)
return [c.get('label') for c in channels]
def user_confirm(self, prompt='Is this ok [y/N]:', nospacer=False,
integer=False, ignore_yes=False):
if self.options.yes and not ignore_yes:
return True
if nospacer:
answer = prompt_user('%s' % prompt)
else:
answer = prompt_user('\n%s' % prompt)
if re.match('y', answer, re.I):
if integer:
return 1
else:
return True
else:
if integer:
return 0
else:
return False
# check if the available API is recent enough
def check_api_version(self, want):
want_parts = [int(i) for i in want.split('.')]
have_parts = [int(i) for i in self.api_version.split('.')]
if len(have_parts) == 2 and len(want_parts) == 2:
if have_parts[0] == want_parts[0]:
# compare minor versions if majors are the same
return have_parts[1] >= want_parts[1]
else:
# only compare major versions if they differ
return have_parts[0] >= want_parts[0]
else:
# compare the whole value
return float(self.api_version) >= float(want)
# replace the current line buffer
def replace_line_buffer(self, msg=None):
# restore the old buffer if we weren't given a new line
if not msg:
msg = readline.get_line_buffer()
# don't print a prompt if there wasn't one to begin with
if len(readline.get_line_buffer()):
new_line = '%s%s' % (self.prompt, msg)
else:
new_line = '%s' % msg
# clear the current line
self.stdout.write('\r'.ljust(len(self.current_line) + 1))
self.stdout.flush()
# write the new line
self.stdout.write('\r%s' % new_line)
self.stdout.flush()
# keep track of what is displayed so we can clear it later
self.current_line = new_line
def load_config_section(self, section):
config_opts = ['server', 'username', 'password', 'nossl']
if not self.config_parser.has_section(section):
logging.debug('Configuration section [%s] does not exist', section)
return
logging.debug('Loading configuration section [%s]', section)
for key in config_opts:
# don't override command-line options
if self.options.__dict__[key]:
# set the config value to the command-line argument
self.config[key] = self.options.__dict__[key]
else:
try:
self.config[key] = self.config_parser.get(section, key)
except NoOptionError:
pass
# handle the nossl boolean
if self.config.has_key('nossl') and isinstance(self.config['nossl'], str):
if re.match('^1|y|true$', self.config['nossl'], re.I):
self.config['nossl'] = True
else:
self.config['nossl'] = False
# Obfuscate the password with asterisks
config_debug = self.config.copy()
if config_debug.has_key('password'):
config_debug['password'] = "*" * len(config_debug['password'])
logging.debug('Current Configuration: %s', config_debug)
| xkollar/spacewalk | spacecmd/src/lib/misc.py | Python | gpl-2.0 | 28,096 |
# -*- encoding: utf-8 -*-
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_order_line(osv.Model):
"""
OpenERP Model : sale_order_line
"""
_inherit = 'sale.order.line'
_columns = {
'att_bro': fields.boolean('Attach Brochure', required=False, help="""If you check this
option, the first attachment related to the product_id marked as brochure will be printed
as extra info with sale order"""),
}
class sale_order(osv.Model):
"""
    OpenERP Model : sale_order
"""
_inherit = 'sale.order'
def print_with_attachment(self, cr, user, ids, context={}):
for o in self.browse(cr, user, ids, context):
for ol in o.order_line:
if ol.att_bro:
print "Im Here i will go to print %s " % ol.name
return True
def __get_company_object(self, cr, uid):
user = self.pool.get('res.users').browse(cr, uid, uid)
print user
if not user.company_id:
            raise osv.except_osv(_('ERROR !'), _(
'There is no company configured for this user'))
return user.company_id
def _get_report_name(self, cr, uid, context):
report = self.__get_company_object(cr, uid).sale_report_id
if not report:
rep_id = self.pool.get("ir.actions.report.xml").search(
cr, uid, [('model', '=', 'sale.order'), ], order="id")[0]
report = self.pool.get(
"ir.actions.report.xml").browse(cr, uid, rep_id)
return report.report_name
def print_quotation(self, cr, uid, ids, context=None):
pq = super(sale_order, self).print_quotation(cr,uid,ids, context)
return {'type': 'ir.actions.report.xml', 'report_name': self._get_report_name(cr, uid,
context), 'datas': pq['datas'], 'nodestroy': True}
| 3dfxsoftware/cbss-addons | sale_multicompany_report/order.py | Python | gpl-2.0 | 1,880 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Tristan Fischer ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import re
from xbmcswift2 import Plugin, xbmc, xbmcgui
from resources.lib import scraper
STRINGS = {
'page': 30000,
'search': 30001,
'show_my_favs': 30002,
'no_scraper_found': 30003,
'add_to_my_favs': 30004,
'del_from_my_favs': 30005,
'no_my_favs': 30006,
'use_context_menu': 30007,
'to_add': 30008,
}
plugin = Plugin()
@plugin.route('/')
def show_categories():
items = [{
'label': category['title'],
'path': plugin.url_for(
endpoint='show_path',
path=category['path']
)
} for category in scraper.get_categories()]
items.append({
'label': _('search'),
'path': plugin.url_for('video_search')
})
items.append({
'label': _('show_my_favs'),
'path': plugin.url_for('show_my_favs')
})
return plugin.finish(items)
@plugin.route('/search/')
def video_search():
search_string = __keyboard(_('search'))
if search_string:
        __log('search got a string: "%s"' % search_string)
url = plugin.url_for(
endpoint='video_search_result',
search_string=search_string
)
plugin.redirect(url)
@plugin.route('/search/<search_string>/')
def video_search_result(search_string):
path = scraper.get_search_path(search_string)
return show_path(path)
@plugin.route('/my_favs/')
def show_my_favs():
def context_menu(item_path):
context_menu = [(
_('del_from_my_favs'),
'XBMC.RunPlugin(%s)' % plugin.url_for('del_from_my_favs',
item_path=item_path),
)]
return context_menu
my_fav_items = plugin.get_storage('my_fav_items')
items = my_fav_items.values()
for item in items:
item['context_menu'] = context_menu(item['path'])
if not items:
dialog = xbmcgui.Dialog()
dialog.ok(_('no_my_favs'), _('use_context_menu'), _('to_add'))
return
return plugin.finish(items)
@plugin.route('/path/<path>/')
def show_path(path):
try:
items, next_page, prev_page = scraper.get_path(path)
except NotImplementedError:
plugin.notify(msg=_('no_scraper_found'), title='Path: %s' % path)
else:
return __add_items(items, next_page, prev_page)
def __add_items(entries, next_page=None, prev_page=None):
my_fav_items = plugin.get_storage('my_fav_items')
def context_menu(item_path, video_id):
if not item_path in my_fav_items:
context_menu = [(
_('add_to_my_favs'),
'XBMC.RunPlugin(%s)' % plugin.url_for(
endpoint='add_to_my_favs',
item_path=item_path
),
)]
else:
context_menu = [(
_('del_from_my_favs'),
'XBMC.RunPlugin(%s)' % plugin.url_for(
endpoint='del_from_my_favs',
item_path=item_path
),
)]
return context_menu
def format_episode_title(title):
if fix_show_title and '-' in title and ('Folge' in title or 'Staffel' in title):
title, show = title.rsplit('-', 1)
title = title.replace('Staffel ', 'S').replace(' Folge ', 'E')
title = title.replace('Folge ', 'E').replace('Ganze Folge', '')
return u'%s %s' % (show.strip(), title.strip())
return title
def better_thumbnail(thumb_url):
if 'web/' in thumb_url and not thumb_url.startswith('http://is'):
thumb_url = thumb_url.replace('http://i', 'http://is')
thumb_url = re.sub('mv/web/[0-9]+', 'de', thumb_url)
thumb_url = thumb_url.replace('.jpg', '.jpg_hq.jpg')
return thumb_url
fix_show_title = plugin.get_setting('fix_show_title', bool)
temp_items = plugin.get_storage('temp_items')
temp_items.clear()
items = []
has_icons = False
i = 0
for i, entry in enumerate(entries):
if not has_icons and entry.get('thumb'):
has_icons = True
if entry['is_folder']:
items.append({
'label': entry['title'],
'thumbnail': entry.get('thumb', 'DefaultFolder.png'),
'info': {'count': i + 1},
'path': plugin.url_for(
endpoint='show_path',
path=entry['path']
)
})
else:
items.append({
'label': format_episode_title(entry['title']),
'thumbnail': better_thumbnail(
entry.get('thumb', 'DefaultVideo.png')
),
'icon': entry.get('thumb', 'DefaultVideo.png'),
'info': {
'video_id': entry['video_id'],
'count': i + 1,
'plot': entry.get('description', ''),
'studio': entry.get('author', {}).get('name', ''),
'date': entry.get('date', ''),
'year': int(entry.get('year', 0)),
'rating': float(entry.get('rating', 0)),
'votes': unicode(entry.get('votes')),
'views': unicode(entry.get('views', 0))
},
'stream_info': {
'video': {'duration': entry.get('duration', 0)}
},
'is_playable': True,
'path': plugin.url_for(
endpoint='watch_video',
video_id=entry['video_id']
)
})
if prev_page:
items.append({
'label': '<< %s %s <<' % (_('page'), prev_page['number']),
'info': {'count': 0},
'thumbnail': 'DefaultFolder.png',
'path': plugin.url_for(
endpoint='show_path',
path=prev_page['path'],
update='true',
)
})
if next_page:
items.append({
'label': '>> %s %s >>' % (_('page'), next_page['number']),
'thumbnail': 'DefaultFolder.png',
'info': {'count': i + 2},
'path': plugin.url_for(
endpoint='show_path',
path=next_page['path'],
update='true',
)
})
for item in items:
temp_items[item['path']] = item
item['context_menu'] = context_menu(
item['path'], item['info'].get('video_id')
)
temp_items.sync()
update_on_pageswitch = plugin.get_setting('update_on_pageswitch', bool)
is_update = update_on_pageswitch and 'update' in plugin.request.args
finish_kwargs = {
'sort_methods': ('playlist_order', 'label'),
'update_listing': is_update
}
if has_icons and plugin.get_setting('force_viewmode', bool):
finish_kwargs['view_mode'] = 'thumbnail'
return plugin.finish(items, **finish_kwargs)
@plugin.route('/video/<video_id>/play')
def watch_video(video_id):
video = scraper.get_video(video_id)
if 'hls_playlist' in video:
__log('watch_video using HLS')
video_url = video['hls_playlist']
elif not video['rtmpurl']:
__log('watch_video using FLV')
video_url = video['filepath'] + video['file']
else:
__log('watch_video using RTMPE or RTMPT')
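        # Build a librtmp options string: tcUrl, swfVfy, pageUrl and playpath
        # are standard librtmp parameters appended to the base RTMP URL.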
video_url = (
'%(rtmpurl)s '
'tcUrl=%(rtmpurl)s '
'swfVfy=%(swfobj)s '
'pageUrl=%(pageurl)s '
'playpath=%(playpath)s'
) % video
__log('watch_video finished with url: %s' % video_url)
return plugin.set_resolved_url(video_url)
@plugin.route('/my_favs/add/<item_path>')
def add_to_my_favs(item_path):
my_fav_items = plugin.get_storage('my_fav_items')
temp_items = plugin.get_storage('temp_items')
my_fav_items[item_path] = temp_items[item_path]
my_fav_items.sync()
@plugin.route('/my_favs/del/<item_path>')
def del_from_my_favs(item_path):
my_fav_items = plugin.get_storage('my_fav_items')
if item_path in my_fav_items:
del my_fav_items[item_path]
my_fav_items.sync()
def __keyboard(title, text=''):
keyboard = xbmc.Keyboard(text, title)
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
return keyboard.getText()
def _(string_id):
if string_id in STRINGS:
return plugin.get_string(STRINGS[string_id])
else:
plugin.log.warning('String is missing: %s' % string_id)
return string_id
def __log(text):
plugin.log.info(text)
if __name__ == '__main__':
try:
plugin.run()
except scraper.NetworkError:
plugin.notify(msg=_('network_error'))
| noba3/KoTos | addons/plugin.video.myvideo_de/addon.py | Python | gpl-2.0 | 9,503 |
#!/usr/bin/env python3
#
#
#
# This file is part of librix-thinclient.
#
# librix-thinclient is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# librix-thinclient is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
#
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with librix-thinclient. If not, see <http://www.gnu.org/licenses/>.
import os
from PyQt4 import QtGui
from ltmt.ui.users.add_user.Ui_addUser import Ui_AddUser
class AddUser(QtGui.QDialog):
"""This class provides a add user dialog feature to users page of LTMT"""
def __init__(self, configparser, parent=None):
"""Init method
@param self A AddUser instance
@param parent Parent QtGui.QWidget object
"""
self.configparser = configparser
self.parent = parent
QtGui.QDialog.__init__(self)
self.ui = Ui_AddUser()
self.ui.setupUi(self)
self.parseDefaults()
self.ui.detailsWid.hide()
def parseDefaults(self):
"""Parse some default values for new user accounts
@param self A AddUser instance
"""
with open("/etc/default/useradd", 'r') as ua:
for l in ua:
L = l.strip().split('=')
if len(L) >= 2:
if L[0] == "GROUP":
self.group = L[1]
elif L[0] == "HOME":
self.home = L[1]
elif L[0] == "SHELL":
self.shell = L[1]
def userChanged(self, username):
"""Slot called when user name was changed, updating entries
@param self A AddUser instance
@param username String username
"""
self.ui.initGLine.setText(self.group)
self.ui.homeLine.setText(os.path.join(self.home, username))
self.ui.shellLine.setText(self.shell)
def accept(self):
"""Reimplemented method QtGui.QDialog.accept
		Add the user to the configparser before accepting the dialog
@param self A AddUser instance
"""
user = self.ui.nameLine.text()
print("__accepted__", user)
if user in self.configparser.getUsersList():
if QtGui.QMessageBox.warning(self, self.tr("Replace User"),
self.tr("Are you sure you want to overwrite \"{0}\" user?")\
.format(user), QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No) == QtGui.QMessageBox.Yes:
self.configparser.delUser(user)
else:
return
self.configparser.addUser(user)
if self.ui.syncCheck.isChecked():
self.configparser.setUserSync(user, passwd=self.ui.pwLine.text(),
uid=self.ui.uidSpin.text(), init_group=self.ui.initGLine.text(),
groups=[g.strip() for g in self.ui.groupsLine.text().split(',')],
home=self.ui.homeLine.text(), shell=self.ui.shellLine.text())
QtGui.QDialog.accept(self)
| andrevmatos/Librix-ThinClient | src/ui/users/add_user/addUser.py | Python | gpl-2.0 | 2,905 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Ralph Eisenbach
#
# This plugin is based on the plugin for ZoomPlayer
# by Lars-Peter Voss <[email protected]>
#
# This file is a plugin for EventGhost.
# Copyright © 2005-2020 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
eg.RegisterPlugin(
name = "TheaterTek",
author = "SurFan",
version = "0.0.1",
kind = "program",
guid = "{EF830DA5-EF08-4050-BAE0-D5FC0057D149}",
canMultiLoad = True,
createMacrosOnAdd = True,
description = (
'Adds actions to control <a href="http://www.theatertek.com/">TheaterTek</a>.'
'\n\n<p><b>Notice:</b><br>'
'To make it work, you have to enable TCP control in TheaterTek. '
),
url = "http://www.eventghost.net/forum/viewtopic.php?t=559",
icon = (
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACGElEQVR42m1RPWhaURQ+"
"gg6lCjooxnZIH9iARAi9QqwQTcBmioNQcWskwxV5i+AgDqHERZdsEfreUE3BDFJI4XUq"
"EeqeizjUnw6CKO1SUJ4lWKR5PXotjaZ3eefv+873nafT6/XT6dRkMp1+PgUA0Svy9Ozs"
"zfY2cbvdmLpcri/VOsyfDgiAxGP8SpgyxmSQFyUGQAjE2a0yWQJQoBL5i+MNWbeIcCAO"
"qwCNaLMkPhsilFyTa9zjiXtmXQeAcg9AGEoB5mBwAChHk7TBYBAIBNLpNJYvUhfq1x/L"
"Hti/7Yebh6VSCekbbxvomM+dn5df7b9cNY3ckWGkUqkgq9fgxUI2m81kMni0VqtVr9cP"
"PPt3NjCghEpUUhTl5ONJ0BLsdrsIlmVZFEWn09lsNtHYHEABvoO0JlFKY7FYuVxGbi4m"
"l8uFw+F8Pu/z+bCL4DkgBHRtxo0TuH0ymdjt9uMPxxs/N7BSLBbREpeMyxcA7bUGyw9t"
"7Jp2G41Gv9/vdDpcVTKZ5JIIxcMCE64ESzCIw8OrYdfSxTLqsVqttVotkUiYzeZQKKQz"
"Go3j8dhgMBwVjrZ+b/V6PVSMqd/vr1arGHAzKAan2+227vbb5K6Sd5/er68/xlMiIJVK"
"CYJgs9kikQiy4ImeOTZXARyzs/McR1VVLRQKaGBv7wWy+J/O/sx/APjGD39dXio3NyrG"
"o9EoGo0+efCIt/4ArUT50E11E2MAAAAASUVORK5CYII="
),
)
# ===================================================================
# TheaterTek TCP/IP Interface
# ===================================================================
"""\
IP COMMANDS
-----------
TT->AP Sent from TT to client application
AP->TT Sent from client application to TT
TT<-->AP Sent from TT and can be polled by client.
Commands are sent ASCII in the form:
4 byte command, space, {parameter} CRLF
A successful command returns:
Command, space, 0
OR
Command, space, response
An unsuccessful command returns:
Command, space, -1
Example:
0000 // Client app
0000 TheaterTek DVD // Returned value
Enum values
-----------
IP_MEDIASTATE 0=Stopped/NoMedia, 1=Playing, 2=paused, 3=FF, 4=RW
IP_FULLSCREEN 0=Minimized, 1=Windowed, 2=Fullscreen
IP_GETPRIVATE Allows client to set/get a private string up to 1024 bytes on TT. This data persists as long as TT is running.
#define IP_APPLICATION 0 // TT<-->AP Application name
#define IP_VERSION 1 // TT<-->AP Application version
#define IP_FLASH 500 // TT<-->AP OSD Flash message
#define IP_FULLSCREEN 510 // TT<-->AP Fullscreen/windowed status
#define IP_MEDIASTATE 1000 // TT<-->AP State enum
#define IP_MEDIATIME 1010 // TT<-->AP Media time (hh:mm:ss / hh:mm:ss)
#define IP_MEDIAPOS 1020 // AP->TT Set media time (hh:mm:ss)
#define IP_ENDOFMEDIA 1030 // TT->AP Signals end of media
#define IP_FORMAT 1040 // TT->AP (0=NTSC, 1=PAL)
#define IP_GETAR 1300 // TT<-->AP Return Current AR (name)
#define IP_ARCOUNT 1310 // AP->TT AR Count
#define IP_ARNAMES 1320 // AP->TT AR Names (name|name)
#define IP_SETAR 1330 // AP->TT Set Current AR (number)
#define IP_CURFILE 1400 // TT<-->AP Current file
#define IP_DISKINSERTION 1410 // TT->AP Disk inserted
#define IP_DISKEJECTION 1420 // TT->AP Disk ejected
#define IP_DVDUNIQUEID 1500 // AP->TT DVD unique ID
#define IP_DVDTITLE 1510 // TT<-->AP Current Title
#define IP_DVDTITLECOUNT 1520 // AP->TT Title count
#define IP_DVDPLAYTITLE 1530 // AP->TT Play Title
#define IP_DVDCHAPTER 1600 // TT<-->AP Current Chapter
#define IP_DVDCHAPTERCOUNT 1610 // AP->TT Chapter count
#define IP_DVDPLAYCHAPTER 1620 // AP->TT Play chapter
#define IP_DVDPLAYTITCHAP 1630 // AP->TT Play Chapter in Title (Chapter Title)
#define IP_DVDAUDIO 1700 // TT<-->AP Current audio stream
#define IP_DVDSETAUDIO 1710 // AP->TT Set audio stream
#define IP_DVDAUDIOCOUNT 1720 // AP->TT Audio stream count
#define IP_DVDAUDIONAMES 1730 // AP->TT Audio stream names (name|name)
#define IP_DVDSUBTITLE 1800 // TT<-->AP Current subtitle stream
#define IP_DVDSETSUBTITLE 1810 // AP->TT Set subtitle stream, -1 to disable
#define IP_DVDSUBTITLECOUNT 1820 // AP->TT Subtitle stream count
#define IP_DVDSUBTITLENAMES 1830 // AP->TT Subtitle names (name|name)
#define IP_DVDANGLE 1900 // TT<-->AP Current angle
#define IP_DVDSETANGLE 1910 // AP->TT Set angle
#define IP_DVDANGLECOUNT 1920 // AP->TT Angle count
#define IP_DVDMENUMODE 2000 // TT<-->AP Menu mode
#define IP_DOMAIN 2010 // TT->AP DVD Domain
#define IP_GETVOLUME 2100 // TT<-->AP Get Current volume
#define IP_SETVOLUME 2110 // AP->TT Set Current volume
#define IP_GETAUDIOOUTPUT 2120 // AP->TT Get Current audio output
#define IP_SETAUDIOOUTPUT 2130 // AP->TT Set audio output
#define IP_ADDBOOKMARK 2200 // AP->TT Add a bookmark
#define IP_NEXTBOOKMARK 2210 // AP->TT Next bookmark
#define IP_PREVBOOKMARK 2220 // AP->TT Previous bookmark
#define IP_PLAYFILE 3000 // AP->TT Play file
#define IP_ADDFILE 3010 // AP->TT Add file to playlist
#define IP_CLEARLIST 3020 // AP->TT Clear playlist
#define IP_GETINDEX 3030 // AP->TT Current item index
#define IP_PLAYATINDEX 3040 // AP->TT Play item at index
#define IP_GETLISTCOUNT 3050 // AP->TT Current list count
#define IP_GETLIST 3060 // AP->TT Get playlist (name|name)
#define IP_DELATINDEX 3070 // AP->TT Delete file at index
#define IP_SETPRIVATE 4000 // AP->TT Private app string
#define IP_GETPRIVATE 4010 // AP->TT Private app string
#define IP_WM_COMMAND 5000 // AP->TT Internal command
#define IP_KEYPRESS 5010 // AP->TT Key code
#define IP_SENDMSG 5020 // AP->TT Send message
#define IP_POSTMSG 5030 // AP->TT Post message
Auto Killer Commands
--------------------
#define IP_LAUNCH 8000 // AP->AK
#define IP_QUIT 8010 // AP->AK
#define IP_MOUNTDISK 8020 // AP->AK Changer#, Slot#
#define IP_UNMOUNTDISK 8030 // AP->AK Changer# ->Slot#
#define IP_EJECTDISK 8040 // AP->AK Changer#, Slot#
#define IP_GETSLOTDATA 8050 // AP->AK Changer#, Slot#
#define IP_GETDRIVEDATA 8060 // AP->AK Changer# ->DriveData
#define IP_CHECKCHANGED 8070 // AP->AK
#define IP_REBUILDDATA 8080 // AP->AK
#define IP_DATACHANGED 8100 // AK->AP Notification of data change
#define IP_COUNTCHANGERS 8110 // AP->AK
WM_COMMANDS
-----------
#define ID_PLAY 32771
#define ID_STOP 32772
#define ID_PAUSE 32773
#define ID_NEXT 32774
#define ID_PREVIOUS 32775
#define ID_EXIT 32776
#define ID_FF 32777
#define ID_RW 32778
#define ID_MENU_LIST 32779
#define ID_TITLE_MENU 32780
#define ID_FF_1X 32782
#define ID_FF_2X 32784
#define ID_FF_5X 32785
#define ID_FF_10X 32786
#define ID_FF_20X 32787
#define ID_FF_SLOW 32788
#define ID_RW_1X 32790
#define ID_RW_2X 32791
#define ID_RW_5X 32792
#define ID_RW_10X 32793
#define ID_RW_20X 32794
#define ID_ROOT_MENU 32796
#define ID_AUDIO_MENU 32797
#define ID_SUBTITLE_MENU 32798
#define ID_CHAPTER_MENU 32799
#define ID_CC_ON 32804
#define ID_CC_OFF 32805
#define ID_ABOUT 32807
#define ID_SUB_OFF 32808
#define ID_ASPECT_DEFINE 32810
#define ID_ASPECT_ANAM 32811
#define ID_ASPECT_NONANAM 32812
#define ID_ASPECT_LETTERBOX 32813
#define ID_BOOK_ADD 32814
#define ID_BUTTON32819 32819
#define ID_BUTTON32820 32820
#define ID_ONSCREEN 32821
#define ID_VID_BRIGHTNESS 32824
#define ID_VID_CONTRAST 32825
#define ID_VID_HUE 32826
#define ID_VID_SATURATION 32827
#define ID_OVERSCAN 32828
#define ID_VID_GAMMA 32829
#define ID_MENU_CHAPTER 32830
#define ID_MENU_AUDIO 32831
#define ID_MENU_ANGLE 32832
#define ID_MENU_FF 32833
#define ID_MENU_SUBTITLES 32834
#define ID_CLOSED_CAPTIONS 32835
#define ID_BOOK_DELETE 32836
#define ID_ANGLE_MENU 32837
#define ID_RESUME 32838
#define ID_MENU_TITLE 32839
#define ID_SETUP 32841
#define ID_ADJUSTVIDEO 32842
#define ID_ASPECT_LOCK 32843
#define ID_SETSTARTPOINT 32846
#define ID_K_RETURN 32849
#define ID_K_UP 32850
#define ID_K_DOWN 32851
#define ID_K_LEFT 32852
#define ID_K_RIGHT 32853
#define ID_K_FF 32854
#define ID_K_RW 32855
#define ID_K_ESCAPE 32856
#define ID_NEXTAR 32857
#define ID_INFO 32858
#define ID_ARFIRST 32859
#define ID_AR2 32860
#define ID_AR3 32861
#define ID_AR4 32862
#define ID_AR5 32863
#define ID_AR6 32864
#define ID_AR7 32865
#define ID_AR8 32866
#define ID_AR9 32867
#define ID_ARLAST 32868
#define ID_EJECT 32870
#define ID_CONTEXT 32872
#define ID_ALTEXIT 32873
#define ID_MINIMIZE 32874
#define ID_NEXTSUB 32875
#define ID_NEXTAUDIO 32876
#define ID_REPLAY 32877
#define ID_JUMP 32878
#define ID_FRAMESTEP 32879
#define ID_ABREPEAT 32880
#define ID_CHAPTITREP 32881
#define ID_NEXT_ANGLE 32883
#define ID_OPEN 32884
#define ID_NEXT_TIT 32885
#define ID_STATS 32886
#define ID_CAPTURE 32887
#define ID_BK_RESUME 32888
#define ID_DEINTERLACE 32889
#define ID_VOLUP 32891
#define ID_VOLDOWN 32892
#define ID_NEXTDISK 32893
#define ID_SHOWTIME 32894
#define ID_CC_NUDGE_UP 32895
#define ID_CC_NUDGE_DOWN 32896
#define ID_UPGRADE 32897
#define ID_NEXT_FILE 32898
#define ID_PREVIOUS_FILE 32899
#define ID_TSPROG 32901
#define ID_PREV_TIT 32902
#define ID_SLOW 32904
#define ID_CCTOGGLE 32905
#define ID_AR11 32906
#define ID_AR12 32907
#define ID_AR13 32908
#define ID_AR14 32909
#define ID_AR15 32910
#define ID_AR16 32911
#define ID_AR17 32912
#define ID_AR18 32913
#define ID_AR19 32914
#define ID_AR20 32915
#define ID_VMRSTATS 32916
#define ID_LIPDOWN 32917
#define ID_LIPUP 32918
#define ID_MUTE 32919
#define ID_BLANKING 32920
#define ID_TOGGLE 32922
#define ID_MOVELEFT 32924
#define ID_MOVERIGHT 32925
#define ID_MOVEUP 32926
#define ID_MOVEDOWN 32927
#define ID_H_EXPAND 32928
#define ID_H_CONTRACT 32929
#define ID_V_EXPAND 32930
#define ID_V_CONTRACT 32931
#define ID_ZOOM_IN 32932
#define ID_ZOOM_OUT 32933
#define ID_BL_LEFT 32934
#define ID_BL_RIGHT 32935
#define ID_BT_UP 32936
#define ID_BT_DOWN 32937
#define ID_BR_LEFT 32938
#define ID_BR_RIGHT 32939
#define ID_BB_UP 32940
#define ID_BB_DOWN 32941
#define ID_STREAM 32943
"""
import asynchat
import socket
import asyncore
import threading
import new
ttRequests = (
('IP_APPLICATION', '0000', 'Request Application name'),
('IP_VERSION', '0001', 'Request Application version'),
('IP_FULLSCREEN', '0510', 'Request Fullscreen/windowed status'),
('IP_MEDIASTATE', '1000', 'Request MediaState'),
('IP_MEDIATIME', '1010', 'Request Media time'),
('IP_ENDOFMEDIA', '1030', 'End of media'),
('IP_FORMAT', '1040', 'Request Video Format'),
('IP_GETAR', '1300', 'Request Current Aspect Ratio'),
('IP_ARCOUNT', '1310', 'Request Aspect Ratio Count'),
    ('IP_ARNAMES', '1320', 'Request Aspect Ratio Names'),
('IP_CURFILE', '1400', 'Request Current file'),
('IP_DISKINSERTION', '1410', 'Disk inserted'),
('IP_DISKEJECTION', '1420', 'Disk ejected'),
('IP_DVDUNIQUEID', '1500', 'DVD unique ID'),
('IP_DVDTITLE', '1510', 'Request Current Title'),
('IP_DVDTITLECOUNT', '1520', 'Request Title count'),
('IP_DVDCHAPTER', '1600', 'Request Current Chapter'),
('IP_DVDCHAPTERCOUNT', '1610', 'Request Chapter count'),
('IP_DVDAUDIO', '1700', 'Request Current audio stream'),
('IP_DVDAUDIOCOUNT', '1720', 'Request Audio stream count'),
('IP_DVDAUDIONAMES', '1730', 'Request Audio stream names'),
('IP_DVDSUBTITLE', '1800', 'Request Current subtitle stream'),
('IP_DVDSUBTITLECOUNT', '1820', 'Request Subtitle stream count'),
('IP_DVDSUBTITLENAMES', '1830', 'Request Subtitle names (name|name)'),
('IP_DVDANGLE', '1900', 'Request Current angle'),
('IP_DVDANGLECOUNT', '1920', 'Request Angle count'),
('IP_DVDMENUMODE', '2000', 'Request Menu mode'),
('IP_DOMAIN', '2010', 'Request DVD Domain'),
('IP_GETVOLUME', '2100', 'Request Current volume'),
('IP_GETAUDIOOUTPUT', '2120', 'Request Current audio output'),
('IP_GETLISTCOUNT', '3050', 'Request Current list count'),
('IP_GETLIST', '3060', 'Request playlist'),
('IP_GETPRIVATE', '4010', 'Request Private app string'),
('IP_COUNTCHANGERS', '8110', 'CountChangers'),
)
ttCommands = (
('IP_FLASH', '0500', 'OSD Flash message','Message'),
('IP_MEDIAPOS', '1020', 'Set media time', 'Time(hh:mm:ss)'),
('IP_SETAR', '1330', 'Set Current AR', 'AR number'),
('IP_DVDPLAYTITLE', '1530', 'Play Title', 'Title Number'),
('IP_DVDPLAYCHAPTER', '1620', 'Play chapter', 'Chapter number'),
('IP_DVDPLAYTITCHAP', '1630', 'Play Chapter in Title', 'Title/Chapter (space delimited)'),
('IP_DVDSETAUDIO', '1710', 'Set audio stream','Stream number'),
('IP_DVDSETSUBTITLE', '1810', 'Set subtitle stream', 'Stream number (-1 to disable)'),
('IP_DVDSETANGLE', '1910', 'Set angle', 'Angle'),
('IP_SETVOLUME', '2110', 'Set Current volume', 'Volume'),
('IP_SETAUDIOOUTPUT', '2130', 'Set audio output', 'Audio Output'),
('IP_ADDBOOKMARK', '2200', 'Add a bookmark', ''),
('IP_NEXTBOOKMARK', '2210', 'Next bookmark', ''),
('IP_PREVBOOKMARK', '2220', 'Previous bookmark', ''),
('IP_PLAYFILE', '3000', 'Play file', 'Filename'),
('IP_ADDFILE', '3010', 'Add file to playlist', 'Filename'),
('IP_CLEARLIST', '3020', 'Clear playlist', ''),
('IP_PLAYATINDEX', '3040', 'Play item at index', 'Index'),
('IP_GETINDEX', '3030', 'Current item index', 'Index'),
('IP_DELATINDEX', '3070', 'Delete file at index', 'Index'),
('IP_SETPRIVATE', '4000', 'Private app string', 'String'),
('IP_KEYPRESS', '5010', 'Key code', 'Key-Code'),
('ID_PLAY', '32771', 'Play', ''),
('ID_STOP', '32772', 'Stop', ''),
('ID_PAUSE', '32773', 'Pause', ''),
('ID_NEXT', '32774', 'Next', ''),
('ID_PREVIOUS', '32775', 'Previous', ''),
('ID_EXIT', '32776', 'Exit', ''),
('ID_FF', '32777', 'FastForward', ''),
('ID_RW', '32778', 'Fast Rewind', ''),
('ID_MENU_LIST', '32779', 'Menu List', ''),
('ID_TITLE_MENU', '32780', 'Title Menu', ''),
('ID_FF_1X', '32782', 'Normal Play', ''),
('ID_FF_2X', '32784', 'Fast Forward 2x', ''),
('ID_FF_5X', '32785', 'Fast Forward 5x', ''),
('ID_FF_10X', '32786', 'Fast Forward 10x', ''),
('ID_FF_20X', '32787', 'Fast Forward 20x', ''),
('ID_FF_SLOW', '32788', 'Fast Forward Slow', ''),
('ID_RW_1X', '32790', 'Reverse Play', ''),
('ID_RW_2X', '32791', 'Fast Reverse 2X', ''),
    ('ID_RW_5X', '32792', 'Fast Reverse 5X', ''),
('ID_RW_10X', '32793', 'Fast Reverse 10X', ''),
('ID_RW_20X', '32794', 'Fast Reverse 20X', ''),
('ID_ROOT_MENU', '32796', 'Root Menu', ''),
('ID_AUDIO_MENU', '32797', 'Audio Menu', ''),
('ID_SUBTITLE_MENU', '32798', 'Subtitle Menu', ''),
('ID_CHAPTER_MENU', '32799', 'Chapter Menu', ''),
('ID_CC_ON', '32804', 'Closed Captions On', ''),
('ID_CC_OFF', '32805', 'Closed Captions Off', ''),
('ID_ABOUT', '32807', 'About', ''),
('ID_SUB_OFF', '32808', 'Subtitles Off', ''),
('ID_ASPECT_DEFINE', '32810', 'Define Aspect Ratio', ''),
('ID_ASPECT_ANAM', '32811', 'AR anamorph', ''),
('ID_ASPECT_NONANAM', '32812', 'AR non anamorph', ''),
('ID_ASPECT_LETTERBOX', '32813', 'AR Letterbox', ''),
('ID_BOOK_ADD', '32814', 'Add Bookmark', ''),
('ID_BUTTON32819', '32819', 'BUTTON32819', ''),
('ID_BUTTON32820', '32820', 'BUTTON32820', ''),
('ID_ONSCREEN', '32821', 'On Screen', ''),
('ID_VID_BRIGHTNESS', '32824', 'Brightness', ''),
('ID_VID_CONTRAST', '32825', 'Contrast', ''),
('ID_VID_HUE', '32826', 'Hue', ''),
('ID_VID_SATURATION', '32827', 'Saturation', ''),
('ID_OVERSCAN', '32828', 'Overscan', ''),
('ID_VID_GAMMA', '32829', 'Gamma', ''),
('ID_MENU_CHAPTER', '32830', 'Menu Chapter', ''),
('ID_MENU_AUDIO', '32831', 'Menu Audio', ''),
('ID_MENU_ANGLE', '32832', 'Menu Angle', ''),
('ID_MENU_FF', '32833', 'Menu FF', ''),
('ID_MENU_SUBTITLES', '32834', 'Menu Subtitles', ''),
('ID_CLOSED_CAPTIONS', '32835', 'Closed Captions', ''),
('ID_BOOK_DELETE', '32836', 'Delete Bookmark', ''),
('ID_ANGLE_MENU', '32837', 'Angle Menu', ''),
('ID_RESUME', '32838', 'Resume', ''),
('ID_MENU_TITLE', '32839', 'Menu Title', ''),
('ID_SETUP', '32841', 'Setup', ''),
('ID_ADJUSTVIDEO', '32842', 'Adjust Video', ''),
('ID_ASPECT_LOCK', '32843', 'Lock Aspect ratio', ''),
('ID_SETSTARTPOINT', '32846', 'Set Startpoint', ''),
('ID_K_RETURN', '32849', 'Key Return', ''),
('ID_K_UP', '32850', 'Key Up', ''),
('ID_K_DOWN', '32851', 'Key Down', ''),
('ID_K_LEFT', '32852', 'Key Left', ''),
('ID_K_RIGHT', '32853', 'Key Right', ''),
('ID_K_FF', '32854', 'Key FastForward', ''),
('ID_K_RW', '32855', 'Key Rewind', ''),
('ID_K_ESCAPE', '32856', 'Key Escape', ''),
('ID_NEXTAR', '32857', 'Next Aspect ratio', ''),
('ID_INFO', '32858', 'Info', ''),
('ID_ARFIRST', '32859', 'First Aspect Ratio', ''),
('ID_AR2', '32860', 'Aspect ratio 2', ''),
('ID_AR3', '32861', 'Aspect ratio 3', ''),
('ID_AR4', '32862', 'Aspect ratio 4', ''),
('ID_AR5', '32863', 'Aspect ratio 5', ''),
('ID_AR6', '32864', 'Aspect ratio 6', ''),
('ID_AR7', '32865', 'Aspect ratio 7', ''),
('ID_AR8', '32866', 'Aspect ratio 8', ''),
('ID_AR9', '32867', 'Aspect ratio 9', ''),
('ID_ARLAST', '32868', 'Last Aspect ratio', ''),
('ID_EJECT', '32870', 'Eject', ''),
('ID_CONTEXT', '32872', 'Context', ''),
('ID_ALTEXIT', '32873', 'ALT Exit', ''),
('ID_MINIMIZE', '32874', 'Minimize', ''),
('ID_NEXTSUB', '32875', 'Next Subtitle', ''),
('ID_NEXTAUDIO', '32876', 'Next Audio', ''),
('ID_REPLAY', '32877', 'Replay', ''),
('ID_JUMP', '32878', 'Jump', ''),
('ID_FRAMESTEP', '32879', 'Framestep', ''),
('ID_ABREPEAT', '32880', 'A/B-Repeat', ''),
('ID_CHAPTITREP', '32881', 'Chapter Title Repeat', ''),
('ID_NEXT_ANGLE', '32883', 'Next Angle', ''),
('ID_OPEN', '32884', 'Open', ''),
('ID_NEXT_TIT', '32885', 'Next Title', ''),
('ID_STATS', '32886', 'Statistics', ''),
('ID_CAPTURE', '32887', 'Capture', ''),
('ID_BK_RESUME', '32888', 'BK Resume', ''),
('ID_DEINTERLACE', '32889', 'Deinterlace', ''),
('ID_VOLUP', '32891', 'Volume Up', ''),
('ID_VOLDOWN', '32892', 'Volume Down', ''),
('ID_NEXTDISK', '32893', 'Next Disk', ''),
('ID_SHOWTIME', '32894', 'Show Time', ''),
('ID_CC_NUDGE_UP', '32895', 'CC Nudge Up', ''),
('ID_CC_NUDGE_DOWN', '32896', 'CC Nudge Down', ''),
('ID_UPGRADE', '32897', 'Upgrade', ''),
('ID_NEXT_FILE', '32898', 'Next File', ''),
('ID_PREVIOUS_FILE', '32899', 'Previous File', ''),
('ID_TSPROG', '32901', 'TSPROG', ''),
('ID_PREV_TIT', '32902', 'Previous Title', ''),
('ID_SLOW', '32904', 'Slow', ''),
('ID_CCTOGGLE', '32905', 'Closed Captions Toggle', ''),
('ID_AR11', '32906', 'Aspect ratio 11', ''),
('ID_AR12', '32907', 'Aspect ratio 12', ''),
('ID_AR13', '32908', 'Aspect ratio 13', ''),
('ID_AR14', '32909', 'Aspect ratio 14', ''),
('ID_AR15', '32910', 'Aspect ratio 15', ''),
('ID_AR16', '32911', 'Aspect ratio 16', ''),
('ID_AR17', '32912', 'Aspect ratio 17', ''),
('ID_AR18', '32913', 'Aspect ratio 18', ''),
('ID_AR19', '32914', 'Aspect ratio 19', ''),
('ID_AR20', '32915', 'Aspect ratio 20', ''),
('ID_VMRSTATS', '32916', 'VMR Statistics', ''),
('ID_LIPDOWN', '32917', 'Lipsync down', ''),
('ID_LIPUP', '32918', 'Lipsync Up', ''),
('ID_MUTE', '32919', 'Mute', ''),
('ID_BLANKING', '32920', 'Blanking', ''),
('ID_TOGGLE', '32922', 'Toggle', ''),
('ID_MOVELEFT', '32924', 'Move Left', ''),
('ID_MOVERIGHT', '32925', 'Move Right', ''),
('ID_MOVEUP', '32926', 'Move Up', ''),
('ID_MOVEDOWN', '32927', 'Move Down', ''),
('ID_H_EXPAND', '32928', 'Horizontal Expand', ''),
('ID_H_CONTRACT', '32929', 'Horizontal Contract', ''),
('ID_V_EXPAND', '32930', 'Vertical Expand', ''),
('ID_V_CONTRACT', '32931', 'Vertical Contract', ''),
('ID_ZOOM_IN', '32932', 'Zoom In', ''),
('ID_ZOOM_OUT', '32933', 'Zoom Out', ''),
('ID_BL_LEFT', '32934', 'BL_LEFT', ''),
('ID_BL_RIGHT', '32935', 'BL_RIGHT', ''),
('ID_BT_UP', '32936', 'BT_UP', ''),
('ID_BT_DOWN', '32937', 'BT_DOWN', ''),
('ID_BR_LEFT', '32938', 'BR_LEFT', ''),
('ID_BR_RIGHT', '32939', 'BR_RIGHT', ''),
('ID_BB_UP', '32940', 'BB_UP', ''),
('ID_BB_DOWN', '32941', 'BB_DOWN', ''),
    ('ID_STREAM', '32943', 'STREAM', ''),
)
ttAutoKillerAndChangerCommands = (
('IP_LAUNCH', '8000', 'Launch AutoKiller'),
('IP_QUIT', '8010', 'Quit Autokiller'),
('IP_MOUNTDISK', '8020', 'Mount Disk', 'Changer/Slot (comma delimited)'),
('IP_UNMOUNTDISK', '8030', 'Unmount Disk', 'Changer/Slot (comma delimited)'),
('IP_EJECTDISK', '8040', 'Eject Disk', 'Changer/Slot (comma delimited)'),
('IP_GETSLOTDATA', '8050', 'GETSLOTDATA', 'Changer, Slot'),
('IP_GETDRIVEDATA', '8060', 'GETDRIVEDATA', 'Changer ->DriveData'),
('IP_CHECKCHANGED', '8070', 'CHECKCHANGED'),
('IP_REBUILDDATA', '8080', 'REBUILDDATA'),
('IP_DATACHANGED', '8100', 'Notification of data change'),
)
class TheaterTekSession(asynchat.async_chat):
"""
    Handles a TheaterTek TCP/IP session.
"""
def __init__ (self, plugin, address):
self.plugin = plugin
# Call constructor of the parent class
asynchat.async_chat.__init__(self)
# Set up input line terminator
self.set_terminator('\r\n')
# Initialize input data buffer
self.buffer = ''
# create and connect a socket
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
eg.RestartAsyncore()
self.settimeout(1.0)
try:
self.connect(address)
except:
pass
def handle_connect(self):
"""
Called when the active opener's socket actually makes a connection.
"""
self.plugin.TriggerEvent("Connected")
def handle_expt(self):
# connection failed
self.plugin.isSessionRunning = False
self.plugin.TriggerEvent("NoConnection")
self.close()
def handle_close(self):
"""
Called when the channel is closed.
"""
self.plugin.isSessionRunning = False
self.plugin.TriggerEvent("ConnectionLost")
self.close()
def collect_incoming_data(self, data):
"""
Called with data holding an arbitrary amount of received data.
"""
self.buffer = self.buffer + data
def found_terminator(self):
"""
Called when the incoming data stream matches the termination
condition set by set_terminator.
"""
# call the plugins handler method
self.plugin.ValueUpdate(self.buffer)
# reset the buffer
self.buffer = ''
class stdAction(eg.ActionClass):
def __call__(self):
self.plugin.DoCommand(self.value)
class stdActionWithStringParameter(eg.ActionWithStringParameter):
def __call__(self, Param):
self.plugin.DoCommand(self.value + " " + Param)
class wmAction(eg.ActionClass):
def __call__(self):
self.plugin.DoCommand("5000 " + self.value)
class TheaterTek(eg.PluginClass):
def __init__(self):
self.host = "localhost"
self.port = 2663
self.isSessionRunning = False
self.timeline = ""
self.waitStr = None
self.waitFlag = threading.Event()
self.PlayState = -1
self.lastMessage = {}
self.lastSubtitleNum = 0
self.lastSubtitlesEnabled = False
self.lastAudioTrackNum = 0
group = self.AddGroup('Requests')
for className, scancode, descr in ttRequests:
clsAttributes = dict(name=descr, value=scancode)
cls = new.classobj(className, (stdAction,), clsAttributes)
group.AddAction(cls)
group = self.AddGroup('Commands')
for className, scancode, descr, ParamDescr in ttCommands:
clsAttributes = dict(name=descr, value=scancode)
if ParamDescr == "":
if className[0:3] == "IP_":
cls = new.classobj(className, (stdAction,), clsAttributes)
else:
cls = new.classobj(className, (wmAction,), clsAttributes)
else:
cls = new.classobj(className, (stdActionWithStringParameter,), clsAttributes)
cls.parameterDescription = ParamDescr
group.AddAction(cls)
def __start__(
self,
host="localhost",
port=2663,
dummy1=None,
dummy2=None,
useNewEvents=False
):
self.host = host
self.port = port
self.events = self.ttEvents
ttEvents = {
"0000": "ApplicationName",
"0001": "Version",
"0500": "OSD",
"0510": (
"WindowState",
{
"0": "Minimized",
"1": "Windowed",
"2": "Fullscreen"
},
),
"1000": (
"MediaState",
{
"0": "Stopped",
"1": "Playing",
"2": "Paused",
"3": "FF",
"4": "RW"
},
),
"1010": "MediaTime",
"1030": "EndOfMedia",
"1040": (
"Format",
{
"0": "NTSC",
"1": "PAL",
},
),
"1300": "AspectRatio",
"1310": "AspectRatioCount",
"1320": "AspectRatioNames",
"1400": "Currentfile",
"1410": "DiskInserted",
"1420": "DiskEjected",
"1500": "DVDUniqueID",
"1510": "CurrentTitle",
"1520": "TitleCount",
"1600": "CurrentChapter",
"1610": "ChapterCount",
"1700": "CurrentAudioStream",
"1720": "AudioStreamCount",
"1730": "AudioStreamNames",
"1800": "CurrentSubtitleStream",
"1820": "SubtitleStreamCount",
"1830": "SubtitleNames",
"1900": "CurrentAngle",
"1920": "AngleCount",
"2000": (
"MenuMode",
{
"0": "Off",
"1": "On",
},
),
"2010": "DVDDomain",
"2100": "CurrentVolume",
"2120": "CurrentAudioOutput",
"3050": "CurrentListCount",
"3060": "Playlist",
"4010": "PrivateAppString",
"8110": "CountChangers",
}
def ValueUpdate(self, text):
if text == self.waitStr:
self.waitStr = None
self.waitFlag.set()
return
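        # Replies arrive as "<4-byte code> <payload>" (see the protocol notes in
        # the module docstring), e.g. "1000 1" -> MediaState "Playing".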
header = text[0:4]
state = text[5:].decode('utf-8')
self.lastMessage[header] = state
ttEvent = self.ttEvents.get(header, None)
if ttEvent is not None:
if type(ttEvent) == type({}):
eventString = ttEvent.get(state, None)
if eventString is not None:
self.TriggerEvent(eventString)
else:
self.TriggerEvent(header, [state])
elif type(ttEvent) == type(()):
suffix2 = ttEvent[1].get(state, None)
if suffix2 is not None:
self.TriggerEvent(ttEvent[0] + "." + suffix2)
else:
self.TriggerEvent(ttEvent[0] + "." + str(state))
else:
if state == "":
self.TriggerEvent(ttEvent)
else:
self.TriggerEvent(ttEvent, [state])
return
else:
self.TriggerEvent(header, [state])
@eg.LogIt
def DoCommand(self, cmdstr):
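        """Send one raw command line to TheaterTek, (re)opening the TCP session on demand.

        TheaterTek echoes the command back; ValueUpdate() sets waitFlag when that
        echo (waitStr) is seen, so we wait up to one second here to keep commands
        serialized.
        """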
self.waitFlag.clear()
self.waitStr = cmdstr
if not self.isSessionRunning:
self.session = TheaterTekSession(self, (self.host, self.port))
self.isSessionRunning = True
try:
self.session.sendall(cmdstr + "\r\n")
except:
self.isSessionRunning = False
self.TriggerEvent('close')
self.session.close()
self.waitFlag.wait(1.0)
self.waitStr = None
self.waitFlag.set()
def SetOSD(self, text):
self.DoCommand("1200 " + text)
def Configure(
self,
host="localhost",
port=2663,
dummy1=None,
dummy2=None
):
panel = eg.ConfigPanel(self)
hostEdit = panel.TextCtrl(host)
portEdit = panel.SpinIntCtrl(port, max=65535)
panel.AddLine("TCP/IP host:", hostEdit)
panel.AddLine("TCP/IP port:", portEdit)
while panel.Affirmed():
panel.SetResult(
hostEdit.GetValue(),
portEdit.GetValue(),
None,
None
)
class MyCommand(eg.ActionWithStringParameter):
name = "Raw Command"
def __call__(self, cmd):
self.plugin.DoCommand(cmd)
| EventGhost/EventGhost | plugins/TheaterTek/__init__.py | Python | gpl-2.0 | 31,794 |
import itertools
import sys
from flask import abort, g, render_template, request, redirect, Blueprint, flash, url_for, current_app
from flask.ext.login import login_required, current_user
from realms.lib.util import to_canonical, remove_ext, gravatar_url
from .models import PageNotFound
blueprint = Blueprint('wiki', __name__)
@blueprint.route("/_commit/<sha>/<path:name>")
def commit(name, sha):
if current_app.config.get('PRIVATE_WIKI') and current_user.is_anonymous():
return current_app.login_manager.unauthorized()
cname = to_canonical(name)
data = g.current_wiki.get_page(cname, sha=sha)
if not data:
abort(404)
return render_template('wiki/page.html', name=name, page=data, commit=sha)
@blueprint.route(r"/_compare/<path:name>/<regex('\w+'):fsha><regex('\.{2,3}'):dots><regex('\w+'):lsha>")
def compare(name, fsha, dots, lsha):
if current_app.config.get('PRIVATE_WIKI') and current_user.is_anonymous():
return current_app.login_manager.unauthorized()
diff = g.current_wiki.compare(name, fsha, lsha)
return render_template('wiki/compare.html',
name=name, diff=diff, old=fsha, new=lsha)
@blueprint.route("/_revert", methods=['POST'])
@login_required
def revert():
cname = to_canonical(request.form.get('name'))
commit = request.form.get('commit')
message = request.form.get('message', "Reverting %s" % cname)
if not current_app.config.get('ALLOW_ANON') and current_user.is_anonymous():
return dict(error=True, message="Anonymous posting not allowed"), 403
if cname in current_app.config.get('WIKI_LOCKED_PAGES'):
return dict(error=True, message="Page is locked"), 403
try:
sha = g.current_wiki.revert_page(cname,
commit,
message=message,
username=current_user.username,
email=current_user.email)
except PageNotFound as e:
return dict(error=True, message=e.message), 404
if sha:
flash("Page reverted")
return dict(sha=sha)
@blueprint.route("/_history/<path:name>")
def history(name):
if current_app.config.get('PRIVATE_WIKI') and current_user.is_anonymous():
return current_app.login_manager.unauthorized()
hist = g.current_wiki.get_history(name)
for item in hist:
item['gravatar'] = gravatar_url(item['author_email'])
return render_template('wiki/history.html', name=name, history=hist)
@blueprint.route("/_edit/<path:name>")
@login_required
def edit(name):
cname = to_canonical(name)
page = g.current_wiki.get_page(name)
if not page:
# Page doesn't exist
return redirect(url_for('wiki.create', name=cname))
name = remove_ext(page['path'])
g.assets['js'].append('editor.js')
return render_template('wiki/edit.html',
name=name,
content=page.get('data'),
info=page.get('info'),
sha=page.get('sha'),
partials=page.get('partials'))
@blueprint.route("/_create/", defaults={'name': None})
@blueprint.route("/_create/<path:name>")
@login_required
def create(name):
cname = to_canonical(name) if name else ""
if cname and g.current_wiki.get_page(cname):
# Page exists, edit instead
return redirect(url_for('wiki.edit', name=cname))
g.assets['js'].append('editor.js')
return render_template('wiki/edit.html',
name=cname,
content="",
info={})
def _get_subdir(path, depth):
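    """Return the sub-directory an entry is grouped under at the given depth.

    E.g. _get_subdir("wiki/notes/page", 2) -> "notes"; entries sitting directly
    at the current level return None and are listed as plain files.
    """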
parts = path.split('/', depth)
if len(parts) > depth:
return parts[-2]
def _tree_index(items, path=""):
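    """Yield index entries under `path`: files unchanged (dir=False) and each
    sub-directory collapsed into one aggregated entry with summed size,
    earliest ctime and latest mtime (dir=True).
    """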
depth = len(path.split("/"))
items = filter(lambda x: x['name'].startswith(path), items)
items = sorted(items, key=lambda x: x['name'])
for subdir, items in itertools.groupby(items, key=lambda x: _get_subdir(x['name'], depth)):
if not subdir:
for item in items:
yield dict(item, dir=False)
else:
size = 0
ctime = sys.maxint
mtime = 0
for item in items:
size += item['size']
ctime = min(item['ctime'], ctime)
mtime = max(item['mtime'], mtime)
yield dict(name=path + subdir + "/",
mtime=mtime,
ctime=ctime,
size=size,
dir=True)
@blueprint.route("/_index", defaults={"path": ""})
@blueprint.route("/_index/<path:path>")
def index(path):
if current_app.config.get('PRIVATE_WIKI') and current_user.is_anonymous():
return current_app.login_manager.unauthorized()
items = g.current_wiki.get_index()
if path:
path = to_canonical(path) + "/"
return render_template('wiki/index.html', index=_tree_index(items, path=path), path=path)
@blueprint.route("/<path:name>", methods=['POST', 'PUT', 'DELETE'])
@login_required
def page_write(name):
cname = to_canonical(name)
if not cname:
return dict(error=True, message="Invalid name")
if not current_app.config.get('ALLOW_ANON') and current_user.is_anonymous():
return dict(error=True, message="Anonymous posting not allowed"), 403
if request.method == 'POST':
# Create
if cname in current_app.config.get('WIKI_LOCKED_PAGES'):
return dict(error=True, message="Page is locked"), 403
sha = g.current_wiki.write_page(cname,
request.form['content'],
message=request.form['message'],
create=True,
username=current_user.username,
email=current_user.email)
elif request.method == 'PUT':
edit_cname = to_canonical(request.form['name'])
if edit_cname in current_app.config.get('WIKI_LOCKED_PAGES'):
return dict(error=True, message="Page is locked"), 403
if edit_cname != cname:
g.current_wiki.rename_page(cname, edit_cname)
sha = g.current_wiki.write_page(edit_cname,
request.form['content'],
message=request.form['message'],
username=current_user.username,
email=current_user.email)
return dict(sha=sha)
elif request.method == 'DELETE':
# DELETE
if cname in current_app.config.get('WIKI_LOCKED_PAGES'):
return dict(error=True, message="Page is locked"), 403
sha = g.current_wiki.delete_page(cname,
username=current_user.username,
email=current_user.email)
return dict(sha=sha)
@blueprint.route("/", defaults={'name': 'home'})
@blueprint.route("/<path:name>")
def page(name):
if current_app.config.get('PRIVATE_WIKI') and current_user.is_anonymous():
return current_app.login_manager.unauthorized()
cname = to_canonical(name)
if cname != name:
return redirect(url_for('wiki.page', name=cname))
data = g.current_wiki.get_page(cname)
if data:
return render_template('wiki/page.html', name=cname, page=data, partials=data.get('partials'))
else:
return redirect(url_for('wiki.create', name=cname))
| drptbl/realms-wiki-vagrant | realms/modules/wiki/views.py | Python | gpl-2.0 | 7,675 |
class StopSendingException(Exception):
"""
pre_send exception
"""
| ilstreltsov/django-db-mailer | dbmail/exceptions.py | Python | gpl-2.0 | 78 |
import nose
import sys
import os
import warnings
import tempfile
from contextlib import contextmanager
import datetime
import numpy as np
import pandas
import pandas as pd
from pandas import (Series, DataFrame, Panel, MultiIndex, Categorical, bdate_range,
date_range, Index, DatetimeIndex, isnull)
from pandas.io.pytables import _tables
try:
_tables()
except ImportError as e:
raise nose.SkipTest(e)
from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,
IncompatibilityWarning, PerformanceWarning,
AttributeConflictWarning, DuplicateWarning,
PossibleDataLossError, ClosedFileError)
from pandas.io import pytables as pytables
import pandas.util.testing as tm
from pandas.util.testing import (assert_panel4d_equal,
assert_panel_equal,
assert_frame_equal,
assert_series_equal)
from pandas import concat, Timestamp
from pandas import compat
from pandas.compat import range, lrange, u
from pandas.util.testing import assert_produces_warning
from numpy.testing.decorators import slow
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
# helpers / contextmanagers to ensure file cleanup
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except:
pass
def safe_close(store):
try:
if store is not None:
store.close()
except:
pass
def create_tempfile(path):
""" create an unopened named temporary file """
return os.path.join(tempfile.gettempdir(),path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
fletcher32=False):
try:
# put in the temporary path if we don't have one already
if not len(os.path.dirname(path)):
path = create_tempfile(path)
        store = HDFStore(path, mode=mode, complevel=complevel,
                         complib=complib, fletcher32=fletcher32)
yield store
finally:
safe_close(store)
if mode == 'w' or mode == 'a':
safe_remove(path)
@contextmanager
def ensure_clean_path(path):
"""
return essentially a named temporary file that is not opened
    and deleted on exiting; if path is a list, then create and
return list of filenames
"""
try:
if isinstance(path, list):
filenames = [ create_tempfile(p) for p in path ]
yield filenames
else:
filenames = [ create_tempfile(path) ]
yield filenames[0]
finally:
for f in filenames:
safe_remove(f)
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except:
pass
def compat_assert_produces_warning(w,f):
""" don't produce a warning under PY3 """
if compat.PY3:
f()
else:
with tm.assert_produces_warning(expected_warning=w):
f()
class TestHDFStore(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestHDFStore, cls).setUpClass()
# Pytables 3.0.0 deprecates lots of things
tm.reset_testing_mode()
@classmethod
def tearDownClass(cls):
super(TestHDFStore, cls).tearDownClass()
# Pytables 3.0.0 deprecates lots of things
tm.set_testing_mode()
def setUp(self):
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.path = 'tmp.__%s__.h5' % tm.rands(10)
def tearDown(self):
pass
def test_factory_fun(self):
try:
with get_store(self.path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(self.path)
try:
with get_store(self.path) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.path) as tbl:
self.assertEqual(len(tbl), 1)
self.assertEqual(type(tbl['a']), DataFrame)
finally:
safe_remove(self.path)
def test_context(self):
try:
with HDFStore(self.path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(self.path)
try:
with HDFStore(self.path) as tbl:
tbl['a'] = tm.makeDataFrame()
with HDFStore(self.path) as tbl:
self.assertEqual(len(tbl), 1)
self.assertEqual(type(tbl['a']), DataFrame)
finally:
safe_remove(self.path)
def test_conv_read_write(self):
try:
def roundtrip(key, obj,**kwargs):
obj.to_hdf(self.path, key,**kwargs)
return read_hdf(self.path, key)
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series',o))
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series',o))
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame',o))
o = tm.makePanel()
assert_panel_equal(o, roundtrip('panel',o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
df.to_hdf(self.path,'table',append=True)
result = read_hdf(self.path, 'table', where = ['index>2'])
assert_frame_equal(df[df.index>2],result)
finally:
safe_remove(self.path)
def test_long_strings(self):
# GH6166
# unconversion of long strings was being chopped in earlier
# versions of numpy < 1.7.2
df = DataFrame({'a': tm.rands_array(100, size=10)},
index=tm.rands_array(100, size=10))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['a'])
result = store.select('df')
assert_frame_equal(df, result)
def test_api(self):
# GH4584
        # API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path,'df',append=True,format='table')
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
# append to False
df.iloc[:10].to_hdf(path,'df',append=False,format='table')
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path,'df',append=True)
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
# append to False
df.iloc[:10].to_hdf(path,'df',append=False,format='table')
df.iloc[10:].to_hdf(path,'df',append=True)
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',append=False,format='fixed')
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df',append=False,format='f')
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df',append=False)
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df')
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_store(self.path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=True,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
# append to False
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
# formats
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format=None)
assert_frame_equal(store.select('df'),df)
with ensure_clean_path(self.path) as path:
# invalid
df = tm.makeDataFrame()
self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='f')
self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='fixed')
self.assertRaises(TypeError, df.to_hdf, path,'df',append=True,format='foo')
self.assertRaises(TypeError, df.to_hdf, path,'df',append=False,format='bar')
            # File path doesn't exist
path = ""
self.assertRaises(IOError, read_hdf, path, 'df')
def test_api_default_format(self):
# default_format option
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
pandas.set_option('io.hdf.default_format','fixed')
_maybe_remove(store,'df')
store.put('df',df)
self.assertFalse(store.get_storer('df').is_table)
self.assertRaises(ValueError, store.append, 'df2',df)
pandas.set_option('io.hdf.default_format','table')
_maybe_remove(store,'df')
store.put('df',df)
self.assertTrue(store.get_storer('df').is_table)
_maybe_remove(store,'df2')
store.append('df2',df)
self.assertTrue(store.get_storer('df').is_table)
pandas.set_option('io.hdf.default_format',None)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
pandas.set_option('io.hdf.default_format','fixed')
df.to_hdf(path,'df')
with get_store(path) as store:
self.assertFalse(store.get_storer('df').is_table)
self.assertRaises(ValueError, df.to_hdf, path,'df2', append=True)
pandas.set_option('io.hdf.default_format','table')
df.to_hdf(path,'df3')
with HDFStore(path) as store:
self.assertTrue(store.get_storer('df3').is_table)
df.to_hdf(path,'df4',append=True)
with HDFStore(path) as store:
self.assertTrue(store.get_storer('df4').is_table)
pandas.set_option('io.hdf.default_format',None)
def test_keys(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
self.assertEqual(len(store), 5)
self.assertTrue(set(
store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
with ensure_clean_store(self.path) as store:
repr(store)
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
store.append('e', tm.makePanel())
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001,1,2,0,0)
df['datetime2'] = datetime.datetime(2001,1,3,0,0)
df.ix[3:6,['obj1']] = np.nan
df = df.consolidate().convert_objects()
warnings.filterwarnings('ignore', category=PerformanceWarning)
store['df'] = df
warnings.filterwarnings('always', category=PerformanceWarning)
# make a random group in hdf space
store._handle.create_group(store._handle.root,'bah')
repr(store)
str(store)
# storers
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df',df)
s = store.get_storer('df')
repr(s)
str(s)
def test_contains(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
store['foo/bar'] = tm.makeDataFrame()
self.assertIn('a', store)
self.assertIn('b', store)
self.assertNotIn('c', store)
self.assertIn('foo/bar', store)
self.assertIn('/foo/bar', store)
self.assertNotIn('/foo/b', store)
self.assertNotIn('bar', store)
# GH 2694
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
store['node())'] = tm.makeDataFrame()
self.assertIn('node())', store)
def test_versioning(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
self.assertEqual(store.root.a._v_attrs.pandas_version, '0.15.2')
self.assertEqual(store.root.b._v_attrs.pandas_version, '0.15.2')
self.assertEqual(store.root.df1._v_attrs.pandas_version, '0.15.2')
# write a file and wipe its versioning
_maybe_remove(store, 'df2')
store.append('df2', df)
# this is an error because its table_type is appendable, but no version
# info
store.get_node('df2')._v_attrs.pandas_version = None
self.assertRaises(Exception, store.select, 'df2')
def test_mode(self):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(self.path) as path:
# constructor
if mode in ['r','r+']:
self.assertRaises(IOError, HDFStore, path, mode=mode)
else:
store = HDFStore(path,mode=mode)
self.assertEqual(store._handle.mode, mode)
store.close()
with ensure_clean_path(self.path) as path:
# context
if mode in ['r','r+']:
def f():
with HDFStore(path,mode=mode) as store:
pass
self.assertRaises(IOError, f)
else:
with HDFStore(path,mode=mode) as store:
self.assertEqual(store._handle.mode, mode)
with ensure_clean_path(self.path) as path:
# conv write
if mode in ['r','r+']:
self.assertRaises(IOError, df.to_hdf, path, 'df', mode=mode)
df.to_hdf(path,'df',mode='w')
else:
df.to_hdf(path,'df',mode=mode)
# conv read
if mode in ['w']:
self.assertRaises(KeyError, read_hdf, path, 'df', mode=mode)
else:
result = read_hdf(path,'df',mode=mode)
assert_frame_equal(result,df)
check('r')
check('r+')
check('a')
check('w')
def test_reopen_handle(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path,mode='a')
store['a'] = tm.makeTimeSeries()
# invalid mode change
self.assertRaises(PossibleDataLossError, store.open, 'w')
store.close()
self.assertFalse(store.is_open)
# truncation ok here
store.open('w')
self.assertTrue(store.is_open)
self.assertEqual(len(store), 0)
store.close()
self.assertFalse(store.is_open)
store = HDFStore(path,mode='a')
store['a'] = tm.makeTimeSeries()
# reopen as read
store.open('r')
self.assertTrue(store.is_open)
self.assertEqual(len(store), 1)
self.assertEqual(store._mode, 'r')
store.close()
self.assertFalse(store.is_open)
# reopen as append
store.open('a')
self.assertTrue(store.is_open)
self.assertEqual(len(store), 1)
self.assertEqual(store._mode, 'a')
store.close()
self.assertFalse(store.is_open)
# reopen as append (again)
store.open('a')
self.assertTrue(store.is_open)
self.assertEqual(len(store), 1)
self.assertEqual(store._mode, 'a')
store.close()
self.assertFalse(store.is_open)
def test_open_args(self):
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(path,mode='a',driver='H5FD_CORE',driver_core_backing_store=0)
store['df'] = df
store.append('df2',df)
tm.assert_frame_equal(store['df'],df)
tm.assert_frame_equal(store['df2'],df)
store.close()
# the file should not have actually been written
self.assertFalse(os.path.exists(path))
def test_flush(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
left = store.get('a')
right = store['a']
tm.assert_series_equal(left, right)
left = store.get('/a')
right = store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, store.get, 'b')
def test_getattr(self):
with ensure_clean_store(self.path) as store:
s = tm.makeTimeSeries()
store['a'] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store,'a')
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store['df'] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
self.assertRaises(AttributeError, getattr, store, 'd')
for x in ['mode','path','handle','complib']:
self.assertRaises(AttributeError, getattr, store, x)
# not stores
for x in ['mode','path','handle','complib']:
getattr(store,"_%s" % x)
def test_put(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store['a'] = ts
store['b'] = df[:10]
store['foo/bar/bah'] = df[:10]
store['foo'] = df[:10]
store['/foo'] = df[:10]
store.put('c', df[:10], format='table')
# not OK, not a table
self.assertRaises(
ValueError, store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
# _maybe_remove(store, 'f')
# self.assertRaises(ValueError, store.put, 'f', df[10:], append=True)
# can't put to a table (use append instead)
self.assertRaises(ValueError, store.put, 'c', df[10:], append=True)
# overwrite table
store.put('c', df[:10], format='table', append=False)
tm.assert_frame_equal(df[:10], store['c'])
def test_put_string_index(self):
with ensure_clean_store(self.path) as store:
index = Index(
["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + ["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(21), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
def test_put_compression(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
store.put('c', df, format='table', complib='zlib')
tm.assert_frame_equal(store['c'], df)
# can't compress if format='fixed'
self.assertRaises(ValueError, store.put, 'b', df,
format='fixed', complib='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
# can't compress if format='fixed'
self.assertRaises(ValueError, store.put, 'b', df,
format='fixed', complib='blosc')
store.put('c', df, format='table', complib='blosc')
tm.assert_frame_equal(store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_put_mixed_type(self):
df = tm.makeTimeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.ix[3:6, ['obj1']] = np.nan
df = df.consolidate().convert_objects()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# cannot use assert_produces_warning here for some reason
# a PendingDeprecationWarning is also raised?
warnings.filterwarnings('ignore', category=PerformanceWarning)
store.put('df',df)
warnings.filterwarnings('always', category=PerformanceWarning)
expected = store.get('df')
tm.assert_frame_equal(expected,df)
def test_append(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
_maybe_remove(store, 'df2')
store.put('df2', df[:10], format='table')
store.append('df2', df[10:])
tm.assert_frame_equal(store['df2'], df)
_maybe_remove(store, 'df3')
store.append('/df3', df[:10])
store.append('/df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
            # this is allowed, but you almost always don't want to do it
with tm.assert_produces_warning(expected_warning=tables.NaturalNameWarning):
_maybe_remove(store, '/df3 foo')
store.append('/df3 foo', df[:10])
store.append('/df3 foo', df[10:])
tm.assert_frame_equal(store['df3 foo'], df)
# panel
wp = tm.makePanel()
_maybe_remove(store, 'wp1')
store.append('wp1', wp.ix[:, :10, :])
store.append('wp1', wp.ix[:, 10:, :])
assert_panel_equal(store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :])
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
# test using axis labels
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=[
'items', 'major_axis', 'minor_axis'])
store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
'items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d'], p4d)
            # test using a different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
_maybe_remove(store, 'p4d2')
store.append(
'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d2'], p4d2)
            # test using a different order of items on the non-index axes
_maybe_remove(store, 'wp1')
wp_append1 = wp.ix[:, :10, :]
store.append('wp1', wp_append1)
wp_append2 = wp.ix[:, 10:, :].reindex(items=wp.items[::-1])
store.append('wp1', wp_append2)
assert_panel_equal(store['wp1'], wp)
            # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df['mixed_column'] = 'testing'
df.ix[2, 'mixed_column'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df)
tm.assert_frame_equal(store['df'], df)
# uints - test storage of uints
uint_data = DataFrame({'u08' : Series(np.random.random_integers(0, high=255, size=5), dtype=np.uint8),
'u16' : Series(np.random.random_integers(0, high=65535, size=5), dtype=np.uint16),
'u32' : Series(np.random.random_integers(0, high=2**30, size=5), dtype=np.uint32),
'u64' : Series([2**58, 2**59, 2**60, 2**61, 2**62], dtype=np.uint64)},
index=np.arange(5))
_maybe_remove(store, 'uints')
store.append('uints', uint_data)
tm.assert_frame_equal(store['uints'], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, 'uints')
store.append('uints', uint_data, data_columns=['u08','u16','u32']) # 64-bit indices not yet supported
tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
with ensure_clean_store(self.path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append('ss', ss)
result = store['ss']
tm.assert_series_equal(result, ss)
self.assertIsNone(result.name)
store.append('ts', ts)
result = store['ts']
tm.assert_series_equal(result, ts)
self.assertIsNone(result.name)
ns.name = 'foo'
store.append('ns', ns)
result = store['ns']
tm.assert_series_equal(result, ns)
self.assertEqual(result.name, ns.name)
# select on the values
expected = ns[ns>60]
result = store.select('ns',Term('foo>60'))
tm.assert_series_equal(result,expected)
# select on the index and values
expected = ns[(ns>70) & (ns.index<90)]
result = store.select('ns',[Term('foo>70'), Term('index<90')])
tm.assert_series_equal(result,expected)
# multi-index
mi = DataFrame(np.random.randn(5,1),columns=['A'])
mi['B'] = np.arange(len(mi))
mi['C'] = 'foo'
mi.loc[3:5,'C'] = 'bar'
mi.set_index(['C','B'],inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append('mi', s)
tm.assert_series_equal(store['mi'], s)
def test_store_index_types(self):
# GH5386
# test storing various index types
with ensure_clean_store(self.path) as store:
def check(format,index):
df = DataFrame(np.random.randn(10,2),columns=list('AB'))
df.index = index(len(df))
_maybe_remove(store, 'df')
store.put('df',df,format=format)
assert_frame_equal(df,store['df'])
for index in [ tm.makeFloatIndex, tm.makeStringIndex, tm.makeIntIndex,
tm.makeDateIndex ]:
check('table',index)
check('fixed',index)
# period index currently broken for table
# see GH7796 FIXME
check('fixed',tm.makePeriodIndex)
#check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
if compat.PY3:
check('table',index)
check('fixed',index)
else:
# only supported with the fixed format (and it emits a perf warning)
self.assertRaises(TypeError, check, 'table', index)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
check('fixed',index)
def test_encoding(self):
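# Round-trips a frame written with an explicit 'ascii' encoding and selects it
# back with an encoded Term; skipped on big-endian platforms.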
if sys.byteorder != 'little':
raise nose.SkipTest('system byteorder is not little')
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A='foo',B='bar'),index=range(5))
df.loc[2,'A'] = np.nan
df.loc[3,'B'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df, encoding='ascii')
tm.assert_frame_equal(store['df'], df)
expected = df.reindex(columns=['A'])
result = store.select('df',Term('columns=A',encoding='ascii'))
tm.assert_frame_equal(result,expected)
def test_append_some_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A' : Series(np.random.randn(20)).astype('int32'),
'A1' : np.random.randn(20),
'A2' : np.random.randn(20),
'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) },
index=np.arange(20))
# some nans
_maybe_remove(store, 'df1')
df.ix[0:15,['A1','B','D','E']] = np.nan
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
# first column
df1 = df.copy()
df1.ix[:,'A1'] = np.nan
_maybe_remove(store, 'df1')
store.append('df1', df1[:10])
store.append('df1', df1[10:])
tm.assert_frame_equal(store['df1'], df1)
# 2nd column
df2 = df.copy()
df2.ix[:,'A2'] = np.nan
_maybe_remove(store, 'df2')
store.append('df2', df2[:10])
store.append('df2', df2[10:])
tm.assert_frame_equal(store['df2'], df2)
# datetimes
df3 = df.copy()
df3.ix[:,'E'] = np.nan
_maybe_remove(store, 'df3')
store.append('df3', df3[:10])
store.append('df3', df3[10:])
tm.assert_frame_equal(store['df3'], df3)
def test_append_all_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A1' : np.random.randn(20),
'A2' : np.random.randn(20)},
index=np.arange(20))
df.ix[0:15,:] = np.nan
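# dropna=True should skip rows that are entirely nan on write, while dropna=False
# keeps them; the later cases show that rows containing strings or datetimes are
# always written regardless.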
# nan some entire rows (dropna=True)
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# tests the option io.hdf.dropna_table
pandas.set_option('io.hdf.dropna_table',False)
_maybe_remove(store, 'df3')
store.append('df3', df[:10])
store.append('df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
pandas.set_option('io.hdf.dropna_table',True)
_maybe_remove(store, 'df4')
store.append('df4', df[:10])
store.append('df4', df[10:])
tm.assert_frame_equal(store['df4'], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame({'A1' : np.random.randn(20),
'A2' : np.random.randn(20),
'B' : 'foo', 'C' : 'bar'},
index=np.arange(20))
df.ix[0:15,:] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# nan some entire rows (but since we have dates they are still written!)
df = DataFrame({'A1' : np.random.randn(20),
'A2' : np.random.randn(20),
'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) },
index=np.arange(20))
df.ix[0:15,:] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
def test_append_frame_column_oriented(self):
with ensure_clean_store(self.path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df.ix[:, :2], axes=['columns'])
store.append('df1', df.ix[:, 2:])
tm.assert_frame_equal(store['df1'], df)
result = store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select(
'df1', ('columns=A', Term('index=df.index[0:4]')))
expected = df.reindex(columns=['A'], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(TypeError, store.select, 'df1', (
'columns=A', Term('index>df.index[4]')))
def test_append_with_different_block_ordering(self):
# GH 4096; using the same frames, but different block orderings
with ensure_clean_store(self.path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10,2),columns=list('AB'))
df['index'] = range(10)
df['index'] += i*10
df['int64'] = Series([1]*len(df),dtype='int64')
df['int16'] = Series([1]*len(df),dtype='int16')
if i % 2 == 0:
del df['int64']
df['int64'] = Series([1]*len(df),dtype='int64')
if i % 3 == 0:
a = df.pop('A')
df['A'] = a
df.set_index('index',inplace=True)
store.append('df',df)
# test a different ordering but with more fields (like an invalid combination)
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(10,2),columns=list('AB'), dtype='float64')
df['int64'] = Series([1]*len(df),dtype='int64')
df['int16'] = Series([1]*len(df),dtype='int16')
store.append('df',df)
# store additional fields in different blocks
df['int16_2'] = Series([1]*len(df),dtype='int16')
self.assertRaises(ValueError, store.append, 'df', df)
# store multiple additional fields in different blocks
df['float_3'] = Series([1.]*len(df),dtype='float64')
self.assertRaises(ValueError, store.append, 'df', df)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
with ensure_clean_store(self.path) as store:
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i, idx in enumerate(indexers):
self.assertTrue(getattr(getattr(
store.root, key).table.description, idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items', 'major_axis', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store.select('p4d'), p4d)
check_indexers('p4d', indexers)
# same as above, but try to append with different axes
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
'labels', 'items', 'major_axis'])
assert_panel4d_equal(store.select('p4d'), p4d)
check_indexers('p4d', indexers)
# pass incorrect number of axes
_maybe_remove(store, 'p4d')
self.assertRaises(ValueError, store.append, 'p4d', p4d.ix[
:, :, :10, :], axes=['major_axis', 'minor_axis'])
# different than default indexables #1
indexers = ['labels', 'major_axis', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
check_indexers('p4d', indexers)
# different than default indexables #2
indexers = ['major_axis', 'labels', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
check_indexers('p4d', indexers)
# partial selection
result = store.select('p4d', ['labels=l1'])
expected = p4d.reindex(labels=['l1'])
assert_panel4d_equal(result, expected)
# partial selection2
result = store.select('p4d', [Term(
'labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(
labels=['l1'], items=['ItemA'], minor_axis=['B'])
assert_panel4d_equal(result, expected)
# non-existent partial selection
result = store.select('p4d', [Term(
'labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels=['l1'], items=[], minor_axis=['B'])
assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel()
wp2 = wp.rename_axis(
dict([(x, "%s_extra" % x) for x in wp.minor_axis]), axis=2)
def check_col(key,name,size):
self.assertEqual(getattr(store.get_storer(key).table.description,name).itemsize, size)
store.append('s1', wp, min_itemsize=20)
store.append('s1', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s1'], expected)
check_col('s1', 'minor_axis', 20)
# test dict format
store.append('s2', wp, min_itemsize={'minor_axis': 20})
store.append('s2', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s2'], expected)
check_col('s2', 'minor_axis', 20)
# apply the wrong field (similar to #1)
store.append('s3', wp, min_itemsize={'major_axis': 20})
self.assertRaises(ValueError, store.append, 's3', wp2)
# test truncation of bigger strings
store.append('s4', wp)
self.assertRaises(ValueError, store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big', df)
tm.assert_frame_equal(store.select('df_big'), df)
check_col('df_big', 'values_block_1', 15)
# appending smaller string ok
df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
store.append('df_big', df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select('df_big'), expected)
check_col('df_big', 'values_block_1', 15)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big2', df, min_itemsize={'values': 50})
tm.assert_frame_equal(store.select('df_big2'), df)
check_col('df_big2', 'values_block_1', 50)
# bigger string on next append
store.append('df_new', df)
df_new = DataFrame(
[[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(ValueError, store.append, 'df_new', df_new)
# with nans
_maybe_remove(store, 'df')
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.ix[1:4, 'string'] = np.nan
df['string2'] = 'bar'
df.ix[4:8, 'string2'] = np.nan
df['string3'] = 'bah'
df.ix[1:, 'string3'] = np.nan
store.append('df', df)
result = store.select('df')
tm.assert_frame_equal(result, df)
with ensure_clean_store(self.path) as store:
def check_col(key,name,size):
self.assertEqual(getattr(store.get_storer(key).table.description,name).itemsize, size)
df = DataFrame(dict(A = 'foo', B = 'bar'),index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, 'df')
store.append('df', df, min_itemsize={'A' : 200 })
check_col('df', 'A', 200)
self.assertEqual(store.get_storer('df').data_columns, ['A'])
# a min_itemsize on a column adds it as a data_column alongside the explicit ones
_maybe_remove(store, 'df')
store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })
check_col('df', 'A', 200)
self.assertEqual(store.get_storer('df').data_columns, ['B','A'])
# a min_itemsize keyed on 'values' sizes the data column and the values block
# without adding new data_columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns = ['B'], min_itemsize={'values' : 200 })
check_col('df', 'B', 200)
check_col('df', 'values_block_0', 200)
self.assertEqual(store.get_storer('df').data_columns, ['B'])
# infer the .typ on subsequent appends
_maybe_remove(store, 'df')
store.append('df', df[:5], min_itemsize=200)
store.append('df', df[5:], min_itemsize=200)
tm.assert_frame_equal(store['df'], df)
# invalid min_itemsize keys
df = DataFrame(['foo','foo','foo','barh','barh','barh'],columns=['A'])
_maybe_remove(store, 'df')
self.assertRaises(ValueError, store.append, 'df', df, min_itemsize={'foo' : 20, 'foobar' : 20})
def test_append_with_data_columns(self):
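# data_columns get their own queryable columns in the underlying table, so they can
# be used directly in where= selections; the blocks below cover index creation,
# querying, string columns and the interaction with min_itemsize.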
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df.loc[:,'B'].iloc[0] = 1.
_maybe_remove(store, 'df')
store.append('df', df[:2], data_columns=['B'])
store.append('df', df[2:])
tm.assert_frame_equal(store['df'], df)
# check that we have indices created
assert(store._handle.root.df.table.cols.index.is_indexed is True)
assert(store._handle.root.df.table.cols.B.is_indexed is True)
# data column searching
result = store.select('df', [Term('B>0')])
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select(
'df', [Term('B>0'), Term('index>df.index[3]')])
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new['string'] = 'foo'
df_new.loc[1:4,'string'] = np.nan
df_new.loc[5:6,'string'] = 'bar'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'])
result = store.select('df', [Term('string=foo')])
expected = df_new[df_new.string == 'foo']
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key,name,size):
self.assertEqual(getattr(store.get_storer(key).table.description,name).itemsize, size)
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'string': 30})
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['string'], min_itemsize=30)
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'values': 30})
check_col('df', 'string', 30)
with ensure_clean_store(self.path) as store:
df_new['string2'] = 'foobarbah'
df_new['string_block1'] = 'foobarbah1'
df_new['string_block2'] = 'foobarbah2'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string', 'string2'], min_itemsize={'string': 30, 'string2': 40, 'values': 50})
check_col('df', 'string', 30)
check_col('df', 'string2', 40)
check_col('df', 'values_block_1', 50)
with ensure_clean_store(self.path) as store:
# multiple data columns
df_new = df.copy()
df_new.ix[0,'A'] = 1.
df_new.ix[0,'B'] = -1.
df_new['string'] = 'foo'
df_new.loc[1:4,'string'] = np.nan
df_new.loc[5:6,'string'] = 'bar'
df_new['string2'] = 'foo'
df_new.loc[2:5,'string2'] = np.nan
df_new.loc[7:8,'string2'] = 'bar'
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
result = store.select('df', [Term('string=foo'), Term(
'string2=foo'), Term('A>0'), Term('B<0')])
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select('df', [Term('string=foo'), Term(
'string2=cool')])
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'cool')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example
df_dc = df.copy()
df_dc['string'] = 'foo'
df_dc.ix[4:6, 'string'] = np.nan
df_dc.ix[7:9, 'string'] = 'bar'
df_dc['string2'] = 'cool'
df_dc['datetime'] = Timestamp('20010102')
df_dc = df_dc.convert_objects()
df_dc.ix[3:5, ['A', 'B', 'datetime']] = np.nan
_maybe_remove(store, 'df_dc')
store.append('df_dc', df_dc, data_columns=['B', 'C',
'string', 'string2', 'datetime'])
result = store.select('df_dc', [Term('B>0')])
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == foo'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range('1/1/2000', periods=8)
df_dc = DataFrame(np.random.randn(8, 3), index=index,
columns=['A', 'B', 'C'])
df_dc['string'] = 'foo'
df_dc.ix[4:6,'string'] = np.nan
df_dc.ix[7:9,'string'] = 'bar'
df_dc.ix[:,['B','C']] = df_dc.ix[:,['B','C']].abs()
df_dc['string2'] = 'cool'
# on-disk operations
store.append('df_dc', df_dc, data_columns = ['B', 'C', 'string', 'string2'])
result = store.select('df_dc', [ Term('B>0') ])
expected = df_dc[df_dc.B>0]
tm.assert_frame_equal(result,expected)
result = store.select('df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == 'foo')]
tm.assert_frame_equal(result,expected)
with ensure_clean_store(self.path) as store:
# panel
# GH5717 not handling data_columns
np.random.seed(1234)
p = tm.makePanel()
store.append('p1',p)
tm.assert_panel_equal(store.select('p1'),p)
store.append('p2',p,data_columns=True)
tm.assert_panel_equal(store.select('p2'),p)
result = store.select('p2',where='ItemA>0')
expected = p.to_frame()
expected = expected[expected['ItemA']>0]
tm.assert_frame_equal(result.to_frame(),expected)
result = store.select('p2',where='ItemA>0 & minor_axis=["A","B"]')
expected = p.to_frame()
expected = expected[expected['ItemA']>0]
expected = expected[expected.reset_index(level=['major']).index.isin(['A','B'])]
tm.assert_frame_equal(result.to_frame(),expected)
def test_create_table_index(self):
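# create_table_index builds PyTables column indexes on the indexables/data columns;
# optlevel and kind ('light'/'medium'/'full') tune how thorough the index is.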
with ensure_clean_store(self.path) as store:
def col(t,column):
return getattr(store.get_storer(t).table.cols,column)
# index=False
wp = tm.makePanel()
store.append('p5', wp, index=False)
store.create_table_index('p5', columns=['major_axis'])
assert(col('p5', 'major_axis').is_indexed is True)
assert(col('p5', 'minor_axis').is_indexed is False)
# index=True
store.append('p5i', wp, index=True)
assert(col('p5i', 'major_axis').is_indexed is True)
assert(col('p5i', 'minor_axis').is_indexed is True)
# default optlevels
store.get_storer('p5').create_index()
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
# let's change the indexing scheme
store.create_table_index('p5')
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', optlevel=9)
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', kind='full')
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'full')
store.create_table_index('p5', optlevel=1, kind='light')
assert(col('p5', 'major_axis').index.optlevel == 1)
assert(col('p5', 'minor_axis').index.kind == 'light')
# data columns
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df['string2'] = 'bar'
store.append('f', df, data_columns=['string', 'string2'])
assert(col('f', 'index').is_indexed is True)
assert(col('f', 'string').is_indexed is True)
assert(col('f', 'string2').is_indexed is True)
# specify index=columns
store.append(
'f2', df, index=['string'], data_columns=['string', 'string2'])
assert(col('f2', 'index').is_indexed is False)
assert(col('f2', 'string').is_indexed is True)
assert(col('f2', 'string2').is_indexed is False)
# try to index a non-table
_maybe_remove(store, 'f2')
store.put('f2', df)
self.assertRaises(TypeError, store.create_table_index, 'f2')
def test_append_diff_item_order(self):
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
with ensure_clean_store(self.path) as store:
store.put('panel', wp1, format='table')
self.assertRaises(ValueError, store.put, 'panel', wp2,
append=True)
def test_append_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.append('mi', df)
result = store.select('mi')
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select('mi',columns=['A','B'])
expected = df.reindex(columns=['A','B'])
tm.assert_frame_equal(result,expected)
with ensure_clean_path('test.hdf') as path:
df.to_hdf(path,'df',format='table')
result = read_hdf(path,'df',columns=['A','B'])
expected = df.reindex(columns=['A','B'])
tm.assert_frame_equal(result,expected)
def test_column_multiindex(self):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples([('A','a'), ('A','b'), ('B','a'), ('B','b')], names=['first','second'])
df = DataFrame(np.arange(12).reshape(3,4), columns=index)
with ensure_clean_store(self.path) as store:
store.put('df',df)
tm.assert_frame_equal(store['df'],df,check_index_type=True,check_column_type=True)
store.put('df1',df,format='table')
tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
self.assertRaises(ValueError, store.put, 'df2',df,format='table',data_columns=['A'])
self.assertRaises(ValueError, store.put, 'df3',df,format='table',data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(self.path) as store:
store.append('df2', df)
store.append('df2', df)
tm.assert_frame_equal(store['df2'], concat((df,df)))
# non_index_axes name
df = DataFrame(np.arange(12).reshape(3,4), columns=Index(list('ABCD'),name='foo'))
with ensure_clean_store(self.path) as store:
store.put('df1',df,format='table')
tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
def test_store_multiindex(self):
# validate multi-index names
# GH 5527
with ensure_clean_store(self.path) as store:
def make_index(names=None):
return MultiIndex.from_tuples([( datetime.datetime(2013,12,d), s, t) for d in range(1,3) for s in range(2) for t in range(3)],
names=names)
# no names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index())
store.append('df',df)
tm.assert_frame_equal(store.select('df'),df)
# partial names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date',None,None]))
store.append('df',df)
tm.assert_frame_equal(store.select('df'),df)
# series
_maybe_remove(store, 's')
s = Series(np.zeros(12), index=make_index(['date',None,None]))
store.append('s',s)
tm.assert_series_equal(store.select('s'),s)
# dup with column
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','a','t']))
self.assertRaises(ValueError, store.append, 'df',df)
# dup within level
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','date','date']))
self.assertRaises(ValueError, store.append, 'df',df)
# fully named
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','s','t']))
store.append('df',df)
tm.assert_frame_equal(store.select('df'),df)
def test_select_columns_in_where(self):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo_name', 'bar_name'])
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
expected = df[['A']]
tm.assert_frame_equal(store.select('df', columns=['A']), expected)
tm.assert_frame_equal(store.select('df', where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index,
name='A')
with ensure_clean_store(self.path) as store:
store.put('s', s, format='table')
tm.assert_series_equal(store.select('s', where="columns=['A']"),s)
def test_pass_spec_to_storer(self):
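# put() without format= stores in the fixed format, which supports neither
# columns= nor where= on select, hence the TypeErrors below.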
df = tm.makeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df',df)
self.assertRaises(TypeError, store.select, 'df', columns=['A'])
self.assertRaises(TypeError, store.select, 'df',where=[('columns=A')])
def test_append_misc(self):
with ensure_clean_store(self.path) as store:
# unsupported data types for non-tables
p4d = tm.makePanel4D()
self.assertRaises(TypeError, store.put,'p4d',p4d)
# unsupported data types
self.assertRaises(TypeError, store.put,'abc',None)
self.assertRaises(TypeError, store.put,'abc','123')
self.assertRaises(TypeError, store.put,'abc',123)
self.assertRaises(TypeError, store.put,'abc',np.arange(5))
df = tm.makeDataFrame()
store.append('df', df, chunksize=1)
result = store.select('df')
tm.assert_frame_equal(result, df)
store.append('df1', df, expectedrows=10)
result = store.select('df1')
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(self.path,mode='w') as store:
store.append('obj', obj, chunksize=c)
result = store.select('obj')
comparator(result,obj)
df = tm.makeDataFrame()
df['string'] = 'foo'
df['float322'] = 1.
df['float322'] = df['float322'].astype('float32')
df['bool'] = df['float322'] > 0
df['time1'] = Timestamp('20130101')
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
p = tm.makePanel()
check(p, assert_panel_equal)
p4d = tm.makePanel4D()
check(p4d, assert_panel4d_equal)
# empty frame, GH4273
with ensure_clean_store(self.path) as store:
# 0 len
df_empty = DataFrame(columns=list('ABC'))
store.append('df',df_empty)
self.assertRaises(KeyError,store.select, 'df')
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10,3),columns=list('ABC'))
store.append('df',df)
assert_frame_equal(store.select('df'),df)
store.append('df',df_empty)
assert_frame_equal(store.select('df'),df)
# store
df = DataFrame(columns=list('ABC'))
store.put('df2',df)
assert_frame_equal(store.select('df2'),df)
# 0 len
p_empty = Panel(items=list('ABC'))
store.append('p',p_empty)
self.assertRaises(KeyError,store.select, 'p')
# repeated append of 0/non-zero frames
p = Panel(np.random.randn(3,4,5),items=list('ABC'))
store.append('p',p)
assert_panel_equal(store.select('p'),p)
store.append('p',p_empty)
assert_panel_equal(store.select('p'),p)
# store
store.put('p2',p_empty)
assert_panel_equal(store.select('p2'),p_empty)
def test_append_raise(self):
with ensure_clean_store(self.path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df['invalid'] = [['a']] * len(df)
self.assertEqual(df.dtypes['invalid'], np.object_)
self.assertRaises(TypeError, store.append,'df',df)
# multiple invalid columns
df['invalid2'] = [['a']] * len(df)
df['invalid3'] = [['a']] * len(df)
self.assertRaises(TypeError, store.append,'df',df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001,1,2),index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df['invalid'] = s
self.assertEqual(df.dtypes['invalid'], np.object_)
self.assertRaises(TypeError, store.append,'df', df)
# ndarray passed directly
self.assertRaises(TypeError, store.append,'df',np.arange(10))
# series directly
self.assertRaises(TypeError, store.append,'df',Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append('df',df)
df['foo'] = 'foo'
self.assertRaises(ValueError, store.append,'df',df)
def test_table_index_incompatible_dtypes(self):
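# appending a frame whose datetime index differs in dtype from the table's existing
# integer index should raise rather than silently coerce.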
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
with ensure_clean_store(self.path) as store:
store.put('frame', df1, format='table')
self.assertRaises(TypeError, store.put, 'frame', df2,
format='table', append=True)
def test_table_values_dtypes_roundtrip(self):
with ensure_clean_store(self.path) as store:
df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
store.append('df_f8', df1)
assert_series_equal(df1.dtypes,store['df_f8'].dtypes)
df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
store.append('df_i8', df2)
assert_series_equal(df2.dtypes,store['df_i8'].dtypes)
# incompatible dtype
self.assertRaises(ValueError, store.append, 'df_i8', df1)
# check creation/storage/retrieval of float32 (a bit hacky to actually create them though)
df1 = DataFrame(np.array([[1],[2],[3]],dtype='f4'),columns = ['A'])
store.append('df_f4', df1)
assert_series_equal(df1.dtypes,store['df_f4'].dtypes)
assert df1.dtypes[0] == 'float32'
# check with mixed dtypes
df1 = DataFrame(dict([ (c,Series(np.random.randn(5),dtype=c)) for c in
['float32','float64','int32','int64','int16','int8'] ]))
df1['string'] = 'foo'
df1['float322'] = 1.
df1['float322'] = df1['float322'].astype('float32')
df1['bool'] = df1['float32'] > 0
df1['time1'] = Timestamp('20130101')
df1['time2'] = Timestamp('20130102')
store.append('df_mixed_dtypes1', df1)
result = store.select('df_mixed_dtypes1').get_dtype_counts()
expected = Series({ 'float32' : 2, 'float64' : 1,'int32' : 1, 'bool' : 1,
'int16' : 1, 'int8' : 1, 'int64' : 1, 'object' : 1,
'datetime64[ns]' : 2})
result.sort()
expected.sort()
tm.assert_series_equal(result,expected)
def test_table_mixed_dtypes(self):
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.ix[3:6, ['obj1']] = np.nan
df = df.consolidate().convert_objects()
with ensure_clean_store(self.path) as store:
store.append('df1_mixed', df)
tm.assert_frame_equal(store.select('df1_mixed'), df)
# panel
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp.consolidate()
with ensure_clean_store(self.path) as store:
store.append('p1_mixed', wp)
assert_panel_equal(store.select('p1_mixed'), wp)
# ndim
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp.consolidate()
with ensure_clean_store(self.path) as store:
store.append('p4d_mixed', wp)
assert_panel4d_equal(store.select('p4d_mixed'), wp)
def test_unimplemented_dtypes_table_columns(self):
with ensure_clean_store(self.path) as store:
l = [('date', datetime.date(2001, 1, 2))]
# py3 ok for unicode
if not compat.PY3:
l.append(('unicode', u('\\u03c3')))
### currently not supported dtypes ####
for n, f in l:
df = tm.makeDataFrame()
df[n] = f
self.assertRaises(
TypeError, store.append, 'df1_%s' % n, df)
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['datetime1'] = datetime.date(2001, 1, 2)
df = df.consolidate().convert_objects()
with ensure_clean_store(self.path) as store:
# this fails because we have a date in the object block
self.assertRaises(TypeError, store.append, 'df_unimplemented', df)
def test_append_with_timezones_pytz(self):
from datetime import timedelta
def compare(a,b):
tm.assert_frame_equal(a,b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a[c][i]
b_e = b[c][i]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e,b_e))
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A = [ Timestamp('20130102 2:00:00',tz='US/Eastern') + timedelta(hours=1)*i for i in range(5) ]))
store.append('df_tz',df,data_columns=['A'])
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
# select with tz aware
compare(store.select('df_tz',where=Term('A>=df.A[3]')),df[df.A>=df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130603',tz='US/Eastern')),index=range(5))
store.append('df_tz',df)
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=range(5))
self.assertRaises(TypeError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz',df,data_columns=['A','B'])
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
# can't append with diff timezone
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=range(5))
self.assertRaises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H', tz='US/Eastern'))))
_maybe_remove(store, 'df')
store.put('df',df)
result = store.select('df')
assert_frame_equal(result,df)
_maybe_remove(store, 'df')
store.append('df',df)
result = store.select('df')
assert_frame_equal(result,df)
def test_calendar_roundtrip_issue(self):
# 8591
# doc example from tseries holiday section
weekmask_egypt = 'Sun Mon Tue Wed Thu'
holidays = ['2012-05-01', datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]
bday_egypt = pandas.offsets.CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = (Series(dts.weekday, dts).map(Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
with ensure_clean_store(self.path) as store:
store.put('fixed',s)
result = store.select('fixed')
assert_series_equal(result, s)
store.append('table',s)
result = store.select('table')
assert_series_equal(result, s)
def test_append_with_timezones_dateutil(self):
from datetime import timedelta
tm._skip_if_no_dateutil()
# use maybe_get_tz instead of dateutil.tz.gettz to handle the windows filename issues.
from pandas.tslib import maybe_get_tz
gettz = lambda x: maybe_get_tz('dateutil/' + x)
def compare(a, b):
tm.assert_frame_equal(a, b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a[c][i]
b_e = b[c][i]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e, b_e))
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[ Timestamp('20130102 2:00:00', tz=gettz('US/Eastern')) + timedelta(hours=1) * i for i in range(5) ]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
compare(result, df)
assert_frame_equal(result, df)
# select with tz aware
compare(store.select('df_tz', where=Term('A>=df.A[3]')), df[df.A >= df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130603', tz=gettz('US/Eastern'))), index=range(5))
store.append('df_tz', df)
result = store['df_tz']
compare(result, df)
assert_frame_equal(result, df)
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('EET'))), index=range(5))
self.assertRaises(TypeError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
compare(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('CET'))), index=range(5))
self.assertRaises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range('2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_store_timezone(self):
# GH2852
# issue storing datetime.date with a timezone as it resets when read back in a new timezone
import platform
if platform.system() == "Windows":
raise nose.SkipTest("timezone setting not supported on windows")
import datetime
import time
import os
# original method
with ensure_clean_store(self.path) as store:
today = datetime.date(2013,9,10)
df = DataFrame([1,2,3], index = [today, today, today])
store['obj1'] = df
result = store['obj1']
assert_frame_equal(result, df)
# with tz setting
orig_tz = os.environ.get('TZ')
def setTZ(tz):
if tz is None:
try:
del os.environ['TZ']
except:
pass
else:
os.environ['TZ']=tz
time.tzset()
try:
with ensure_clean_store(self.path) as store:
setTZ('EST5EDT')
today = datetime.date(2013,9,10)
df = DataFrame([1,2,3], index = [today, today, today])
store['obj1'] = df
setTZ('CST6CDT')
result = store['obj1']
assert_frame_equal(result, df)
finally:
setTZ(orig_tz)
def test_append_with_timedelta(self):
# GH 3577
# append timedelta
from datetime import timedelta
df = DataFrame(dict(A = Timestamp('20130101'), B = [ Timestamp('20130101') + timedelta(days=i,seconds=10) for i in range(10) ]))
df['C'] = df['A']-df['B']
df.ix[3:5,'C'] = np.nan
with ensure_clean_store(self.path) as store:
# table
_maybe_remove(store, 'df')
store.append('df',df,data_columns=True)
result = store.select('df')
assert_frame_equal(result,df)
result = store.select('df',Term("C<100000"))
assert_frame_equal(result,df)
result = store.select('df',Term("C","<",-3*86400))
assert_frame_equal(result,df.iloc[3:])
result = store.select('df',"C<'-3D'")
assert_frame_equal(result,df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select('df',"C<'-500000s'")
result = result.dropna(subset=['C'])
assert_frame_equal(result,df.iloc[6:])
result = store.select('df',"C<'-3.5D'")
result = result.iloc[1:]
assert_frame_equal(result,df.iloc[4:])
# fixed
_maybe_remove(store, 'df2')
store.put('df2',df)
result = store.select('df2')
assert_frame_equal(result,df)
def test_remove(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store['a'] = ts
store['b'] = df
_maybe_remove(store, 'a')
self.assertEqual(len(store), 1)
tm.assert_frame_equal(df, store['b'])
_maybe_remove(store, 'b')
self.assertEqual(len(store), 0)
# nonexistence
self.assertRaises(KeyError, store.remove, 'a_nonexistent_store')
# pathing
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'foo')
_maybe_remove(store, 'b/foo')
self.assertEqual(len(store), 1)
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'b')
self.assertEqual(len(store), 1)
# __delitem__
store['a'] = ts
store['b'] = df
del store['a']
del store['b']
self.assertEqual(len(store), 0)
def test_remove_where(self):
with ensure_clean_store(self.path) as store:
# non-existence
crit1 = Term('index>foo')
self.assertRaises(KeyError, store.remove, 'a', [crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel(30)
store.put('wp', wp, format='table')
store.remove('wp', ["minor_axis=['A', 'D']"])
rs = store.select('wp')
expected = wp.reindex(minor_axis=['B', 'C'])
assert_panel_equal(rs, expected)
# empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
# number of rows deleted (an empty where removes the entire table)
n = store.remove('wp', [])
self.assertTrue(n == 120)
# non - empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
self.assertRaises(ValueError, store.remove,
'wp', ['foo'])
# selecting a non-table with a where
# store.put('wp2', wp, format='f')
# self.assertRaises(ValueError, store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_startstop(self):
# GH #4835 and #6177
with ensure_clean_store(self.path) as store:
wp = tm.makePanel(30)
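# the stored table has 30 major_axis x 4 minor_axis = 120 rows; start/stop below
# are positions in that flattened row space, hence the //4 when mapping back to
# major_axis labels.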
# start
_maybe_remove(store, 'wp1')
store.put('wp1', wp, format='t')
n = store.remove('wp1', start=32)
self.assertTrue(n == 120-32)
result = store.select('wp1')
expected = wp.reindex(major_axis=wp.major_axis[:32//4])
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='t')
n = store.remove('wp2', start=-32)
self.assertTrue(n == 32)
result = store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis[:-32//4])
assert_panel_equal(result, expected)
# stop
_maybe_remove(store, 'wp3')
store.put('wp3', wp, format='t')
n = store.remove('wp3', stop=32)
self.assertTrue(n == 32)
result = store.select('wp3')
expected = wp.reindex(major_axis=wp.major_axis[32//4:])
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='t')
n = store.remove('wp4', stop=-32)
self.assertTrue(n == 120-32)
result = store.select('wp4')
expected = wp.reindex(major_axis=wp.major_axis[-32//4:])
assert_panel_equal(result, expected)
# start and stop together
_maybe_remove(store, 'wp5')
store.put('wp5', wp, format='t')
n = store.remove('wp5', start=16, stop=-16)
self.assertTrue(n == 120-32)
result = store.select('wp5')
expected = wp.reindex(major_axis=wp.major_axis[:16//4].union(wp.major_axis[-16//4:]))
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp6')
store.put('wp6', wp, format='t')
n = store.remove('wp6', start=16, stop=16)
self.assertTrue(n == 0)
result = store.select('wp6')
expected = wp.reindex(major_axis=wp.major_axis)
assert_panel_equal(result, expected)
# with where
_maybe_remove(store, 'wp7')
date = wp.major_axis.take(np.arange(0,30,3))
crit = Term('major_axis=date')
store.put('wp7', wp, format='t')
n = store.remove('wp7', where=[crit], stop=80)
self.assertTrue(n == 28)
result = store.select('wp7')
expected = wp.reindex(major_axis=wp.major_axis.difference(wp.major_axis[np.arange(0,20,3)]))
assert_panel_equal(result, expected)
def test_remove_crit(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel(30)
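# each major_axis label occupies 4 rows (one per minor_axis entry), so removing
# the 9 dates taken below deletes 36 rows.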
# group row removal
_maybe_remove(store, 'wp3')
date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])
crit4 = Term('major_axis=date4')
store.put('wp3', wp, format='t')
n = store.remove('wp3', where=[crit4])
self.assertTrue(n == 36)
result = store.select('wp3')
expected = wp.reindex(major_axis=wp.major_axis.difference(date4))
assert_panel_equal(result, expected)
# upper half
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = Term('major_axis>date')
crit2 = Term("minor_axis=['A', 'D']")
n = store.remove('wp', where=[crit1])
self.assertTrue(n == 56)
n = store.remove('wp', where=[crit2])
self.assertTrue(n == 32)
result = store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
assert_panel_equal(result, expected)
# individual row elements
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='table')
date1 = wp.major_axis[1:3]
crit1 = Term('major_axis=date1')
store.remove('wp2', where=[crit1])
result = store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis.difference(date1))
assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = Term('major_axis=date2')
store.remove('wp2', where=[crit2])
result = store['wp2']
expected = wp.reindex(
major_axis=wp.major_axis.difference(date1).difference(Index([date2])))
assert_panel_equal(result, expected)
date3 = [wp.major_axis[7], wp.major_axis[9]]
crit3 = Term('major_axis=date3')
store.remove('wp2', where=[crit3])
result = store['wp2']
expected = wp.reindex(
major_axis=wp.major_axis.difference(date1).difference(Index([date2])).difference(Index(date3)))
assert_panel_equal(result, expected)
# corners
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='table')
n = store.remove(
'wp4', where=[Term('major_axis>wp.major_axis[-1]')])
result = store.select('wp4')
assert_panel_equal(result, wp)
def test_invalid_terms(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.ix[0:4,'string'] = 'bar'
wp = tm.makePanel()
p4d = tm.makePanel4D()
store.put('df', df, format='table')
store.put('wp', wp, format='table')
store.put('p4d', p4d, format='table')
# some invalid terms
self.assertRaises(ValueError, store.select, 'wp', "minor=['A', 'B']")
self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114']"])
self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114', '20121114']"])
self.assertRaises(TypeError, Term)
# more invalid
self.assertRaises(ValueError, store.select, 'df','df.index[3]')
self.assertRaises(SyntaxError, store.select, 'df','index>')
self.assertRaises(ValueError, store.select, 'wp', "major_axis<'20000108' & minor_axis['A', 'B']")
# from the docs
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))
dfq.to_hdf(path,'dfq',format='table',data_columns=True)
# check ok
read_hdf(path,'dfq',where="index>Timestamp('20130104') & columns=['A', 'B']")
read_hdf(path,'dfq',where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))
dfq.to_hdf(path,'dfq',format='table')
self.assertRaises(ValueError, read_hdf, path,'dfq',where="A>0 or C>0")
def test_terms(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel()
p4d = tm.makePanel4D()
wpneg = Panel.fromDict({-1: tm.makeDataFrame(), 0: tm.makeDataFrame(),
1: tm.makeDataFrame()})
store.put('wp', wp, table=True)
store.put('p4d', p4d, table=True)
store.put('wpneg', wpneg, table=True)
# panel
result = store.select('wp', [Term(
'major_axis<"20000108"'), Term("minor_axis=['A', 'B']")])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
# with deprecation
result = store.select('wp', [Term(
'major_axis','<',"20000108"), Term("minor_axis=['A', 'B']")])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
# p4d
result = store.select('p4d', [Term('major_axis<"20000108"'),
Term("minor_axis=['A', 'B']"),
Term("items=['ItemA', 'ItemB']")])
expected = p4d.truncate(after='20000108').reindex(
minor=['A', 'B'], items=['ItemA', 'ItemB'])
assert_panel4d_equal(result, expected)
# back compat invalid terms
terms = [
dict(field='major_axis', op='>', value='20121114'),
[ dict(field='major_axis', op='>', value='20121114') ],
[ "minor_axis=['A','B']", dict(field='major_axis', op='>', value='20121114') ]
]
for t in terms:
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
Term(t)
# valid terms
terms = [
('major_axis=20121114'),
('major_axis>20121114'),
(("major_axis=['20121114', '20121114']"),),
('major_axis=datetime.datetime(2012, 11, 14)'),
'major_axis> 20121114',
'major_axis >20121114',
'major_axis > 20121114',
(("minor_axis=['A', 'B']"),),
(("minor_axis=['A', 'B']"),),
((("minor_axis==['A', 'B']"),),),
(("items=['ItemA', 'ItemB']"),),
('items=ItemA'),
]
for t in terms:
store.select('wp', t)
store.select('p4d', t)
# valid for p4d only
terms = [
(("labels=['l1', 'l2']"),),
Term("labels=['l1', 'l2']"),
]
for t in terms:
store.select('p4d', t)
with tm.assertRaisesRegexp(TypeError, 'Only named functions are supported'):
store.select('wp', Term('major_axis == (lambda x: x)("20130101")'))
# check USub node parsing
res = store.select('wpneg', Term('items == -1'))
expected = Panel({-1: wpneg[-1]})
tm.assert_panel_equal(res, expected)
with tm.assertRaisesRegexp(NotImplementedError,
'Unary addition not supported'):
store.select('wpneg', Term('items == +1'))
def test_term_compat(self):
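# the older tuple-style Term('field', op, value) calling convention should keep
# working alongside the newer string expressions.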
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
result = store.select('wp', [Term('major_axis>20000102'),
Term('minor_axis', '=', ['A','B']) ])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
assert_panel_equal(result, expected)
store.remove('wp', Term('major_axis>20000103'))
result = store.select('wp')
expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]
assert_panel_equal(result, expected)
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
# stringified datetimes
result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2,0,0))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('major_axis','=',[datetime.datetime(2000,1,2,0,0),datetime.datetime(2000,1,3,0,0)])])
expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('minor_axis','=',['A','B'])])
expected = wp.loc[:,:,['A','B']]
assert_panel_equal(result, expected)
def test_backwards_compat_without_term_object(self):
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
result = store.select('wp', [('major_axis>20000102'),
('minor_axis', '=', ['A','B']) ])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
assert_panel_equal(result, expected)
store.remove('wp', ('major_axis>20000103'))
result = store.select('wp')
expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]
assert_panel_equal(result, expected)
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
# stringified datetimes
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2,0,0))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
result = store.select('wp', [('major_axis','=',[datetime.datetime(2000,1,2,0,0),
datetime.datetime(2000,1,3,0,0)])])
expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
assert_panel_equal(result, expected)
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
result = store.select('wp', [('minor_axis','=',['A','B'])])
expected = wp.loc[:,:,['A','B']]
assert_panel_equal(result, expected)
def test_same_name_scoping(self):
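# the where-expression parser resolves names from the caller's scope, so 'datetime'
# should work whether it is bound to the module or (after the import below) the class.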
with ensure_clean_store(self.path) as store:
import pandas as pd
df = DataFrame(np.random.randn(20, 2),index=pd.date_range('20130101',periods=20))
store.put('df', df, table=True)
expected = df[df.index>pd.Timestamp('20130105')]
import datetime
result = store.select('df','index>datetime.datetime(2013,1,5)')
assert_frame_equal(result,expected)
from datetime import datetime
# technically an error, but allow it
result = store.select('df','index>datetime.datetime(2013,1,5)')
assert_frame_equal(result,expected)
result = store.select('df','index>datetime(2013,1,5)')
assert_frame_equal(result,expected)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_double_roundtrip(sp, assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_double_roundtrip(sp2, assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_double_roundtrip(sp3, assert_panel_equal,
check_panel_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0., 1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r,
check_dtype=True,
check_index_type=True,
check_series_type=True)
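# mixed / object-dtype indexes are serialized with a PerformanceWarning; the purely
# numeric and datetime indexes at the end round-trip without one.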
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime(
2012, 1, 1), datetime.datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise nose.SkipTest("won't work on Python < 2.7")
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
raise nose.SkipTest('known failure on some windows platforms')
def test_frame(self):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal)
self._check_roundtrip(df, tm.assert_frame_equal)
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=True)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=True)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal)
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=True)
with ensure_clean_store(self.path) as store:
# not consolidated
df['foo'] = np.random.randn(len(df))
store['df'] = df
recons = store['df']
self.assertTrue(recons._data.is_consolidated())
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
def test_empty_series(self):
for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_timezones(self):
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
self.assertTrue(recons.index.equals(rng))
self.assertEqual(rng.tz, recons.index.tz)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
self.assertTrue(recons.index.equals(rng))
self.assertEqual(rng.tz, recons.index.tz)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
assert(recons.index.names == ('foo', 'bar'))
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store['frame'] = df
recons = store['frame']
assert(recons.index.name == 'foo')
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
with ensure_clean_store(self.path) as store:
store['series'] = series
recons = store['series']
assert(recons.name == 'A')
def test_store_mixed(self):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
with ensure_clean_store(self.path) as store:
store['obj'] = df1
tm.assert_frame_equal(store['obj'], df1)
store['obj'] = df2
tm.assert_frame_equal(store['obj'], df2)
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal)
self._check_roundtrip(df1['int1'], tm.assert_series_equal)
# try with compression
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1, tm.assert_frame_equal,
compression=True)
def test_wide(self):
wp = tm.makePanel()
self._check_roundtrip(wp, assert_panel_equal)
def test_wide_table(self):
wp = tm.makePanel()
self._check_roundtrip_table(wp, assert_panel_equal)
def test_select_with_dups(self):
# single dtypes
df = DataFrame(np.random.randn(10,4),columns=['A','A','B','B'])
df.index = date_range('20130101 9:30',periods=10,freq='T')
with ensure_clean_store(self.path) as store:
store.append('df',df)
result = store.select('df')
expected = df
assert_frame_equal(result,expected,by_blocks=True)
result = store.select('df',columns=df.columns)
expected = df
assert_frame_equal(result,expected,by_blocks=True)
result = store.select('df',columns=['A'])
expected = df.loc[:,['A']]
assert_frame_equal(result,expected)
# dups across dtypes
df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),
DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],
axis=1)
df.index = date_range('20130101 9:30',periods=10,freq='T')
with ensure_clean_store(self.path) as store:
store.append('df',df)
result = store.select('df')
expected = df
assert_frame_equal(result,expected,by_blocks=True)
result = store.select('df',columns=df.columns)
expected = df
assert_frame_equal(result,expected,by_blocks=True)
expected = df.loc[:,['A']]
result = store.select('df',columns=['A'])
assert_frame_equal(result,expected,by_blocks=True)
expected = df.loc[:,['B','A']]
result = store.select('df',columns=['B','A'])
assert_frame_equal(result,expected,by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(self.path) as store:
store.append('df',df)
store.append('df',df)
expected = df.loc[:,['B','A']]
expected = concat([expected, expected])
result = store.select('df',columns=['B','A'])
assert_frame_equal(result,expected,by_blocks=True)
def test_wide_table_dups(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
store.put('panel', wp, format='table')
store.put('panel', wp, format='table', append=True)
with tm.assert_produces_warning(expected_warning=DuplicateWarning):
recons = store['panel']
assert_panel_equal(recons, wp)
def test_long(self):
def _check(left, right):
assert_panel_equal(left.to_panel(), right.to_panel())
wp = tm.makePanel()
self._check_roundtrip(wp.to_frame(), _check)
# empty
# self._check_roundtrip(wp.to_frame()[:0], _check)
def test_longpanel(self):
pass
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store['a'] = ts
tm.assert_series_equal(store['a'], ts)
def test_sparse_with_compression(self):
# GH 2931
# make sparse dataframe
df = DataFrame(np.random.binomial(n=1, p=.01, size=(1e3, 10))).to_sparse(fill_value=0)
# case 1: store uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = False,
check_frame_type=True)
# case 2: store compressed (works)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = 'zlib',
check_frame_type=True)
# set one series to be completely sparse
df[0] = np.zeros(1e3)
# case 3: store df with completely sparse series uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = False,
check_frame_type=True)
# case 4: try storing df with completely sparse series compressed (fails)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = 'zlib',
check_frame_type=True)
def test_select(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
# put/select ok
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
store.select('wp')
# non-table ok (where = None)
_maybe_remove(store, 'wp')
store.put('wp2', wp)
store.select('wp2')
# selection on the non-indexable with a large number of columns
wp = Panel(
np.random.randn(100, 100, 100), items=['Item%03d' % i for i in range(100)],
major_axis=date_range('1/1/2000', periods=100), minor_axis=['E%03d' % i for i in range(100)])
_maybe_remove(store, 'wp')
store.append('wp', wp)
items = ['Item%03d' % i for i in range(80)]
result = store.select('wp', Term('items=items'))
expected = wp.reindex(items=items)
assert_panel_equal(expected, result)
# selecting non-table with a where
# self.assertRaises(ValueError, store.select,
# 'wp2', ('column', ['A', 'D']))
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select('df', [("columns=['A', 'B']")])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# all columns as data columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['C', 'D'])
expected = df[df.A > 0].reindex(columns=['C', 'D'])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self):
with ensure_clean_store(self.path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), A=np.random.randn(300)))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A'])
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5,2), columns =['A','B'])
df['object'] = 'foo'
df.ix[4:5,'object'] = 'bar'
df['boolv'] = df['A'] > 0
_maybe_remove(store, 'df')
store.append('df', df, data_columns = True)
expected = df[df.boolv == True].reindex(columns=['A','boolv'])
for v in [True,'true',1]:
result = store.select('df', Term('boolv == %s' % str(v)), columns = ['A','boolv'])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False ].reindex(columns=['A','boolv'])
for v in [False,'false',0]:
result = store.select('df', Term('boolv == %s' % str(v)), columns = ['A','boolv'])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, 'df_int')
store.append('df_int', df)
result = store.select(
'df_int', [Term("index<10"), Term("columns=['A']")])
expected = df.reindex(index=list(df.index)[0:10],columns=['A'])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(dict(A=np.random.rand(
20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))
_maybe_remove(store, 'df_float')
store.append('df_float', df)
result = store.select(
'df_float', [Term("index<10.0"), Term("columns=['A']")])
expected = df.reindex(index=list(df.index)[0:10],columns=['A'])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(self.path) as store:
# floats w/o NaN
df = DataFrame(dict(cols = range(11), values = range(11)),dtype='float64')
df['cols'] = (df['cols']+10).apply(str)
store.append('df1',df,data_columns=True)
result = store.select(
'df1', where='values>2.0')
expected = df[df['values']>2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df['values']>2.0]
store.append('df2',df,data_columns=True,index=False)
result = store.select(
'df2', where='values>2.0')
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
#store.append('df3',df,data_columns=True)
#result = store.select(
# 'df3', where='values>2.0')
#tm.assert_frame_equal(expected, result)
# when the NaN is not in the first position, floats with NaN work fine
df = DataFrame(dict(cols = range(11), values = range(11)),dtype='float64')
df['cols'] = (df['cols']+10).apply(str)
df.iloc[1] = np.nan
expected = df[df['values']>2.0]
store.append('df4',df,data_columns=True)
result = store.select(
'df4', where='values>2.0')
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300),
B=range(300),
users = ['a']*50 + ['b']*50 + ['c']*100 + ['a%03d' % i for i in range(100)]))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])
# regular select
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select('df', [Term("ts>=Timestamp('2012-02-01') & users=['a','b','c']")])
expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(['a','b','c']) ]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = [ 'a','b','c' ] + [ 'a%03d' % i for i in range(60) ]
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')"),Term('users=selector')])
expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(selector) ]
tm.assert_frame_equal(expected, result)
selector = range(100,200)
result = store.select('df', [Term('B=selector')])
expected = df[ df.B.isin(selector) ]
tm.assert_frame_equal(expected, result)
self.assertEqual(len(result), 100)
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select('df', [Term('ts=selector')])
expected = df[ df.ts.isin(selector.values) ]
tm.assert_frame_equal(expected, result)
self.assertEqual(len(result), 100)
def test_select_iterator(self):
# single table
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, 'df')
store.append('df', df)
expected = store.select('df')
results = [ s for s in store.select('df',iterator=True) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [ s for s in store.select('df',chunksize=100) ]
self.assertEqual(len(results), 5)
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [ s for s in store.select('df',chunksize=150) ]
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path,'df_non_table')
self.assertRaises(TypeError, read_hdf, path,'df_non_table',chunksize=100)
self.assertRaises(TypeError, read_hdf, path,'df_non_table',iterator=True)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path,'df',format='table')
results = [ s for s in read_hdf(path,'df',chunksize=100) ]
result = concat(results)
self.assertEqual(len(results), 5)
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path,'df'))
# multiple
with ensure_clean_store(self.path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append('df1',df1,data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
store.append('df2',df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(
['df1', 'df2'], selector='df1')
results = [ s for s in store.select_as_multiple(
['df1', 'df2'], selector='df1', chunksize=150) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
# where selection
#expected = store.select_as_multiple(
# ['df1', 'df2'], where= Term('A>0'), selector='df1')
#results = []
#for s in store.select_as_multiple(
# ['df1', 'df2'], where= Term('A>0'), selector='df1', chunksize=25):
# results.append(s)
#result = concat(results)
#tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize=1e4
# no iterator
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df',expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select('df')
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '%s'" % beg_dt
result = store.select('df',where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '%s'" % end_dt
result = store.select('df',where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
result = store.select('df',where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df',expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = [ s for s in store.select('df',chunksize=chunksize) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize=1e4
# with iterator, non complete range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df',expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df',expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '%s'" % end_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
self.assertEqual(0, len(results))
def test_select_iterator_many_empty_frames(self):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize=int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100000, 'S')
_maybe_remove(store, 'df')
store.append('df',expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize-1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
tm.assert_equal(1, len(results))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
# should be 1, is 10
tm.assert_equal(1, len(results))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt)
results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
# should be []
tm.assert_equal(0, len(results))
def test_retain_index_attributes(self):
# GH 3499, losing frequency info on index recreation
df = DataFrame(dict(A = Series(lrange(3),
index=date_range('2000-1-1',periods=3,freq='H'))))
with ensure_clean_store(self.path) as store:
_maybe_remove(store,'data')
store.put('data', df, format='table')
result = store.get('data')
tm.assert_frame_equal(df,result)
for attr in ['freq','tz','name']:
for idx in ['index','columns']:
self.assertEqual(getattr(getattr(df,idx),attr,None),
getattr(getattr(result,idx),attr,None))
# try to append a table with a different frequency
with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
df2 = DataFrame(dict(A = Series(lrange(3),
index=date_range('2002-1-1',periods=3,freq='D'))))
store.append('data',df2)
self.assertIsNone(store.get_storer('data').info['index']['freq'])
# this is ok
_maybe_remove(store,'df2')
df2 = DataFrame(dict(A = Series(lrange(3),
index=[Timestamp('20010101'),Timestamp('20010102'),Timestamp('20020101')])))
store.append('df2',df2)
df3 = DataFrame(dict(A = Series(lrange(3),index=date_range('2002-1-1',periods=3,freq='D'))))
store.append('df2',df3)
def test_retain_index_attributes2(self):
with ensure_clean_path(self.path) as path:
with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H'))))
df.to_hdf(path,'data',mode='w',append=True)
df2 = DataFrame(dict(A = Series(lrange(3), index=date_range('2002-1-1',periods=3,freq='D'))))
df2.to_hdf(path,'data',append=True)
idx = date_range('2000-1-1',periods=3,freq='H')
idx.name = 'foo'
df = DataFrame(dict(A = Series(lrange(3), index=idx)))
df.to_hdf(path,'data',mode='w',append=True)
self.assertEqual(read_hdf(path,'data').index.name, 'foo')
with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
idx2 = date_range('2001-1-1',periods=3,freq='H')
idx2.name = 'bar'
df2 = DataFrame(dict(A = Series(lrange(3), index=idx2)))
df2.to_hdf(path,'data',append=True)
self.assertIsNone(read_hdf(path,'data').index.name)
def test_panel_select(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = ('major_axis>=date')
crit2 = ("minor_axis=['A', 'D']")
result = store.select('wp', [crit1, crit2])
expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
assert_panel_equal(result, expected)
result = store.select(
'wp', ['major_axis>="20000124"', ("minor_axis=['A', 'B']")])
expected = wp.truncate(before='20000124').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
def test_frame_select(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('frame', df,format='table')
date = df.index[len(df) // 2]
crit1 = Term('index>=date')
self.assertEqual(crit1.env.scope['date'], date)
crit2 = ("columns=['A', 'D']")
crit3 = ('columns=A')
result = store.select('frame', [crit1, crit2])
expected = df.ix[date:, ['A', 'D']]
tm.assert_frame_equal(result, expected)
result = store.select('frame', [crit3])
expected = df.ix[:, ['A']]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append('df_time', df)
self.assertRaises(
ValueError, store.select, 'df_time', [Term("index>0")])
# can't select if not written as table
# store['frame'] = df
# self.assertRaises(ValueError, store.select,
# 'frame', [crit1, crit2])
def test_frame_select_complex(self):
# select via complex criteria
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[df.index[0:4],'string'] = 'bar'
with ensure_clean_store(self.path) as store:
store.put('df', df, table=True, data_columns=['string'])
# empty
result = store.select('df', 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index>df.index[3]) & (df.string=='bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index>df.index[3]) & (df.string=='foo')]
tm.assert_frame_equal(result, expected)
# or
result = store.select('df', 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index>df.index[3]) | (df.string=='bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', '(index>df.index[3] & index<=df.index[6]) | string="bar"')
expected = df.loc[((df.index>df.index[3]) & (df.index<=df.index[6])) | (df.string=='bar')]
tm.assert_frame_equal(result, expected)
# invert
result = store.select('df', 'string!="bar"')
expected = df.loc[df.string!='bar']
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
self.assertRaises(NotImplementedError, store.select, 'df', '~(string="bar")')
# invert ok for filters
result = store.select('df', "~(columns=['A','B'])")
expected = df.loc[:,df.columns-['A','B']]
tm.assert_frame_equal(result, expected)
# in
result = store.select('df', "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index>df.index[3]].reindex(columns=['A','B'])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self):
with ensure_clean_path(['parms.hdf','hist.hdf']) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({ 'A' : [1,1,2,2,3] })
parms.to_hdf(pp,'df',mode='w',format='table',data_columns=['A'])
selection = read_hdf(pp,'df',where='A=[2,3]')
hist = DataFrame(np.random.randn(25,1),columns=['data'],
index=MultiIndex.from_tuples([ (i,j) for i in range(5) for j in range(5) ],
names=['l1','l2']))
hist.to_hdf(hh,'df',mode='w',format='table')
expected = read_hdf(hh,'df',where=Term('l1','=',[2,3,4]))
# list like
result = read_hdf(hh,'df',where=Term('l1','=',selection.index.tolist()))
assert_frame_equal(result, expected)
l = selection.index.tolist()
# scope with list-like
store = HDFStore(hh)
result = store.select('df',where='l1=l')
assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh,'df',where='l1=l')
assert_frame_equal(result, expected)
# index
index = selection.index
result = read_hdf(hh,'df',where='l1=index')
assert_frame_equal(result, expected)
result = read_hdf(hh,'df',where='l1=selection.index')
assert_frame_equal(result, expected)
result = read_hdf(hh,'df',where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = read_hdf(hh,'df',where='l1=list(selection.index)')
assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select('df',where='l1=index')
assert_frame_equal(result, expected)
result = store.select('df',where='l1=selection.index')
assert_frame_equal(result, expected)
result = store.select('df',where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = store.select('df',where='l1=list(selection.index)')
assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df, table=True)
# not implemented
self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A'] | columns=['B']")
# in theory we could deal with this
self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A','B'] & columns=['C']")
def test_string_select(self):
# GH 2973
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df['x'] = 'none'
df.ix[2:7,'x'] = ''
store.append('df',df,data_columns=['x'])
result = store.select('df',Term('x=none'))
expected = df[df.x == 'none']
assert_frame_equal(result,expected)
try:
result = store.select('df',Term('x!=none'))
expected = df[df.x != 'none']
assert_frame_equal(result,expected)
except Exception as detail:
com.pprint_thing("[{0}]".format(detail))
com.pprint_thing(store)
com.pprint_thing(expected)
df2 = df.copy()
df2.loc[df2.x=='','x'] = np.nan
store.append('df2',df2,data_columns=['x'])
result = store.select('df2',Term('x!=none'))
expected = df2[isnull(df2.x)]
assert_frame_equal(result,expected)
# int ==/!=
df['int'] = 1
df.ix[2:7,'int'] = 2
store.append('df3',df,data_columns=['int'])
result = store.select('df3',Term('int=2'))
expected = df[df.int==2]
assert_frame_equal(result,expected)
result = store.select('df3',Term('int!=2'))
expected = df[df.int!=2]
assert_frame_equal(result,expected)
def test_read_column(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# error
self.assertRaises(KeyError, store.select_column, 'df', 'foo')
def f():
store.select_column('df', 'index', where = ['index>5'])
self.assertRaises(Exception, f)
# valid
result = store.select_column('df', 'index')
tm.assert_almost_equal(result.values, Series(df.index).values)
self.assertIsInstance(result,Series)
# not a data indexable column
self.assertRaises(
ValueError, store.select_column, 'df', 'values_block_0')
# a data column
df2 = df.copy()
df2['string'] = 'foo'
store.append('df2', df2, data_columns=['string'])
result = store.select_column('df2', 'string')
tm.assert_almost_equal(result.values, df2['string'].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3['string'] = 'foo'
df3.ix[4:6, 'string'] = np.nan
store.append('df3', df3, data_columns=['string'])
result = store.select_column('df3', 'string')
tm.assert_almost_equal(result.values, df3['string'].values)
# start/stop
result = store.select_column('df3', 'string', start=2)
tm.assert_almost_equal(result.values, df3['string'].values[2:])
result = store.select_column('df3', 'string', start=-2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:])
result = store.select_column('df3', 'string', stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[:2])
result = store.select_column('df3', 'string', stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[:-2])
result = store.select_column('df3', 'string', start=2, stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[2:-2])
result = store.select_column('df3', 'string', start=-2, stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:2])
def test_coordinates(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# all
c = store.select_as_coordinates('df')
assert((c.values == np.arange(len(df.index))).all() == True)
# get coordinates back & test vs frame
_maybe_remove(store, 'df')
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.append('df', df)
c = store.select_as_coordinates('df', ['index<3'])
assert((c.values == np.arange(3)).all() == True)
result = store.select('df', where=c)
expected = df.ix[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])
assert((c.values == np.arange(2) + 3).all() == True)
result = store.select('df', where=c)
expected = df.ix[3:4, :]
tm.assert_frame_equal(result, expected)
self.assertIsInstance(c, Index)
# multiple tables
_maybe_remove(store, 'df1')
_maybe_remove(store, 'df2')
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
c = store.select_as_coordinates('df1', ['A>0', 'B>0'])
df1_result = store.select('df1', c)
df2_result = store.select('df2', c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(1000,2),index=date_range('20000101',periods=1000))
store.append('df',df)
c = store.select_column('df','index')
where = c[DatetimeIndex(c).month==5].index
expected = df.iloc[where]
# locations
result = store.select('df',where=where)
tm.assert_frame_equal(result,expected)
# boolean
result = store.select('df',where=where)
tm.assert_frame_equal(result,expected)
# invalid
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df),dtype='float64'))
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)+1))
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)),start=5)
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)),start=5,stop=10)
# selection with filter
selection = date_range('20000101',periods=500)
result = store.select('df', where='index in selection')
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result,expected)
# list
df = DataFrame(np.random.randn(10,2))
store.append('df2',df)
result = store.select('df2',where=[0,3,5])
expected = df.iloc[[0,3,5]]
tm.assert_frame_equal(result,expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select('df2',where=where)
expected = df.loc[where]
tm.assert_frame_equal(result,expected)
# start/stop
result = store.select('df2', start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result,expected)
def test_append_to_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# exceptions
self.assertRaises(ValueError, store.append_to_multiple,
{'df1': ['A', 'B'], 'df2': None}, df, selector='df3')
self.assertRaises(ValueError, store.append_to_multiple,
{'df1': None, 'df2': None}, df, selector='df3')
self.assertRaises(
ValueError, store.append_to_multiple, 'df1', df, 'df1')
# regular operation
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1')
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.ix[1, ['A', 'B']] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=True)
result = store.select_as_multiple(['df1', 'df2'])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select('df1').index,
store.select('df2').index)
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=False)
self.assertRaises(
ValueError, store.select_as_multiple, ['df1', 'df2'])
assert not store.select('df1').index.equals(
store.select('df2').index)
def test_select_as_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
with ensure_clean_store(self.path) as store:
# no tables stored
self.assertRaises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
# exceptions
self.assertRaises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
self.assertRaises(Exception, store.select_as_multiple,
[None], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(KeyError, store.select_as_multiple,
['df1','df3'], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(KeyError, store.select_as_multiple,
['df3'], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(KeyError, store.select_as_multiple,
['df1','df2'], where=['A>0', 'B>0'], selector='df4')
# default select
result = store.select('df1', ['A>0', 'B>0'])
expected = store.select_as_multiple(
['df1'], where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
'df1', where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(['df1', 'df2'], where=[Term(
'index>df2.index[4]')], selector='df2')
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append('df3', tm.makeTimeDataFrame(nper=50))
self.assertRaises(ValueError, store.select_as_multiple,
['df1','df3'], where=['A>0', 'B>0'], selector='df1')
def test_nan_selection_bug_4858(self):
# GH 4858; nan selection bug, only works for pytables >= 3.1
if LooseVersion(tables.__version__) < '3.1.0':
raise nose.SkipTest('tables version does not support fix for nan selection bug: GH 4858')
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(cols = range(6), values = range(6)), dtype='float64')
df['cols'] = (df['cols']+10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(dict(cols = ['13.0','14.0','15.0'], values = [3.,4.,5.]), index=[3,4,5])
# write w/o the index on that particular column
store.append('df',df, data_columns=True,index=['cols'])
result = store.select('df',where='values>2.0')
assert_frame_equal(result,expected)
def test_start_stop(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append('df', df)
result = store.select(
'df', [Term("columns=['A']")], start=0, stop=5)
expected = df.ix[0:4, ['A']]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', [Term("columns=['A']")], start=30, stop=40)
assert(len(result) == 0)
assert(type(result) == DataFrame)
def test_select_filter_corner(self):
df = DataFrame(np.random.randn(50, 100))
df.index = ['%.3d' % c for c in df.index]
df.columns = ['%.3d' % c for c in df.columns]
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
crit = Term('columns=df.columns[:75]')
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.ix[:, df.columns[:75]])
crit = Term('columns=df.columns[:75:2]')
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.ix[:, df.columns[:75:2]])
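# Roundtrip helpers shared by the tests above: each writes the object to a
# fresh store (optionally with the default compressor), reads it back, and
# compares the result using the supplied comparator.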
def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(self, obj, comparator, compression=False,
**kwargs):
options = {}
if compression:
options['complib'] = compression or _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
store['obj'] = retrieved
again = store['obj']
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, compression=False):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store.put('obj', obj, format='table')
retrieved = store['obj']
# sorted_obj = _test_sort(obj)
comparator(retrieved, obj)
def test_multiple_open_close(self):
# GH 4409, open & close multiple times
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',mode='w',format='table')
# single
store = HDFStore(path)
self.assertNotIn('CLOSED', str(store))
self.assertTrue(store.is_open)
store.close()
self.assertIn('CLOSED', str(store))
self.assertFalse(store.is_open)
with ensure_clean_path(self.path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
def f():
HDFStore(path)
self.assertRaises(ValueError, f)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
self.assertNotIn('CLOSED', str(store1))
self.assertNotIn('CLOSED', str(store2))
self.assertTrue(store1.is_open)
self.assertTrue(store2.is_open)
store1.close()
self.assertIn('CLOSED', str(store1))
self.assertFalse(store1.is_open)
self.assertNotIn('CLOSED', str(store2))
self.assertTrue(store2.is_open)
store2.close()
self.assertIn('CLOSED', str(store1))
self.assertIn('CLOSED', str(store2))
self.assertFalse(store1.is_open)
self.assertFalse(store2.is_open)
# nested close
store = HDFStore(path,mode='w')
store.append('df',df)
store2 = HDFStore(path)
store2.append('df2',df)
store2.close()
self.assertIn('CLOSED', str(store2))
self.assertFalse(store2.is_open)
store.close()
self.assertIn('CLOSED', str(store))
self.assertFalse(store.is_open)
# double closing
store = HDFStore(path,mode='w')
store.append('df', df)
store2 = HDFStore(path)
store.close()
self.assertIn('CLOSED', str(store))
self.assertFalse(store.is_open)
store2.close()
self.assertIn('CLOSED', str(store2))
self.assertFalse(store2.is_open)
# ops on a closed store
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',mode='w',format='table')
store = HDFStore(path)
store.close()
self.assertRaises(ClosedFileError, store.keys)
self.assertRaises(ClosedFileError, lambda : 'df' in store)
self.assertRaises(ClosedFileError, lambda : len(store))
self.assertRaises(ClosedFileError, lambda : store['df'])
self.assertRaises(ClosedFileError, lambda : store.df)
self.assertRaises(ClosedFileError, store.select, 'df')
self.assertRaises(ClosedFileError, store.get, 'df')
self.assertRaises(ClosedFileError, store.append, 'df2', df)
self.assertRaises(ClosedFileError, store.put, 'df3', df)
self.assertRaises(ClosedFileError, store.get_storer, 'df2')
self.assertRaises(ClosedFileError, store.remove, 'df2')
def f():
store.select('df')
tm.assertRaisesRegexp(ClosedFileError, 'file is not open', f)
def test_pytables_native_read(self):
try:
store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native.h5'), 'r')
d2 = store['detector/readout']
assert isinstance(d2, DataFrame)
finally:
safe_close(store)
try:
store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native2.h5'), 'r')
str(store)
d1 = store['detector']
assert isinstance(d1, DataFrame)
finally:
safe_close(store)
def test_legacy_read(self):
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy.h5'), 'r')
store['a']
store['b']
store['c']
store['d']
finally:
safe_close(store)
def test_legacy_table_read(self):
# legacy table types
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table.h5'), 'r')
store.select('df1')
store.select('df2')
store.select('wp1')
# force the frame
store.select('df2', typ='legacy_frame')
# old version warning
with tm.assert_produces_warning(expected_warning=IncompatibilityWarning):
self.assertRaises(
Exception, store.select, 'wp1', Term('minor_axis=B'))
df2 = store.select('df2')
result = store.select('df2', Term('index>df2.index[2]'))
expected = df2[df2.index > df2.index[2]]
assert_frame_equal(expected, result)
finally:
safe_close(store)
def test_legacy_0_10_read(self):
# legacy from 0.10
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_0.10.h5'), 'r')
str(store)
for k in store.keys():
store.select(k)
finally:
safe_close(store)
def test_legacy_0_11_read(self):
# legacy from 0.11
try:
path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')
store = HDFStore(tm.get_data_path(path), 'r')
str(store)
assert 'df' in store
assert 'df1' in store
assert 'mi' in store
df = store.select('df')
df1 = store.select('df1')
mi = store.select('mi')
assert isinstance(df, DataFrame)
assert isinstance(df1, DataFrame)
assert isinstance(mi, DataFrame)
finally:
safe_close(store)
def test_copy(self):
def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs):
try:
if f is None:
f = tm.get_data_path(os.path.join('legacy_hdf',
'legacy_0.10.h5'))
store = HDFStore(f, 'r')
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(new_f, keys = keys, propindexes = propindexes, **kwargs)
# check keys
if keys is None:
keys = store.keys()
self.assertEqual(set(keys), set(tstore.keys()))
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
self.assertEqual(orig_t.nrows, new_t.nrows)
# check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
self.assertTrue(new_t[a.name].is_indexed)
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except:
pass
safe_remove(new_f)
do_copy()
do_copy(keys = ['/a','/b','/df1_mixed'])
do_copy(propindexes = False)
# new table
df = tm.makeDataFrame()
try:
st = HDFStore(self.path)
st.append('df', df, data_columns = ['A'])
st.close()
do_copy(f = self.path)
do_copy(f = self.path, propindexes = False)
finally:
safe_remove(self.path)
def test_legacy_table_write(self):
raise nose.SkipTest("cannot write legacy tables")
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')
df = tm.makeDataFrame()
wp = tm.makePanel()
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
store.append('mi', df)
df = DataFrame(dict(A = 'foo', B = 'bar'),index=lrange(10))
store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })
store.append('wp', wp)
store.close()
def test_store_datetime_fractional_secs(self):
with ensure_clean_store(self.path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store['a'] = series
self.assertEqual(store['a'].index[0], dt)
def test_tseries_indices_series(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
assert_series_equal(result, ser)
self.assertEqual(type(result.index), type(ser.index))
self.assertEqual(result.index.freq, ser.index.freq)
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
assert_series_equal(result, ser)
self.assertEqual(type(result.index), type(ser.index))
self.assertEqual(result.index.freq, ser.index.freq)
def test_tseries_indices_frame(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
self.assertEqual(type(result.index), type(df.index))
self.assertEqual(result.index.freq, df.index.freq)
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
self.assertEqual(type(result.index), type(df.index))
self.assertEqual(result.index.freq, df.index.freq)
def test_tseries_select_index_column(self):
# GH7777
# selecting a UTC datetimeindex column did
# not preserve UTC tzinfo set before storing
# check that no tz still works
rng = date_range('1/1/2000', '1/30/2000')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
# check utc
rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
# double check non-utc
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
def test_unicode_index(self):
unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
def f():
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal)
compat_assert_produces_warning(PerformanceWarning,f)
def test_store_datetime_mixed(self):
df = DataFrame(
{'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})
ts = tm.makeTimeSeries()
df['d'] = ts.index[:3]
self._check_roundtrip(df, tm.assert_frame_equal)
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
# self.assertRaises(Exception, store.put, 'foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({'a': np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({('1', 2): np.random.randn(10)})
df5 = DataFrame({('1', 2, object): np.random.randn(10)})
with ensure_clean_store(self.path) as store:
name = 'df_%s' % tm.rands(10)
store.append(name, df)
for d in (df2, df3, df4, df5):
with tm.assertRaises(ValueError):
store.append(name, d)
def test_query_with_nested_special_character(self):
df = DataFrame({'a': ['a', 'a', 'c', 'b', 'test & test', 'c' , 'b', 'e'],
'b': [1, 2, 3, 4, 5, 6, 7, 8]})
expected = df[df.a == 'test & test']
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
result = store.select('test', 'a = "test & test"')
tm.assert_frame_equal(expected, result)
def test_categorical(self):
with ensure_clean_store(self.path) as store:
# basic
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=['a','b','c','d'], ordered=False))
store.append('s', s, format='table')
result = store.select('s')
tm.assert_series_equal(s, result)
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=['a','b','c','d'], ordered=True))
store.append('s_ordered', s, format='table')
result = store.select('s_ordered')
tm.assert_series_equal(s, result)
df = DataFrame({"s":s, "vals":[1,2,3,4,5,6]})
store.append('df', df, format='table')
result = store.select('df')
tm.assert_frame_equal(result, df)
# dtypes
s = Series([1,1,2,2,3,4,5]).astype('category')
store.append('si',s)
result = store.select('si')
tm.assert_series_equal(result, s)
s = Series([1,1,np.nan,2,3,4,5]).astype('category')
store.append('si2',s)
result = store.select('si2')
tm.assert_series_equal(result, s)
# multiple
df2 = df.copy()
df2['s2'] = Series(list('abcdefg')).astype('category')
store.append('df2',df2)
result = store.select('df2')
tm.assert_frame_equal(result, df2)
# make sure the metadata is ok
self.assertTrue('/df2 ' in str(store))
self.assertTrue('/df2/meta/values_block_0/meta' in str(store))
self.assertTrue('/df2/meta/values_block_1/meta' in str(store))
# unordered
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=['a','b','c','d'],ordered=False))
store.append('s2', s, format='table')
result = store.select('s2')
tm.assert_series_equal(result, s)
# query
store.append('df3', df, data_columns=['s'])
expected = df[df.s.isin(['b','c'])]
result = store.select('df3', where = ['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['b','c'])]
result = store.select('df3', where = ['s = ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['d'])]
result = store.select('df3', where = ['s in ["d"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['f'])]
result = store.select('df3', where = ['s in ["f"]'])
tm.assert_frame_equal(result, expected)
# appending with same categories is ok
store.append('df3', df)
df = concat([df,df])
expected = df[df.s.isin(['b','c'])]
result = store.select('df3', where = ['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
# appending must have the same categories
df3 = df.copy()
df3['s'].cat.remove_unused_categories(inplace=True)
self.assertRaises(ValueError, lambda : store.append('df3', df3))
# remove
# make sure metadata is removed (it's a recursive removal, so it should be)
result = store.select('df3/meta/s/meta')
self.assertIsNotNone(result)
store.remove('df3')
self.assertRaises(KeyError, lambda : store.select('df3/meta/s/meta'))
def test_duplicate_column_name(self):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
with ensure_clean_path(self.path) as path:
self.assertRaises(ValueError, df.to_hdf, path, 'df', format='fixed')
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
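# Module-level helper kept for the commented-out sorted comparison in
# _check_roundtrip_table above.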
def _test_sort(obj):
if isinstance(obj, DataFrame):
return obj.reindex(sorted(obj.index))
elif isinstance(obj, Panel):
return obj.reindex(major=sorted(obj.major_axis))
else:
raise ValueError('type not supported here')
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/io/tests/test_pytables.py | Python | gpl-2.0 | 175,872 |
import time
import os
import ContentDb
from Debug import Debug
from Config import config
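# ContentDbDict is a dict subclass backed by the site's content.db: the keys
# are loaded from the database up front, while the content.json payloads are
# read from site storage lazily on first access and only the last few
# (currently 10) accessed entries are kept in memory (see checkLimit below).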
class ContentDbDict(dict):
def __init__(self, site, *args, **kwargs):
s = time.time()
self.site = site
self.cached_keys = []
self.log = self.site.log
self.db = ContentDb.getContentDb()
self.db_id = self.db.needSite(site)
self.num_loaded = 0
super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database
self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids)))
def loadItem(self, key):
try:
self.num_loaded += 1
if self.num_loaded % 100 == 0:
if config.verbose:
self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack()))
else:
self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key))
content = self.site.storage.loadJson(key)
dict.__setitem__(self, key, content)
except IOError:
if dict.get(self, key):
self.__delitem__(key) # File no longer exists
raise KeyError(key)
self.addCachedKey(key)
self.checkLimit()
return content
def getItemSize(self, key):
return self.site.storage.getSize(key)
# Only keep last 10 accessed json in memory
def checkLimit(self):
if len(self.cached_keys) > 10:
key_deleted = self.cached_keys.pop(0)
dict.__setitem__(self, key_deleted, False)
def addCachedKey(self, key):
if key not in self.cached_keys and key != "content.json" and len(key) > 40: # content.json and keys of 40 chars or less are never evicted
self.cached_keys.append(key)
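# Cache states: a stored dict value is the loaded content, None means the key
# is unknown, and False marks an entry that was loaded before but purged from
# the in-memory cache and must be reloaded from disk.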
def __getitem__(self, key):
val = dict.get(self, key)
if val: # Already loaded
return val
elif val is None: # Unknown key
raise KeyError(key)
elif val is False: # Loaded before, but purged from cache
return self.loadItem(key)
def __setitem__(self, key, val):
self.addCachedKey(key)
self.checkLimit()
size = self.getItemSize(key)
self.db.setContent(self.site, key, val, size)
dict.__setitem__(self, key, val)
def __delitem__(self, key):
self.db.deleteContent(self.site, key)
dict.__delitem__(self, key)
try:
self.cached_keys.remove(key)
except ValueError:
pass
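# Iteration helpers reload purged values on demand and skip entries whose
# content.json can no longer be loaded from disk.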
def iteritems(self):
for key in dict.keys(self):
try:
val = self[key]
except Exception as err:
self.log.warning("Error loading %s: %s" % (key, err))
continue
yield key, val
def items(self):
back = []
for key in dict.keys(self):
try:
val = self[key]
except Exception as err:
self.log.warning("Error loading %s: %s" % (key, err))
continue
back.append((key, val))
return back
def values(self):
back = []
for key, val in dict.iteritems(self):
if not val:
try:
val = self.loadItem(key)
except Exception:
continue
back.append(val)
return back
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
except Exception as err:
self.site.bad_files[key] = self.site.bad_files.get(key, 1)
dict.__delitem__(self, key)
self.log.warning("Error loading %s: %s" % (key, err))
return default
def execute(self, query, params={}):
params["site_id"] = self.db_id
return self.db.execute(query, params)
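# Ad-hoc benchmark: when run directly, this measures load time, iteration time
# and memory overhead against a live ZeroNet data directory (the paths below
# assume an existing data-live/ checkout).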
if __name__ == "__main__":
import psutil
process = psutil.Process(os.getpid())
s_mem = process.memory_info()[0] / float(2 ** 20)
root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27"
contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root)
print "Init len", len(contents)
s = time.time()
for dir_name in os.listdir(root + "/data/users/")[0:8000]:
contents["data/users/%s/content.json" % dir_name]
print "Load: %.3fs" % (time.time() - s)
s = time.time()
found = 0
for key, val in contents.iteritems():
found += 1
assert key
assert val
print "Found:", found
print "Iteritem: %.3fs" % (time.time() - s)
s = time.time()
found = 0
for key in contents.keys():
found += 1
assert key in contents
print "In: %.3fs" % (time.time() - s)
print "Len:", len(contents.values()), len(contents.keys())
print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem
| OliverCole/ZeroNet | src/Content/ContentDbDict.py | Python | gpl-2.0 | 4,975 |
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = 'ZenPacks.community.DistributedCollectors'
VERSION = '1.7'
AUTHOR = 'Egor Puzanov'
LICENSE = ''
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.community']
PACKAGES = ['ZenPacks', 'ZenPacks.community', 'ZenPacks.community.DistributedCollectors']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = '>=2.5'
PREV_ZENPACK_NAME = ''
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name = NAME,
version = VERSION,
author = AUTHOR,
license = LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers = COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
# of this ZenPack has changed. If no ZenPack with the current name is
# installed then a zenpack of this name if installed will be upgraded.
prevZenPackName = PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages = NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages = find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data = True,
# The MANIFEST.in file is the recommended way of including additional files
# in your ZenPack. package_data is another.
#package_data = {}
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
    # tries to add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified too
    # dramatically.
install_requires = INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points = {
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe = False,
)
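# With the metadata above, the required zenoss.zenpacks entry point resolves to
# 'ZenPacks.community.DistributedCollectors = ZenPacks.community.DistributedCollectors'.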
| anksp21/Community-Zenpacks | ZenPacks.community.DistributedCollectors/setup.py | Python | gpl-2.0 | 2,617 |
# -*- encoding: utf-8 -*-
import os
from abjad import abjad_configuration
from abjad.demos import desordre
def test_demos_desordre_01():
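    # Smoke test: building the desordre LilyPond file should not raise.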
    lilypond_file = desordre.make_desordre_lilypond_file()
| mscuthbert/abjad | abjad/demos/desordre/test/test_demos_desordre.py | Python | gpl-3.0 | 198 |
import numpy as np
import os
from galry import log_debug, log_info, log_warn, get_color
from fontmaps import load_font
from visual import Visual
__all__ = ['TextVisual']
VS = """
gl_Position.x += (offset - text_width / 2) * spacing.x / window_size.x;
gl_Position.y -= index * spacing.y / window_size.y;
gl_Position.xy = gl_Position.xy + posoffset / window_size;
gl_PointSize = point_size;
flat_text_map = text_map;
"""
def FS(background_transparent=True):
if background_transparent:
background_transparent_shader = "letter_alpha"
else:
background_transparent_shader = "1."
fs = """
// relative coordinates of the pixel within the sprite (in [0,1])
float x = gl_PointCoord.x;
float y = gl_PointCoord.y;
// size of the corresponding character
float w = flat_text_map.z;
float h = flat_text_map.w;
// display the character at the left of the sprite
float delta = h / w;
x = delta * x;
if ((x >= 0) && (x <= 1))
{
// coordinates of the character in the font atlas
vec2 coord = flat_text_map.xy + vec2(w * x, h * y);
float letter_alpha = texture2D(tex_sampler, coord).a;
out_color = color * letter_alpha;
out_color.a = %s;
}
else
out_color = vec4(0, 0, 0, 0);
""" % background_transparent_shader
return fs
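# Illustrative sketch (not part of galry's API): a pure-Python mirror of the
# fragment shader above, showing which texel of the font atlas a sprite pixel
# ends up sampling. `char_map` is assumed to be the (x, y, w, h) rectangle of
# one character in normalized atlas coordinates, i.e. one row of the
# `text_map` attribute built by TextVisual below.
def _example_atlas_coord(px, py, char_map):
    """Return normalized atlas (u, v) for sprite pixel (px, py), or None."""
    x0, y0, w, h = char_map
    x = (h / w) * px  # rescale so the glyph occupies the left part of the sprite
    if 0. <= x <= 1.:
        return (x0 + w * x, y0 + h * py)
    return None  # the pixel falls outside the glyph: the shader outputs transparent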
class TextVisual(Visual):
"""Template for displaying short text on a single line.
It uses the following technique: each character is rendered as a sprite,
i.e. a pixel with a large point size, and a single texture for every point.
The texture contains a font atlas, i.e. all characters in a given font.
Every point comes with coordinates that indicate which small portion
of the font atlas to display (that portion corresponds to the character).
This is all done automatically, thanks to a font atlas stored in the
`fontmaps` folder. There needs to be one font atlas per font and per font
size. Also, there is a configuration text file with the coordinates and
size of every character. The software used to generate font maps is
AngelCode Bitmap Font Generator.
For now, there is only the Segoe font.
"""
def position_compound(self, coordinates=None):
"""Compound variable with the position of the text. All characters
are at the exact same position, and are then shifted in the vertex
shader."""
if coordinates is None:
coordinates = (0., 0.)
if type(coordinates) == tuple:
coordinates = [coordinates]
coordinates = np.array(coordinates)
position = np.repeat(coordinates, self.textsizes, axis=0)
return dict(position=position)
def text_compound(self, text):
"""Compound variable for the text string. It changes the text map,
the character position, and the text width."""
coordinates = self.coordinates
if "\n" in text:
text = text.split("\n")
if type(text) == list:
self.textsizes = [len(t) for t in text]
text = "".join(text)
if type(coordinates) != list:
coordinates = [coordinates] * len(self.textsizes)
index = np.repeat(np.arange(len(self.textsizes)), self.textsizes)
text_map = self.get_map(text)
# offset for all characters in the merging of all texts
offset = np.hstack((0., np.cumsum(text_map[:, 2])[:-1]))
# for each text, the cumsum of the length of all texts strictly
# before
d = np.hstack(([0], np.cumsum(self.textsizes)[:-1]))
# compensate the offsets for the length of each text
offset -= np.repeat(offset[d], self.textsizes)
text_width = 0.
else:
self.textsizes = len(text)
text_map = self.get_map(text)
offset = np.hstack((0., np.cumsum(text_map[:, 2])[:-1]))
text_width = offset[-1]
index = np.zeros(len(text))
self.size = len(text)
d = dict(text_map=text_map, offset=offset, text_width=text_width,
index=index)
d.update(self.position_compound(coordinates))
return d
def initialize_font(self, font, fontsize):
"""Initialize the specified font at a given size."""
self.texture, self.matrix, self.get_map = load_font(font, fontsize)
def initialize(self, text, coordinates=(0., 0.), font='segoe', fontsize=24,
color=None, letter_spacing=None, interline=0., autocolor=None,
background_transparent=True,
prevent_constrain=False, depth=None, posoffset=None):
"""Initialize the text template."""
if prevent_constrain:
self.constrain_ratio = False
if autocolor is not None:
color = get_color(autocolor)
if color is None:
color = self.default_color
self.size = len(text)
self.primitive_type = 'POINTS'
self.interline = interline
text_length = self.size
self.initialize_font(font, fontsize)
self.coordinates = coordinates
point_size = float(self.matrix[:,4].max() * self.texture.shape[1])
# template attributes and varyings
self.add_attribute("position", vartype="float", ndim=2, data=np.zeros((self.size, 2)))
self.add_attribute("offset", vartype="float", ndim=1)
self.add_attribute("index", vartype="float", ndim=1)
self.add_attribute("text_map", vartype="float", ndim=4)
self.add_varying("flat_text_map", vartype="float", flat=True, ndim=4)
if posoffset is None:
posoffset = (0., 0.)
self.add_uniform('posoffset', vartype='float', ndim=2, data=posoffset)
# texture
self.add_texture("tex_sampler", size=self.texture.shape[:2], ndim=2,
ncomponents=self.texture.shape[2],
data=self.texture)
# pure heuristic (probably bogus)
if letter_spacing is None:
letter_spacing = (100 + 17. * fontsize)
self.add_uniform("spacing", vartype="float", ndim=2,
data=(letter_spacing, interline))
self.add_uniform("point_size", vartype="float", ndim=1,
data=point_size)
        # one color per character (when a color array is given, one row per character)
if isinstance(color, np.ndarray) and color.ndim > 1:
self.add_attribute('color0', vartype="float", ndim=4, data=color)
self.add_varying('color', vartype="float", ndim=4)
self.add_vertex_main('color = color0;')
else:
self.add_uniform("color", vartype="float", ndim=4, data=color)
self.add_uniform("text_width", vartype="float", ndim=1)
# compound variables
self.add_compound("text", fun=self.text_compound, data=text)
self.add_compound("coordinates", fun=self.position_compound, data=coordinates)
# vertex shader
self.add_vertex_main(VS, after='viewport')
# fragment shader
self.add_fragment_main(FS(background_transparent))
        self.depth = depth
| DavidTingley/ephys-processing-pipeline | installation/klustaviewa-0.3.0/galry/visuals/text_visual.py | Python | gpl-3.0 | 7,555 |
"""
# Notes:
- This simulation seeks to emulate the CUBA benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation does NOT include synapses, for better
comparison to Figure 5 of (Goodman and Brette, 2008).
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian_benchmark_CUBA_nosyn_250/pbsout/brian_benchmark_CUBA_nosyn_250.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319
in order to work with version 2 of the Brian simulator (aka Brian2), and also
modified to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
# Parameters
cells = 250
defaultclock.dt = 0.01*ms
taum=20*ms
Vt = -50*mV
Vr = -60*mV
El = -49*mV
# The model
eqs = Equations('''
dv/dt = ((v-El))/taum : volt
''')
P = NeuronGroup(cells, model=eqs,threshold="v>Vt",reset="v=Vr",refractory=5*ms,
method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Initialization
P.v = Vr
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
| asoplata/dynasim-benchmark-brette-2007 | output/Brian2/brian2_benchmark_CUBA_nosyn_250/brian2_benchmark_CUBA_nosyn_250.py | Python | gpl-3.0 | 2,003 |
# OpenShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of OpenShot Video Editor (http://launchpad.net/openshot/).
#
# OpenShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
# Import Blender's python API. This only works when the script is being
# run from the context of Blender. Blender contains its own version of Python
# with this library pre-installed.
import bpy
# Load a font
def load_font(font_path):
""" Load a new TTF font into Blender, and return the font object """
# get the original list of fonts (before we add a new one)
original_fonts = bpy.data.fonts.keys()
# load new font
bpy.ops.font.open(filepath=font_path)
# get the new list of fonts (after we added a new one)
for font_name in bpy.data.fonts.keys():
if font_name not in original_fonts:
return bpy.data.fonts[font_name]
# no new font was added
return None
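# Note: load_font() identifies the newly loaded font by diffing the keys of
# bpy.data.fonts before and after bpy.ops.font.open(); if no new key appears,
# it returns None, so callers may want to guard against a None result.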
# Debug Info:
# ./blender -b test.blend -P demo.py
# -b = background mode
# -P = run a Python script within the context of the project file
# Init all of the variables needed by this script. Because Blender executes
# this script, OpenShot will inject a dictionary of the required parameters
# before this script is executed.
params = {
'title' : 'Oh Yeah! OpenShot!',
'extrude' : 0.1,
'bevel_depth' : 0.02,
'spacemode' : 'CENTER',
'text_size' : 1.5,
'width' : 1.0,
'fontname' : 'Bfont',
'color' : [0.8,0.8,0.8],
'alpha' : 1.0,
'alpha_mode' : 'TRANSPARENT',
'output_path' : '/tmp/',
'fps' : 24,
'quality' : 90,
'file_format' : 'PNG',
'color_mode' : 'RGBA',
'horizon_color' : [0.57, 0.57, 0.57],
'resolution_x' : 1920,
'resolution_y' : 1080,
'resolution_percentage' : 100,
'start_frame' : 20,
'end_frame' : 25,
'animation' : True,
}
#INJECT_PARAMS_HERE
# The remainder of this script will modify the current Blender .blend project
# file, and adjust the settings. The .blend file is specified in the XML file
# that defines this template in OpenShot.
#----------------------------------------------------------------------------
# Modify Text / Curve settings
#print (bpy.data.curves.keys())
text_object = bpy.data.curves["Title"]
text_object.extrude = params["extrude"]
text_object.bevel_depth = params["bevel_depth"]
text_object.body = params["title"]
text_object.align = params["spacemode"]
text_object.size = params["text_size"]
text_object.space_character = params["width"]
# Get font object
font = None
if params["fontname"] != "Bfont":
# Add font so it's available to Blender
font = load_font(params["fontname"])
else:
# Get default font
font = bpy.data.fonts["Bfont"]
text_object.font = font
text_object = bpy.data.curves["Subtitle"]
text_object.extrude = params["extrude"]
text_object.bevel_depth = params["bevel_depth"]
text_object.body = params["sub_title"]
text_object.align = params["spacemode"]
text_object.size = params["text_size"]
text_object.space_character = params["width"]
# set the font
text_object.font = font
# Change the material settings (color, alpha, etc...)
material_object = bpy.data.materials["Text"]
material_object.diffuse_color = params["diffuse_color"]
material_object.specular_color = params["specular_color"]
material_object.specular_intensity = params["specular_intensity"]
material_object.alpha = params["alpha"]
# Set the render options. It is important that these are set
# to the same values as the current OpenShot project. These
# params are automatically set by OpenShot
bpy.context.scene.render.filepath = params["output_path"]
bpy.context.scene.render.fps = params["fps"]
#bpy.context.scene.render.quality = params["quality"]
try:
bpy.context.scene.render.file_format = params["file_format"]
bpy.context.scene.render.color_mode = params["color_mode"]
except:
bpy.context.scene.render.image_settings.file_format = params["file_format"]
bpy.context.scene.render.image_settings.color_mode = params["color_mode"]
try:
bpy.context.scene.render.alpha_mode = params["alpha_mode"]
except:
pass
bpy.data.worlds[0].horizon_color = params["horizon_color"]
bpy.context.scene.render.resolution_x = params["resolution_x"]
bpy.context.scene.render.resolution_y = params["resolution_y"]
bpy.context.scene.render.resolution_percentage = params["resolution_percentage"]
bpy.context.scene.frame_start = params["start_frame"]
bpy.context.scene.frame_end = params["end_frame"]
# Animation Speed (use Blender's time remapping to slow or speed up animation)
animation_speed = int(params["animation_speed"]) # time remapping multiplier
new_length = int(params["end_frame"]) * animation_speed # new length (in frames)
bpy.context.scene.frame_end = new_length
bpy.context.scene.render.frame_map_old = 1
bpy.context.scene.render.frame_map_new = animation_speed
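# Worked example (animation_speed is injected by OpenShot; 2 is only an assumed
# value): with end_frame = 25 and animation_speed = 2, the scene is stretched to
# 50 frames and each original frame is remapped onto two output frames.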
if params["start_frame"] == params["end_frame"]:
bpy.context.scene.frame_start = params["end_frame"]
bpy.context.scene.frame_end = params["end_frame"]
# Render the current animation to the params["output_path"] folder
bpy.ops.render.render(animation=params["animation"])
| XXLRay/libreshot | libreshot/blender/scripts/blinds.py | Python | gpl-3.0 | 5,718 |
"""
WSGI config for horario project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asignacion_horario.settings")
application = get_wsgi_application()
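# A WSGI server can now be pointed at this module's ``application`` callable,
# for example (assuming gunicorn is installed):
#   gunicorn asignacion_horario.wsgi:application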
| hbkfabio/horario | asignacion_horario/wsgi.py | Python | gpl-3.0 | 403 |
"""
Python script "setup.py"
by Matthew Garcia, PhD student
Dept. of Forest and Wildlife Ecology
University of Wisconsin - Madison
[email protected]
Copyright (C) 2015-2016 by Matthew Garcia
Licensed Gnu GPL v3; see 'LICENSE_GnuGPLv3.txt' for complete terms
Send questions, bug reports, any related requests to [email protected]
See also 'README.md', 'DISCLAIMER.txt', 'CITATION.txt', 'ACKNOWLEDGEMENTS.txt'
Treat others as you would be treated. Pay it forward. Valar dohaeris.
PURPOSE: Verifies sample data, scripts, modules, documents, auxiliary files.
Verifies availability of python dependencies used by various scripts.
Uncompresses certain large example data files
Builds directory structure for script output products.
DEPENDENCIES: all software package source dependencies are polled here
USAGE: '$ python setup.py'
"""
import os
import sys
import glob
def message(char_string):
"""
prints a string to the terminal and flushes the buffer
"""
print char_string
sys.stdout.flush()
return
txt_files = ['ACKNOWLEDGEMENTS.txt', 'CITATION.txt', 'DISCLAIMER.txt',
'LICENSE_GnuGPLv3.txt']
md_files = ['README.md']
main_dirs = ['data', 'docs', 'htcondor', 'source', 'tools']
#
scripts = ['process_NCEI_00.py', 'process_NCEI_01.py',
'process_NCEI_02a.py', 'process_NCEI_02b.py',
'process_NCEI_03_chill_d.py', 'process_NCEI_03_chill_dd.py',
'process_NCEI_03_grow_dd.py', 'process_NCEI_03_grow_dd_base0.py',
'process_NCEI_03_prcp_03d.py', 'process_NCEI_03_prcp_07d.py',
'process_NCEI_03_prcp_120d.py', 'process_NCEI_03_prcp_15d.py',
'process_NCEI_03_prcp_180d.py', 'process_NCEI_03_prcp_30d.py',
'process_NCEI_03_prcp_365d.py', 'process_NCEI_03_prcp_60d.py',
'process_NCEI_03_prcp_90d.py', 'process_NCEI_03_prcp_90d_nd0.py',
'process_NCEI_03_prcp_90d_nd10.py',
'process_NCEI_03_prcp_90d_nd25.py',
'process_NCEI_03_preprocess.py', 'process_NCEI_03_tavg_03d.py',
'process_NCEI_03_tavg_07d.py', 'process_NCEI_03_tavg_15d.py',
'process_NCEI_03_tavg_30d.py', 'process_NCEI_03_tavg_60d.py',
'process_NCEI_03_tavg_90d.py', 'process_NCEI_03_tavg_frz.py',
'process_NCEI_03_tmax_03d.py', 'process_NCEI_03_tmax_07d.py',
'process_NCEI_03_tmax_15d.py', 'process_NCEI_03_tmax_30d.py',
'process_NCEI_03_tmax_60d.py', 'process_NCEI_03_tmax_90d.py',
'process_NCEI_03_tmax_frz.py', 'process_NCEI_03_tmin_03d.py',
'process_NCEI_03_tmin_07d.py', 'process_NCEI_03_tmin_15d.py',
'process_NCEI_03_tmin_30d.py', 'process_NCEI_03_tmin_60d.py',
'process_NCEI_03_tmin_90d.py', 'process_NCEI_03_tmin_frz.py',
'process_NCEI_03_vpd_03d.py', 'process_NCEI_03_vpd_07d.py',
'process_NCEI_03_vpd_15d.py', 'process_NCEI_03_vpd_30d.py',
'process_NCEI_03_vpd_60d.py', 'process_NCEI_03_vpd_90d.py',
'process_NCEI_04a.py', 'process_NCEI_04b.py', 'process_NCEI_05.py',
'process_NCEI_06.py', 'process_NCEI_07.py', 'process_NCEI_08.py',
'process_NCEI_09.py', 'process_NCEI_10.py', 'process_NCEI_11.py',
'process_NCEI_12.py', 'process_NCEI_13.py', 'process_NCEI_14.py',
'process_NCEI_15.py']
#
modules = ['Date_Convert.py', 'Interpolation.py', 'Plots.py',
'process_NCEI_03_aux.py', 'Read_Header_Files.py', 'Stats.py',
'Teleconnections.py', 'UTM_Geo_Convert.py']
#
htcondor = ['process_NCEI_00.sh', 'process_NCEI_00.sub',
'process_NCEI_01.sh', 'process_NCEI_01.sub',
'process_NCEI_02a.sh', 'process_NCEI_02a.sub',
'process_NCEI_02b.sh', 'process_NCEI_02b.sub',
'process_NCEI_02b_dag.sub', 'process_NCEI_03_chill_d.sh',
'process_NCEI_03_chill_dd.sh', 'process_NCEI_03_dag_gen.py',
'process_NCEI_03_generic.sub', 'process_NCEI_03_grow_dd.sh',
'process_NCEI_03_grow_dd_base0.sh', 'process_NCEI_03_prcp_03d.sh',
'process_NCEI_03_prcp_07d.sh', 'process_NCEI_03_prcp_120d.sh',
'process_NCEI_03_prcp_15d.sh', 'process_NCEI_03_prcp_180d.sh',
'process_NCEI_03_prcp_30d.sh', 'process_NCEI_03_prcp_365d.sh',
'process_NCEI_03_prcp_60d.sh', 'process_NCEI_03_prcp_90d.sh',
'process_NCEI_03_prcp_90d_nd0.sh',
'process_NCEI_03_prcp_90d_nd10.sh',
'process_NCEI_03_prcp_90d_nd25.sh',
'process_NCEI_03_preprocess.sh', 'process_NCEI_03_tavg_03d.sh',
'process_NCEI_03_tavg_07d.sh', 'process_NCEI_03_tavg_15d.sh',
'process_NCEI_03_tavg_30d.sh', 'process_NCEI_03_tavg_60d.sh',
'process_NCEI_03_tavg_90d.sh', 'process_NCEI_03_tavg_frz.sh',
'process_NCEI_03_tmax_03d.sh', 'process_NCEI_03_tmax_07d.sh',
'process_NCEI_03_tmax_15d.sh', 'process_NCEI_03_tmax_30d.sh',
'process_NCEI_03_tmax_60d.sh', 'process_NCEI_03_tmax_90d.sh',
'process_NCEI_03_tmax_frz.sh', 'process_NCEI_03_tmin_03d.sh',
'process_NCEI_03_tmin_07d.sh', 'process_NCEI_03_tmin_15d.sh',
'process_NCEI_03_tmin_30d.sh', 'process_NCEI_03_tmin_60d.sh',
'process_NCEI_03_tmin_90d.sh', 'process_NCEI_03_tmin_frz.sh',
'process_NCEI_03_vpd_03d.sh', 'process_NCEI_03_vpd_07d.sh',
'process_NCEI_03_vpd_15d.sh', 'process_NCEI_03_vpd_30d.sh',
'process_NCEI_03_vpd_60d.sh', 'process_NCEI_03_vpd_90d.sh',
'process_NCEI_04a.sh', 'process_NCEI_04a.sub',
'process_NCEI_04b.sh', 'process_NCEI_04b.sub',
'process_NCEI_05.sh', 'process_NCEI_05.sub',
'process_NCEI_06.sh', 'process_NCEI_06.sub',
'process_NCEI_07.sh', 'process_NCEI_07.sub',
'process_NCEI_08.sh', 'process_NCEI_08.sub',
'process_NCEI_09.sh', 'process_NCEI_09.sub']
#
dependencies = ['os', 'sys', 'datetime', 'glob', 'numpy', 'pandas', 'h5py',
'matplotlib', 'matplotlib.pyplot', 'gdal', 'osgeo.osr',
'scipy.interpolate', 'scipy.ndimage', 'scipy.stats',
'mpl_toolkits', 'mpl_toolkits.basemap', 'pickle']
#
gz_data_files = ['EPA_L4_Ecoregions_WLS_UTM15N.bil.gz',
'NCEI_WLS_19830101-20151031.csv.gz',
'NLCD_2011_WLS_UTM15N.bil.gz']
#
data_files = ['EPA_L4_Ecoregions_WLS_polygonIDs.txt',
'EPA_L4_Ecoregions_WLS_UTM15N.bil',
'EPA_L4_Ecoregions_WLS_UTM15N.hdr',
'NCEI_WLS_19830101-20151031.csv',
'NCEP_CPC_AO_indices.csv',
'NCEP_CPC_ENSO_indices.csv',
'NCEP_CPC_NAO_indices.csv',
'NCEP_CPC_PNA_indices.csv',
'NLCD_2011_WLS_UTM15N.bil',
'NLCD_2011_WLS_UTM15N.hdr',
'NOAA_ESRL_AMO_indices.csv',
'NOAA_ESRL_PDO_indices.csv',
'NSIDC_MIFL_Superior_Ice.csv',
'Query_locations_dates_sample.csv']
#
doc_files = ['How_to_get_NCEI_GHCND_data.txt',
'NCEI_GHCND_documentation.pdf']
#
tools = ['query_NCEI_grids.py', 'orientation_maps.py']
#
add_dirs = ['analyses', 'grids', 'images']
#
analyses_dirs = ['annual_maps', 'cluster_maps', 'ecoregion_maps',
'figures', 'summary_maps']
#
os.system('rm .DS_Store')
os.system('rm */.DS_Store')
os.system('rm ._*')
os.system('rm */._*')
#
message('checking for auxiliary files that should accompany this software')
txts_present = glob.glob('*.txt')
mds_present = glob.glob('*.md')
absent = 0
for txt in txt_files:
if txt in txts_present:
message('- found auxiliary file \'%s\' as expected' % txt)
else:
message('- auxiliary file \'%s\' is absent' % txt)
absent += 1
for md in md_files:
if md in mds_present:
message('- found auxiliary file \'%s\' as expected' % md)
else:
message('- auxiliary file \'%s\' is absent' % md)
absent += 1
if absent > 0:
message('- you don\'t need them to run things, but you do need them to \
understand things')
message('- you should probably download this package again from scratch')
message('- exiting setup procedure')
sys.exit(1)
message(' ')
#
message('checking for top-level directories that should already exist')
dirs_present = [d.replace('/', '') for d in glob.glob('*/')]
absent = 0
for dirname in main_dirs:
if dirname in dirs_present:
message('- found main directory \'%s\' as expected' % dirname)
else:
message('- main directory \'%s\' is absent' % dirname)
absent += 1
if absent > 0:
message('- you should download this package again from scratch')
message('- exiting setup procedure')
sys.exit(1)
message(' ')
#
message('checking for main scripts and modules that comprise this software')
src_present = glob.glob('source/*')
absent = 0
for srcfile in scripts:
srcfile = 'source/%s' % srcfile
if srcfile in src_present:
message('- found script \'%s\' as expected' % srcfile)
else:
message('- script \'%s\' is absent' % srcfile)
absent += 1
for srcfile in modules:
srcfile = 'source/%s' % srcfile
if srcfile in src_present:
message('- found module \'%s\' as expected' % srcfile)
else:
message('- module \'%s\' is absent' % srcfile)
absent += 1
if absent > 0:
message('- you should download this package again from scratch')
message('- exiting setup procedure')
sys.exit(1)
message(' ')
#
message('checking for script-based tools that accompany this software')
src_present = glob.glob('tools/*')
absent = 0
for srcfile in tools:
srcfile = 'tools/%s' % srcfile
if srcfile in src_present:
message('- found script \'%s\' as expected' % srcfile)
else:
message('- script \'%s\' is absent' % srcfile)
absent += 1
if absent > 0:
message('- if you need these tools, you should download this package \
again from scratch')
message(' ')
#
message('checking for HTCondor example files that accompany this software')
src_present = glob.glob('htcondor/*')
absent = 0
for srcfile in htcondor:
srcfile = 'htcondor/%s' % srcfile
if srcfile in src_present:
message('- found htcondor file \'%s\' as expected' % srcfile)
else:
message('- htcondor file \'%s\' is absent' % srcfile)
absent += 1
if absent > 0:
message('- if you need these files, you should download this package \
again from scratch')
message(' ')
#
message('checking for essential python package dependencies for this software')
err = 0
#
try:
import os
message('- python dependency \'os\' is available')
except ImportError:
message('- essential python dependency \'os\' is not available')
err += 1
#
try:
import sys
message('- python dependency \'sys\' is available')
except ImportError:
message('- essential python dependency \'sys\' is not available')
err += 1
#
try:
import datetime
message('- python dependency \'datetime\' is available')
except ImportError:
message('- essential python dependency \'datetime\' is not available')
err += 1
#
try:
import glob
message('- python dependency \'glob\' is available')
except ImportError:
message('- essential python dependency \'glob\' is not available')
err += 1
#
try:
import pickle
message('- python dependency \'pickle\' is available')
except ImportError:
message('- essential python dependency \'pickle\' is not available')
err += 1
#
try:
import numpy
message('- python dependency \'numpy\' is available')
except ImportError:
message('- essential python dependency \'numpy\' is not available')
err += 1
#
try:
import pandas
message('- python dependency \'pandas\' is available')
except ImportError:
message('- essential python dependency \'pandas\' is not available')
err += 1
#
try:
import h5py
message('- python dependency \'h5py\' is available')
except ImportError:
message('- essential python dependency \'h5py\' is not available')
err += 1
#
try:
import gdal
message('- python dependency \'gdal\' is available')
except ImportError:
message('- essential python dependency \'gdal\' is not available')
err += 1
#
try:
import osgeo.osr
message('- python dependency \'osgeo.osr\' is available')
except ImportError:
message('- essential python dependency \'osgeo.osr\' is not available')
err += 1
#
try:
import scipy.interpolate
message('- python dependency \'scipy.interpolate\' is available')
except ImportError:
message('- essential python dependency \'scipy.interpolate\' is not \
available')
err += 1
#
try:
import scipy.ndimage
message('- python dependency \'scipy.ndimage\' is available')
except ImportError:
message('- essential python dependency \'scipy.ndimage\' is not available')
err += 1
#
try:
import scipy.stats
message('- python dependency \'scipy.stats\' is available')
except ImportError:
message('- essential python dependency \'scipy.stats\' is not available')
err += 1
#
try:
import matplotlib
message('- python dependency \'matplotlib\' is available')
except ImportError:
message('- essential python dependency \'matplotlib\' is not available')
err += 1
#
try:
import matplotlib.pyplot
message('- python dependency \'matplotlib.pyplot\' is available')
except ImportError:
message('- essential python dependency \'matplotlib.pyplot\' is not \
available')
err += 1
#
try:
import mpl_toolkits
message('- python dependency \'mpl_toolkits\' is available')
except ImportError:
message('- essential python dependency \'mpl_toolkits\' is not available')
err += 1
#
try:
import mpl_toolkits.basemap
message('- python dependency \'mpl_toolkits.basemap\' is available')
except ImportError:
message('- essential python dependency \'mpl_toolkits.basemap\' is not \
available')
err += 1
#
if err > 0:
message('- you need to install one or more additional python packages for \
this software to work')
message('- all of these packages are available via Anaconda (\'conda\') \
and/or PyPI (\'pip\') repositories')
message('- exiting setup procedure')
sys.exit(1)
message(' ')
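# Illustrative sketch (not used by this script): the repetitive try/except
# blocks above could instead be driven by the `dependencies` list defined near
# the top of this file. Shown here only as an unused helper for reference.
def check_dependencies(module_names):
    """Return the number of modules in module_names that fail to import."""
    import importlib
    missing = 0
    for name in module_names:
        try:
            importlib.import_module(name)
            message('- python dependency \'%s\' is available' % name)
        except ImportError:
            message('- essential python dependency \'%s\' is not available' % name)
            missing += 1
    return missing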
#
message('checking for example data files that should accompany this software')
gz_data_present = glob.glob('data/*.gz')
absent = 0
for gz_dfile in gz_data_files:
gz_dfile_path = 'data/%s' % gz_dfile
if gz_dfile_path in gz_data_present:
message('- found compressed data file \'%s\' as expected' % gz_dfile)
message('-- uncompressing \'%s\'' % gz_dfile)
        # each os.system() call runs in its own shell, so a separate 'cd data'
        # would not affect the gunzip call; use the file's full relative path
        os.system('gunzip %s' % gz_dfile_path)
else:
message('- compressed example data file \'%s\' is absent' % gz_dfile)
absent += 1
if absent > 0:
message('- you don\'t need these if you have your own data in the right \
formats')
    message('- if you need the examples, you can find them on GitHub at')
message(' https://github.com/megarcia/WxCD')
#
data_present = glob.glob('data/*')
absent = 0
for dfile in data_files:
dfile_path = 'data/%s' % dfile
if dfile_path in data_present:
message('- found data file \'%s\' as expected' % dfile)
else:
message('- example data file \'%s\' is absent' % dfile)
absent += 1
if absent > 0:
message('- you don\'t need these if you have your own data in the right \
formats')
    message('- if you need the examples, you can find them on GitHub at')
message(' https://github.com/megarcia/WxCD')
message(' ')
#
message('checking for data documentation files that should accompany this \
software')
docs_present = glob.glob('docs/*')
absent = 0
for dfile in doc_files:
dfile = 'docs/%s' % dfile
if dfile in docs_present:
message('- found documentation file \'%s\' as expected' % dfile)
else:
message('- data documentation file \'%s\' is absent' % dfile)
absent += 1
if absent > 0:
message('- you don\'t need these if you have your own documentation')
    message('- if you need the examples, you can find them on GitHub at')
message(' https://github.com/megarcia/GT16_JGRA')
message(' ')
#
message('creating top-level and sub-directories that will be used for process \
output')
for dirname in add_dirs:
os.system('mkdir %s' % dirname)
message('- made top-level directory \'%s\' ' % dirname)
for dirname in analyses_dirs:
os.system('mkdir analyses/%s' % dirname)
message('- made sub-directory \'analyses/%s\' ' % dirname)
message(' ')
#
message('copying source scripts and modules to top-level directory')
os.system('cp source/*.py .')
message('archiving original scripts and modules to \'source_orig\' directory')
os.system('mv source source_orig')
#
message('copying tools to top-level directory')
os.system('cp tools/*.py .')
message('archiving original tools scripts to \'tools_orig\' directory')
os.system('mv tools tools_orig')
message(' ')
#
message('all set!')
message(' ')
#
message('if you plan to use the HTCondor example files, you\'ll need to \
move or copy them to')
message(' your top-level directory')
message(' ')
#
message('make sure to read the \'README.md\' file before you get started on \
the scripts')
message(' ')
#
message('if you need help getting your own dataset of GHCND weather \
observations, there is')
message(' a how-to document in the \'docs\' directory')
message(' ')
#
message('please send questions, bug reports, any other requests to \
[email protected]')
message(' (and include a helpfully descriptive subject line, if you could)')
message('or submit them through the Issues tab at the GitHub repository for \
this package')
message(' ')
#
sys.exit(0)
| megarcia/WxCD | setup.py | Python | gpl-3.0 | 18,007 |
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2014 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from mediadrop.lib.test.pythonic_testcase import *
from mediadrop.plugin.events import Event, FetchFirstResultEvent, GeneratorEvent
class EventTest(PythonicTestCase):
def setUp(self):
self.observers_called = 0
self.event = Event()
def probe(self):
self.observers_called += 1
def test_can_notify_all_observers(self):
self.event.post_observers.append(self.probe)
self.event.pre_observers.append(self.probe)
assert_equals(0, self.observers_called)
self.event()
assert_equals(2, self.observers_called)
class FetchFirstResultEventTest(PythonicTestCase):
def test_returns_first_non_null_result(self):
event = FetchFirstResultEvent([])
event.post_observers.append(lambda: None)
event.post_observers.append(lambda: 1)
event.post_observers.append(lambda: 2)
assert_equals(1, event())
def test_passes_all_event_parameters_to_observers(self):
event = FetchFirstResultEvent([])
event.post_observers.append(lambda foo, bar=None: foo)
event.post_observers.append(lambda foo, bar=None: bar or foo)
assert_equals(4, event(4))
assert_equals(6, event(None, bar=6))
class GeneratorEventTest(PythonicTestCase):
def test_can_unroll_lists(self):
event = GeneratorEvent([])
event.post_observers.append(lambda: [1, 2, 3])
event.post_observers.append(lambda: ('a', 'b'))
assert_equals([1, 2, 3, 'a', 'b'], list(event()))
def test_can_return_non_iterable_items(self):
event = GeneratorEvent([])
event.post_observers.append(lambda: [1, ])
event.post_observers.append(lambda: None)
event.post_observers.append(lambda: 5)
event.post_observers.append(lambda: 'some value')
assert_equals([1, None, 5, 'some value'], list(event()))
import unittest
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(EventTest))
suite.addTest(unittest.makeSuite(FetchFirstResultEventTest))
suite.addTest(unittest.makeSuite(GeneratorEventTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| kgao/MediaDrop | mediadrop/plugin/tests/events_test.py | Python | gpl-3.0 | 2,599 |
# ===================================================================
#
# Copyright (c) 2014, Legrandin <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
from Cryptodome.Util.py3compat import bord
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
create_string_buffer,
get_raw_buffer, c_size_t,
c_uint8_ptr)
_raw_md2_lib = load_pycryptodome_raw_lib(
"Cryptodome.Hash._MD2",
"""
int md2_init(void **shaState);
int md2_destroy(void *shaState);
int md2_update(void *hs,
const uint8_t *buf,
size_t len);
int md2_digest(const void *shaState,
uint8_t digest[20]);
int md2_copy(const void *src, void *dst);
""")
class MD2Hash(object):
"""An MD2 hash object.
Do not instantiate directly. Use the :func:`new` function.
:ivar oid: ASN.1 Object ID
:vartype oid: string
:ivar block_size: the size in bytes of the internal message block,
input to the compression function
:vartype block_size: integer
:ivar digest_size: the size in bytes of the resulting hash
:vartype digest_size: integer
"""
# The size of the resulting hash in bytes.
digest_size = 16
# The internal block size of the hash algorithm in bytes.
block_size = 64
# ASN.1 Object ID
oid = "1.2.840.113549.2.2"
def __init__(self, data=None):
state = VoidPointer()
result = _raw_md2_lib.md2_init(state.address_of())
if result:
raise ValueError("Error %d while instantiating MD2"
% result)
self._state = SmartPointer(state.get(),
_raw_md2_lib.md2_destroy)
if data:
self.update(data)
def update(self, data):
"""Continue hashing of a message by consuming the next chunk of data.
Args:
data (byte string/byte array/memoryview): The next chunk of the message being hashed.
"""
result = _raw_md2_lib.md2_update(self._state.get(),
c_uint8_ptr(data),
c_size_t(len(data)))
if result:
raise ValueError("Error %d while instantiating MD2"
% result)
def digest(self):
"""Return the **binary** (non-printable) digest of the message that has been hashed so far.
:return: The hash digest, computed over the data processed so far.
Binary form.
:rtype: byte string
"""
bfr = create_string_buffer(self.digest_size)
result = _raw_md2_lib.md2_digest(self._state.get(),
bfr)
if result:
raise ValueError("Error %d while instantiating MD2"
% result)
return get_raw_buffer(bfr)
def hexdigest(self):
"""Return the **printable** digest of the message that has been hashed so far.
:return: The hash digest, computed over the data processed so far.
Hexadecimal encoded.
:rtype: string
"""
return "".join(["%02x" % bord(x) for x in self.digest()])
def copy(self):
"""Return a copy ("clone") of the hash object.
The copy will have the same internal state as the original hash
object.
This can be used to efficiently compute the digests of strings that
share a common initial substring.
:return: A hash object of the same type
"""
clone = MD2Hash()
result = _raw_md2_lib.md2_copy(self._state.get(),
clone._state.get())
if result:
raise ValueError("Error %d while copying MD2" % result)
return clone
def new(self, data=None):
return MD2Hash(data)
def new(data=None):
"""Create a new hash object.
:parameter data:
Optional. The very first chunk of the message to hash.
It is equivalent to an early call to :meth:`MD2Hash.update`.
:type data: byte string/byte array/memoryview
:Return: A :class:`MD2Hash` hash object
"""
return MD2Hash().new(data)
# The size of the resulting hash in bytes.
digest_size = MD2Hash.digest_size
# The internal block size of the hash algorithm in bytes.
block_size = MD2Hash.block_size
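# Illustrative usage sketch (not part of the library): computing the digests of
# two messages that share a common prefix by cloning the partially-updated hash
# object, as described in MD2Hash.copy() above. Inputs are assumed to be byte
# strings.
def _example_common_prefix_digests(prefix, suffix_a, suffix_b):
    """Return MD2 hex digests of prefix+suffix_a and prefix+suffix_b."""
    base = new(prefix)     # hash the shared prefix only once
    h_a = base.copy()      # copy() preserves the internal hash state
    h_a.update(suffix_a)
    h_b = base.copy()
    h_b.update(suffix_b)
    return h_a.hexdigest(), h_b.hexdigest()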
| hclivess/Stallion | nuitka/Cryptodome/Hash/MD2.py | Python | gpl-3.0 | 6,130 |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.4.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x01\xf1\
\x00\
\x00\x09\x00\x78\x9c\xdd\x96\x51\x6f\x9b\x30\x10\xc7\xdf\xfb\x29\
\x3c\x1e\x9a\x4d\x15\xd0\x49\x7b\x98\x52\x48\x34\x92\x4c\xea\xd4\
\xaa\x54\x69\x55\xf5\xd1\x98\x0b\x71\x01\xdb\x35\x26\x09\xdf\x7e\
\x86\xb0\x96\xa4\x2c\xa4\x1d\x4f\xe3\xc5\xd8\x77\xbe\xdf\x9d\x8d\
\xff\xc6\x19\x6f\xd2\x04\xad\x40\x66\x94\x33\xd7\xf8\x6a\x9d\x1b\
\x08\x18\xe1\x21\x65\x91\x6b\xe4\x6a\x61\x7e\x37\xc6\xa3\x13\xe7\
\xd3\xf4\x66\x72\xf7\xe8\xcf\xd0\x26\x80\x44\xf7\xcb\x66\x77\xda\
\xe8\x04\xe9\xc7\x59\xf0\x24\x04\x89\xaa\x26\x74\x0d\xc6\x6b\x43\
\x65\x54\x54\x25\x30\xf2\x38\x8f\x53\x2c\xe3\x0c\x79\x58\x3a\xf6\
\x76\xf0\xd5\x29\xa8\xcd\x68\x29\x61\xe1\x1a\x4b\xa5\xc4\xd0\xb6\
\x41\x52\x62\xd2\x10\x2c\x51\xa8\x25\x67\xa6\x90\xfc\x09\x88\xca\
\x2c\x2e\x23\xbb\xc1\x68\x70\x66\x7a\x0a\x7a\x80\x00\xcd\xa9\x82\
\xb7\x1c\xfb\x0f\xa8\x93\xbd\x5e\xaf\x2d\x49\x75\xb5\x01\x66\x31\
\xe1\xa9\xc8\x95\x5e\x1e\x4b\xbf\xfd\x85\xec\x17\xb7\xea\x9d\xe4\
\x43\xeb\xd6\x88\xdc\x88\x9b\xbd\x09\xdc\x51\xc2\xb3\xb2\x28\xb7\
\xf7\x53\x6e\x0f\xde\x1e\xbb\x25\xf1\xa3\x98\x21\xac\x20\xe1\x42\
\x7f\x2e\x87\xe9\xd3\x17\xbf\x3e\xf8\x21\x27\x35\xff\x30\x94\x93\
\x3c\x05\xa6\xb0\xd2\xdf\x72\x1f\xdc\x20\xe1\xd1\x31\x60\x4f\xfb\
\xf5\xc1\x5b\x70\x99\xa7\xc7\x00\x7f\x96\x8e\x7d\x10\x45\x82\x19\
\xa8\x4e\xa4\x5f\xb9\xa1\x5b\xd5\x07\xf3\x59\x11\xbd\x49\x12\xda\
\x0e\xfc\x6e\x99\x93\xca\xaf\x1f\xa6\x89\x85\x68\xd5\x98\x1d\xa4\
\xf9\xa3\xf6\x3a\x1a\xea\xd8\xdb\x03\xff\x7e\x05\xf0\x2b\xfd\xfb\
\xb8\x0a\x6c\xf5\xb3\xa3\xa4\x1a\x72\x85\x59\x94\xe3\x08\x4a\x5a\
\xd6\x93\x2a\x88\x42\xd0\x66\x12\x65\xbf\x33\x11\x1f\x93\xb8\xcc\
\xe3\x92\x85\xb0\x19\x22\xbf\xf0\x2f\x3f\xb8\xd4\x7b\xbd\xbd\x45\
\x2f\x20\x3b\x74\x5f\x5d\x03\xcb\xff\xdb\x0b\xeb\xdb\xbf\xa1\x9f\
\xf0\x0a\x67\x44\x52\xa1\x86\x09\x27\x95\x98\x5a\x95\x65\x90\x62\
\x9a\x28\x3e\x1c\xcf\xef\xbd\x5f\xb3\xc9\x9d\x3b\x40\x67\x28\xac\
\x45\xd7\xaa\x48\x7a\x60\x70\x8a\x53\x71\xe1\xdd\x4c\x1f\x2b\x3b\
\x64\x04\x0b\xf8\xbc\x13\xe9\xcb\x45\x7b\xf2\x73\x60\x21\xba\xa2\
\x2c\xee\xcc\xfb\x75\xf3\x1d\x7b\xfb\x23\xf3\x1b\xc5\xa5\x8d\x58\
\
"
qt_resource_name = b"\
\x00\x15\
\x0c\xd3\x2e\x3c\
\x00\x44\
\x00\x65\x00\x66\x00\x61\x00\x75\x00\x6c\x00\x74\x00\x42\x00\x6f\x00\x6f\x00\x6b\x00\x6d\x00\x61\x00\x72\x00\x6b\x00\x73\x00\x2e\
\x00\x78\x00\x62\x00\x65\x00\x6c\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| testmana2/test | Helpviewer/Bookmarks/DefaultBookmarks_rc.py | Python | gpl-3.0 | 2,920 |
../../../../../../../share/pyshared/orca/scripts/apps/nautilus/script.py
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/nautilus/script.py | Python | gpl-3.0 | 72 |
# @file plugin.py
#
# Connect Zen Coding to Pluma.
#
# Adapted to pluma by Joao Manoel ([email protected])
#
# Original Author Franck Marcia ([email protected])
#
import pluma, gobject, gtk, os
from zen_editor import ZenEditor
zencoding_ui_str = """
<ui>
<menubar name="MenuBar">
<menu name="EditMenu" action="Edit">
<placeholder name="EditOps_5">
<menu action="ZenCodingMenuAction">
<menuitem name="ZenCodingExpand" action="ZenCodingExpandAction"/>
<menuitem name="ZenCodingExpandW" action="ZenCodingExpandWAction"/>
<menuitem name="ZenCodingWrap" action="ZenCodingWrapAction"/>
<separator/>
<menuitem name="ZenCodingInward" action="ZenCodingInwardAction"/>
<menuitem name="ZenCodingOutward" action="ZenCodingOutwardAction"/>
<menuitem name="ZenCodingMerge" action="ZenCodingMergeAction"/>
<separator/>
<menuitem name="ZenCodingPrev" action="ZenCodingPrevAction"/>
<menuitem name="ZenCodingNext" action="ZenCodingNextAction"/>
<separator/>
<menuitem name="ZenCodingRemove" action="ZenCodingRemoveAction"/>
<menuitem name="ZenCodingSplit" action="ZenCodingSplitAction"/>
<menuitem name="ZenCodingComment" action="ZenCodingCommentAction"/>
</menu>
</placeholder>
</menu>
</menubar>
</ui>
"""
class ZenCodingPlugin(pluma.Plugin):
"""A Pluma plugin to implement Zen Coding's HTML and CSS shorthand expander."""
def activate(self, window):
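        # Each action tuple follows the gtk.ActionGroup.add_actions() layout:
        # (name, stock_id, label, accelerator, tooltip, callback).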
actions = [
('ZenCodingMenuAction', None, '_Zen Coding', None, "Zen Coding tools", None),
('ZenCodingExpandAction', None, '_Expand abbreviation', '<Ctrl>E', "Expand abbreviation to raw HTML/CSS", self.expand_abbreviation),
('ZenCodingExpandWAction', None, 'E_xpand dynamic abbreviation...', '<Ctrl><Alt>E', "Dynamically expand abbreviation as you type", self.expand_with_abbreviation),
('ZenCodingWrapAction', None, '_Wrap with abbreviation...', '<Ctrl><Shift>E', "Wrap with code expanded from abbreviation", self.wrap_with_abbreviation),
('ZenCodingInwardAction', None, 'Balance tag _inward', '<Ctrl><Alt>I', "Select inner tag's content", self.match_pair_inward),
('ZenCodingOutwardAction', None, 'Balance tag _outward', '<Ctrl><Alt>O', "Select outer tag's content", self.match_pair_outward),
('ZenCodingMergeAction', None, '_Merge lines', '<Ctrl><Alt>M', "Merge all lines of the current selection", self.merge_lines),
('ZenCodingPrevAction', None, '_Previous edit point', '<Alt>Left', "Place the cursor at the previous edit point", self.prev_edit_point),
('ZenCodingNextAction', None, '_Next edit point', '<Alt>Right', "Place the cursor at the next edit point", self.next_edit_point),
('ZenCodingRemoveAction', None, '_Remove tag', '<Ctrl><Alt>R', "Remove a tag", self.remove_tag),
('ZenCodingSplitAction', None, 'Split or _join tag', '<Ctrl><Alt>J', "Toggle between single and double tag", self.split_join_tag),
('ZenCodingCommentAction', None, 'Toggle _comment', '<Ctrl><Alt>C', "Toggle an XML or HTML comment", self.toggle_comment)
]
windowdata = dict()
window.set_data("ZenCodingPluginDataKey", windowdata)
windowdata["action_group"] = gtk.ActionGroup("PlumaZenCodingPluginActions")
windowdata["action_group"].add_actions(actions, window)
manager = window.get_ui_manager()
manager.insert_action_group(windowdata["action_group"], -1)
windowdata["ui_id"] = manager.add_ui_from_string(zencoding_ui_str)
window.set_data("ZenCodingPluginInfo", windowdata)
self.editor = ZenEditor()
error = self.editor.get_user_settings_error()
if error:
md = gtk.MessageDialog(window, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR,
gtk.BUTTONS_CLOSE, "There is an error in user settings:")
message = "{0} on line {1} at character {2}\n\nUser settings will not be available."
md.set_title("Zen Coding error")
md.format_secondary_text(message.format(error['msg'], error['lineno'], error['offset']))
md.run()
md.destroy()
def deactivate(self, window):
windowdata = window.get_data("ZenCodingPluginDataKey")
manager = window.get_ui_manager()
manager.remove_ui(windowdata["ui_id"])
manager.remove_action_group(windowdata["action_group"])
def update_ui(self, window):
view = window.get_active_view()
windowdata = window.get_data("ZenCodingPluginDataKey")
windowdata["action_group"].set_sensitive(bool(view and view.get_editable()))
def expand_abbreviation(self, action, window):
self.editor.expand_abbreviation(window)
def expand_with_abbreviation(self, action, window):
self.editor.expand_with_abbreviation(window)
def wrap_with_abbreviation(self, action, window):
self.editor.wrap_with_abbreviation(window)
def match_pair_inward(self, action, window):
self.editor.match_pair_inward(window)
def match_pair_outward(self, action, window):
self.editor.match_pair_outward(window)
def merge_lines(self, action, window):
self.editor.merge_lines(window)
def prev_edit_point(self, action, window):
self.editor.prev_edit_point(window)
def next_edit_point(self, action, window):
self.editor.next_edit_point(window)
def remove_tag(self, action, window):
self.editor.remove_tag(window)
def split_join_tag(self, action, window):
self.editor.split_join_tag(window)
def toggle_comment(self, action, window):
self.editor.toggle_comment(window)
| jmanoel7/pluma-plugins-0 | plugins/zencoding/plugin.py | Python | gpl-3.0 | 6,126 |
# -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoKufiArabic-Regular'
native_name = ''
def glyphs(self):
glyphs = []
glyphs.append(0x0261) #uni0759.fina
glyphs.append(0x007F) #uni0625
glyphs.append(0x00D4) #uni0624
glyphs.append(0x0005) #uni0627
glyphs.append(0x00E2) #uni0626
glyphs.append(0x0004) #uni0621
glyphs.append(0x007D) #uni0623
glyphs.append(0x0081) #uni0622
glyphs.append(0x009C) #uni0686.medi
glyphs.append(0x0099) #uni0629
glyphs.append(0x0085) #uni0628
glyphs.append(0x0267) #uni075C.fina
glyphs.append(0x0256) #uni0756.init
glyphs.append(0x007E) #uni0623.fina
glyphs.append(0x0173) #uni069A.init
glyphs.append(0x01A9) #uni06AB.init
glyphs.append(0x02B8) #wavyhamza_above
glyphs.append(0x00C8) #veh.fina
glyphs.append(0x0192) #uni06A3.init
glyphs.append(0x02C3) #uni06C8.fina
glyphs.append(0x01BC) #uni06B1.fina
glyphs.append(0x020E) #uni06AD.finamorocco
glyphs.append(0x0008) #uni062D.init
glyphs.append(0x00DE) #uni06CC.medi
glyphs.append(0x00A1) #uni062E
glyphs.append(0x0007) #uni062D
glyphs.append(0x000B) #uni062F
glyphs.append(0x008D) #uni062A
glyphs.append(0x0129) #uni067B.init
glyphs.append(0x009D) #uni062C
glyphs.append(0x0091) #uni062B
glyphs.append(0x00E8) #uni06440625.isol
glyphs.append(0x0279) #uni0760.medi
glyphs.append(0x02A3) #uni076B.fina
glyphs.append(0x01D0) #uni06B6.fina
glyphs.append(0x01E7) #uni06BF
glyphs.append(0x0072) #uni066E.init
glyphs.append(0x00E5) #uni0626.fina
glyphs.append(0x025C) #uni0758
glyphs.append(0x01E0) #uni06BB.fina
glyphs.append(0x0284) #uni0763.init
glyphs.append(0x01C5) #uni06B3.init
glyphs.append(0x00DB) #uni064A.fina
glyphs.append(0x0033) #uni06440627.fina
glyphs.append(0x0189) #uni06A0.init
glyphs.append(0x017A) #uni069C.fina
glyphs.append(0x0134) #uni067F.fina
glyphs.append(0x0101) #dammatan_01
glyphs.append(0x0216) #uni06B50627.fina
glyphs.append(0x0036) #uni066E.fina
glyphs.append(0x02CC) #uni06CE.init
glyphs.append(0x0075) #beh_dotless_alt.medi
glyphs.append(0x02A0) #uni076A.init
glyphs.append(0x0108) #Ghunna_above
glyphs.append(0x0027) #uni0645.init
glyphs.append(0x0031) #uni0649.fina
glyphs.append(0x02C6) #uni06CA
glyphs.append(0x0073) #uni066E.medi
glyphs.append(0x026D) #uni075D.medi
glyphs.append(0x02E1) #uni060D
glyphs.append(0x01BD) #uni06B1.init
glyphs.append(0x02DD) #uni06DD
glyphs.append(0x0257) #uni0756.medi
glyphs.append(0x0281) #uni0762.medi
glyphs.append(0x017F) #uni069D.init
glyphs.append(0x023E) #uni0750.init
glyphs.append(0x015A) #uni068C.fina
glyphs.append(0x005A) #uni06BA.fina
glyphs.append(0x018A) #uni06A0.medi
glyphs.append(0x01AC) #uni06AC.fina
glyphs.append(0x018E) #uni06A2.init
glyphs.append(0x0088) #uni0628.fina
glyphs.append(0x00F0) #uni06C2.fina
glyphs.append(0x0196) #uni06A4.medi
glyphs.append(0x0295) #uni0767.medi
glyphs.append(0x0141) #uni0682.init
glyphs.append(0x0062) #uni064B
glyphs.append(0x0265) #uni075B.fina
glyphs.append(0x02E5) #threedots_alt1.below
glyphs.append(0x02CD) #uni06CE.medi
glyphs.append(0x02D5) #uni06D1.fina
glyphs.append(0x01F5) #uni06DB
glyphs.append(0x0138) #uni0680.fina
glyphs.append(0x0277) #uni0760.fina
glyphs.append(0x0133) #uni067F
glyphs.append(0x0260) #uni0759
glyphs.append(0x012F) #uni067D
glyphs.append(0x0089) #uni067E
glyphs.append(0x0127) #uni067B
glyphs.append(0x012B) #uni067C
glyphs.append(0x0123) #uni067A
glyphs.append(0x00EE) #heh_ae.fina
glyphs.append(0x019A) #uni06A5.medi
glyphs.append(0x00D5) #uni0624.fina
glyphs.append(0x02AD) #twodots.vert.below
glyphs.append(0x01D9) #uni06B8.init
glyphs.append(0x02EF) #threedots_alt2.above
glyphs.append(0x008B) #uni067E.init
glyphs.append(0x01FC) #uni06E5
glyphs.append(0x01FD) #uni06E6
glyphs.append(0x00A4) #uni062E.fina
glyphs.append(0x02DF) #uni06E0
glyphs.append(0x01F8) #uni06E1
glyphs.append(0x0098) #uni0679.fina
glyphs.append(0x01FA) #uni06E3
glyphs.append(0x026B) #uni075D.fina
glyphs.append(0x01FF) #uni06E8
glyphs.append(0x02E0) #uni06E9
glyphs.append(0x0202) #uni06ED
glyphs.append(0x022A) #uni06EE
glyphs.append(0x000D) #uni0631.fina
glyphs.append(0x0125) #uni067A.init
glyphs.append(0x0200) #uni06EA
glyphs.append(0x003C) #uni066F.fina
glyphs.append(0x01A6) #uni06AA.medi
glyphs.append(0x0275) #uni075F.medi
glyphs.append(0x000F) #uni0633.init
glyphs.append(0x02F0) #twodots_alt1.above
glyphs.append(0x01C8) #uni06B4.fina
glyphs.append(0x019E) #uni06A6.medi
glyphs.append(0x0121) #uni0678
glyphs.append(0x0095) #uni0679
glyphs.append(0x011D) #uni0676
glyphs.append(0x011F) #uni0677
glyphs.append(0x011B) #uni0675
glyphs.append(0x0117) #uni0672
glyphs.append(0x0119) #uni0673
glyphs.append(0x006D) #uni0670
glyphs.append(0x0083) #uni0671
glyphs.append(0x02A9) #uni076D.medi
glyphs.append(0x01D1) #uni06B6.init
glyphs.append(0x026E) #uni075E
glyphs.append(0x02AE) #twodots.vert.small.above
glyphs.append(0x00B4) #uni0636.init
glyphs.append(0x0268) #uni075C.init
glyphs.append(0x02C5) #uni06C9.fina
glyphs.append(0x00B8) #uni0638.init
glyphs.append(0x0160) #uni068F.fina
glyphs.append(0x0204) #uni06FB.fina
glyphs.append(0x00FE) #uni06F4.urdu
glyphs.append(0x012D) #uni067C.init
glyphs.append(0x025F) #uni0758.medi
glyphs.append(0x0037) #uni066F.init
glyphs.append(0x020F) #uni06440672.isol
glyphs.append(0x01A2) #uni06A8.fina
glyphs.append(0x00B6) #uni0636.fina
glyphs.append(0x00B1) #uni0634.medi
glyphs.append(0x008F) #uni062A.medi
glyphs.append(0x02F3) #uni069F.init
glyphs.append(0x00BE) #uni063A.fina
glyphs.append(0x0241) #uni0751.fina
glyphs.append(0x0213) #uni06440675.isol
glyphs.append(0x0285) #uni0763.medi
glyphs.append(0x00B5) #uni0636.medi
glyphs.append(0x02E4) #threedots.rev_alt1.below
glyphs.append(0x02A7) #uni076D.fina
glyphs.append(0x0176) #uni069B.fina
glyphs.append(0x027F) #uni0762.fina
glyphs.append(0x0148) #uni0684.fina
glyphs.append(0x02EE) #threedots_alt1.above
glyphs.append(0x00EC) #uni06440671.isol
glyphs.append(0x02A8) #uni076D.init
glyphs.append(0x01F2) #uni06D8
glyphs.append(0x004F) #uni06C1.medi
glyphs.append(0x0128) #uni067B.fina
glyphs.append(0x0126) #uni067A.medi
glyphs.append(0x009B) #uni0686.init
glyphs.append(0x012E) #uni067C.medi
glyphs.append(0x02F4) #uni069F.medi
glyphs.append(0x0198) #uni06A5.fina
glyphs.append(0x0263) #uni075A.fina
glyphs.append(0x028B) #uni0765.fina
glyphs.append(0x02B2) #fourdots.above
glyphs.append(0x0249) #uni0753.fina
glyphs.append(0x009F) #uni062C.medi
glyphs.append(0x025A) #uni0757.init
glyphs.append(0x0291) #uni0766.medi
glyphs.append(0x00A3) #uni062E.medi
glyphs.append(0x00C9) #uni0642.init
glyphs.append(0x00BB) #uni063A
glyphs.append(0x0145) #uni0683.init
glyphs.append(0x017E) #uni069D.fina
glyphs.append(0x0253) #uni0755.medi
glyphs.append(0x0142) #uni0682.medi
glyphs.append(0x01A1) #uni06A8
glyphs.append(0x0053) #uni06A9
glyphs.append(0x00D7) #uni0649.medi
glyphs.append(0x01D6) #uni06B7.medi
glyphs.append(0x01C0) #uni06B2.fina
glyphs.append(0x0187) #uni06A0
glyphs.append(0x018B) #uni06A1
glyphs.append(0x018C) #uni06A2
glyphs.append(0x0190) #uni06A3
glyphs.append(0x02BC) #uni06A4
glyphs.append(0x0197) #uni06A5
glyphs.append(0x019B) #uni06A6
glyphs.append(0x019F) #uni06A7
glyphs.append(0x0015) #uni0635.fina
glyphs.append(0x01A3) #uni06AA
glyphs.append(0x01A7) #uni06AB
glyphs.append(0x01AB) #uni06AC
glyphs.append(0x0002) #nonmarkingreturn
glyphs.append(0x01B3) #uni06AE
glyphs.append(0x0055) #uni06AF
glyphs.append(0x00CE) #uni0646.medi
glyphs.append(0x00A9) #uni0632
glyphs.append(0x000E) #uni0633
glyphs.append(0x00A5) #uni0630
glyphs.append(0x000C) #uni0631
glyphs.append(0x00B3) #uni0636
glyphs.append(0x0016) #uni0637
glyphs.append(0x00AF) #uni0634
glyphs.append(0x0012) #uni0635
glyphs.append(0x0029) #uni0645.fina
glyphs.append(0x00B7) #uni0638
glyphs.append(0x001A) #uni0639
glyphs.append(0x0001) #.null
glyphs.append(0x019C) #uni06A6.fina
glyphs.append(0x00AE) #uni0698.fina
glyphs.append(0x0219) #uni06B70627.isol
glyphs.append(0x0297) #uni0768.fina
glyphs.append(0x024D) #uni0754.fina
glyphs.append(0x02CF) #uni06CE.fina
glyphs.append(0x022D) #uni06EF.fina
glyphs.append(0x0146) #uni0683.medi
glyphs.append(0x0080) #uni0625.fina
glyphs.append(0x0158) #uni068B.fina
glyphs.append(0x0038) #uni066F.medi
glyphs.append(0x0032) #uni06440627.isol
glyphs.append(0x011E) #uni0676.fina
glyphs.append(0x025D) #uni0758.fina
glyphs.append(0x0278) #uni0760.init
glyphs.append(0x01CA) #uni06B4.medi
glyphs.append(0x001D) #uni0639.fina
glyphs.append(0x0013) #uni0635.init
glyphs.append(0x002A) #uni0647
glyphs.append(0x00CF) #uni0646
glyphs.append(0x0026) #uni0645
glyphs.append(0x0022) #uni0644
glyphs.append(0x0020) #uni0643
glyphs.append(0x00CB) #uni0642
glyphs.append(0x00C1) #uni0641
glyphs.append(0x0003) #space
glyphs.append(0x00CC) #uni0642.fina
glyphs.append(0x02D3) #uni06D0.medi
glyphs.append(0x0096) #uni0679.init
glyphs.append(0x0030) #uni0649
glyphs.append(0x002E) #uni0648
glyphs.append(0x0282) #uni0763
glyphs.append(0x02EA) #uniFD3F
glyphs.append(0x00F5) #uni06BE.init
glyphs.append(0x0194) #uni06A4.fina
glyphs.append(0x02C9) #uni06CB.fina
glyphs.append(0x0130) #uni067D.fina
glyphs.append(0x0136) #uni067F.medi
glyphs.append(0x0131) #uni067D.init
glyphs.append(0x0230) #uni06FF.init
glyphs.append(0x02ED) #diagonal
glyphs.append(0x02B0) #threedots.small.above
glyphs.append(0x02B7) #hamza_medial
glyphs.append(0x0065) #uni064F
glyphs.append(0x0064) #uni064E
glyphs.append(0x0063) #uni064D
glyphs.append(0x0066) #uni064C
glyphs.append(0x0039) #feh_dotless.isol
glyphs.append(0x00DA) #uni064A
glyphs.append(0x0182) #uni069E.fina
glyphs.append(0x00C4) #uni0641.fina
glyphs.append(0x0010) #uni0633.medi
glyphs.append(0x0106) #shadda_01
glyphs.append(0x0193) #uni06A3.medi
glyphs.append(0x025E) #uni0758.init
glyphs.append(0x00E3) #uni0626.init
glyphs.append(0x02CB) #uni06CF.fina
glyphs.append(0x02A1) #uni076A.medi
glyphs.append(0x00C6) #veh.init
glyphs.append(0x00EB) #uni06440622.fina
glyphs.append(0x01D5) #uni06B7.init
glyphs.append(0x0000) #.notdef
glyphs.append(0x029F) #uni076A.fina
glyphs.append(0x0105) #kasra_01
glyphs.append(0x02D7) #uni06FA
glyphs.append(0x0207) #uni06FC
glyphs.append(0x0203) #uni06FB
glyphs.append(0x020C) #uni06FE
glyphs.append(0x020B) #uni06FD
glyphs.append(0x018F) #uni06A2.medi
glyphs.append(0x022E) #uni06FF
glyphs.append(0x02E2) #patah.wide
glyphs.append(0x02F1) #threedots.rev_alt2.below
glyphs.append(0x00F3) #uni06C3.fina
glyphs.append(0x02E9) #uniFD3E
glyphs.append(0x00A0) #uni062C.fina
glyphs.append(0x00CD) #uni0646.init
glyphs.append(0x0210) #uni06440672.fina
glyphs.append(0x0115) #uni0655064D
glyphs.append(0x00D9) #uni064A.medi
glyphs.append(0x0269) #uni075C.medi
glyphs.append(0x01FB) #uni06E4
glyphs.append(0x00F6) #uni06BE.medi
glyphs.append(0x0025) #uni0644.fina
glyphs.append(0x004E) #uni06C1.init
glyphs.append(0x0103) #fatha_01
glyphs.append(0x00CA) #uni0642.medi
glyphs.append(0x0054) #uni06A9.fina
glyphs.append(0x0186) #uni069F.fina
glyphs.append(0x0084) #uni0671.fina
glyphs.append(0x0195) #uni06A4.init
glyphs.append(0x003E) #twodots.above
glyphs.append(0x02E6) #twodots.vert_alt1.below
glyphs.append(0x023A) #uni065D
glyphs.append(0x0258) #uni0757
glyphs.append(0x015C) #uni068D.fina
glyphs.append(0x021C) #uni06B80627.fina
glyphs.append(0x0254) #uni0756
glyphs.append(0x0233) #riyal
glyphs.append(0x00F8) #uni06F1
glyphs.append(0x00F7) #uni06F0
glyphs.append(0x00FA) #uni06F3
glyphs.append(0x00F9) #uni06F2
glyphs.append(0x004C) #uni06F5
glyphs.append(0x004B) #uni06F4
glyphs.append(0x00FC) #uni06F7
glyphs.append(0x004D) #uni06F6
glyphs.append(0x00FB) #uni06F9
glyphs.append(0x00FD) #uni06F8
glyphs.append(0x02EC) #hah_alt.fina
glyphs.append(0x0237) #uni065A
glyphs.append(0x0238) #uni065B
glyphs.append(0x01E3) #uni06BC.init
glyphs.append(0x0014) #uni0635.medi
glyphs.append(0x0239) #uni065C
glyphs.append(0x00E4) #uni0626.medi
glyphs.append(0x0205) #uni06FB.init
glyphs.append(0x026C) #uni075D.init
glyphs.append(0x0259) #uni0757.fina
glyphs.append(0x0286) #uni0764
glyphs.append(0x028A) #uni0765
glyphs.append(0x028E) #uni0766
glyphs.append(0x0292) #uni0767
glyphs.append(0x0276) #uni0760
glyphs.append(0x027A) #uni0761
glyphs.append(0x027E) #uni0762
glyphs.append(0x01A8) #uni06AB.fina
glyphs.append(0x0220) #uni0603
glyphs.append(0x021F) #uni0602
glyphs.append(0x021E) #uni0601
glyphs.append(0x021D) #uni0600
glyphs.append(0x01C1) #uni06B2.init
glyphs.append(0x017C) #uni069C.medi
glyphs.append(0x0118) #uni0672.fina
glyphs.append(0x0074) #beh_dotless_alt.init
glyphs.append(0x0290) #uni0766.init
glyphs.append(0x012A) #uni067B.medi
glyphs.append(0x01DD) #uni06B9.init
glyphs.append(0x0172) #uni069A.fina
glyphs.append(0x02BD) #uni06C5.fina
glyphs.append(0x02A6) #uni076D
glyphs.append(0x029E) #uni076A
glyphs.append(0x02A2) #uni076B
glyphs.append(0x02A4) #uni076C
glyphs.append(0x005D) #uni060C
glyphs.append(0x0234) #uni060B
glyphs.append(0x008A) #uni067E.fina
glyphs.append(0x0222) #uni060F
glyphs.append(0x0221) #uni060E
glyphs.append(0x0211) #uni06440673.isol
glyphs.append(0x024E) #uni0754.init
glyphs.append(0x024F) #uni0754.medi
glyphs.append(0x0299) #uni0768.medi
glyphs.append(0x01B8) #uni06B0.fina
glyphs.append(0x02B3) #fourdots.below
glyphs.append(0x01E4) #uni06BC.medi
glyphs.append(0x00B2) #uni0634.fina
glyphs.append(0x012C) #uni067C.fina
glyphs.append(0x010B) #uni0651064B
glyphs.append(0x010D) #uni0651064F
glyphs.append(0x010C) #uni0651064E
glyphs.append(0x0150) #uni0687.fina
glyphs.append(0x0050) #uni06BE
glyphs.append(0x01E5) #uni06BD
glyphs.append(0x01B6) #uni06AE.medi
glyphs.append(0x0059) #uni06BA
glyphs.append(0x01E1) #uni06BC
glyphs.append(0x009E) #uni062C.init
glyphs.append(0x0139) #uni0680.init
glyphs.append(0x02DB) #uni076A0627.isol
glyphs.append(0x0114) #uni06540652
glyphs.append(0x01E2) #uni06BC.fina
glyphs.append(0x027B) #uni0761.fina
glyphs.append(0x022B) #uni06EE.fina
glyphs.append(0x01A0) #uni06A7.fina
glyphs.append(0x002B) #uni0647.init
glyphs.append(0x01B1) #uni06AD.init
glyphs.append(0x0090) #uni062A.fina
glyphs.append(0x01C9) #uni06B4.init
glyphs.append(0x01CB) #uni06B5
glyphs.append(0x01C7) #uni06B4
glyphs.append(0x01D3) #uni06B7
glyphs.append(0x01CF) #uni06B6
glyphs.append(0x01BB) #uni06B1
glyphs.append(0x01B7) #uni06B0
glyphs.append(0x01C3) #uni06B3
glyphs.append(0x01BF) #uni06B2
glyphs.append(0x02D2) #uni06D0.init
glyphs.append(0x01DB) #uni06B9
glyphs.append(0x01D7) #uni06B8
glyphs.append(0x0070) #uni062F.fina
glyphs.append(0x00BD) #uni063A.medi
glyphs.append(0x01E8) #uni06BF.fina
glyphs.append(0x01B2) #uni06AD.medi
glyphs.append(0x0082) #uni0622.fina
glyphs.append(0x02BF) #uni06C6.fina
glyphs.append(0x021A) #uni06B70627.fina
glyphs.append(0x023B) #uni065E
glyphs.append(0x0162) #uni0690.fina
glyphs.append(0x01AE) #uni06AC.medi
glyphs.append(0x016E) #uni0697.fina
glyphs.append(0x01EF) #uni06CD.fina
glyphs.append(0x0052) #uni06D2.fina
glyphs.append(0x010F) #uniFC63
glyphs.append(0x007A) #twodots.below
glyphs.append(0x00B9) #uni0638.medi
glyphs.append(0x01DC) #uni06B9.fina
glyphs.append(0x014D) #uni0685.init
glyphs.append(0x0199) #uni06A5.init
glyphs.append(0x00E6) #uni06440623.isol
glyphs.append(0x01AF) #uni06AD
glyphs.append(0x01AA) #uni06AB.medi
glyphs.append(0x008E) #uni062A.init
glyphs.append(0x0178) #uni069B.medi
glyphs.append(0x0144) #uni0683.fina
glyphs.append(0x00C2) #uni0641.init
glyphs.append(0x0243) #uni0751.medi
glyphs.append(0x024B) #uni0753.medi
glyphs.append(0x002C) #uni0647.medi
glyphs.append(0x006C) #uni0655
glyphs.append(0x006E) #uni0656
glyphs.append(0x0228) #uni0657
glyphs.append(0x0067) #uni0650
glyphs.append(0x0068) #uni0651
glyphs.append(0x0069) #uni0652
glyphs.append(0x006A) #uni0653
glyphs.append(0x0271) #uni075E.medi
glyphs.append(0x0229) #uni0658
glyphs.append(0x0236) #uni0659
glyphs.append(0x001B) #uni0639.init
glyphs.append(0x0245) #uni0752.fina
glyphs.append(0x002D) #uni0647.fina
glyphs.append(0x0283) #uni0763.fina
glyphs.append(0x0093) #uni062B.medi
glyphs.append(0x0107) #sukun_01
glyphs.append(0x013E) #uni0681.medi
glyphs.append(0x00C7) #veh.medi
glyphs.append(0x0177) #uni069B.init
glyphs.append(0x0273) #uni075F.fina
glyphs.append(0x007B) #alef_alt.isol
glyphs.append(0x0251) #uni0755.fina
glyphs.append(0x0120) #uni0677.fina
glyphs.append(0x01B0) #uni06AD.fina
glyphs.append(0x00ED) #uni06440671.fina
glyphs.append(0x0102) #kasratan_01
glyphs.append(0x029B) #uni0769.fina
glyphs.append(0x027C) #uni0761.init
glyphs.append(0x02AB) #twodots.small.below
glyphs.append(0x01E6) #uni06BD.fina
glyphs.append(0x0293) #uni0767.fina
glyphs.append(0x01CC) #uni06B5.fina
glyphs.append(0x020D) #uni06AD.morocco
glyphs.append(0x01B4) #uni06AE.fina
glyphs.append(0x006B) #uni0654
glyphs.append(0x00AA) #uni0632.fina
glyphs.append(0x00B0) #uni0634.init
glyphs.append(0x01DF) #uni06BB
glyphs.append(0x022C) #uni06EF
glyphs.append(0x0252) #uni0755.init
glyphs.append(0x01EA) #uni06BF.medi
glyphs.append(0x0294) #uni0767.init
glyphs.append(0x0057) #uni06AF.medi
glyphs.append(0x02D8) #uni06FA.fina
glyphs.append(0x000A) #uni062D.fina
glyphs.append(0x028F) #uni0766.fina
glyphs.append(0x0274) #uni075F.init
glyphs.append(0x00DF) #uni06CC.fina
glyphs.append(0x026F) #uni075E.fina
glyphs.append(0x0214) #uni06440675.fina
glyphs.append(0x0184) #uni069E.medi
glyphs.append(0x001F) #uni0643.medi
glyphs.append(0x0151) #uni0687.init
glyphs.append(0x01D2) #uni06B6.medi
glyphs.append(0x00E7) #uni06440623.fina
glyphs.append(0x01AD) #uni06AC.init
glyphs.append(0x01BA) #uni06B0.medi
glyphs.append(0x029C) #uni0769.init
glyphs.append(0x020A) #uni06FC.medi
glyphs.append(0x02AF) #twodots.vert.small.below
glyphs.append(0x0143) #uni0683
glyphs.append(0x013F) #uni0682
glyphs.append(0x013B) #uni0681
glyphs.append(0x0137) #uni0680
glyphs.append(0x014F) #uni0687
glyphs.append(0x0077) #uni0686
glyphs.append(0x014B) #uni0685
glyphs.append(0x0147) #uni0684
glyphs.append(0x0156) #uni068A.fina
glyphs.append(0x0153) #uni0689
glyphs.append(0x00A7) #uni0688
glyphs.append(0x02DC) #uni076A0627.fina
glyphs.append(0x0262) #uni075A
glyphs.append(0x0266) #uni075C
glyphs.append(0x0264) #uni075B
glyphs.append(0x0116) #uni06550650
glyphs.append(0x026A) #uni075D
glyphs.append(0x0272) #uni075F
glyphs.append(0x01D4) #uni06B7.fina
glyphs.append(0x005E) #uni061B
glyphs.append(0x02F5) #ZWSP
glyphs.append(0x0235) #uni061E
glyphs.append(0x005C) #uni061F
glyphs.append(0x003D) #onedot.above
glyphs.append(0x0079) #onedot.below
glyphs.append(0x00A8) #uni0688.fina
glyphs.append(0x01BE) #uni06B1.medi
glyphs.append(0x00BC) #uni063A.init
glyphs.append(0x014E) #uni0685.medi
glyphs.append(0x014C) #uni0685.fina
glyphs.append(0x010E) #uni06510650
glyphs.append(0x021B) #uni06B80627.isol
glyphs.append(0x00E1) #uni06D3.fina
glyphs.append(0x0152) #uni0687.medi
glyphs.append(0x02B4) #threedots.horz.below
glyphs.append(0x0017) #uni0637.init
glyphs.append(0x023D) #uni0750.fina
glyphs.append(0x01C2) #uni06B2.medi
glyphs.append(0x00C3) #uni0641.medi
glyphs.append(0x013A) #uni0680.medi
glyphs.append(0x0240) #uni0751
glyphs.append(0x023C) #uni0750
glyphs.append(0x0248) #uni0753
glyphs.append(0x0244) #uni0752
glyphs.append(0x0250) #uni0755
glyphs.append(0x024C) #uni0754
glyphs.append(0x0132) #uni067D.medi
glyphs.append(0x00FF) #uni06F7.urdu
glyphs.append(0x0223) #uni0610
glyphs.append(0x0224) #uni0611
glyphs.append(0x0225) #uni0612
glyphs.append(0x0226) #uni0613
glyphs.append(0x0227) #uni0614
glyphs.append(0x006F) #uni0615
glyphs.append(0x02B1) #threedots.small.below
glyphs.append(0x0159) #uni068C
glyphs.append(0x0157) #uni068B
glyphs.append(0x0155) #uni068A
glyphs.append(0x029D) #uni0769.medi
glyphs.append(0x015F) #uni068F
glyphs.append(0x015D) #uni068E
glyphs.append(0x015B) #uni068D
glyphs.append(0x0246) #uni0752.init
glyphs.append(0x0164) #uni0692.fina
glyphs.append(0x0024) #uni0644.medi
glyphs.append(0x0149) #uni0684.init
glyphs.append(0x022F) #uni06FF.fina
glyphs.append(0x0296) #uni0768
glyphs.append(0x015E) #uni068E.fina
glyphs.append(0x002F) #uni0648.fina
glyphs.append(0x029A) #uni0769
glyphs.append(0x0113) #uni0654064F
glyphs.append(0x0111) #uni0654064E
glyphs.append(0x0110) #uni0654064B
glyphs.append(0x0112) #uni0654064C
glyphs.append(0x0021) #uni0643.fina
glyphs.append(0x01C6) #uni06B3.medi
glyphs.append(0x0180) #uni069D.medi
glyphs.append(0x0122) #uni0678.fina
glyphs.append(0x028C) #uni0765.init
glyphs.append(0x01CE) #uni06B5.medi
glyphs.append(0x01DE) #uni06B9.medi
glyphs.append(0x017B) #uni069C.init
glyphs.append(0x0183) #uni069E.init
glyphs.append(0x01DA) #uni06B8.medi
glyphs.append(0x0094) #uni062B.fina
glyphs.append(0x017D) #uni069D
glyphs.append(0x027D) #uni0761.medi
glyphs.append(0x02AC) #twodots.vert.above
glyphs.append(0x0298) #uni0768.init
glyphs.append(0x02DA) #uni06FA.medi
glyphs.append(0x018D) #uni06A2.fina
glyphs.append(0x0100) #fathatan_01
glyphs.append(0x02B5) #smallv.arabic
glyphs.append(0x016A) #uni0695.fina
glyphs.append(0x00E9) #uni06440625.fina
glyphs.append(0x0215) #uni06B50627.isol
glyphs.append(0x02F6) #uni200C
glyphs.append(0x00C5) #veh.isol
glyphs.append(0x02F9) #uni200F
glyphs.append(0x02F8) #uni200E
glyphs.append(0x02F7) #uni200D
glyphs.append(0x00D3) #uni06C0.fina
glyphs.append(0x0206) #uni06FB.medi
glyphs.append(0x011A) #uni0673.fina
glyphs.append(0x02D1) #uni06D0.fina
glyphs.append(0x02AA) #twodots.small.above
glyphs.append(0x01A5) #uni06AA.init
glyphs.append(0x0019) #uni0637.fina
glyphs.append(0x016C) #uni0696.fina
glyphs.append(0x02E3) #twodots_alt1.below
glyphs.append(0x01F7) #uni06DF
glyphs.append(0x02DE) #uni06DE
glyphs.append(0x00F4) #uni06BE.fina
glyphs.append(0x01F6) #uni06DC
glyphs.append(0x001C) #uni0639.medi
glyphs.append(0x01F4) #uni06DA
glyphs.append(0x00A2) #uni062E.init
glyphs.append(0x011C) #uni0675.fina
glyphs.append(0x02BA) #threedots.rev.above
glyphs.append(0x0009) #uni062D.medi
glyphs.append(0x013D) #uni0681.init
glyphs.append(0x014A) #uni0684.medi
glyphs.append(0x0086) #uni0628.init
glyphs.append(0x00EF) #uni06C2
glyphs.append(0x00F2) #uni06C3
glyphs.append(0x00D2) #uni06C0
glyphs.append(0x00F1) #uni06C1
glyphs.append(0x02BE) #uni06C6
glyphs.append(0x02C0) #uni06C7
glyphs.append(0x01EB) #uni06C4
glyphs.append(0x01ED) #uni06C5
glyphs.append(0x02C1) #uni06C7.fina
glyphs.append(0x02C2) #uni06C8
glyphs.append(0x02C4) #uni06C9
glyphs.append(0x0154) #uni0689.fina
glyphs.append(0x0124) #uni067A.fina
glyphs.append(0x0255) #uni0756.fina
glyphs.append(0x0168) #uni0694.fina
glyphs.append(0x0247) #uni0752.medi
glyphs.append(0x02C7) #uni06CA.fina
glyphs.append(0x008C) #uni067E.medi
glyphs.append(0x0104) #damma_01
glyphs.append(0x00D8) #uni064A.init
glyphs.append(0x00DD) #uni06CC.init
glyphs.append(0x02C8) #uni06CB
glyphs.append(0x00DC) #uni06CC
glyphs.append(0x0166) #uni0693.fina
glyphs.append(0x0218) #uni06B60627.fina
glyphs.append(0x02CA) #uni06CF
glyphs.append(0x007C) #alef_alt.fina
glyphs.append(0x01EE) #uni06CD
glyphs.append(0x02CE) #uni06CE
glyphs.append(0x003F) #threedots.above
glyphs.append(0x01EC) #uni06C4.fina
glyphs.append(0x02EB) #hah_alt.isol
glyphs.append(0x01E9) #uni06BF.init
glyphs.append(0x01F1) #uni06D7
glyphs.append(0x01F0) #uni06D6
glyphs.append(0x00D1) #uni06D5
glyphs.append(0x02D6) #uni06D4
glyphs.append(0x00E0) #uni06D3
glyphs.append(0x0051) #uni06D2
glyphs.append(0x02D4) #uni06D1
glyphs.append(0x02D0) #uni06D0
glyphs.append(0x0191) #uni06A3.fina
glyphs.append(0x02BB) #threedots.rev.below
glyphs.append(0x01F3) #uni06D9
glyphs.append(0x009A) #uni0629.fina
glyphs.append(0x0087) #uni0628.medi
glyphs.append(0x0135) #uni067F.init
glyphs.append(0x0242) #uni0751.init
glyphs.append(0x01B5) #uni06AE.init
glyphs.append(0x0018) #uni0637.medi
glyphs.append(0x02E8) #threedots.horz_alt1.below
glyphs.append(0x023F) #uni0750.medi
glyphs.append(0x0209) #uni06FC.init
glyphs.append(0x00D0) #uni0646.fina
glyphs.append(0x0071) #wasla
glyphs.append(0x0231) #uni06FF.medi
glyphs.append(0x01C4) #uni06B3.fina
glyphs.append(0x0028) #uni0645.medi
glyphs.append(0x0056) #uni06AF.init
glyphs.append(0x0042) #uni0661
glyphs.append(0x0041) #uni0660
glyphs.append(0x0044) #uni0663
glyphs.append(0x0043) #uni0662
glyphs.append(0x0046) #uni0665
glyphs.append(0x0045) #uni0664
glyphs.append(0x0048) #uni0667
glyphs.append(0x0047) #uni0666
glyphs.append(0x004A) #uni0669
glyphs.append(0x0049) #uni0668
glyphs.append(0x0076) #uni06C1.fina
glyphs.append(0x01FE) #uni06E7
glyphs.append(0x01CD) #uni06B5.init
glyphs.append(0x0023) #uni0644.init
glyphs.append(0x013C) #uni0681.fina
glyphs.append(0x0188) #uni06A0.fina
glyphs.append(0x0270) #uni075E.init
glyphs.append(0x00C0) #uni06A9.medi
glyphs.append(0x0232) #allah
glyphs.append(0x01F9) #uni06E2
glyphs.append(0x0288) #uni0764.init
glyphs.append(0x0212) #uni06440673.fina
glyphs.append(0x005B) #uni066A
glyphs.append(0x0060) #uni066C
glyphs.append(0x005F) #uni066B
glyphs.append(0x0035) #uni066E
glyphs.append(0x0061) #uni066D
glyphs.append(0x003B) #uni066F
glyphs.append(0x02F2) #uni25CC
glyphs.append(0x01D8) #uni06B8.fina
glyphs.append(0x01B9) #uni06B0.init
glyphs.append(0x00A6) #uni0630.fina
glyphs.append(0x01A4) #uni06AA.fina
glyphs.append(0x02A5) #uni076C.fina
glyphs.append(0x024A) #uni0753.init
glyphs.append(0x003A) #uni06A1.fina
glyphs.append(0x0078) #uni0686.fina
glyphs.append(0x001E) #uni0643.init
glyphs.append(0x02B6) #circumflex.arabic
glyphs.append(0x02E7) #threedots_alt2.below
glyphs.append(0x025B) #uni0757.medi
glyphs.append(0x0109) #uni0651064C
glyphs.append(0x0201) #uni06EB
glyphs.append(0x0006) #uni0627.fina
glyphs.append(0x0092) #uni062B.init
glyphs.append(0x00BF) #uni06A9.init
glyphs.append(0x0171) #uni069A
glyphs.append(0x0175) #uni069B
glyphs.append(0x0179) #uni069C
glyphs.append(0x00AC) #uni0691.fina
glyphs.append(0x0181) #uni069E
glyphs.append(0x0185) #uni069F
glyphs.append(0x0208) #uni06FC.fina
glyphs.append(0x0174) #uni069A.medi
glyphs.append(0x0161) #uni0690
glyphs.append(0x00AB) #uni0691
glyphs.append(0x0163) #uni0692
glyphs.append(0x0165) #uni0693
glyphs.append(0x0167) #uni0694
glyphs.append(0x0169) #uni0695
glyphs.append(0x016B) #uni0696
glyphs.append(0x016D) #uni0697
glyphs.append(0x00AD) #uni0698
glyphs.append(0x016F) #uni0699
glyphs.append(0x0217) #uni06B60627.isol
glyphs.append(0x010A) #uni0651064D
glyphs.append(0x00EA) #uni06440622.isol
glyphs.append(0x019D) #uni06A6.init
glyphs.append(0x0058) #uni06AF.fina
glyphs.append(0x02B9) #wavyhamza_below
glyphs.append(0x0280) #uni0762.init
glyphs.append(0x028D) #uni0765.medi
glyphs.append(0x0289) #uni0764.medi
glyphs.append(0x00BA) #uni0638.fina
glyphs.append(0x0011) #uni0633.fina
glyphs.append(0x0034) #uni0640
glyphs.append(0x0170) #uni0699.fina
glyphs.append(0x0287) #uni0764.fina
glyphs.append(0x0140) #uni0682.fina
glyphs.append(0x02D9) #uni06FA.init
glyphs.append(0x0097) #uni0679.medi
glyphs.append(0x0040) #threedots.below
glyphs.append(0x00D6) #uni0649.init
return glyphs
| davelab6/pyfontaine | fontaine/charsets/noto_glyphs/notokufiarabic_regular.py | Python | gpl-3.0 | 33,355 |
import web
import sam.common
import sam.models.links
class Details:
def __init__(self, db, subscription, ds, address, timestamp_range=None, port=None, page_size=50):
self.db = db
self.sub = subscription
self.table_nodes = "s{acct}_Nodes".format(acct=self.sub)
self.table_links = "s{acct}_ds{id}_Links".format(acct=self.sub, id=ds)
self.table_links_in = "s{acct}_ds{id}_LinksIn".format(acct=self.sub, id=ds)
self.table_links_out = "s{acct}_ds{id}_LinksOut".format(acct=self.sub, id=ds)
self.ds = ds
self.ip_start, self.ip_end = sam.common.determine_range_string(address)
self.page_size = page_size
self.port = port
if timestamp_range:
self.time_range = timestamp_range
else:
linksModel = sam.models.links.Links(db, self.sub, self.ds)
tr = linksModel.get_timerange()
self.time_range = (tr['min'], tr['max'])
if self.db.dbname == 'mysql':
self.elapsed = '(UNIX_TIMESTAMP(MAX(timestamp)) - UNIX_TIMESTAMP(MIN(timestamp)))'
self.divop = 'DIV'
else:
self.elapsed = '(MAX(timestamp) - MIN(timestamp))'
self.divop = '/'
sam.common.sqlite_udf(self.db)
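    # Illustrative construction (arguments are hypothetical and depend on how
    # subscriptions/datasources are configured in SAM):
    #   details = Details(db, sub_id, ds_id, "192.168.0.0/24")
    # The get_details_* methods below then return rows scoped to that address
    # range, the time window, and the optional port filter.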
def get_metadata(self):
qvars = {"start": self.ip_start, "end": self.ip_end}
# TODO: seconds has a magic number 300 added to account for DB time quantization.
query = """
SELECT {address_q} AS 'address'
, COALESCE(n.hostname, '') AS 'hostname'
, COALESCE(l_out.unique_out_ip, 0) AS 'unique_out_ip'
, COALESCE(l_out.unique_out_conn, 0) AS 'unique_out_conn'
, COALESCE(l_out.total_out, 0) AS 'total_out'
, COALESCE(l_out.b_s, 0) AS 'out_bytes_sent'
, COALESCE(l_out.b_r, 0) AS 'out_bytes_received'
, COALESCE(l_out.max_bps, 0) AS 'out_max_bps'
, COALESCE(l_out.sum_b * 1.0 / l_out.sum_duration, 0) AS 'out_avg_bps'
, COALESCE(l_out.p_s, 0) AS 'out_packets_sent'
, COALESCE(l_out.p_r, 0) AS 'out_packets_received'
, COALESCE(l_out.sum_duration * 1.0 / l_out.total_out, 0) AS 'out_duration'
, COALESCE(l_in.unique_in_ip, 0) AS 'unique_in_ip'
, COALESCE(l_in.unique_in_conn, 0) AS 'unique_in_conn'
, COALESCE(l_in.total_in, 0) AS 'total_in'
, COALESCE(l_in.b_s, 0) AS 'in_bytes_sent'
, COALESCE(l_in.b_r, 0) AS 'in_bytes_received'
, COALESCE(l_in.max_bps, 0) AS 'in_max_bps'
, COALESCE(l_in.sum_b * 1.0 / l_in.sum_duration, 0) AS 'in_avg_bps'
, COALESCE(l_in.p_s, 0) AS 'in_packets_sent'
, COALESCE(l_in.p_r, 0) AS 'in_packets_received'
, COALESCE(l_in.sum_duration * 1.0 / l_in.total_in, 0) AS 'in_duration'
, COALESCE(l_in.ports_used, 0) AS 'ports_used'
, children.endpoints AS 'endpoints'
, COALESCE(t.seconds, 0) + 300 AS 'seconds'
, (COALESCE(l_in.sum_b, 0) + COALESCE(l_out.sum_b, 0)) / (COALESCE(t.seconds, 0) + 300) AS 'overall_bps'
, COALESCE(l_in.protocol, "") AS 'in_protocols'
, COALESCE(l_out.protocol, "") AS 'out_protocols'
FROM (
SELECT ipstart, subnet, alias AS 'hostname'
FROM {nodes_table}
WHERE ipstart = $start AND ipend = $end
) AS n
LEFT JOIN (
SELECT $start AS 's1'
, COUNT(DISTINCT dst) AS 'unique_out_ip'
, (SELECT COUNT(1) FROM (SELECT DISTINCT src, dst, port FROM {links_table} WHERE src BETWEEN $start AND $end) AS `temp1`) AS 'unique_out_conn'
, SUM(links) AS 'total_out'
, SUM(bytes_sent) AS 'b_s'
, SUM(bytes_received) AS 'b_r'
, MAX((bytes_sent + bytes_received) * 1.0 / duration) AS 'max_bps'
, SUM(bytes_sent + bytes_received) AS 'sum_b'
, SUM(packets_sent) AS 'p_s'
, SUM(packets_received) AS 'p_r'
, SUM(duration * links) AS 'sum_duration'
, GROUP_CONCAT(DISTINCT protocol) AS 'protocol'
FROM {links_table}
WHERE src BETWEEN $start AND $end
GROUP BY 's1'
) AS l_out
ON n.ipstart = l_out.s1
LEFT JOIN (
SELECT $start AS 's1'
, COUNT(DISTINCT src) AS 'unique_in_ip'
, (SELECT COUNT(1) FROM (SELECT DISTINCT src, dst, port FROM {links_table} WHERE dst BETWEEN $start AND $end) AS `temp2`) AS 'unique_in_conn'
, SUM(links) AS 'total_in'
, SUM(bytes_sent) AS 'b_s'
, SUM(bytes_received) AS 'b_r'
, MAX((bytes_sent + bytes_received) * 1.0 / duration) AS 'max_bps'
, SUM(bytes_sent + bytes_received) AS 'sum_b'
, SUM(packets_sent) AS 'p_s'
, SUM(packets_received) AS 'p_r'
, SUM(duration * links) AS 'sum_duration'
, COUNT(DISTINCT port) AS 'ports_used'
, GROUP_CONCAT(DISTINCT protocol) AS 'protocol'
FROM {links_table}
WHERE dst BETWEEN $start AND $end
GROUP BY 's1'
) AS l_in
ON n.ipstart = l_in.s1
LEFT JOIN (
SELECT $start AS 's1'
, COUNT(ipstart) AS 'endpoints'
FROM {nodes_table}
WHERE ipstart = ipend AND ipstart BETWEEN $start AND $end
) AS children
ON n.ipstart = children.s1
LEFT JOIN (
SELECT $start AS 's1'
, {elapsed} AS 'seconds'
FROM {links_table}
GROUP BY 's1'
) AS t
ON n.ipstart = t.s1
LIMIT 1;
""".format(
address_q=sam.common.db_concat(self.db, 'decodeIP(n.ipstart)', "'/'", 'n.subnet'),
elapsed=self.elapsed,
nodes_table=self.table_nodes,
links_table=self.table_links)
results = self.db.query(query, vars=qvars)
first = results.first()
if first:
return first
else:
return {}
def build_where_clause(self, timestamp_range=None, port=None, protocol=None, rounding=True):
"""
Build a WHERE SQL clause that covers basic timerange, port, and protocol filtering.
:param timestamp_range: start and end times as unix timestamps (integers). Default is all time.
:type timestamp_range: tuple[int, int]
:param port: exclusively report traffic destined for this port, if specified.
:type port: int or str
:param protocol: exclusively report traffic using this protocol
:type protocol: str
        :param rounding: round each time stamp to the nearest quantization mark. (db records are quantized for conciseness)
:type rounding: bool
:return: String SQL clause
:rtype: str
"""
clauses = []
t_start = 0
t_end = 0
if timestamp_range:
t_start = timestamp_range[0]
t_end = timestamp_range[1]
if rounding:
# rounding to 5 minutes, for use with the Syslog table
if t_start > 150:
t_start -= 150
if t_end <= 2 ** 31 - 150:
t_end += 149
if self.db.dbname == 'sqlite':
clauses.append("timestamp BETWEEN $tstart AND $tend")
else:
clauses.append("timestamp BETWEEN FROM_UNIXTIME($tstart) AND FROM_UNIXTIME($tend)")
if port:
clauses.append("port = $port")
if protocol:
clauses.append("protocols LIKE $protocol")
protocol = "%{0}%".format(protocol)
qvars = {'tstart': t_start, 'tend': t_end, 'port': port, 'protocol': protocol}
where = str(web.db.reparam("\n AND ".join(clauses), qvars))
if where:
where = " AND " + where
return where
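    # Example (hypothetical values): build_where_clause((1500000000, 1500000600),
    # port=443) pads the window to cover the 5-minute quantization of the
    # records and produces a clause along the lines of
    # " AND timestamp BETWEEN ... AND port = 443", ready to be appended to the
    # queries built by the methods below.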
def get_details_connections(self, inbound, page=1, order="-links", simple=False):
sort_options = ['links', 'src', 'dst', 'port', 'sum_bytes', 'sum_packets', 'protocols', 'avg_duration']
sort_options_simple = ['links', 'src', 'dst', 'port']
qvars = {
'table_links': self.table_links,
'start': self.ip_start,
'end': self.ip_end,
'page': self.page_size * (page - 1),
'page_size': self.page_size,
'WHERE': self.build_where_clause(self.time_range, self.port)
}
if inbound:
qvars['collected'] = "src"
qvars['filtered'] = "dst"
else:
qvars['filtered'] = "src"
qvars['collected'] = "dst"
# determine the sort direction
if order and order[0] == '-':
sort_dir = "DESC"
else:
sort_dir = "ASC"
# determine the sort column
if simple:
if order and order[1:] in sort_options_simple:
sort_by = order[1:]
else:
sort_by = sort_options_simple[0]
else:
if order and order[1:] in sort_options:
sort_by = order[1:]
else:
sort_by = sort_options[0]
# add table prefix for some columns
if sort_by in ['port', 'src', 'dst']:
sort_by = "`links`." + sort_by
qvars['order'] = "{0} {1}".format(sort_by, sort_dir)
if simple:
query = """
SELECT decodeIP({collected}) AS '{collected}'
, port AS 'port'
, sum(links) AS 'links'
FROM {table_links} AS `links`
WHERE {filtered} BETWEEN $start AND $end
{WHERE}
GROUP BY `links`.{collected}, `links`.port
ORDER BY {order}
LIMIT {page}, {page_size}
""".format(**qvars)
else:
query = """
SELECT src, dst, port, links, protocols
, sum_bytes
, (sum_bytes / links) AS 'avg_bytes'
, sum_packets
, (sum_packets / links) AS 'avg_packets'
, avg_duration
FROM(
SELECT decodeIP(src) AS 'src'
, decodeIP(dst) AS 'dst'
, port AS 'port'
, SUM(links) AS 'links'
, GROUP_CONCAT(DISTINCT protocol) AS 'protocols'
, SUM(bytes_sent + COALESCE(bytes_received, 0)) AS 'sum_bytes'
, SUM(packets_sent + COALESCE(packets_received, 0)) AS 'sum_packets'
, SUM(duration*links) / SUM(links) AS 'avg_duration'
FROM {table_links} AS `links`
WHERE {filtered} BETWEEN $start AND $end
{WHERE}
GROUP BY `links`.src, `links`.dst, `links`.port
ORDER BY {order}
LIMIT {page}, {page_size}
) AS precalc;
""".format(**qvars)
return list(self.db.query(query, vars=qvars))
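    # For instance (hypothetical call): get_details_connections(inbound=True,
    # order="-links") lists connections arriving at this address range, sorted
    # by link count in descending order (a leading "-" selects DESC).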
def get_details_ports(self, page=1, order="-links"):
sort_options = ['links', 'port']
first_result = (page - 1) * self.page_size
qvars = {
'links_table': self.table_links,
'start': self.ip_start,
'end': self.ip_end,
'first': first_result,
'size': self.page_size,
'WHERE': self.build_where_clause(self.time_range, self.port),
}
if order and order[0] == '-':
sort_dir = "DESC"
else:
sort_dir = "ASC"
if order and order[1:] in sort_options:
sort_by = order[1:]
else:
sort_by = sort_options[0]
qvars['order'] = "{0} {1}".format(sort_by, sort_dir)
query = """
SELECT port AS 'port', sum(links) AS 'links'
FROM {links_table}
WHERE dst BETWEEN $start AND $end
{WHERE}
GROUP BY port
ORDER BY {order}
LIMIT $first, $size;
""".format(**qvars)
return list(sam.common.db.query(query, vars=qvars))
def get_details_children(self, order='+ipstart'):
sort_options = ['ipstart', 'hostname', 'endpoints', 'ratio']
ip_diff = self.ip_end - self.ip_start
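        # Choose the child-block granularity one level below this node:
        # /32 hosts under a /24, /24 blocks under a /16, /16 blocks under a /8,
        # and /8 blocks under the root. `quotient` is the child block width in
        # addresses.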
if ip_diff == 0:
return []
elif ip_diff == 255:
quotient = 1
child_subnet_start = 25
child_subnet_end = 32
elif ip_diff == 65535:
quotient = 256
child_subnet_start = 17
child_subnet_end = 24
elif ip_diff == 16777215:
quotient = 65536
child_subnet_start = 9
child_subnet_end = 16
else:
quotient = 16777216
child_subnet_start = 1
child_subnet_end = 8
qvars = {'ip_start': self.ip_start,
'ip_end': self.ip_end,
's_start': child_subnet_start,
's_end': child_subnet_end,
'quot': quotient,
'quot_1': quotient - 1}
if order and order[0] == '-':
sort_dir = "DESC"
else:
sort_dir = "ASC"
if order and order[1:] in sort_options:
sort_by = order[1:]
else:
sort_by = sort_options[0]
qvars['order'] = "{0} {1}".format(sort_by, sort_dir)
query = """
SELECT decodeIP(`n`.ipstart) AS 'address'
, COALESCE(`n`.alias, '') AS 'hostname'
, `n`.subnet AS 'subnet'
, `sn`.kids AS 'endpoints'
, COALESCE(COALESCE(`l_in`.links,0) / (COALESCE(`l_in`.links,0) + COALESCE(`l_out`.links,0)), 0) AS 'ratio'
FROM {nodes_table} AS `n`
LEFT JOIN (
SELECT dst_start {div} $quot * $quot AS 'low'
, dst_end {div} $quot * $quot + $quot_1 AS 'high'
, sum(links) AS 'links'
FROM {links_in_table}
GROUP BY low, high
) AS `l_in`
ON `l_in`.low = `n`.ipstart AND `l_in`.high = `n`.ipend
LEFT JOIN (
SELECT src_start {div} $quot * $quot AS 'low'
, src_end {div} $quot * $quot + $quot_1 AS 'high'
, sum(links) AS 'links'
FROM {links_out_table}
GROUP BY low, high
) AS `l_out`
ON `l_out`.low = `n`.ipstart AND `l_out`.high = `n`.ipend
LEFT JOIN (
SELECT ipstart {div} $quot * $quot AS 'low'
, ipend {div} $quot * $quot + $quot_1 AS 'high'
, COUNT(ipstart) AS 'kids'
FROM {nodes_table}
WHERE ipstart = ipend
GROUP BY low, high
) AS `sn`
ON `sn`.low = `n`.ipstart AND `sn`.high = `n`.ipend
WHERE `n`.ipstart BETWEEN $ip_start AND $ip_end
AND `n`.subnet BETWEEN $s_start AND $s_end
ORDER BY {order};
""".format(div=self.divop,
order=qvars['order'],
nodes_table=self.table_nodes,
links_in_table=self.table_links_in,
links_out_table=self.table_links_out)
return list(sam.common.db.query(query, vars=qvars))
def get_details_summary(self):
where = self.build_where_clause(timestamp_range=self.time_range, port=self.port)
# TODO: seconds has a magic number 300 added to account for DB time quantization.
query = """
SELECT `inputs`.ips AS 'unique_in'
, `outputs`.ips AS 'unique_out'
, `inputs`.ports AS 'unique_ports'
FROM
(SELECT COUNT(DISTINCT src) AS 'ips', COUNT(DISTINCT port) AS 'ports'
FROM {links_table}
WHERE dst BETWEEN $start AND $end
{where}
) AS `inputs`
JOIN (SELECT COUNT(DISTINCT dst) AS 'ips'
FROM {links_table}
WHERE src BETWEEN $start AND $end
{where}
) AS `outputs`;""".format(where=where, links_table=self.table_links)
qvars = {'start': self.ip_start, 'end': self.ip_end}
rows = sam.common.db.query(query, vars=qvars)
return rows.first()
| riolet/SAM | sam/models/details.py | Python | gpl-3.0 | 16,289 |
#
# Copyright © 2012–2022 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""File format specific behavior."""
from weblate.formats.convert import (
HTMLFormat,
IDMLFormat,
OpenDocumentFormat,
PlainTextFormat,
WindowsRCFormat,
)
from weblate.formats.helpers import BytesIOMode
from weblate.formats.tests.test_formats import AutoFormatTest
from weblate.trans.tests.utils import get_test_file
IDML_FILE = get_test_file("en.idml")
HTML_FILE = get_test_file("cs.html")
OPENDOCUMENT_FILE = get_test_file("cs.odt")
TEST_RC = get_test_file("cs-CZ.rc")
TEST_TXT = get_test_file("cs.txt")
class ConvertFormatTest(AutoFormatTest):
NEW_UNIT_MATCH = None
EXPECTED_FLAGS = ""
def parse_file(self, filename):
return self.FORMAT(filename, template_store=self.FORMAT(filename))
class HTMLFormatTest(ConvertFormatTest):
FORMAT = HTMLFormat
FILE = HTML_FILE
MIME = "text/html"
EXT = "html"
COUNT = 5
MASK = "*/translations.html"
EXPECTED_PATH = "cs_CZ/translations.html"
FIND_CONTEXT = "+html.body.p:5-1"
FIND_MATCH = "Orangutan has five bananas."
MATCH = b"<body>"
NEW_UNIT_MATCH = None
BASE = HTML_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
class OpenDocumentFormatTest(ConvertFormatTest):
FORMAT = OpenDocumentFormat
FILE = OPENDOCUMENT_FILE
MIME = "application/vnd.oasis.opendocument.text"
EXT = "odt"
COUNT = 4
MASK = "*/translations.odt"
EXPECTED_PATH = "cs_CZ/translations.odt"
FIND_CONTEXT = (
"odf///office:document-content[0]/office:body[0]/office:text[0]/text:p[1]"
)
FIND_MATCH = "Orangutan has five bananas."
MATCH = b"PK"
NEW_UNIT_MATCH = None
BASE = OPENDOCUMENT_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
@staticmethod
def extract_document(content):
return bytes(
OpenDocumentFormat.convertfile(BytesIOMode("test.odt", content), None)
).decode()
def assert_same(self, newdata, testdata):
self.assertEqual(
self.extract_document(newdata),
self.extract_document(testdata),
)
class IDMLFormatTest(ConvertFormatTest):
FORMAT = IDMLFormat
FILE = IDML_FILE
MIME = "application/octet-stream"
EXT = "idml"
COUNT = 6
MASK = "*/translations.idml"
EXPECTED_PATH = "cs_CZ/translations.idml"
FIND_CONTEXT = (
"idPkg:Story[0]/{}Story[0]/{}XMLElement[0]/{}ParagraphStyleRange[0]"
"Stories/Story_mainmainmainmainmainmainmainmainmainmainmainu188.xml"
)
FIND_MATCH = """<g id="0"><g id="1">THE HEADLINE HERE</g></g>"""
MATCH = b"PK"
NEW_UNIT_MATCH = None
BASE = IDML_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
@staticmethod
def extract_document(content):
return bytes(
IDMLFormat.convertfile(BytesIOMode("test.idml", content), None)
).decode()
def assert_same(self, newdata, testdata):
self.assertEqual(
self.extract_document(newdata),
self.extract_document(testdata),
)
class WindowsRCFormatTest(ConvertFormatTest):
FORMAT = WindowsRCFormat
FILE = TEST_RC
BASE = TEST_RC
MIME = "text/plain"
EXT = "rc"
COUNT = 5
MASK = "rc/*.rc"
EXPECTED_PATH = "rc/cs-CZ.rc"
MATCH = "STRINGTABLE"
FIND_CONTEXT = "STRINGTABLE.IDS_MSG1"
FIND_MATCH = "Hello, world!\n"
EDIT_OFFSET = 1
class PlainTextFormatTest(ConvertFormatTest):
FORMAT = PlainTextFormat
FILE = TEST_TXT
BASE = TEST_TXT
MIME = "text/plain"
EXT = "txt"
COUNT = 5
MASK = "txt/*.txt"
EXPECTED_PATH = "txt/cs_CZ.txt"
MATCH = "Hello"
FIND_CONTEXT = "cs.txt:2"
FIND_MATCH = "Hello, world!"
EDIT_OFFSET = 1
| nijel/weblate | weblate/formats/tests/test_convert.py | Python | gpl-3.0 | 4,423 |
# dialogs - provide common dialogs
#
# Copyright (c) 2006 FSF Europe
#
# Authors:
# Sebastian Heinlein <[email protected]>
# Michael Vogt <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
from gi.repository import Gtk
def show_error_dialog(parent, primary, secondary):
p = "<span weight=\"bold\" size=\"larger\">%s</span>" % primary
    dialog = Gtk.MessageDialog(parent, Gtk.DialogFlags.MODAL,
                               Gtk.MessageType.ERROR, Gtk.ButtonsType.CLOSE, "")
    dialog.set_markup(p)
    dialog.format_secondary_text(secondary)
dialog.run()
dialog.hide()
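# Example usage (illustrative only; any parent Gtk window works):
#   show_error_dialog(parent_window, "Update failed",
#                     "Could not download the package information.")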
| ruibarreira/linuxtrail | usr/lib/python3/dist-packages/softwareproperties/gtk/dialogs.py | Python | gpl-3.0 | 1,305 |
# coding: utf-8
from common import base
class Plugin(base.BASE):
__name__ = 'csdn'
__title__ = 'CSDN'
__url__ = 'http://www.csdn.net/'
def register(self, target):
self.information = {
'email': {
'url': 'http://passport.csdn.net/account/register',
'method': 'get',
'settings': {
'params': {
'action': 'validateEmail',
'email': target
}
},
'result': {
'type': 'str',
'value': 'false'
}
}
}
| tonybreak/Registered | plugins/csdn.py | Python | gpl-3.0 | 675 |
#
# Copyright (c) 2010-2011, 2015
# Nexa Center for Internet & Society, Politecnico di Torino (DAUIN),
# and Simone Basso <[email protected]>.
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
''' HTTP client '''
import logging
from .stream_handler import StreamHandler
from .http_client_stream import HttpClientStream
from .http_message import HttpMessage
from . import utils
from . import utils_net
class HttpClient(StreamHandler):
''' Manages one or more HTTP streams '''
def __init__(self, poller):
''' Initialize the HTTP client '''
StreamHandler.__init__(self, poller)
self.host_header = ""
self.rtt = 0
def connect_uri(self, uri, count=1):
''' Connects to the given URI '''
try:
message = HttpMessage()
message.compose(method="GET", uri=uri)
if message.scheme == "https":
self.conf["net.stream.secure"] = True
endpoint = (message.address, int(message.port))
self.host_header = utils_net.format_epnt(endpoint)
except (KeyboardInterrupt, SystemExit):
raise
except Exception as why:
self.connection_failed(None, why)
else:
self.connect(endpoint, count)
def connection_ready(self, stream):
''' Invoked when the connection is ready '''
def got_response_headers(self, stream, request, response):
''' Invoked when we receive response headers '''
return True
def got_response(self, stream, request, response):
''' Invoked when we receive the response '''
def connection_made(self, sock, endpoint, rtt):
''' Invoked when the connection is created '''
if rtt:
logging.debug("ClientHTTP: latency: %s", utils.time_formatter(rtt))
self.rtt = rtt
# XXX If we didn't connect via connect_uri()...
if not self.host_header:
self.host_header = utils_net.format_epnt(endpoint)
stream = HttpClientStream(self.poller)
stream.attach(self, sock, self.conf)
self.connection_ready(stream)
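# Minimal usage sketch (illustrative; `poller` is assumed to come from the
# runtime, and composing/sending the request on the stream is omitted since
# that part lives in HttpClientStream):
#
#   class MyClient(HttpClient):
#       def connection_ready(self, stream):
#           ...  # compose an HttpMessage and hand it to the stream here
#       def got_response(self, stream, request, response):
#           logging.info("response received")
#
#   client = MyClient(poller)
#   client.connect_uri("http://example.org/")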
| bassosimone/neubot-server | neubot/runtime/http_client.py | Python | gpl-3.0 | 2,757 |
import unittest
import random
import sys
import os
ETEPATH = os.path.abspath(os.path.split(os.path.realpath(__file__))[0]+'/../')
sys.path.insert(0, ETEPATH)
from ete2 import Tree, TreeStyle, NodeStyle, PhyloTree, faces, random_color
from ete2.treeview.faces import *
from ete2.treeview.main import _NODE_TYPE_CHECKER, FACE_POSITIONS
sys.path.insert(0, os.path.join(ETEPATH, "examples/treeview"))
import face_grid, bubble_map, item_faces, node_style, node_background, face_positions, face_rotation, seq_motif_faces, barchart_and_piechart_faces
sys.path.insert(0, os.path.join(ETEPATH, "examples/phylogenies"))
import phylotree_visualization
CONT = 0
class Test_Coretype_Treeview(unittest.TestCase):
""" Tests tree basics. """
def test_renderer(self):
main_tree = Tree()
main_tree.dist = 0
t, ts = face_grid.get_example_tree()
t_grid = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_grid, 0, "aligned")
t, ts = bubble_map.get_example_tree()
t_bubble = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_bubble, 0, "aligned")
t, ts = item_faces.get_example_tree()
t_items = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_items, 0, "aligned")
t, ts = node_style.get_example_tree()
t_nodest = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_nodest, 0, "aligned")
t, ts = node_background.get_example_tree()
t_bg = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_bg, 0, "aligned")
t, ts = face_positions.get_example_tree()
t_fpos = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_fpos, 0, "aligned")
t, ts = phylotree_visualization.get_example_tree()
t_phylo = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(t_phylo, 0, "aligned")
t, ts = face_rotation.get_example_tree()
temp_facet = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_facet, 0, "aligned")
t, ts = seq_motif_faces.get_example_tree()
temp_facet = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_facet, 0, "aligned")
t, ts = barchart_and_piechart_faces.get_example_tree()
temp_facet = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_facet, 0, "aligned")
#Test orphan nodes and trees with 0 branch length
t, ts = Tree(), TreeStyle()
t.populate(5)
for n in t.traverse():
n.dist = 0
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
ts.optimal_scale_level = "full"
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
ts = TreeStyle()
t.populate(5)
ts.mode = "c"
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
ts.optimal_scale_level = "full"
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
t, ts = Tree(), TreeStyle()
temp_tface = TreeFace(Tree('node;'), ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
t, ts = Tree(), TreeStyle()
ts.mode = "c"
temp_tface = TreeFace(Tree('node;'), ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
t, ts = Tree(), TreeStyle()
ts.mode = "c"
temp_tface = TreeFace(Tree(), ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
t, ts = Tree(), TreeStyle()
temp_tface = TreeFace(Tree(), ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
        # TEST TIGHT TEXT WRAPPING
        chars = [".", "p", "j", "jJ"]
def layout(node):
global CONT
if CONT >= len(chars):
CONT = 0
if node.is_leaf():
node.img_style["size"] = 0
F2= AttrFace("name", tight_text=True)
F= TextFace(chars[CONT], tight_text=True)
F.inner_border.width = 0
F2.inner_border.width = 0
#faces.add_face_to_node(F ,node, 0, position="branch-right")
faces.add_face_to_node(F2 ,node, 1, position="branch-right")
CONT += 1
t = Tree()
t.populate(20, random_branches=True)
ts = TreeStyle()
ts.layout_fn = layout
ts.mode = "c"
ts.show_leaf_name = False
temp_tface = TreeFace(t, ts)
n = main_tree.add_child()
n.add_face(temp_tface, 0, "aligned")
# MAIN TREE
ms = TreeStyle()
ms.mode = "r"
ms.show_leaf_name = False
main_tree.render('test.png', tree_style=ms)
main_tree.render('test.svg', tree_style=ms)
if __name__ == '__main__':
unittest.main()
| fw1121/ete | test/test_treeview.py | Python | gpl-3.0 | 5,124 |
#!/usr/bin/env python
# Progressive Cactus Package
# Copyright (C) 2009-2012 by Glenn Hickey ([email protected])
# and Benedict Paten ([email protected])
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import xml.etree.ElementTree as ET
import math
import time
import random
import copy
import shlex
from optparse import OptionParser
from optparse import OptionGroup
import imp
import socket
import signal
import traceback
import datetime
from sonLib.bioio import logger
from sonLib.bioio import setLoggingFromOptions
from sonLib.bioio import getTempDirectory
from sonLib.bioio import system
from sonLib.bioio import popenCatch
from jobTree.scriptTree.target import Target
from jobTree.scriptTree.stack import Stack
from jobTree.src.master import getJobFileDirName, getConfigFileName
from jobTree.src.jobTreeStatus import parseJobFiles
from cactus.progressive.multiCactusProject import MultiCactusProject
from cactus.shared.experimentWrapper import ExperimentWrapper
from cactus.shared.configWrapper import ConfigWrapper
from seqFile import SeqFile
from projectWrapper import ProjectWrapper
from jobStatusMonitor import JobStatusMonitor
def initParser():
usage = "usage: runProgressiveCactus.sh [options] <seqFile> <workDir> <outputHalFile>\n\n"\
"Required Arguments:\n"\
" <seqFile>\t\tFile containing newick tree and seqeunce paths"\
" paths.\n"\
"\t\t\t(see documetation or examples for format).\n"\
" <workDir>\t\tWorking directory (which can grow "\
"exteremely large)\n"\
" <outputHalFile>\tPath of output alignment in .hal format."
parser = OptionParser(usage=usage)
#JobTree Options (method below now adds an option group)
Stack.addJobTreeOptions(parser)
#Progressive Cactus will handle where the jobtree path is
parser.remove_option("--jobTree")
#Progressive Cactus Options
parser.add_option("--optionsFile", dest="optionsFile",
help="Text file containing command line options to use as"\
" defaults", default=None)
parser.add_option("--database", dest="database",
help="Database type: tokyo_cabinet or kyoto_tycoon"
" [default: %default]",
default="kyoto_tycoon")
parser.add_option("--outputMaf", dest="outputMaf",
help="[DEPRECATED use hal2maf on the ouput file instead] Path of output alignment in .maf format. This option should be avoided and will soon be removed. It may cause sequence names to be mangled, and use a tremendous amount of memory. ",
default=None)
parser.add_option("--configFile", dest="configFile",
help="Specify cactus configuration file",
default=None)
parser.add_option("--legacy", dest="legacy", action="store_true", help=
"Run cactus directly on all input sequences "
"without any progressive decomposition (ie how it "
"was originally published in 2011)",
default=False)
parser.add_option("--autoAbortOnDeadlock", dest="autoAbortOnDeadlock",
action="store_true",
help="Abort automatically when jobTree monitor" +
" suspects a deadlock by deleting the jobTree folder." +
" Will guarantee no trailing ktservers but still " +
" dangerous to use until we can more robustly detect " +
" deadlocks.",
default=False)
parser.add_option("--overwrite", dest="overwrite", action="store_true",
help="Re-align nodes in the tree that have already" +
" been successfully aligned.",
default=False)
parser.add_option("--rootOutgroupDists", dest="rootOutgroupDists",
help="root outgroup distance (--rootOutgroupPaths must " +
"be given as well)", default=None)
parser.add_option("--rootOutgroupPaths", dest="rootOutgroupPaths", type=str,
help="root outgroup path (--rootOutgroup must be given " +
"as well)", default=None)
parser.add_option("--root", dest="root", help="Name of ancestral node (which"
" must appear in NEWICK tree in <seqfile>) to use as a "
"root for the alignment. Any genomes not below this node "
"in the tree may be used as outgroups but will never appear"
" in the output. If no root is specifed then the root"
" of the tree is used. ", default=None)
#Kyoto Tycoon Options
ktGroup = OptionGroup(parser, "kyoto_tycoon Options",
"Kyoto tycoon provides a client/server framework "
"for large in-memory hash tables and is available "
"via the --database option.")
ktGroup.add_option("--ktPort", dest="ktPort",
help="starting port (lower bound of range) of ktservers"
" [default: %default]",
default=1978)
ktGroup.add_option("--ktHost", dest="ktHost",
help="The hostname to use for connections to the "
"ktserver (this just specifies where nodes will attempt"
" to find the server, *not* where the ktserver will be"
" run)",
default=None)
ktGroup.add_option("--ktType", dest="ktType",
help="Kyoto Tycoon server type "
"(memory, snapshot, or disk)"
" [default: %default]",
default='memory')
# sonlib doesn't allow for spaces in attributes in the db conf
# which renders this options useless
#ktGroup.add_option("--ktOpts", dest="ktOpts",
# help="Command line ktserver options",
# default=None)
ktGroup.add_option("--ktCreateTuning", dest="ktCreateTuning",
help="ktserver options when creating db "\
"(ex #bnum=30m#msiz=50g)",
default=None)
ktGroup.add_option("--ktOpenTuning", dest="ktOpenTuning",
help="ktserver options when opening existing db "\
"(ex #opts=ls#ktopts=p)",
default=None)
parser.add_option_group(ktGroup)
return parser
# Try to weed out errors early by checking options and paths
def validateInput(workDir, outputHalFile, options):
try:
if workDir.find(' ') >= 0:
raise RuntimeError("Cactus does not support spaces in pathnames: %s"
% workDir)
if not os.path.isdir(workDir):
os.makedirs(workDir)
if not os.path.isdir(workDir) or not os.access(workDir, os.W_OK):
raise
except:
raise RuntimeError("Can't write to workDir: %s" % workDir)
try:
open(outputHalFile, "w")
except:
raise RuntimeError("Unable to write to hal: %s" % outputHalFile)
if options.database != "tokyo_cabinet" and\
options.database != "kyoto_tycoon":
raise RuntimeError("Invalid database type: %s" % options.database)
if options.outputMaf is not None:
try:
open(options.outputMaf, "w")
except:
raise RuntimeError("Unable to write to maf: %s" % options.outputMaf)
if options.configFile is not None:
try:
ConfigWrapper(ET.parse(options.configFile).getroot())
except:
raise RuntimeError("Unable to read config: %s" % options.configFile)
if options.database == 'kyoto_tycoon':
if options.ktType.lower() != 'memory' and\
options.ktType.lower() != 'snapshot' and\
options.ktType.lower() != 'disk':
raise RuntimeError("Invalid ktserver type specified: %s. Must be "
"memory, snapshot or disk" % options.ktType)
# Convert the jobTree options taken in by the parser back
# out to command line options to pass to progressive cactus
def getJobTreeCommands(jtPath, parser, options):
cmds = "--jobTree %s" % jtPath
for optGroup in parser.option_groups:
if optGroup.title.startswith("jobTree") or optGroup.title.startswith("Jobtree"):
for opt in optGroup.option_list:
if hasattr(options, opt.dest) and \
getattr(options, opt.dest) != optGroup.defaults[opt.dest]:
cmds += " %s" % str(opt)
if opt.nargs > 0:
cmds += " \"%s\"" % getattr(options, opt.dest)
return cmds
# Go through a text file and add every word inside to an arguments list
# which will be prepended to sys.argv. This way both the file and
# command line are passed to the option parser, with the command line
# getting priority.
def parseOptionsFile(path):
    if not os.path.isfile(path):
        raise RuntimeError("Options File not found: %s" % path)
    args = []
    optFile = open(path, "r")
    for l in optFile:
        line = l.rstrip()
        if line:
            args += shlex.split(line)
    optFile.close()
    return args
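# For illustration, an options file passed via --optionsFile could contain
# lines such as (values are only examples):
#   --database kyoto_tycoon
#   --ktType snapshot
# File arguments are prepended to sys.argv, so anything given on the real
# command line still takes priority when the options are re-parsed.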
# This source file should always be in progressiveCactus/src. So
# we return the path to progressiveCactus/environment, which needs
# to be sourced before doing anything.
def getEnvFilePath():
path = os.path.dirname(sys.argv[0])
envFile = os.path.join(path, '..', 'environment')
assert os.path.isfile(envFile)
return envFile
# If specified with the risky --autoAbortOnDeadlock option, we call this to
# force an abort if the jobStatusMonitor thinks it's hopeless.
# We delete the jobTreePath to get rid of kyoto tycoons.
def abortFunction(jtPath, options):
def afClosure():
sys.stderr.write('\nAborting due to deadlock (prevent with'
+ '--noAutoAbort' +
' option), and running rm -rf %s\n\n' % jtPath)
system('rm -rf %s' % jtPath)
sys.exit(-1)
if options.autoAbortOnDeadlock:
return afClosure
else:
return None
# Run cactus progressive on the project that has been created in workDir.
# Any jobtree options are passed along. Should probably look at redirecting
# stdout/stderr in the future.
def runCactus(workDir, jtCommands, jtPath, options):
envFile = getEnvFilePath()
pjPath = os.path.join(workDir, ProjectWrapper.alignmentDirName,
'%s_project.xml' % ProjectWrapper.alignmentDirName)
logFile = os.path.join(workDir, 'cactus.log')
if options.overwrite:
overwriteFlag = '--overwrite'
system("rm -f %s" % logFile)
else:
overwriteFlag = ''
logHandle = open(logFile, "a")
logHandle.write("\n%s: Beginning Progressive Cactus Alignment\n\n" % str(
datetime.datetime.now()))
logHandle.close()
cmd = '. %s && cactus_progressive.py %s %s %s >> %s 2>&1' % (envFile,
jtCommands,
pjPath,
overwriteFlag,
logFile)
jtMonitor = JobStatusMonitor(jtPath, pjPath, logFile,
deadlockCallbackFn=abortFunction(jtPath,
options))
if options.database == "kyoto_tycoon":
jtMonitor.daemon = True
jtMonitor.start()
system(cmd)
logHandle = open(logFile, "a")
logHandle.write("\n%s: Finished Progressive Cactus Alignment\n" % str(
datetime.datetime.now()))
logHandle.close()
def checkCactus(workDir, options):
pass
# Call cactus2hal to extract a single hal file out of the progressive
# alignmenet in the working directory. If the maf option was set, we
# just move out the root maf.
def extractOutput(workDir, outputHalFile, options):
if options.outputMaf is not None:
mcProj = MultiCactusProject()
mcProj.readXML(
os.path.join(workDir, ProjectWrapper.alignmentDirName,
ProjectWrapper.alignmentDirName + "_project.xml"))
rootName = mcProj.mcTree.getRootName()
rootPath = os.path.join(workDir, ProjectWrapper.alignmentDirName,
rootName, rootName + '.maf')
cmd = 'mv %s %s' % (rootPath, options.outputMaf)
system(cmd)
envFile = getEnvFilePath()
logFile = os.path.join(workDir, 'cactus.log')
pjPath = os.path.join(workDir, ProjectWrapper.alignmentDirName,
'%s_project.xml' % ProjectWrapper.alignmentDirName)
logHandle = open(logFile, "a")
logHandle.write("\n\n%s: Beginning HAL Export\n\n" % str(
datetime.datetime.now()))
logHandle.close()
cmd = '. %s && cactus2hal.py %s %s >> %s 2>&1' % (envFile, pjPath,
outputHalFile, logFile)
system(cmd)
logHandle = open(logFile, "a")
logHandle.write("\n%s: Finished HAL Export \n" % str(
datetime.datetime.now()))
logHandle.close()
def main():
# init as dummy function
cleanKtFn = lambda x,y:x
stage = -1
workDir = None
try:
parser = initParser()
options, args = parser.parse_args()
if (options.rootOutgroupDists is not None) \
^ (options.rootOutgroupPaths is not None):
parser.error("--rootOutgroupDists and --rootOutgroupPaths must be " +
"provided together")
if len(args) == 0:
parser.print_help()
return 1
if len(args) != 3:
raise RuntimeError("Error parsing command line. Exactly 3 arguments are required but %d arguments were detected: %s" % (len(args), str(args)))
if options.optionsFile != None:
fileArgs = parseOptionsFile(options.optionsFile)
options, args = parser.parse_args(fileArgs + sys.argv[1:])
if len(args) != 3:
raise RuntimeError("Error parsing options file. Make sure all "
"options have -- prefix")
stage = 0
setLoggingFromOptions(options)
seqFile = SeqFile(args[0])
workDir = args[1]
outputHalFile = args[2]
validateInput(workDir, outputHalFile, options)
jtPath = os.path.join(workDir, "jobTree")
stage = 1
print "\nBeginning Alignment"
system("rm -rf %s" % jtPath)
projWrapper = ProjectWrapper(options, seqFile, workDir)
projWrapper.writeXml()
jtCommands = getJobTreeCommands(jtPath, parser, options)
runCactus(workDir, jtCommands, jtPath, options)
cmd = 'jobTreeStatus --failIfNotComplete --jobTree %s > /dev/null 2>&1 ' %\
jtPath
system(cmd)
stage = 2
print "Beginning HAL Export"
extractOutput(workDir, outputHalFile, options)
print "Success.\n" "Temporary data was left in: %s\n" \
% workDir
return 0
except RuntimeError, e:
sys.stderr.write("Error: %s\n\n" % str(e))
if stage >= 0 and workDir is not None and os.path.isdir(workDir):
sys.stderr.write("Temporary data was left in: %s\n" % workDir)
if stage == 1:
sys.stderr.write("More information can be found in %s\n" %
os.path.join(workDir, "cactus.log"))
elif stage == 2:
sys.stderr.write("More information can be found in %s\n" %
os.path.join(workDir, "cactus.log"))
return -1
if __name__ == '__main__':
sys.exit(main())
| BD2KGenomics/cactus | src/progressiveCactus.py | Python | gpl-3.0 | 16,648 |
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Bespin.
#
# The Initial Developer of the Original Code is
# Mozilla.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
""" path.py - An object representing a path to a file or directory.
Example:
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.5 or later.
URL: http://www.jorendorff.com/articles/python/path
Author: Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
Date: 9 Mar 2007
Slightly modified to eliminate the deprecationwarning for the md5 module.
"""
# TODO
# - Tree-walking functions don't avoid symlink loops. Matt Harrison
# sent me a patch for this.
# - Bug in write_text(). It doesn't support Universal newline mode.
# - Better error message in listdir() when self isn't a
# directory. (On Windows, the error message really sucks.)
# - Make sure everything has a good docstring.
# - Add methods for regex find and replace.
# - guess_content_type() method?
# - Perhaps support arguments to touch().
import sys, warnings, os, fnmatch, glob, shutil, codecs, hashlib
__version__ = '2.2'
__all__ = ['path']
# Platform-specific support for path.owner
if os.name == 'nt':
try:
import win32security
except ImportError:
win32security = None
else:
try:
import pwd
except ImportError:
pwd = None
# Pre-2.3 support. Are unicode filenames supported?
_base = str
_getcwd = os.getcwd
try:
if os.path.supports_unicode_filenames:
_base = unicode
_getcwd = os.getcwdu
except AttributeError:
pass
# Pre-2.3 workaround for booleans
try:
True, False
except NameError:
True, False = 1, 0
# Pre-2.3 workaround for basestring.
try:
basestring
except NameError:
basestring = (str, unicode)
# Universal newline support
_textmode = 'r'
if hasattr(file, 'newlines'):
_textmode = 'U'
class TreeWalkWarning(Warning):
pass
class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def getcwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
getcwd = classmethod(getcwd)
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
r""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, '/' or 'C:\\'). The other items in
the list will be strings.
path.path.joinpath(*result) will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see path.walkdirs).
With the optional 'pattern' argument, this only lists
directories whose names match the given pattern. For
example, d.dirs('build-*').
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see path.walkfiles).
With the optional 'pattern' argument, this only lists files
whose names match the given pattern. For example,
d.files('*.pyc').
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional 'pattern' argument, this yields only
directories whose names match the given pattern. For
example, mydir.walkdirs('*test') yields only directories
with names ending in 'test'.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
                    % (child, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if self.name matches the given pattern.
pattern - A filename pattern with wildcards,
for example '*.py'.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self):
""" Calculate the md5 hash for this file.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = hashlib.new("md5")
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isdir = os.path.isdir
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
getatime = os.path.getatime
atime = property(
getatime, None, None,
""" Last access time of the file. """)
getmtime = os.path.getmtime
mtime = property(
getmtime, None, None,
""" Last-modified time of the file. """)
if hasattr(os.path, 'getctime'):
getctime = os.path.getctime
ctime = property(
getctime, None, None,
""" Creation time of the file. """)
getsize = os.path.getsize
size = property(
getsize, None, None,
""" Size of the file, in bytes. """)
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def get_owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
owner = property(
get_owner, None, None,
""" Name of the owner of this file or directory. """)
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
def chmod(self, mode):
os.chmod(self, mode)
if hasattr(os, 'chown'):
def chown(self, uid, gid):
os.chown(self, uid, gid)
def rename(self, new):
os.rename(self, new)
def renames(self, new):
os.renames(self, new)
# --- Create/delete operations on directories
def mkdir(self, mode=0777):
os.mkdir(self, mode)
def makedirs(self, mode=0777):
os.makedirs(self, mode)
def rmdir(self):
os.rmdir(self)
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
def remove(self):
os.remove(self)
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = shutil.copyfile
copymode = shutil.copymode
copystat = shutil.copystat
copy = shutil.copy
copy2 = shutil.copy2
copytree = shutil.copytree
if hasattr(shutil, 'move'):
move = shutil.move
rmtree = shutil.rmtree
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
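# Editor's usage sketch -- not part of the original path.py module. It exercises
# the newline normalisation documented in write_text()/lines() and the
# splitall()/joinpath() round-trip noted in splitall()'s docstring, inside a
# throwaway temporary directory so nothing outside it is touched.
if __name__ == '__main__':
    import tempfile
    tmp = path(tempfile.mkdtemp())
    demo = tmp / 'demo.txt'
    demo.write_text('one\r\ntwo\rthree\n')          # mixed endings, normalised on write
    print demo.lines(retain=False)                  # ['one', 'two', 'three']
    parts = demo.splitall()
    print parts[0].joinpath(*parts[1:]) == demo     # True: joinpath(*splitall()) round-trips
    demo.remove()
    tmp.rmdir()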
| samn/spectral-workbench | webserver/public/lib/bespin-0.9a2/lib/dryice/path.py | Python | gpl-3.0 | 33,721 |
# -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansHebrew-Regular'
native_name = ''
def glyphs(self):
chars = []
chars.append(0x0000) #null ????
chars.append(0x200C) #uni200C ZERO WIDTH NON-JOINER
chars.append(0x000D) #nonmarkingreturn ????
chars.append(0x200E) #uni200E LEFT-TO-RIGHT MARK
chars.append(0x200F) #uni200F RIGHT-TO-LEFT MARK
chars.append(0x0020) #space SPACE
chars.append(0x200D) #uni200D ZERO WIDTH JOINER
chars.append(0x00A0) #space NO-BREAK SPACE
chars.append(0x20AA) #sheqel NEW SHEQEL SIGN
chars.append(0xFEFF) #null ZERO WIDTH NO-BREAK SPACE
chars.append(0xFB1D) #uniFB1D HEBREW LETTER YOD WITH HIRIQ
chars.append(0xFB1E) #uniFB1E HEBREW POINT JUDEO-SPANISH VARIKA
chars.append(0xFB1F) #yodyod_patah HEBREW LIGATURE YIDDISH YOD YOD PATAH
chars.append(0xFB20) #alternativeayin HEBREW LETTER ALTERNATIVE AYIN
chars.append(0xFB21) #alefwide HEBREW LETTER WIDE ALEF
chars.append(0xFB22) #daletwide HEBREW LETTER WIDE DALET
chars.append(0xFB23) #hewide HEBREW LETTER WIDE HE
chars.append(0xFB24) #kafwide HEBREW LETTER WIDE KAF
chars.append(0xFB25) #lamedwide HEBREW LETTER WIDE LAMED
chars.append(0xFB26) #finalmemwide HEBREW LETTER WIDE FINAL MEM
chars.append(0xFB27) #reshwide HEBREW LETTER WIDE RESH
chars.append(0xFB28) #tavwide HEBREW LETTER WIDE TAV
chars.append(0xFB29) #alt_plussign HEBREW LETTER ALTERNATIVE PLUS SIGN
chars.append(0xFB2A) #shinshindot HEBREW LETTER SHIN WITH SHIN DOT
chars.append(0xFB2B) #shinsindot HEBREW LETTER SHIN WITH SIN DOT
chars.append(0xFB2C) #shindageshshindot HEBREW LETTER SHIN WITH DAGESH AND SHIN DOT
chars.append(0xFB2D) #shindageshsindot HEBREW LETTER SHIN WITH DAGESH AND SIN DOT
chars.append(0xFB2E) #alefpatah HEBREW LETTER ALEF WITH PATAH
chars.append(0xFB2F) #alefqamats HEBREW LETTER ALEF WITH QAMATS
chars.append(0xFB30) #alefmapiq HEBREW LETTER ALEF WITH MAPIQ
chars.append(0xFB31) #betdagesh HEBREW LETTER BET WITH DAGESH
chars.append(0xFB32) #gimeldagesh HEBREW LETTER GIMEL WITH DAGESH
chars.append(0xFB33) #daletdagesh HEBREW LETTER DALET WITH DAGESH
chars.append(0xFB34) #hedagesh HEBREW LETTER HE WITH MAPIQ
chars.append(0xFB35) #vavdagesh HEBREW LETTER VAV WITH DAGESH
chars.append(0xFB36) #zayindagesh HEBREW LETTER ZAYIN WITH DAGESH
chars.append(0xFB38) #tetdagesh HEBREW LETTER TET WITH DAGESH
chars.append(0xFB39) #yoddagesh HEBREW LETTER YOD WITH DAGESH
chars.append(0xFB3A) #finalkafdagesh HEBREW LETTER FINAL KAF WITH DAGESH
chars.append(0xFB3B) #kafdagesh HEBREW LETTER KAF WITH DAGESH
chars.append(0xFB3C) #lameddagesh HEBREW LETTER LAMED WITH DAGESH
chars.append(0xFB3E) #memdagesh HEBREW LETTER MEM WITH DAGESH
chars.append(0xFB40) #nundagesh HEBREW LETTER NUN WITH DAGESH
chars.append(0xFB41) #samekhdagesh HEBREW LETTER SAMEKH WITH DAGESH
chars.append(0xFB43) #finalpedagesh HEBREW LETTER FINAL PE WITH DAGESH
chars.append(0xFB44) #pedagesh HEBREW LETTER PE WITH DAGESH
chars.append(0xFB46) #tsadidagesh HEBREW LETTER TSADI WITH DAGESH
chars.append(0xFB47) #qofdagesh HEBREW LETTER QOF WITH DAGESH
chars.append(0xFB48) #reshdagesh HEBREW LETTER RESH WITH DAGESH
chars.append(0xFB49) #shindagesh HEBREW LETTER SHIN WITH DAGESH
chars.append(0xFB4A) #tavdagesh HEBREW LETTER TAV WITH DAGESH
chars.append(0xFB4B) #vavholam HEBREW LETTER VAV WITH HOLAM
chars.append(0xFB4C) #betrafe HEBREW LETTER BET WITH RAFE
chars.append(0xFB4D) #kafrafe HEBREW LETTER KAF WITH RAFE
chars.append(0xFB4E) #perafe HEBREW LETTER PE WITH RAFE
chars.append(0xFB4F) #aleflamed HEBREW LIGATURE ALEF LAMED
chars.append(0x0591) #uni0591 HEBREW ACCENT ETNAHTA
chars.append(0x0592) #uni0592 HEBREW ACCENT SEGOL
chars.append(0x0593) #uni0593 HEBREW ACCENT SHALSHELET
chars.append(0x0594) #uni0594 HEBREW ACCENT ZAQEF QATAN
chars.append(0x0595) #uni0595 HEBREW ACCENT ZAQEF GADOL
chars.append(0x0596) #uni0596 HEBREW ACCENT TIPEHA
chars.append(0x0597) #uni0597 HEBREW ACCENT REVIA
chars.append(0x0598) #uni0598 HEBREW ACCENT ZARQA
chars.append(0x0599) #uni0599 HEBREW ACCENT PASHTA
chars.append(0x059A) #uni059A HEBREW ACCENT YETIV
chars.append(0x059B) #uni059B HEBREW ACCENT TEVIR
chars.append(0x059C) #uni059C HEBREW ACCENT GERESH
chars.append(0x059D) #uni059D HEBREW ACCENT GERESH MUQDAM
chars.append(0x059E) #uni059E HEBREW ACCENT GERSHAYIM
chars.append(0x059F) #uni059F HEBREW ACCENT QARNEY PARA
chars.append(0x05A0) #uni05A0 HEBREW ACCENT TELISHA GEDOLA
chars.append(0x05A1) #uni05A1 HEBREW ACCENT PAZER
chars.append(0x05A2) #uni05A2 HEBREW ACCENT ATNAH HAFUKH
chars.append(0x05A3) #uni05A3 HEBREW ACCENT MUNAH
chars.append(0x05A4) #uni05A4 HEBREW ACCENT MAHAPAKH
chars.append(0x05A5) #uni05A5 HEBREW ACCENT MERKHA
chars.append(0x05A6) #uni05A6 HEBREW ACCENT MERKHA KEFULA
chars.append(0x05A7) #uni05A7 HEBREW ACCENT DARGA
chars.append(0x05A8) #uni05A8 HEBREW ACCENT QADMA
chars.append(0x05A9) #uni05A9 HEBREW ACCENT TELISHA QETANA
chars.append(0x05AA) #uni05AA HEBREW ACCENT YERAH BEN YOMO
chars.append(0x05AB) #uni05AB HEBREW ACCENT OLE
chars.append(0x05AC) #uni05AC HEBREW ACCENT ILUY
chars.append(0x05AD) #uni05AD HEBREW ACCENT DEHI
chars.append(0x05AE) #uni05AE HEBREW ACCENT ZINOR
chars.append(0x05AF) #uni05AF HEBREW MARK MASORA CIRCLE
chars.append(0x05B0) #sheva HEBREW POINT SHEVA
chars.append(0x05B1) #hatafsegol HEBREW POINT HATAF SEGOL
chars.append(0x05B2) #hatafpatah HEBREW POINT HATAF PATAH
chars.append(0x05B3) #hatafqamats HEBREW POINT HATAF QAMATS
chars.append(0x05B4) #hiriq HEBREW POINT HIRIQ
chars.append(0x05B5) #tsere HEBREW POINT TSERE
chars.append(0x05B6) #segol HEBREW POINT SEGOL
chars.append(0x05B7) #patah HEBREW POINT PATAH
chars.append(0x05B8) #qamats HEBREW POINT QAMATS
chars.append(0x05B9) #holam HEBREW POINT HOLAM
chars.append(0x05BA) #uni05BA HEBREW POINT HOLAM HASER FOR VAV
chars.append(0x05BB) #qubuts HEBREW POINT QUBUTS
chars.append(0x05BC) #dagesh HEBREW POINT DAGESH OR MAPIQ
chars.append(0x05BD) #meteg HEBREW POINT METEG
chars.append(0x05BE) #maqaf HEBREW PUNCTUATION MAQAF
chars.append(0x05BF) #rafe HEBREW POINT RAFE
chars.append(0x05C0) #paseq HEBREW PUNCTUATION PASEQ
chars.append(0x05C1) #shindot HEBREW POINT SHIN DOT
chars.append(0x05C2) #sindot HEBREW POINT SIN DOT
chars.append(0x05C3) #sofpasuq HEBREW PUNCTUATION SOF PASUQ
chars.append(0x05C4) #upper_dot HEBREW MARK UPPER DOT
chars.append(0x05C5) #lowerdot HEBREW MARK LOWER DOT
chars.append(0x05C6) #uni05C6 HEBREW PUNCTUATION NUN HAFUKHA
chars.append(0x05C7) #qamatsqatan HEBREW POINT QAMATS QATAN
chars.append(0x25CC) #uni25CC DOTTED CIRCLE
chars.append(0x05D0) #alef HEBREW LETTER ALEF
chars.append(0x05D1) #bet HEBREW LETTER BET
chars.append(0x05D2) #gimel HEBREW LETTER GIMEL
chars.append(0x05D3) #dalet HEBREW LETTER DALET
chars.append(0x05D4) #he HEBREW LETTER HE
chars.append(0x05D5) #vav HEBREW LETTER VAV
chars.append(0x05D6) #zayin HEBREW LETTER ZAYIN
chars.append(0x05D7) #het HEBREW LETTER HET
chars.append(0x05D8) #tet HEBREW LETTER TET
chars.append(0x05D9) #yod HEBREW LETTER YOD
chars.append(0x05DA) #finalkaf HEBREW LETTER FINAL KAF
chars.append(0x05DB) #kaf HEBREW LETTER KAF
chars.append(0x05DC) #lamed HEBREW LETTER LAMED
chars.append(0x05DD) #finalmem HEBREW LETTER FINAL MEM
chars.append(0x05DE) #mem HEBREW LETTER MEM
chars.append(0x05DF) #finalnun HEBREW LETTER FINAL NUN
chars.append(0x05E0) #nun HEBREW LETTER NUN
chars.append(0x05E1) #samekh HEBREW LETTER SAMEKH
chars.append(0x05E2) #ayin HEBREW LETTER AYIN
chars.append(0x05E3) #finalpe HEBREW LETTER FINAL PE
chars.append(0x05E4) #pe HEBREW LETTER PE
chars.append(0x05E5) #finaltsadi HEBREW LETTER FINAL TSADI
chars.append(0x05E6) #tsadi HEBREW LETTER TSADI
chars.append(0x05E7) #qof HEBREW LETTER QOF
chars.append(0x05E8) #resh HEBREW LETTER RESH
chars.append(0x05E9) #shin HEBREW LETTER SHIN
chars.append(0x05EA) #tav HEBREW LETTER TAV
chars.append(0x05F0) #vavvav HEBREW LIGATURE YIDDISH DOUBLE VAV
chars.append(0x05F1) #vavyod HEBREW LIGATURE YIDDISH VAV YOD
chars.append(0x05F2) #yodyod HEBREW LIGATURE YIDDISH DOUBLE YOD
chars.append(0x05F3) #geresh HEBREW PUNCTUATION GERESH
chars.append(0x05F4) #gershayim HEBREW PUNCTUATION GERSHAYIM
return chars
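# Editor's illustration -- not part of pyfontaine. Charset modules such as this
# one are consumed by instantiating the class and querying glyphs() for coverage
# checks; the lookups below are purely demonstrative.
if __name__ == '__main__':
    cs = Charset()
    points = cs.glyphs()
    print('%s: %d codepoints' % (cs.common_name, len(points)))
    print(0x05D0 in points)   # HEBREW LETTER ALEF -> True
    print(0x0041 in points)   # LATIN CAPITAL LETTER A -> False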
| davelab6/pyfontaine | fontaine/charsets/noto_chars/notosanshebrew_regular.py | Python | gpl-3.0 | 9,355 |
# Authors:
# Jason Gerard DeRose <[email protected]>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Foundational classes and functions.
"""
import re
from constants import NAME_REGEX, NAME_ERROR
from constants import TYPE_ERROR, SET_ERROR, DEL_ERROR, OVERRIDE_ERROR
class ReadOnly(object):
"""
Base class for classes that can be locked into a read-only state.
Be forewarned that Python does not offer true read-only attributes for
user-defined classes. Do *not* rely upon the read-only-ness of this
class for security purposes!
The point of this class is not to make it impossible to set or to delete
attributes after an instance is locked, but to make it impossible to do so
*accidentally*. Rather than constantly reminding our programmers of things
like, for example, "Don't set any attributes on this ``FooBar`` instance
    because doing so won't be thread-safe", this class offers a real way to
enforce read-only attribute usage.
For example, before a `ReadOnly` instance is locked, you can set and delete
its attributes as normal:
>>> class Person(ReadOnly):
... pass
...
>>> p = Person()
>>> p.name = 'John Doe'
>>> p.phone = '123-456-7890'
>>> del p.phone
But after an instance is locked, you cannot set its attributes:
>>> p.__islocked__() # Is this instance locked?
False
>>> p.__lock__() # This will lock the instance
>>> p.__islocked__()
True
>>> p.department = 'Engineering'
Traceback (most recent call last):
...
AttributeError: locked: cannot set Person.department to 'Engineering'
    Nor can you delete its attributes:
>>> del p.name
Traceback (most recent call last):
...
AttributeError: locked: cannot delete Person.name
However, as noted at the start, there are still obscure ways in which
attributes can be set or deleted on a locked `ReadOnly` instance. For
example:
>>> object.__setattr__(p, 'department', 'Engineering')
>>> p.department
'Engineering'
>>> object.__delattr__(p, 'name')
>>> hasattr(p, 'name')
False
But again, the point is that a programmer would never employ the above
techniques *accidentally*.
Lastly, this example aside, you should use the `lock()` function rather
than the `ReadOnly.__lock__()` method. And likewise, you should
use the `islocked()` function rather than the `ReadOnly.__islocked__()`
method. For example:
>>> readonly = ReadOnly()
>>> islocked(readonly)
False
>>> lock(readonly) is readonly # lock() returns the instance
True
>>> islocked(readonly)
True
"""
__locked = False
def __lock__(self):
"""
Put this instance into a read-only state.
After the instance has been locked, attempting to set or delete an
attribute will raise an AttributeError.
"""
assert self.__locked is False, '__lock__() can only be called once'
self.__locked = True
def __islocked__(self):
"""
Return True if instance is locked, otherwise False.
"""
return self.__locked
def __setattr__(self, name, value):
"""
If unlocked, set attribute named ``name`` to ``value``.
If this instance is locked, an AttributeError will be raised.
:param name: Name of attribute to set.
:param value: Value to assign to attribute.
"""
if self.__locked:
raise AttributeError(
SET_ERROR % (self.__class__.__name__, name, value)
)
return object.__setattr__(self, name, value)
def __delattr__(self, name):
"""
If unlocked, delete attribute named ``name``.
If this instance is locked, an AttributeError will be raised.
:param name: Name of attribute to delete.
"""
if self.__locked:
raise AttributeError(
DEL_ERROR % (self.__class__.__name__, name)
)
return object.__delattr__(self, name)
def lock(instance):
"""
Lock an instance of the `ReadOnly` class or similar.
This function can be used to lock instances of any class that implements
the same locking API as the `ReadOnly` class. For example, this function
can lock instances of the `config.Env` class.
So that this function can be easily used within an assignment, ``instance``
is returned after it is locked. For example:
>>> readonly = ReadOnly()
>>> readonly is lock(readonly)
True
>>> readonly.attr = 'This wont work'
Traceback (most recent call last):
...
AttributeError: locked: cannot set ReadOnly.attr to 'This wont work'
Also see the `islocked()` function.
:param instance: The instance of `ReadOnly` (or similar) to lock.
"""
assert instance.__islocked__() is False, 'already locked: %r' % instance
instance.__lock__()
assert instance.__islocked__() is True, 'failed to lock: %r' % instance
return instance
def islocked(instance):
"""
Return ``True`` if ``instance`` is locked.
This function can be used on an instance of the `ReadOnly` class or an
instance of any other class implemented the same locking API.
For example:
>>> readonly = ReadOnly()
>>> islocked(readonly)
False
>>> readonly.__lock__()
>>> islocked(readonly)
True
Also see the `lock()` function.
:param instance: The instance of `ReadOnly` (or similar) to interrogate.
"""
assert (
hasattr(instance, '__lock__') and callable(instance.__lock__)
), 'no __lock__() method: %r' % instance
return instance.__islocked__()
def check_name(name):
"""
Verify that ``name`` is suitable for a `NameSpace` member name.
In short, ``name`` must be a valid lower-case Python identifier that
neither starts nor ends with an underscore. Otherwise an exception is
raised.
This function will raise a ``ValueError`` if ``name`` does not match the
`constants.NAME_REGEX` regular expression. For example:
>>> check_name('MyName')
Traceback (most recent call last):
...
ValueError: name must match '^[a-z][_a-z0-9]*[a-z0-9]$|^[a-z]$'; got 'MyName'
Also, this function will raise a ``TypeError`` if ``name`` is not an
``str`` instance. For example:
>>> check_name(u'my_name')
Traceback (most recent call last):
...
TypeError: name: need a <type 'str'>; got u'my_name' (a <type 'unicode'>)
So that `check_name()` can be easily used within an assignment, ``name``
is returned unchanged if it passes the check. For example:
>>> n = check_name('my_name')
>>> n
'my_name'
:param name: Identifier to test.
"""
if type(name) is not str:
raise TypeError(
TYPE_ERROR % ('name', str, name, type(name))
)
if re.match(NAME_REGEX, name) is None:
raise ValueError(
NAME_ERROR % (NAME_REGEX, name)
)
return name
class NameSpace(ReadOnly):
"""
A read-only name-space with handy container behaviours.
A `NameSpace` instance is an ordered, immutable mapping object whose values
can also be accessed as attributes. A `NameSpace` instance is constructed
from an iterable providing its *members*, which are simply arbitrary objects
with a ``name`` attribute whose value:
1. Is unique among the members
2. Passes the `check_name()` function
Beyond that, no restrictions are placed on the members: they can be
classes or instances, and of any type.
The members can be accessed as attributes on the `NameSpace` instance or
through a dictionary interface. For example, say we create a `NameSpace`
instance from a list containing a single member, like this:
>>> class my_member(object):
... name = 'my_name'
...
>>> namespace = NameSpace([my_member])
>>> namespace
NameSpace(<1 member>, sort=True)
We can then access ``my_member`` both as an attribute and as a dictionary
item:
>>> my_member is namespace.my_name # As an attribute
True
>>> my_member is namespace['my_name'] # As dictionary item
True
For a more detailed example, say we create a `NameSpace` instance from a
generator like this:
>>> class Member(object):
... def __init__(self, i):
... self.i = i
... self.name = 'member%d' % i
... def __repr__(self):
... return 'Member(%d)' % self.i
...
>>> ns = NameSpace(Member(i) for i in xrange(3))
>>> ns
NameSpace(<3 members>, sort=True)
As above, the members can be accessed as attributes and as dictionary items:
>>> ns.member0 is ns['member0']
True
>>> ns.member1 is ns['member1']
True
>>> ns.member2 is ns['member2']
True
Members can also be accessed by index and by slice. For example:
>>> ns[0]
Member(0)
>>> ns[-1]
Member(2)
>>> ns[1:]
(Member(1), Member(2))
(Note that slicing a `NameSpace` returns a ``tuple``.)
`NameSpace` instances provide standard container emulation for membership
testing, counting, and iteration. For example:
>>> 'member3' in ns # Is there a member named 'member3'?
False
>>> 'member2' in ns # But there is a member named 'member2'
True
>>> len(ns) # The number of members
3
>>> list(ns) # Iterate through the member names
['member0', 'member1', 'member2']
Although not a standard container feature, the `NameSpace.__call__()` method
provides a convenient (and efficient) way to iterate through the *members*
(as opposed to the member names). Think of it like an ordered version of
the ``dict.itervalues()`` method. For example:
>>> list(ns[name] for name in ns) # One way to do it
[Member(0), Member(1), Member(2)]
>>> list(ns()) # A more efficient, simpler way to do it
[Member(0), Member(1), Member(2)]
Another convenience method is `NameSpace.__todict__()`, which will return
a copy of the ``dict`` mapping the member names to the members.
For example:
>>> ns.__todict__()
{'member1': Member(1), 'member0': Member(0), 'member2': Member(2)}
As `NameSpace.__init__()` locks the instance, `NameSpace` instances are
read-only from the get-go. An ``AttributeError`` is raised if you try to
set *any* attribute on a `NameSpace` instance. For example:
    >>> ns.member3 = Member(3) # Let's add that missing 'member3'
Traceback (most recent call last):
...
AttributeError: locked: cannot set NameSpace.member3 to Member(3)
(For information on the locking protocol, see the `ReadOnly` class, of which
`NameSpace` is a subclass.)
By default the members will be sorted alphabetically by the member name.
For example:
>>> sorted_ns = NameSpace([Member(7), Member(3), Member(5)])
>>> sorted_ns
NameSpace(<3 members>, sort=True)
>>> list(sorted_ns)
['member3', 'member5', 'member7']
>>> sorted_ns[0]
Member(3)
But if the instance is created with the ``sort=False`` keyword argument, the
original order of the members is preserved. For example:
>>> unsorted_ns = NameSpace([Member(7), Member(3), Member(5)], sort=False)
>>> unsorted_ns
NameSpace(<3 members>, sort=False)
>>> list(unsorted_ns)
['member7', 'member3', 'member5']
>>> unsorted_ns[0]
Member(7)
The `NameSpace` class is used in many places throughout freeIPA. For a few
examples, see the `plugable.API` and the `frontend.Command` classes.
"""
def __init__(self, members, sort=True, name_attr='name'):
"""
:param members: An iterable providing the members.
:param sort: Whether to sort the members by member name.
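        :param name_attr: Name of the attribute that supplies each member's name.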
"""
if type(sort) is not bool:
raise TypeError(
TYPE_ERROR % ('sort', bool, sort, type(sort))
)
self.__sort = sort
if sort:
self.__members = tuple(
sorted(members, key=lambda m: getattr(m, name_attr))
)
else:
self.__members = tuple(members)
self.__names = tuple(getattr(m, name_attr) for m in self.__members)
self.__map = dict()
for member in self.__members:
name = check_name(getattr(member, name_attr))
if name in self.__map:
raise AttributeError(OVERRIDE_ERROR %
(self.__class__.__name__, name, self.__map[name], member)
)
assert not hasattr(self, name), 'Ouch! Has attribute %r' % name
self.__map[name] = member
setattr(self, name, member)
lock(self)
def __len__(self):
"""
Return the number of members.
"""
return len(self.__members)
def __iter__(self):
"""
Iterate through the member names.
If this instance was created with ``sort=False``, the names will be in
the same order as the members were passed to the constructor; otherwise
the names will be in alphabetical order (which is the default).
This method is like an ordered version of ``dict.iterkeys()``.
"""
for name in self.__names:
yield name
def __call__(self):
"""
Iterate through the members.
If this instance was created with ``sort=False``, the members will be
in the same order as they were passed to the constructor; otherwise the
members will be in alphabetical order by name (which is the default).
This method is like an ordered version of ``dict.itervalues()``.
"""
for member in self.__members:
yield member
def __contains__(self, name):
"""
Return ``True`` if namespace has a member named ``name``.
"""
return name in self.__map
def __getitem__(self, key):
"""
Return a member by name or index, or return a slice of members.
:param key: The name or index of a member, or a slice object.
"""
if isinstance(key, basestring):
return self.__map[key]
if type(key) in (int, slice):
return self.__members[key]
raise TypeError(
TYPE_ERROR % ('key', (str, int, slice), key, type(key))
)
def __repr__(self):
"""
Return a pseudo-valid expression that could create this instance.
"""
cnt = len(self)
if cnt == 1:
m = 'member'
else:
m = 'members'
return '%s(<%d %s>, sort=%r)' % (
self.__class__.__name__,
cnt,
m,
self.__sort,
)
def __todict__(self):
"""
Return a copy of the private dict mapping member name to member.
"""
return dict(self.__map)
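# Editor's addition, not part of freeIPA: the docstrings above (ReadOnly locking,
# lock()/islocked(), check_name() and the NameSpace examples) double as doctests,
# so running this module from within the ipalib package exercises them all.
# doctest.testmod() is silent when every example passes.
if __name__ == '__main__':
    import doctest
    doctest.testmod()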
| hatchetation/freeipa | ipalib/base.py | Python | gpl-3.0 | 15,669 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import HashBackend
from cryptography.hazmat.primitives import interfaces
@utils.register_interface(interfaces.HashContext)
class Hash(object):
def __init__(self, algorithm, backend, ctx=None):
if not isinstance(backend, HashBackend):
raise UnsupportedAlgorithm(
"Backend object does not implement HashBackend.",
_Reasons.BACKEND_MISSING_INTERFACE
)
if not isinstance(algorithm, interfaces.HashAlgorithm):
raise TypeError("Expected instance of interfaces.HashAlgorithm.")
self._algorithm = algorithm
self._backend = backend
if ctx is None:
self._ctx = self._backend.create_hash_ctx(self.algorithm)
else:
self._ctx = ctx
algorithm = utils.read_only_property("_algorithm")
def update(self, data):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
if not isinstance(data, bytes):
raise TypeError("data must be bytes.")
self._ctx.update(data)
def copy(self):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
return Hash(
self.algorithm, backend=self._backend, ctx=self._ctx.copy()
)
def finalize(self):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
digest = self._ctx.finalize()
self._ctx = None
return digest
@utils.register_interface(interfaces.HashAlgorithm)
class SHA1(object):
name = "sha1"
digest_size = 20
block_size = 64
@utils.register_interface(interfaces.HashAlgorithm)
class SHA224(object):
name = "sha224"
digest_size = 28
block_size = 64
@utils.register_interface(interfaces.HashAlgorithm)
class SHA256(object):
name = "sha256"
digest_size = 32
block_size = 64
@utils.register_interface(interfaces.HashAlgorithm)
class SHA384(object):
name = "sha384"
digest_size = 48
block_size = 128
@utils.register_interface(interfaces.HashAlgorithm)
class SHA512(object):
name = "sha512"
digest_size = 64
block_size = 128
@utils.register_interface(interfaces.HashAlgorithm)
class RIPEMD160(object):
name = "ripemd160"
digest_size = 20
block_size = 64
@utils.register_interface(interfaces.HashAlgorithm)
class Whirlpool(object):
name = "whirlpool"
digest_size = 64
block_size = 64
@utils.register_interface(interfaces.HashAlgorithm)
class MD5(object):
name = "md5"
digest_size = 16
block_size = 64
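# Editor's sketch, not part of the library: a minimal drive of the Hash context
# defined above. It assumes the default_backend() helper of this cryptography
# release is importable; calling update() or finalize() again after finalize()
# would raise AlreadyFinalized, as enforced in Hash.
if __name__ == "__main__":
    import binascii
    from cryptography.hazmat.backends import default_backend
    digest = Hash(SHA256(), backend=default_backend())
    digest.update(b"abc")
    digest.update(b"def")  # update() may be called repeatedly to stream data
    print(binascii.hexlify(digest.finalize()))  # SHA-256 of b"abcdef"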
| CoderBotOrg/coderbotsrv | server/lib/cryptography/hazmat/primitives/hashes.py | Python | gpl-3.0 | 3,022 |
from .base import Browser, ExecutorBrowser, require_arg
from .base import get_timeout_multiplier # noqa: F401
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
WebDriverRefTestExecutor) # noqa: F401
from ..executors.executorwebkit import WebKitDriverWdspecExecutor # noqa: F401
from ..webdriver_server import WebKitDriverServer
__wptrunner__ = {"product": "webkit",
"check_args": "check_args",
"browser": "WebKitBrowser",
"browser_kwargs": "browser_kwargs",
"executor": {"testharness": "WebDriverTestharnessExecutor",
"reftest": "WebDriverRefTestExecutor",
"wdspec": "WebKitDriverWdspecExecutor"},
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"run_info_extras": "run_info_extras",
"timeout_multiplier": "get_timeout_multiplier"}
def check_args(**kwargs):
require_arg(kwargs, "binary")
require_arg(kwargs, "webdriver_binary")
require_arg(kwargs, "webkit_port")
def browser_kwargs(test_type, run_info_data, config, **kwargs):
return {"binary": kwargs["binary"],
"webdriver_binary": kwargs["webdriver_binary"],
"webdriver_args": kwargs.get("webdriver_args")}
def capabilities_for_port(server_config, **kwargs):
port_name = kwargs["webkit_port"]
if port_name in ["gtk", "wpe"]:
port_key_map = {"gtk": "webkitgtk"}
browser_options_port = port_key_map.get(port_name, port_name)
browser_options_key = "%s:browserOptions" % browser_options_port
return {
"browserName": "MiniBrowser",
"browserVersion": "2.20",
"platformName": "ANY",
browser_options_key: {
"binary": kwargs["binary"],
"args": kwargs.get("binary_args", []),
"certificates": [
{"host": server_config["browser_host"],
"certificateFile": kwargs["host_cert_path"]}]}}
return {}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
executor_kwargs = base_executor_kwargs(test_type, server_config,
cache_manager, run_info_data, **kwargs)
executor_kwargs["close_after_done"] = True
executor_kwargs["capabilities"] = capabilities_for_port(server_config,
**kwargs)
return executor_kwargs
def env_extras(**kwargs):
return []
def env_options():
return {}
def run_info_extras(**kwargs):
return {"webkit_port": kwargs["webkit_port"]}
class WebKitBrowser(Browser):
"""Generic WebKit browser is backed by WebKit's WebDriver implementation,
which is supplied through ``wptrunner.webdriver.WebKitDriverServer``.
"""
def __init__(self, logger, binary, webdriver_binary=None,
webdriver_args=None):
Browser.__init__(self, logger)
self.binary = binary
self.server = WebKitDriverServer(self.logger, binary=webdriver_binary,
args=webdriver_args)
def start(self, **kwargs):
self.server.start(block=False)
def stop(self, force=False):
self.server.stop(force=force)
def pid(self):
return self.server.pid
def is_alive(self):
# TODO(ato): This only indicates the driver is alive,
# and doesn't say anything about whether a browser session
# is active.
return self.server.is_alive
def cleanup(self):
self.stop()
def executor_browser(self):
return ExecutorBrowser, {"webdriver_url": self.server.url}
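# Editor's sketch, not part of wptrunner. It shows the capabilities dict that
# capabilities_for_port() above builds for the GTK port; the binary path,
# certificate path and browser host are made-up values, and because this module
# uses relative imports the sketch must be run with "python -m" from inside the
# wptrunner package rather than as a standalone script.
if __name__ == "__main__":
    example_caps = capabilities_for_port(
        {"browser_host": "web-platform.test"},
        webkit_port="gtk",
        binary="/usr/bin/MiniBrowser",
        host_cert_path="/tmp/web-platform.test.pem",
    )
    print(example_caps)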
| larsbergstrom/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/browsers/webkit.py | Python | mpl-2.0 | 3,966 |
#!/usr/bin/env python
#coding:utf-8
import cyclone.auth
import cyclone.escape
import cyclone.web
import datetime
import time
import os
from beaker.cache import cache_managers
from toughradius.manage.base import BaseHandler
from toughlib.permit import permit
from toughradius.manage import models
from toughradius.manage.settings import *
from toughradius.common import tools
import psutil
@permit.route(r"/admin")
class HomeHandler(BaseHandler):
@cyclone.web.authenticated
def get(self):
# cpuuse = psutil.cpu_percent(interval=None, percpu=True)
# memuse = psutil.virtual_memory()
# online_count = self.db.query(models.TrOnline.id).count()
# user_total = self.db.query(models.TrAccount.account_number).filter_by(status=1).count()
# self.render("index.html",config=self.settings.config,
# cpuuse=cpuuse,memuse=memuse,online_count=online_count,user_total=user_total)
self.redirect("/admin/dashboard")
@permit.route(r"/")
class HomeHandler(BaseHandler):
@cyclone.web.authenticated
def get(self):
self.redirect("/admin/dashboard")
@permit.route(r"/about")
class HomeHandler(BaseHandler):
@cyclone.web.authenticated
def get(self):
self.render("about.html")
@permit.route(r"/toughcloud/service/register")
class ToughcloudRegisterHandler(BaseHandler):
def get_toughcloud_url(self):
if os.environ.get("TR_DEV"):
return 'http://127.0.0.1:9079/customer/license/request?sid=%s'%tools.get_sys_uuid()
else:
return 'https://www.toughcloud.net/customer/license/request?sid=%s'%tools.get_sys_uuid()
@cyclone.web.authenticated
def get(self):
self.redirect(self.get_toughcloud_url())
| sumonchai/tough | toughradius/manage/system/index.py | Python | agpl-3.0 | 1,739 |
# -*- coding: utf-8 -*-
# Copyright 2016 Acsone SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': 'Account Invoice Check Total',
'summary': """
Check if the verification total is equal to the bill's total""",
'version': '10.0.1.0.0',
'license': 'AGPL-3',
'author': 'Acsone SA/NV,Odoo Community Association (OCA)',
'website': 'https://acsone.eu/',
'depends': [
'account',
],
'data': [
'views/account_config_settings.xml',
'security/account_invoice_security.xml',
'views/account_invoice.xml',
],
}
| sysadminmatmoz/account-invoicing | account_invoice_check_total/__manifest__.py | Python | agpl-3.0 | 607 |
# -*- coding: UTF-8 -*-
import logging
unicode_string = u"Татьяна"
utf8_string = "'Татьяна' is an invalid string value"
logging.warning(unicode_string)
logging.warning(utf8_string)
try:
raise Exception(utf8_string)
except Exception,e:
print "--- (Log a traceback of the exception):"
logging.exception(e)
print "--- Everything okay until here, but now we run into trouble:"
logging.warning(u"1 Deferred %s : %s",unicode_string,e)
logging.warning(u"2 Deferred %s : %s",unicode_string,utf8_string)
print "--- some workarounds:"
logging.warning(u"3 Deferred %s : %s",unicode_string,utf8_string.decode('UTF-8'))
from django.utils.encoding import force_unicode
logging.warning(u"4 Deferred %s : %s",unicode_string,force_unicode(utf8_string))
| lsaffre/blog | docs/blog/2011/0527.py | Python | agpl-3.0 | 806 |
# -*- coding: utf-8 -*-
# Copyright (C) 2010 Samalyse SARL
# Copyright (C) 2010-2014 Parisson SARL
# This file is part of Telemeta.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Olivier Guilyardi <[email protected]>
# David LIPSZYC <[email protected]>
# Guillaume Pellerin <[email protected]>
from __future__ import division
from django.utils.translation import ugettext_lazy as _
from telemeta.models.core import *
from telemeta.models.resource import *
from telemeta.models.collection import *
class MediaCorpus(MediaBaseResource):
"Describe a corpus"
element_type = 'corpus'
children_type = 'collections'
children = models.ManyToManyField(MediaCollection, related_name="corpus",
verbose_name=_('collections'), blank=True)
recorded_from_year = IntegerField(_('recording year (from)'), help_text=_('YYYY'))
recorded_to_year = IntegerField(_('recording year (until)'), help_text=_('YYYY'))
objects = MediaCorpusManager()
permissions = (("can_download_corpus_epub", "Can download corpus EPUB"),)
@property
def public_id(self):
return self.code
@property
def has_mediafile(self):
for child in self.children.all():
if child.has_mediafile:
return True
return False
def computed_duration(self):
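        # Total duration is the sum of the computed durations of all child collections.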
duration = Duration()
for child in self.children.all():
duration += child.computed_duration()
return duration
computed_duration.verbose_name = _('total available duration')
class Meta(MetaCore):
db_table = 'media_corpus'
verbose_name = _('corpus')
verbose_name_plural = _('corpus')
ordering = ['code']
class MediaCorpusRelated(MediaRelated):
"Corpus related media"
resource = ForeignKey(MediaCorpus, related_name="related", verbose_name=_('corpus'))
class Meta(MetaCore):
db_table = 'media_corpus_related'
verbose_name = _('corpus related media')
verbose_name_plural = _('corpus related media')
| ANR-kamoulox/Telemeta | telemeta/models/corpus.py | Python | agpl-3.0 | 2,752 |
#!/usr/bin/env python3
# Sifer Aseph
"""Prints a "hello world" statement."""
def main():
"""Utterly standard."""
print("Hello cruel world.")
if __name__ == "__main__":
main()
| Surufel/Personal | 2.pysifer/Older/hello_world.py | Python | agpl-3.0 | 189 |
"""
Django Views for service status app
"""
from __future__ import absolute_import
import json
import time
from celery.exceptions import TimeoutError
from django.http import HttpResponse
from djcelery import celery
from openedx.core.djangoapps.service_status.tasks import delayed_ping
def index(_):
"""
An empty view
"""
return HttpResponse()
def celery_status(_):
"""
A view that returns Celery stats
"""
stats = celery.control.inspect().stats() or {}
return HttpResponse(json.dumps(stats, indent=4),
content_type="application/json")
def celery_ping(_):
"""
    A simple view that checks whether Celery can process a simple task
"""
start = time.time()
result = delayed_ping.apply_async(('ping', 0.1))
task_id = result.id
# Wait until we get the result
try:
value = result.get(timeout=4.0)
success = True
except TimeoutError:
value = None
success = False
output = {
'success': success,
'task_id': task_id,
'value': value,
'time': time.time() - start,
}
return HttpResponse(json.dumps(output, indent=4),
content_type="application/json")
| ESOedX/edx-platform | openedx/core/djangoapps/service_status/views.py | Python | agpl-3.0 | 1,236 |
"""
Useful utilities for management commands.
"""
from django.core.management.base import CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
def get_mutually_exclusive_required_option(options, *selections):
"""
    Validates that exactly one of the given options is specified.
Returns the name of the found option.
"""
selected = [sel for sel in selections if options.get(sel)]
if len(selected) != 1:
selection_string = ', '.join(f'--{selection}' for selection in selections)
raise CommandError(f'Must specify exactly one of {selection_string}')
return selected[0]
def validate_mutually_exclusive_option(options, option_1, option_2):
"""
    Validates that the two given options are not both specified.
"""
if options.get(option_1) and options.get(option_2):
raise CommandError(f'Both --{option_1} and --{option_2} cannot be specified.')
def validate_dependent_option(options, dependent_option, depending_on_option):
"""
    Validates that depending_on_option is specified if dependent_option is specified.
"""
if options.get(dependent_option) and not options.get(depending_on_option):
raise CommandError(f'Option --{dependent_option} requires option --{depending_on_option}.')
def parse_course_keys(course_key_strings):
"""
Parses and returns a list of CourseKey objects from the given
list of course key strings.
"""
try:
return [CourseKey.from_string(course_key_string) for course_key_string in course_key_strings]
except InvalidKeyError as error:
raise CommandError('Invalid key specified: {}'.format(str(error))) # lint-amnesty, pylint: disable=raise-missing-from
| eduNEXT/edunext-platform | openedx/core/lib/command_utils.py | Python | agpl-3.0 | 1,739 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields, api
def format_code(code_seq):
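    # Zero-pad the sequence to 14 digits, append two modulo-11 check digits,
    # format the result as NN.NNN.NNN.NNN.NNN-NN and trim it back down
    # according to the original sequence length.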
code = map(int, str(code_seq))
code_len = len(code)
while len(code) < 14:
code.insert(0, 0)
while len(code) < 16:
n = sum([(len(code) + 1 - i) * v for i, v in enumerate(code)]) % 11
if n > 1:
f = 11 - n
else:
f = 0
code.append(f)
code_str = "%s.%s.%s.%s.%s-%s" % (str(code[0]) + str(code[1]),
str(code[2]) + str(code[3]) + str(code[4]),
str(code[5]) + str(code[6]) + str(code[7]),
str(code[8]) + str(code[9]) + str(code[10]),
str(code[11]) + str(code[12]) + str(code[13]),
str(code[14]) + str(code[15]))
if code_len <= 3:
code_form = code_str[18 - code_len:21]
elif code_len > 3 and code_len <= 6:
code_form = code_str[17 - code_len:21]
elif code_len > 6 and code_len <= 9:
code_form = code_str[16 - code_len:21]
elif code_len > 9 and code_len <= 12:
code_form = code_str[15 - code_len:21]
elif code_len > 12 and code_len <= 14:
code_form = code_str[14 - code_len:21]
return code_form
class clv_insured(models.Model):
_inherit = 'clv_insured'
code = fields.Char('Insured Code', size=64, select=1, required=False, readonly=False, default='/',
help='Use "/" to get an automatic new Insured Code.')
@api.model
def create(self, vals):
if not 'code' in vals or ('code' in vals and vals['code'] == '/'):
code_seq = self.pool.get('ir.sequence').get(self._cr, self._uid, 'clv_insured.code')
vals['code'] = format_code(code_seq)
return super(clv_insured, self).create(vals)
@api.multi
def write(self, vals):
if 'code' in vals and vals['code'] == '/':
code_seq = self.pool.get('ir.sequence').get(self._cr, self._uid, 'clv_insured.code')
vals['code'] = format_code(code_seq)
return super(clv_insured, self).write(vals)
@api.one
def copy(self, default=None):
default = dict(default or {})
default.update({'code': '/',})
return super(clv_insured, self).copy(default)
| CLVsol/odoo_addons | clv_insured/seq/clv_insured_seq.py | Python | agpl-3.0 | 3,732 |
#!/usr/bin/env python3
import os, sys, glob, pickle, subprocess
sys.path.insert(0, os.path.dirname(__file__))
from clang import cindex
sys.path = sys.path[1:]
def configure_libclang():
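    # Prefer the libdir reported by llvm-config, fall back to known locations,
    # and point cindex at the first libclang.so found.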
llvm_libdirs = ['/usr/lib/llvm-3.2/lib', '/usr/lib64/llvm']
try:
libdir = subprocess.check_output(['llvm-config', '--libdir']).decode('utf-8').strip()
llvm_libdirs.insert(0, libdir)
except OSError:
pass
for d in llvm_libdirs:
if not os.path.exists(d):
continue
files = glob.glob(os.path.join(d, 'libclang.so*'))
if len(files) != 0:
cindex.Config.set_library_file(files[0])
return
class Call:
def __init__(self, cursor, decl):
self.ident = cursor.displayname.decode('utf-8')
self.filename = cursor.location.file.name.decode('utf-8')
ex = cursor.extent
self.start_line = ex.start.line
self.start_column = ex.start.column
self.end_line = ex.end.line
self.end_column = ex.end.column
self.decl_filename = decl.location.file.name.decode('utf-8')
class Definition:
def __init__(self, cursor):
self.ident = cursor.spelling.decode('utf-8')
self.display = cursor.displayname.decode('utf-8')
self.filename = cursor.location.file.name.decode('utf-8')
ex = cursor.extent
self.start_line = ex.start.line
self.start_column = ex.start.column
self.end_line = ex.end.line
self.end_column = ex.end.column
def process_diagnostics(tu):
diagnostics = tu.diagnostics
haserr = False
for d in diagnostics:
sys.stderr.write('{0}\n'.format(d.format.decode('utf-8')))
if d.severity > cindex.Diagnostic.Warning:
haserr = True
if haserr:
sys.exit(1)
def walk_cursors(tu, files):
proc = list(tu.cursor.get_children())
while len(proc) > 0:
cursor = proc[0]
proc = proc[1:]
if cursor.location.file is None:
continue
fname = cursor.location.file.name.decode('utf-8')
if fname in files:
yield cursor
proc += list(cursor.get_children())
def newer(a, b):
try:
return os.stat(a).st_mtime > os.stat(b).st_mtime
except:
return True
def scan_libgit2_glib(cflags, files, git2dir):
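    # Parse every libgit2-glib source file (caching results in .<name>.cache
    # pickles) and collect all calls that resolve to functions declared under
    # the libgit2 include directory.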
files = [os.path.abspath(f) for f in files]
dname = os.path.dirname(__file__)
allcalls = {}
l = 0
if not os.getenv('SILENT'):
sys.stderr.write('\n')
i = 0
for f in files:
if not os.getenv('SILENT'):
name = os.path.basename(f)
if len(name) > l:
l = len(name)
perc = int((i / len(files)) * 100)
sys.stderr.write('[{0: >3}%] Processing ... {1}{2}\r'.format(perc, name, ' ' * (l - len(name))))
i += 1
astf = os.path.join(dname, '.' + os.path.basename(f) + '.cache')
if not newer(f, astf):
with open(astf, 'rb') as fo:
calls = pickle.load(fo)
else:
tu = cindex.TranslationUnit.from_source(f, cflags)
process_diagnostics(tu)
calls = {}
for cursor in walk_cursors(tu, files):
if cursor.kind == cindex.CursorKind.CALL_EXPR or \
cursor.kind == cindex.CursorKind.DECL_REF_EXPR:
cdecl = cursor.get_referenced()
if cdecl.kind != cindex.CursorKind.FUNCTION_DECL:
continue
if (not cdecl is None) and (not cdecl.location.file is None):
fdefname = cdecl.location.file.name.decode('utf-8')
if fdefname.startswith(git2dir):
call = Call(cursor, cdecl)
if call.ident in calls:
calls[call.ident].append(call)
else:
calls[call.ident] = [call]
with open(astf, 'wb') as fo:
pickle.dump(calls, fo)
for k in calls:
if k in allcalls:
allcalls[k] += calls[k]
else:
allcalls[k] = list(calls[k])
if not os.getenv('SILENT'):
sys.stderr.write('\r[100%] Processing ... done{0}\n'.format(' ' * (l - 4)))
return allcalls
def scan_libgit2(cflags, git2dir):
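    # Index every function declared in the libgit2 headers, skipping the
    # boilerplate object API (lookup/free/id/owner) of commit/tree/tag/blob.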
tu = cindex.TranslationUnit.from_source(git2dir + '.h', cflags)
process_diagnostics(tu)
headers = glob.glob(os.path.join(git2dir, '*.h'))
defs = {}
objapi = ['lookup', 'lookup_prefix', 'free', 'id', 'owner']
objderiv = ['commit', 'tree', 'tag', 'blob']
ignore = set()
for deriv in objderiv:
for api in objapi:
ignore.add('git_' + deriv + '_' + api)
for cursor in walk_cursors(tu, headers):
if cursor.kind == cindex.CursorKind.FUNCTION_DECL:
deff = Definition(cursor)
if not deff.ident in ignore:
defs[deff.ident] = deff
return defs
configure_libclang()
pos = sys.argv.index('--')
cflags = sys.argv[1:pos]
files = sys.argv[pos+1:]
incdir = os.getenv('LIBGIT2_INCLUDE_DIR')
defs = scan_libgit2(cflags, incdir)
calls = scan_libgit2_glib(cflags, files, incdir)
notused = {}
perfile = {}
nperfile = {}
for d in defs:
o = defs[d]
if not d in calls:
notused[d] = defs[d]
if not o.filename in nperfile:
nperfile[o.filename] = [o]
else:
nperfile[o.filename].append(o)
if not o.filename in perfile:
perfile[o.filename] = [o]
else:
perfile[o.filename].append(o)
ss = [notused[f] for f in notused]
ss.sort(key=lambda x: '{0} {1}'.format(os.path.basename(x.filename), x.ident))
lastf = None
keys = list(perfile.keys())
keys.sort()
for filename in keys:
b = os.path.basename(filename)
f = perfile[filename]
n_perfile = len(f)
if filename in nperfile:
n_nperfile = len(nperfile[filename])
else:
n_nperfile = 0
perc = int(((n_perfile - n_nperfile) / n_perfile) * 100)
print('\n File {0}, coverage {1}% ({2} out of {3}):'.format(b, perc, n_perfile - n_nperfile, n_perfile))
cp = list(f)
cp.sort(key=lambda x: "{0} {1}".format(not x.ident in calls, x.ident))
for d in cp:
if d.ident in calls:
print(' \033[32m✓ {0}\033[0m'.format(d.display))
else:
print(' \033[31m✗ {0}\033[0m'.format(d.display))
perc = int(((len(defs) - len(notused)) / len(defs)) * 100)
print('\nTotal coverage: {0}% ({1} functions out of {2} are being called)\n'.format(perc, len(defs) - len(notused), len(defs)))
# vi:ts=4:et
| chergert/libgit2-glib | tools/coverage.py | Python | lgpl-2.1 | 6,733 |
# -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2014 Yorik van Havre <[email protected]> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
from __future__ import print_function
import FreeCAD
import Path
import PathScripts.PathDressup as PathDressup
import PathScripts.PathGeom as PathGeom
import PathScripts.PathLog as PathLog
import PathScripts.PathUtil as PathUtil
import PathScripts.PathUtils as PathUtils
import math
from PySide import QtCore
# lazily loaded modules
from lazy_loader.lazy_loader import LazyLoader
DraftGeomUtils = LazyLoader('DraftGeomUtils', globals(), 'DraftGeomUtils')
Part = LazyLoader('Part', globals(), 'Part')
LOG_MODULE = PathLog.thisModule()
PathLog.setLevel(PathLog.Level.NOTICE, LOG_MODULE)
#PathLog.trackModule(LOG_MODULE)
# Qt translation handling
def translate(context, text, disambig=None):
return QtCore.QCoreApplication.translate(context, text, disambig)
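# G-code name groups used to classify the commands of a path.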
movecommands = ['G0', 'G00', 'G1', 'G01', 'G2', 'G02', 'G3', 'G03']
movestraight = ['G1', 'G01']
movecw = ['G2', 'G02']
moveccw = ['G3', 'G03']
movearc = movecw + moveccw
def debugMarker(vector, label, color=None, radius=0.5):
if PathLog.getLevel(LOG_MODULE) == PathLog.Level.DEBUG:
obj = FreeCAD.ActiveDocument.addObject("Part::Sphere", label)
obj.Label = label
obj.Radius = radius
obj.Placement = FreeCAD.Placement(vector, FreeCAD.Rotation(FreeCAD.Vector(0, 0, 1), 0))
if color:
obj.ViewObject.ShapeColor = color
def debugCircle(vector, r, label, color=None):
if PathLog.getLevel(LOG_MODULE) == PathLog.Level.DEBUG:
obj = FreeCAD.ActiveDocument.addObject("Part::Cylinder", label)
obj.Label = label
obj.Radius = r
obj.Height = 1
obj.Placement = FreeCAD.Placement(vector, FreeCAD.Rotation(FreeCAD.Vector(0, 0, 1), 0))
obj.ViewObject.Transparency = 90
if color:
obj.ViewObject.ShapeColor = color
def addAngle(a1, a2):
a = a1 + a2
while a <= -math.pi:
a += 2*math.pi
while a > math.pi:
a -= 2*math.pi
return a
def anglesAreParallel(a1, a2):
an1 = addAngle(a1, 0)
an2 = addAngle(a2, 0)
if an1 == an2:
return True
if an1 == addAngle(an2, math.pi):
return True
return False
def getAngle(v):
a = v.getAngle(FreeCAD.Vector(1, 0, 0))
if v.y < 0:
return -a
return a
def pointFromCommand(cmd, pt, X='X', Y='Y', Z='Z'):
x = cmd.Parameters.get(X, pt.x)
y = cmd.Parameters.get(Y, pt.y)
z = cmd.Parameters.get(Z, pt.z)
return FreeCAD.Vector(x, y, z)
def edgesForCommands(cmds, startPt):
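    # Convert a list of path commands into Part edges, starting at startPt;
    # arcs are reconstructed from their center offsets (I/J/K).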
edges = []
lastPt = startPt
for cmd in cmds:
if cmd.Name in movecommands:
pt = pointFromCommand(cmd, lastPt)
if cmd.Name in movestraight:
edges.append(Part.Edge(Part.LineSegment(lastPt, pt)))
elif cmd.Name in movearc:
center = lastPt + pointFromCommand(cmd, FreeCAD.Vector(0, 0, 0), 'I', 'J', 'K')
A = lastPt - center
B = pt - center
d = -B.x * A.y + B.y * A.x
if d == 0:
# we're dealing with half a circle here
angle = getAngle(A) + math.pi/2
if cmd.Name in movecw:
angle -= math.pi
else:
C = A + B
angle = getAngle(C)
R = (lastPt - center).Length
ptm = center + FreeCAD.Vector(math.cos(angle), math.sin(angle), 0) * R
edges.append(Part.Edge(Part.Arc(lastPt, ptm, pt)))
lastPt = pt
return edges
class Style(object):
# pylint: disable=no-init
Dogbone = 'Dogbone'
Tbone_H = 'T-bone horizontal'
Tbone_V = 'T-bone vertical'
Tbone_L = 'T-bone long edge'
Tbone_S = 'T-bone short edge'
All = [Dogbone, Tbone_H, Tbone_V, Tbone_L, Tbone_S]
class Side(object):
# pylint: disable=no-init
Left = 'Left'
Right = 'Right'
All = [Left, Right]
@classmethod
def oppositeOf(cls, side):
if side == cls.Left:
return cls.Right
if side == cls.Right:
return cls.Left
return None
class Incision(object):
# pylint: disable=no-init
Fixed = 'fixed'
Adaptive = 'adaptive'
Custom = 'custom'
All = [Adaptive, Fixed, Custom]
class Smooth(object):
# pylint: disable=no-init
Neither = 0
In = 1
Out = 2
InAndOut = In | Out
# Chord
# A class to represent the start and end point of a path command. If the underlying
# Command is a rotate command the receiver does represent a chord in the geometric
# sense of the word. If the underlying command is a straight move then the receiver
# represents the actual move.
# This implementation really only deals with paths in the XY plane. Z is assumed to
# be constant in all calculated results.
# Instances of Chord are generally considered immutable and all movement member
# functions return new instances.
class Chord (object):
def __init__(self, start=None, end=None):
if not start:
start = FreeCAD.Vector()
if not end:
end = FreeCAD.Vector()
self.Start = start
self.End = end
def __str__(self):
return "Chord([%g, %g, %g] -> [%g, %g, %g])" % (self.Start.x, self.Start.y, self.Start.z, self.End.x, self.End.y, self.End.z)
def moveTo(self, newEnd):
return Chord(self.End, newEnd)
def moveToParameters(self, params):
x = params.get('X', self.End.x)
y = params.get('Y', self.End.y)
z = params.get('Z', self.End.z)
return self.moveTo(FreeCAD.Vector(x, y, z))
def moveBy(self, x, y, z):
return self.moveTo(self.End + FreeCAD.Vector(x, y, z))
def move(self, distance, angle):
dx = distance * math.cos(angle)
dy = distance * math.sin(angle)
return self.moveBy(dx, dy, 0)
def asVector(self):
return self.End - self.Start
def asDirection(self):
return self.asVector().normalize()
def asLine(self):
return Part.LineSegment(self.Start, self.End)
def asEdge(self):
return Part.Edge(self.asLine())
def getLength(self):
return self.asVector().Length
def getDirectionOfVector(self, B):
A = self.asDirection()
# if the 2 vectors are identical, they head in the same direction
PathLog.debug(" {}.getDirectionOfVector({})".format(A, B))
if PathGeom.pointsCoincide(A, B):
return 'Straight'
d = -A.x*B.y + A.y*B.x
if d < 0:
return Side.Left
if d > 0:
return Side.Right
# at this point the only direction left is backwards
return 'Back'
def getDirectionOf(self, chordOrVector):
if type(chordOrVector) is Chord:
return self.getDirectionOfVector(chordOrVector.asDirection())
return self.getDirectionOfVector(chordOrVector.normalize())
def getAngleOfVector(self, ref):
angle = self.asVector().getAngle(ref)
# unfortunately they never figure out the sign :(
# positive angles go up, so when the reference vector is left
# then the receiver must go down
if self.getDirectionOfVector(ref) == Side.Left:
return -angle
return angle
def getAngle(self, refChordOrVector):
if type(refChordOrVector) is Chord:
return self.getAngleOfVector(refChordOrVector.asDirection())
return self.getAngleOfVector(refChordOrVector.normalize())
def getAngleXY(self):
return self.getAngle(FreeCAD.Vector(1, 0, 0))
def commandParams(self, f):
params = {"X": self.End.x, "Y": self.End.y, "Z": self.End.z}
if f:
params['F'] = f
return params
def g1Command(self, f):
return Path.Command("G1", self.commandParams(f))
def arcCommand(self, cmd, center, f):
params = self.commandParams(f)
d = center - self.Start
params['I'] = d.x
params['J'] = d.y
params['K'] = 0
return Path.Command(cmd, params)
def g2Command(self, center, f):
return self.arcCommand("G2", center, f)
def g3Command(self, center, f):
return self.arcCommand("G3", center, f)
def isAPlungeMove(self):
return not PathGeom.isRoughly(self.End.z, self.Start.z)
def isANoopMove(self):
PathLog.debug("{}.isANoopMove(): {}".format(self, PathGeom.pointsCoincide(self.Start, self.End)))
return PathGeom.pointsCoincide(self.Start, self.End)
def foldsBackOrTurns(self, chord, side):
direction = chord.getDirectionOf(self)
PathLog.info(" - direction = %s/%s" % (direction, side))
return direction == 'Back' or direction == side
def connectsTo(self, chord):
return PathGeom.pointsCoincide(self.End, chord.Start)
class Bone(object):
def __init__(self, boneId, obj, lastCommand, inChord, outChord, smooth, F):
self.obj = obj
self.boneId = boneId
self.lastCommand = lastCommand
self.inChord = inChord
self.outChord = outChord
self.smooth = smooth
self.smooth = Smooth.Neither
self.F = F
# initialized later
self.cDist = None
self.cAngle = None
self.tAngle = None
self.cPt = None
def angle(self):
if self.cAngle is None:
baseAngle = self.inChord.getAngleXY()
turnAngle = self.outChord.getAngle(self.inChord)
theta = addAngle(baseAngle, (turnAngle - math.pi)/2)
if self.obj.Side == Side.Left:
theta = addAngle(theta, math.pi)
self.tAngle = turnAngle
self.cAngle = theta
return self.cAngle
def distance(self, toolRadius):
if self.cDist is None:
self.angle() # make sure the angles are initialized
self.cDist = toolRadius / math.cos(self.tAngle/2)
return self.cDist
def corner(self, toolRadius):
if self.cPt is None:
self.cPt = self.inChord.move(self.distance(toolRadius), self.angle()).End
return self.cPt
def location(self):
return (self.inChord.End.x, self.inChord.End.y)
def locationZ(self):
return (self.inChord.End.x, self.inChord.End.y, self.inChord.End.z)
def adaptiveLength(self, boneAngle, toolRadius):
theta = self.angle()
distance = self.distance(toolRadius)
# there is something weird happening if the boneAngle came from a horizontal/vertical t-bone
# for some reason pi/2 is not equal to pi/2
if math.fabs(theta - boneAngle) < 0.00001:
# moving directly towards the corner
PathLog.debug("adaptive - on target: %.2f - %.2f" % (distance, toolRadius))
return distance - toolRadius
PathLog.debug("adaptive - angles: corner=%.2f bone=%.2f diff=%.12f" % (theta/math.pi, boneAngle/math.pi, theta - boneAngle))
# The bones root and end point form a triangle with the intersection of the tool path
# with the toolRadius circle around the bone end point.
# In case the math looks questionable, look for "triangle ssa"
# c = distance
# b = self.toolRadius
# beta = fabs(boneAngle - theta)
beta = math.fabs(addAngle(boneAngle, -theta)) # pylint: disable=invalid-unary-operand-type
D = (distance / toolRadius) * math.sin(beta)
if D > 1: # no intersection
PathLog.debug("adaptive - no intersection - no bone")
return 0
gamma = math.asin(D)
alpha = math.pi - beta - gamma
if PathGeom.isRoughly(0.0, math.sin(beta)):
# it is not a good idea to divide by 0
length = 0.0
else:
length = toolRadius * math.sin(alpha) / math.sin(beta)
if D < 1 and toolRadius < distance: # there exists a second solution
beta2 = beta
gamma2 = math.pi - gamma
alpha2 = math.pi - beta2 - gamma2
length2 = toolRadius * math.sin(alpha2) / math.sin(beta2)
length = min(length, length2)
PathLog.debug("adaptive corner=%.2f * %.2f˚ -> bone=%.2f * %.2f˚" % (distance, theta, length, boneAngle))
return length
class ObjectDressup(object):
def __init__(self, obj, base):
# Tool Properties
obj.addProperty("App::PropertyLink", "Base", "Base", QtCore.QT_TRANSLATE_NOOP("Path_DressupDogbone", "The base path to modify"))
obj.addProperty("App::PropertyEnumeration", "Side", "Dressup", QtCore.QT_TRANSLATE_NOOP("Path_DressupDogbone", "The side of path to insert bones"))
obj.Side = [Side.Left, Side.Right]
obj.Side = Side.Right
obj.addProperty("App::PropertyEnumeration", "Style", "Dressup", QtCore.QT_TRANSLATE_NOOP("Path_DressupDogbone", "The style of bones"))
obj.Style = Style.All
obj.Style = Style.Dogbone
obj.addProperty("App::PropertyIntegerList", "BoneBlacklist", "Dressup", QtCore.QT_TRANSLATE_NOOP("Path_DressupDogbone", "Bones that aren't dressed up"))
obj.BoneBlacklist = []
obj.setEditorMode('BoneBlacklist', 2) # hide this one
obj.addProperty("App::PropertyEnumeration", "Incision", "Dressup", QtCore.QT_TRANSLATE_NOOP("Path_DressupDogbone", "The algorithm to determine the bone length"))
obj.Incision = Incision.All
obj.Incision = Incision.Adaptive
obj.addProperty("App::PropertyFloat", "Custom", "Dressup", QtCore.QT_TRANSLATE_NOOP("Path_DressupDogbone", "Dressup length if Incision == custom"))
obj.Custom = 0.0
obj.Proxy = self
obj.Base = base
# initialized later
self.boneShapes = None
self.toolRadius = 0
self.dbg = None
self.locationBlacklist = None
self.shapes = None
self.boneId = None
self.bones = None
def onDocumentRestored(self, obj):
obj.setEditorMode('BoneBlacklist', 2) # hide this one
def __getstate__(self):
return None
def __setstate__(self, state):
return None
def theOtherSideOf(self, side):
if side == Side.Left:
return Side.Right
return Side.Left
# Answer true if a dogbone could be on either end of the chord, given its command
def canAttachDogbone(self, cmd, chord):
return cmd.Name in movestraight and not chord.isAPlungeMove() and not chord.isANoopMove()
def shouldInsertDogbone(self, obj, inChord, outChord):
return outChord.foldsBackOrTurns(inChord, self.theOtherSideOf(obj.Side))
def findPivotIntersection(self, pivot, pivotEdge, edge, refPt, d, color):
# pylint: disable=unused-argument
PathLog.track("(%.2f, %.2f)^%.2f - [(%.2f, %.2f), (%.2f, %.2f)]" % (pivotEdge.Curve.Center.x, pivotEdge.Curve.Center.y, pivotEdge.Curve.Radius, edge.Vertexes[0].Point.x, edge.Vertexes[0].Point.y, edge.Vertexes[1].Point.x, edge.Vertexes[1].Point.y))
ppt = None
pptDistance = 0
for pt in DraftGeomUtils.findIntersection(edge, pivotEdge, dts=False):
# debugMarker(pt, "pti.%d-%s.in" % (self.boneId, d), color, 0.2)
distance = (pt - refPt).Length
PathLog.debug(" --> (%.2f, %.2f): %.2f" % (pt.x, pt.y, distance))
if not ppt or pptDistance < distance:
ppt = pt
pptDistance = distance
if not ppt:
tangent = DraftGeomUtils.findDistance(pivot, edge)
if tangent:
PathLog.debug("Taking tangent as intersect %s" % tangent)
ppt = pivot + tangent
else:
PathLog.debug("Taking chord start as intersect %s" % edge.Vertexes[0].Point)
ppt = edge.Vertexes[0].Point
# debugMarker(ppt, "ptt.%d-%s.in" % (self.boneId, d), color, 0.2)
PathLog.debug(" --> (%.2f, %.2f)" % (ppt.x, ppt.y))
return ppt
def pointIsOnEdge(self, point, edge):
param = edge.Curve.parameter(point)
return edge.FirstParameter <= param <= edge.LastParameter
def smoothChordCommands(self, bone, inChord, outChord, edge, wire, corner, smooth, color=None):
if smooth == 0:
PathLog.info(" No smoothing requested")
return [bone.lastCommand, outChord.g1Command(bone.F)]
d = 'in'
refPoint = inChord.Start
if smooth == Smooth.Out:
d = 'out'
refPoint = outChord.End
if DraftGeomUtils.areColinear(inChord.asEdge(), outChord.asEdge()):
PathLog.info(" straight edge %s" % d)
return [outChord.g1Command(bone.F)]
pivot = None
pivotDistance = 0
PathLog.info("smooth: (%.2f, %.2f)-(%.2f, %.2f)" % (edge.Vertexes[0].Point.x, edge.Vertexes[0].Point.y, edge.Vertexes[1].Point.x, edge.Vertexes[1].Point.y))
for e in wire.Edges:
self.dbg.append(e)
if type(e.Curve) == Part.LineSegment or type(e.Curve) == Part.Line:
PathLog.debug(" (%.2f, %.2f)-(%.2f, %.2f)" % (e.Vertexes[0].Point.x, e.Vertexes[0].Point.y, e.Vertexes[1].Point.x, e.Vertexes[1].Point.y))
else:
PathLog.debug(" (%.2f, %.2f)^%.2f" % (e.Curve.Center.x, e.Curve.Center.y, e.Curve.Radius))
for pt in DraftGeomUtils.findIntersection(edge, e, True, findAll=True):
if not PathGeom.pointsCoincide(pt, corner) and self.pointIsOnEdge(pt, e):
# debugMarker(pt, "candidate-%d-%s" % (self.boneId, d), color, 0.05)
PathLog.debug(" -> candidate")
distance = (pt - refPoint).Length
if not pivot or pivotDistance > distance:
pivot = pt
pivotDistance = distance
else:
PathLog.debug(" -> corner intersect")
if pivot:
# debugCircle(pivot, self.toolRadius, "pivot.%d-%s" % (self.boneId, d), color)
pivotEdge = Part.Edge(Part.Circle(pivot, FreeCAD.Vector(0, 0, 1), self.toolRadius))
t1 = self.findPivotIntersection(pivot, pivotEdge, inChord.asEdge(), inChord.End, d, color)
t2 = self.findPivotIntersection(pivot, pivotEdge, outChord.asEdge(), inChord.End, d, color)
commands = []
if not PathGeom.pointsCoincide(t1, inChord.Start):
PathLog.debug(" add lead in")
commands.append(Chord(inChord.Start, t1).g1Command(bone.F))
if bone.obj.Side == Side.Left:
PathLog.debug(" add g3 command")
commands.append(Chord(t1, t2).g3Command(pivot, bone.F))
else:
PathLog.debug(" add g2 command center=(%.2f, %.2f) -> from (%2f, %.2f) to (%.2f, %.2f" % (pivot.x, pivot.y, t1.x, t1.y, t2.x, t2.y))
commands.append(Chord(t1, t2).g2Command(pivot, bone.F))
if not PathGeom.pointsCoincide(t2, outChord.End):
PathLog.debug(" add lead out")
commands.append(Chord(t2, outChord.End).g1Command(bone.F))
# debugMarker(pivot, "pivot.%d-%s" % (self.boneId, d), color, 0.2)
# debugMarker(t1, "pivot.%d-%s.in" % (self.boneId, d), color, 0.1)
# debugMarker(t2, "pivot.%d-%s.out" % (self.boneId, d), color, 0.1)
return commands
PathLog.info(" no pivot found - straight command")
return [inChord.g1Command(bone.F), outChord.g1Command(bone.F)]
def inOutBoneCommands(self, bone, boneAngle, fixedLength):
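        # Determine the bone length (fixed, custom or adaptive), generate the
        # straight in/out moves and, if smoothing is enabled, blend them with
        # arcs around the bone cut.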
corner = bone.corner(self.toolRadius)
bone.tip = bone.inChord.End # in case there is no bone
PathLog.debug("corner = (%.2f, %.2f)" % (corner.x, corner.y))
# debugMarker(corner, 'corner', (1., 0., 1.), self.toolRadius)
length = fixedLength
if bone.obj.Incision == Incision.Custom:
length = bone.obj.Custom
if bone.obj.Incision == Incision.Adaptive:
length = bone.adaptiveLength(boneAngle, self.toolRadius)
if length == 0:
PathLog.info("no bone after all ..")
return [bone.lastCommand, bone.outChord.g1Command(bone.F)]
# track length for marker visuals
self.length = max(self.length, length)
boneInChord = bone.inChord.move(length, boneAngle)
boneOutChord = boneInChord.moveTo(bone.outChord.Start)
# debugCircle(boneInChord.Start, self.toolRadius, 'boneStart')
# debugCircle(boneInChord.End, self.toolRadius, 'boneEnd')
bone.tip = boneInChord.End
if bone.smooth == 0:
return [bone.lastCommand, boneInChord.g1Command(bone.F), boneOutChord.g1Command(bone.F), bone.outChord.g1Command(bone.F)]
# reconstruct the corner and convert to an edge
offset = corner - bone.inChord.End
iChord = Chord(bone.inChord.Start + offset, bone.inChord.End + offset)
oChord = Chord(bone.outChord.Start + offset, bone.outChord.End + offset)
iLine = iChord.asLine()
oLine = oChord.asLine()
cornerShape = Part.Shape([iLine, oLine])
# construct a shape representing the cut made by the bone
vt0 = FreeCAD.Vector(0, self.toolRadius, 0)
vt1 = FreeCAD.Vector(length, self.toolRadius, 0)
vb0 = FreeCAD.Vector(0, -self.toolRadius, 0)
vb1 = FreeCAD.Vector(length, -self.toolRadius, 0)
vm2 = FreeCAD.Vector(length + self.toolRadius, 0, 0)
boneBot = Part.LineSegment(vb1, vb0)
boneLid = Part.LineSegment(vb0, vt0)
boneTop = Part.LineSegment(vt0, vt1)
# what we actually want is an Arc - but findIntersect only returns the coincident if one exists
# which really sucks because that's the one we're probably not interested in ....
boneArc = Part.Arc(vt1, vm2, vb1)
# boneArc = Part.Circle(FreeCAD.Vector(length, 0, 0), FreeCAD.Vector(0,0,1), self.toolRadius)
boneWire = Part.Shape([boneTop, boneArc, boneBot, boneLid])
boneWire.rotate(FreeCAD.Vector(0, 0, 0), FreeCAD.Vector(0, 0, 1), boneAngle * 180 / math.pi)
boneWire.translate(bone.inChord.End)
self.boneShapes = [cornerShape, boneWire]
bone.inCommands = self.smoothChordCommands(bone, bone.inChord, boneInChord, Part.Edge(iLine), boneWire, corner, bone.smooth & Smooth.In, (1., 0., 0.))
bone.outCommands = self.smoothChordCommands(bone, boneOutChord, bone.outChord, Part.Edge(oLine), boneWire, corner, bone.smooth & Smooth.Out, (0., 1., 0.))
return bone.inCommands + bone.outCommands
def dogbone(self, bone):
boneAngle = bone.angle()
length = self.toolRadius * 0.41422 # 0.41422 = 2/sqrt(2) - 1 + (a tiny bit)
return self.inOutBoneCommands(bone, boneAngle, length)
def tboneHorizontal(self, bone):
angle = bone.angle()
boneAngle = 0
if math.fabs(angle) > math.pi/2:
boneAngle = math.pi
return self.inOutBoneCommands(bone, boneAngle, self.toolRadius)
def tboneVertical(self, bone):
angle = bone.angle()
boneAngle = math.pi/2
if PathGeom.isRoughly(angle, math.pi) or angle < 0:
boneAngle = -boneAngle
return self.inOutBoneCommands(bone, boneAngle, self.toolRadius)
def tboneEdgeCommands(self, bone, onIn):
if onIn:
boneAngle = bone.inChord.getAngleXY()
else:
boneAngle = bone.outChord.getAngleXY()
if Side.Right == bone.outChord.getDirectionOf(bone.inChord):
boneAngle = boneAngle - math.pi/2
else:
boneAngle = boneAngle + math.pi/2
onInString = 'out'
if onIn:
onInString = 'in'
PathLog.debug("tboneEdge boneAngle[%s]=%.2f (in=%.2f, out=%.2f)" % (onInString, boneAngle/math.pi, bone.inChord.getAngleXY()/math.pi, bone.outChord.getAngleXY()/math.pi))
return self.inOutBoneCommands(bone, boneAngle, self.toolRadius)
def tboneLongEdge(self, bone):
inChordIsLonger = bone.inChord.getLength() > bone.outChord.getLength()
return self.tboneEdgeCommands(bone, inChordIsLonger)
def tboneShortEdge(self, bone):
inChordIsShorter = bone.inChord.getLength() < bone.outChord.getLength()
return self.tboneEdgeCommands(bone, inChordIsShorter)
def boneIsBlacklisted(self, bone):
blacklisted = False
parentConsumed = False
if bone.boneId in bone.obj.BoneBlacklist:
blacklisted = True
elif bone.location() in self.locationBlacklist:
bone.obj.BoneBlacklist.append(bone.boneId)
blacklisted = True
elif hasattr(bone.obj.Base, 'BoneBlacklist'):
parentConsumed = bone.boneId not in bone.obj.Base.BoneBlacklist
blacklisted = parentConsumed
if blacklisted:
self.locationBlacklist.add(bone.location())
return (blacklisted, parentConsumed)
# Generate commands necessary to execute the dogbone
def boneCommands(self, bone, enabled):
if enabled:
if bone.obj.Style == Style.Dogbone:
return self.dogbone(bone)
if bone.obj.Style == Style.Tbone_H:
return self.tboneHorizontal(bone)
if bone.obj.Style == Style.Tbone_V:
return self.tboneVertical(bone)
if bone.obj.Style == Style.Tbone_L:
return self.tboneLongEdge(bone)
if bone.obj.Style == Style.Tbone_S:
return self.tboneShortEdge(bone)
else:
return [bone.lastCommand, bone.outChord.g1Command(bone.F)]
def insertBone(self, bone):
PathLog.debug(">----------------------------------- %d --------------------------------------" % bone.boneId)
self.boneShapes = []
blacklisted, inaccessible = self.boneIsBlacklisted(bone)
enabled = not blacklisted
self.bones.append((bone.boneId, bone.locationZ(), enabled, inaccessible))
self.boneId = bone.boneId
if False and PathLog.getLevel(LOG_MODULE) == PathLog.Level.DEBUG and bone.boneId > 2:
commands = self.boneCommands(bone, False)
else:
commands = self.boneCommands(bone, enabled)
bone.commands = commands
self.shapes[bone.boneId] = self.boneShapes
PathLog.debug("<----------------------------------- %d --------------------------------------" % bone.boneId)
return commands
def removePathCrossing(self, commands, bone1, bone2):
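        # If the lead-out of the previous bone crosses the lead-in of this one,
        # trim both at their intersection so the dressed path does not cross itself.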
commands.append(bone2.lastCommand)
bones = bone2.commands
if True and hasattr(bone1, "outCommands") and hasattr(bone2, "inCommands"):
inEdges = edgesForCommands(bone1.outCommands, bone1.tip)
outEdges = edgesForCommands(bone2.inCommands, bone2.inChord.Start)
for i in range(len(inEdges)):
e1 = inEdges[i]
for j in range(len(outEdges)-1, -1, -1):
e2 = outEdges[j]
cutoff = DraftGeomUtils.findIntersection(e1, e2)
for pt in cutoff:
# debugCircle(e1.Curve.Center, e1.Curve.Radius, "bone.%d-1" % (self.boneId), (1.,0.,0.))
# debugCircle(e2.Curve.Center, e2.Curve.Radius, "bone.%d-2" % (self.boneId), (0.,1.,0.))
if PathGeom.pointsCoincide(pt, e1.valueAt(e1.LastParameter)) or PathGeom.pointsCoincide(pt, e2.valueAt(e2.FirstParameter)):
continue
# debugMarker(pt, "it", (0.0, 1.0, 1.0))
# 1. remove all redundant commands
commands = commands[:-(len(inEdges) - i)]
# 2., correct where c1 ends
c1 = bone1.outCommands[i]
c1Params = c1.Parameters
c1Params.update({'X': pt.x, 'Y': pt.y, 'Z': pt.z})
c1 = Path.Command(c1.Name, c1Params)
commands.append(c1)
# 3. change where c2 starts, this depends on the command itself
c2 = bone2.inCommands[j]
if c2.Name in movearc:
center = e2.Curve.Center
offset = center - pt
c2Params = c2.Parameters
c2Params.update({'I': offset.x, 'J': offset.y, 'K': offset.z})
c2 = Path.Command(c2.Name, c2Params)
bones = [c2]
bones.extend(bone2.commands[j+1:])
else:
bones = bone2.commands[j:]
# there can only be the one ...
return commands, bones
return commands, bones
def execute(self, obj, forReal=True):
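        # Walk the base path, insert a bone at every qualifying inside corner
        # and assemble the dressed-up command list into a new Path.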
if not obj.Base:
return
if forReal and not obj.Base.isDerivedFrom("Path::Feature"):
return
if not obj.Base.Path:
return
if not obj.Base.Path.Commands:
return
self.setup(obj, False)
commands = [] # the dressed commands
lastChord = Chord() # the last chord
lastCommand = None # the command that generated the last chord
lastBone = None # track last bone for optimizations
oddsAndEnds = [] # track chords that are connected to plunges - in case they form a loop
boneId = 1
self.bones = []
self.locationBlacklist = set()
self.length = 0
# boneIserted = False
for (i, thisCommand) in enumerate(obj.Base.Path.Commands):
# if i > 14:
# if lastCommand:
# commands.append(lastCommand)
# lastCommand = None
# commands.append(thisCommand)
# continue
PathLog.info("%3d: %s" % (i, thisCommand))
if thisCommand.Name in movecommands:
thisChord = lastChord.moveToParameters(thisCommand.Parameters)
thisIsACandidate = self.canAttachDogbone(thisCommand, thisChord)
if thisIsACandidate and lastCommand and self.shouldInsertDogbone(obj, lastChord, thisChord):
PathLog.info(" Found bone corner: {}".format(lastChord.End))
bone = Bone(boneId, obj, lastCommand, lastChord, thisChord, Smooth.InAndOut, thisCommand.Parameters.get('F'))
bones = self.insertBone(bone)
boneId += 1
if lastBone:
PathLog.info(" removing potential path crossing")
# debugMarker(thisChord.Start, "it", (1.0, 0.0, 1.0))
commands, bones = self.removePathCrossing(commands, lastBone, bone)
commands.extend(bones[:-1])
lastCommand = bones[-1]
lastBone = bone
elif lastCommand and thisChord.isAPlungeMove():
PathLog.info(" Looking for connection in odds and ends")
haveNewLastCommand = False
for chord in (chord for chord in oddsAndEnds if lastChord.connectsTo(chord)):
if self.shouldInsertDogbone(obj, lastChord, chord):
PathLog.info(" and there is one")
PathLog.debug(" odd/end={} last={}".format(chord, lastChord))
bone = Bone(boneId, obj, lastCommand, lastChord, chord, Smooth.In, lastCommand.Parameters.get('F'))
bones = self.insertBone(bone)
boneId += 1
if lastBone:
PathLog.info(" removing potential path crossing")
# debugMarker(chord.Start, "it", (0.0, 1.0, 1.0))
commands, bones = self.removePathCrossing(commands, lastBone, bone)
commands.extend(bones[:-1])
lastCommand = bones[-1]
haveNewLastCommand = True
if not haveNewLastCommand:
commands.append(lastCommand)
lastCommand = None
commands.append(thisCommand)
lastBone = None
elif thisIsACandidate:
PathLog.info(" is a candidate, keeping for later")
if lastCommand:
commands.append(lastCommand)
lastCommand = thisCommand
lastBone = None
elif thisChord.isANoopMove():
PathLog.info(" ignoring and dropping noop move")
continue
else:
PathLog.info(" nope")
if lastCommand:
commands.append(lastCommand)
lastCommand = None
commands.append(thisCommand)
lastBone = None
if lastChord.isAPlungeMove() and thisIsACandidate:
PathLog.info(" adding to odds and ends")
oddsAndEnds.append(thisChord)
lastChord = thisChord
else:
if thisCommand.Name[0] != '(':
PathLog.info(" Clean slate")
if lastCommand:
commands.append(lastCommand)
lastCommand = None
lastBone = None
commands.append(thisCommand)
# for cmd in commands:
# PathLog.debug("cmd = '%s'" % cmd)
path = Path.Path(commands)
obj.Path = path
def setup(self, obj, initial):
PathLog.info("Here we go ... ")
if initial:
if hasattr(obj.Base, "BoneBlacklist"):
# dressing up a bone dressup
obj.Side = obj.Base.Side
else:
PathLog.info("Default side = right")
# otherwise dogbones are opposite of the base path's side
side = Side.Right
if hasattr(obj.Base, 'Side') and obj.Base.Side == 'Inside':
PathLog.info("inside -> side = left")
side = Side.Left
else:
PathLog.info("not inside -> side stays right")
if hasattr(obj.Base, 'Direction') and obj.Base.Direction == 'CCW':
PathLog.info("CCW -> switch sides")
side = Side.oppositeOf(side)
else:
PathLog.info("CW -> stay on side")
obj.Side = side
self.toolRadius = 5
tc = PathDressup.toolController(obj.Base)
if tc is None or tc.ToolNumber == 0:
self.toolRadius = 5
else:
tool = tc.Proxy.getTool(tc) # PathUtils.getTool(obj, tc.ToolNumber)
if not tool or float(tool.Diameter) == 0:
self.toolRadius = 5
else:
self.toolRadius = float(tool.Diameter) / 2
self.shapes = {}
self.dbg = []
def boneStateList(self, obj):
state = {}
# If the receiver was loaded from file, then it never generated the bone list.
if not hasattr(self, 'bones'):
self.execute(obj)
for (nr, loc, enabled, inaccessible) in self.bones:
item = state.get((loc[0], loc[1]))
if item:
item[2].append(nr)
item[3].append(loc[2])
else:
state[(loc[0], loc[1])] = (enabled, inaccessible, [nr], [loc[2]])
return state
class Marker(object):
def __init__(self, pt, r, h):
if PathGeom.isRoughly(h, 0):
h = 0.1
self.pt = pt
self.r = r
self.h = h
self.sep = coin.SoSeparator()
self.pos = coin.SoTranslation()
self.pos.translation = (pt.x, pt.y, pt.z + h / 2)
self.rot = coin.SoRotationXYZ()
self.rot.axis = self.rot.X
self.rot.angle = math.pi / 2
self.cyl = coin.SoCylinder()
self.cyl.radius = r
self.cyl.height = h
# self.cyl.removePart(self.cyl.TOP)
# self.cyl.removePart(self.cyl.BOTTOM)
self.material = coin.SoMaterial()
self.sep.addChild(self.pos)
self.sep.addChild(self.rot)
self.sep.addChild(self.material)
self.sep.addChild(self.cyl)
self.lowlight()
def setSelected(self, selected):
if selected:
self.highlight()
else:
self.lowlight()
def highlight(self):
self.material.diffuseColor = self.color(1)
self.material.transparency = 0.45
def lowlight(self):
self.material.diffuseColor = self.color(0)
self.material.transparency = 0.75
def color(self, id):
if id == 1:
return coin.SbColor(.9, .9, .5)
return coin.SbColor(.9, .5, .9)
class TaskPanel(object):
DataIds = QtCore.Qt.ItemDataRole.UserRole
DataKey = QtCore.Qt.ItemDataRole.UserRole + 1
DataLoc = QtCore.Qt.ItemDataRole.UserRole + 2
def __init__(self, viewProvider, obj):
self.viewProvider = viewProvider
self.obj = obj
self.form = FreeCADGui.PySideUic.loadUi(":/panels/DogboneEdit.ui")
self.s = None
FreeCAD.ActiveDocument.openTransaction(translate("Path_DressupDogbone", "Edit Dogbone Dress-up"))
self.height = 10
self.markers = []
def reject(self):
FreeCAD.ActiveDocument.abortTransaction()
FreeCADGui.Control.closeDialog()
FreeCAD.ActiveDocument.recompute()
FreeCADGui.Selection.removeObserver(self.s)
self.cleanup()
def accept(self):
self.getFields()
FreeCAD.ActiveDocument.commitTransaction()
FreeCADGui.ActiveDocument.resetEdit()
FreeCADGui.Control.closeDialog()
FreeCAD.ActiveDocument.recompute()
FreeCADGui.Selection.removeObserver(self.s)
FreeCAD.ActiveDocument.recompute()
self.cleanup()
def cleanup(self):
self.viewProvider.showMarkers(False)
for m in self.markers:
self.viewProvider.switch.removeChild(m.sep)
self.markers = []
def getFields(self):
self.obj.Style = str(self.form.styleCombo.currentText())
self.obj.Side = str(self.form.sideCombo.currentText())
self.obj.Incision = str(self.form.incisionCombo.currentText())
self.obj.Custom = self.form.custom.value()
blacklist = []
for i in range(0, self.form.bones.count()):
item = self.form.bones.item(i)
if item.checkState() == QtCore.Qt.CheckState.Unchecked:
blacklist.extend(item.data(self.DataIds))
self.obj.BoneBlacklist = sorted(blacklist)
self.obj.Proxy.execute(self.obj)
def updateBoneList(self):
itemList = []
for loc, (enabled, inaccessible, ids, zs) in PathUtil.keyValueIter(self.obj.Proxy.boneStateList(self.obj)):
lbl = '(%.2f, %.2f): %s' % (loc[0], loc[1], ','.join(str(id) for id in ids))
item = QtGui.QListWidgetItem(lbl)
if enabled:
item.setCheckState(QtCore.Qt.CheckState.Checked)
else:
item.setCheckState(QtCore.Qt.CheckState.Unchecked)
flags = QtCore.Qt.ItemFlag.ItemIsSelectable
if not inaccessible:
flags |= QtCore.Qt.ItemFlag.ItemIsEnabled | QtCore.Qt.ItemFlag.ItemIsUserCheckable
item.setFlags(flags)
item.setData(self.DataIds, ids)
item.setData(self.DataKey, ids[0])
item.setData(self.DataLoc, loc)
itemList.append(item)
self.form.bones.clear()
markers = []
for item in sorted(itemList, key=lambda item: item.data(self.DataKey)):
self.form.bones.addItem(item)
loc = item.data(self.DataLoc)
r = max(self.obj.Proxy.length, 1)
markers.append(Marker(FreeCAD.Vector(loc[0], loc[1], min(zs)), r, max(1, max(zs) - min(zs))))
for m in self.markers:
self.viewProvider.switch.removeChild(m.sep)
for m in markers:
self.viewProvider.switch.addChild(m.sep)
self.markers = markers
def updateUI(self):
customSelected = self.obj.Incision == Incision.Custom
self.form.custom.setEnabled(customSelected)
self.form.customLabel.setEnabled(customSelected)
self.updateBoneList()
if PathLog.getLevel(LOG_MODULE) == PathLog.Level.DEBUG:
for obj in FreeCAD.ActiveDocument.Objects:
if obj.Name.startswith('Shape'):
FreeCAD.ActiveDocument.removeObject(obj.Name)
PathLog.info('object name %s' % self.obj.Name)
if hasattr(self.obj.Proxy, "shapes"):
PathLog.info("showing shapes attribute")
for shapes in self.obj.Proxy.shapes.values():
for shape in shapes:
Part.show(shape)
else:
PathLog.info("no shapes attribute found")
def updateModel(self):
self.getFields()
self.updateUI()
FreeCAD.ActiveDocument.recompute()
def setupCombo(self, combo, text, items):
if items and len(items) > 0:
for i in range(combo.count(), -1, -1):
combo.removeItem(i)
combo.addItems(items)
index = combo.findText(text, QtCore.Qt.MatchFixedString)
if index >= 0:
combo.setCurrentIndex(index)
def setFields(self):
self.setupCombo(self.form.styleCombo, self.obj.Style, Style.All)
self.setupCombo(self.form.sideCombo, self.obj.Side, Side.All)
self.setupCombo(self.form.incisionCombo, self.obj.Incision, Incision.All)
self.form.custom.setMinimum(0.0)
self.form.custom.setDecimals(3)
self.form.custom.setValue(self.obj.Custom)
self.updateUI()
def open(self):
self.s = SelObserver()
# install the function mode resident
FreeCADGui.Selection.addObserver(self.s)
def setupUi(self):
self.setFields()
# now that the form is filled, setup the signal handlers
self.form.styleCombo.currentIndexChanged.connect(self.updateModel)
self.form.sideCombo.currentIndexChanged.connect(self.updateModel)
self.form.incisionCombo.currentIndexChanged.connect(self.updateModel)
self.form.custom.valueChanged.connect(self.updateModel)
self.form.bones.itemChanged.connect(self.updateModel)
self.form.bones.itemSelectionChanged.connect(self.updateMarkers)
self.viewProvider.showMarkers(True)
def updateMarkers(self):
index = self.form.bones.currentRow()
for i, m in enumerate(self.markers):
m.setSelected(i == index)
class SelObserver(object):
def __init__(self):
import PathScripts.PathSelection as PST
PST.eselect()
def __del__(self):
import PathScripts.PathSelection as PST
PST.clear()
def addSelection(self, doc, obj, sub, pnt):
# pylint: disable=unused-argument
FreeCADGui.doCommand('Gui.Selection.addSelection(FreeCAD.ActiveDocument.' + obj + ')')
FreeCADGui.updateGui()
class ViewProviderDressup(object):
def __init__(self, vobj):
self.vobj = vobj
self.obj = None
def attach(self, vobj):
self.obj = vobj.Object
if self.obj and self.obj.Base:
for i in self.obj.Base.InList:
if hasattr(i, "Group"):
group = i.Group
for g in group:
if g.Name == self.obj.Base.Name:
group.remove(g)
i.Group = group
# FreeCADGui.ActiveDocument.getObject(obj.Base.Name).Visibility = False
self.switch = coin.SoSwitch()
vobj.RootNode.addChild(self.switch)
def showMarkers(self, on):
sw = coin.SO_SWITCH_ALL if on else coin.SO_SWITCH_NONE
self.switch.whichChild = sw
def claimChildren(self):
return [self.obj.Base]
def setEdit(self, vobj, mode=0):
# pylint: disable=unused-argument
FreeCADGui.Control.closeDialog()
panel = TaskPanel(self, vobj.Object)
FreeCADGui.Control.showDialog(panel)
panel.setupUi()
return True
def __getstate__(self):
return None
def __setstate__(self, state):
return None
def onDelete(self, arg1=None, arg2=None):
'''this makes sure that the base operation is added back to the project and visible'''
# pylint: disable=unused-argument
if arg1.Object and arg1.Object.Base:
FreeCADGui.ActiveDocument.getObject(arg1.Object.Base.Name).Visibility = True
job = PathUtils.findParentJob(arg1.Object)
if job:
job.Proxy.addOperation(arg1.Object.Base, arg1.Object)
arg1.Object.Base = None
return True
def Create(base, name='DogboneDressup'):
'''
Create(obj, name='DogboneDressup') ... dresses the given PathProfile/PathContour object with dogbones.
'''
obj = FreeCAD.ActiveDocument.addObject('Path::FeaturePython', name)
dbo = ObjectDressup(obj, base)
job = PathUtils.findParentJob(base)
job.Proxy.addOperation(obj, base)
if FreeCAD.GuiUp:
obj.ViewObject.Proxy = ViewProviderDressup(obj.ViewObject)
obj.Base.ViewObject.Visibility = False
dbo.setup(obj, True)
return obj
class CommandDressupDogbone(object):
# pylint: disable=no-init
def GetResources(self):
return {'Pixmap': 'Path_Dressup',
'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_DressupDogbone", "Dogbone Dress-up"),
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_DressupDogbone", "Creates a Dogbone Dress-up object from a selected path")}
def IsActive(self):
if FreeCAD.ActiveDocument is not None:
for o in FreeCAD.ActiveDocument.Objects:
if o.Name[:3] == "Job":
return True
return False
def Activated(self):
# check that the selection contains exactly what we want
selection = FreeCADGui.Selection.getSelection()
if len(selection) != 1:
FreeCAD.Console.PrintError(translate("Path_DressupDogbone", "Please select one path object")+"\n")
return
baseObject = selection[0]
if not baseObject.isDerivedFrom("Path::Feature"):
FreeCAD.Console.PrintError(translate("Path_DressupDogbone", "The selected object is not a path")+"\n")
return
# everything ok!
FreeCAD.ActiveDocument.openTransaction(translate("Path_DressupDogbone", "Create Dogbone Dress-up"))
FreeCADGui.addModule('PathScripts.PathDressupDogbone')
FreeCADGui.doCommand("PathScripts.PathDressupDogbone.Create(FreeCAD.ActiveDocument.%s)" % baseObject.Name)
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
if FreeCAD.GuiUp:
import FreeCADGui
from PySide import QtGui
from pivy import coin
FreeCADGui.addCommand('Path_DressupDogbone', CommandDressupDogbone())
FreeCAD.Console.PrintLog("Loading DressupDogbone... done\n")
| Fat-Zer/FreeCAD_sf_master | src/Mod/Path/PathScripts/PathDressupDogbone.py | Python | lgpl-2.1 | 49,208 |
import sys
def setup(core, object):
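	# Static equipment item: ring radial/STF names, level/class requirements and the commando utility B set bonuses.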
object.setAttachment('radial_filename', 'ring/unity')
object.setAttachment('objType', 'ring')
object.setStfFilename('static_item_n')
object.setStfName('item_ring_set_commando_utility_b_01_01')
object.setDetailFilename('static_item_d')
object.setDetailName('item_ring_set_commando_utility_b_01_01')
object.setIntAttribute('required_combat_level', 85)
object.setStringAttribute('class_required', 'Commando')
object.setIntAttribute('cat_stat_mod_bonus.@stat_n:constitution_modified', 10)
object.setIntAttribute('cat_stat_mod_bonus.@stat_n:strength_modified', 15)
object.setIntAttribute('cat_skill_mod_bonus.@stat_n:expertise_devastation_bonus', 5)
object.setStringAttribute('@set_bonus:piece_bonus_count_3', '@set_bonus:set_bonus_commando_utility_b_1')
object.setStringAttribute('@set_bonus:piece_bonus_count_4', '@set_bonus:set_bonus_commando_utility_b_2')
object.setStringAttribute('@set_bonus:piece_bonus_count_5', '@set_bonus:set_bonus_commando_utility_b_3')
object.setAttachment('setBonus', 'set_bonus_commando_utility_b')
	return
| agry/NGECore2 | scripts/object/tangible/wearables/ring/item_ring_set_commando_utility_b_01_01.py | Python | lgpl-3.0 | 1,084 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
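	# Registers the invulnerable, conversable 'jano' NPC template with a default unarmed attack.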
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('tatooine_opening_jano')
mobileTemplate.setLevel(1)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setSocialGroup("township")
mobileTemplate.setOptionsBitmask(Options.INVULNERABLE | Options.CONVERSABLE)
templates = Vector()
templates.add('object/mobile/shared_dressed_tatooine_opening_jano.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('jano', mobileTemplate)
	return
| ProjectSWGCore/NGECore2 | scripts/mobiles/generic/static/tatooine/jano.py | Python | lgpl-3.0 | 1,147 |
#!/usr/bin/python
''' Auto-generated ui widget plugin '''
from projexui.qt.QtDesigner import QPyDesignerCustomWidgetPlugin
from projexui.qt.QtGui import QIcon
import projex.resources
from projexui.widgets.xviewwidget import XViewWidget as Base
setattr(Base, '__designer_mode__', True)
DEFAULT_XML = '''<ui language="c++" displayname="XViewWidget">
<widget class="XViewWidget" name="XViewWidget"/>
<customwidgets>
<customwidget>
<class>XViewWidget</class>
<header>projexui.widgets.xviewwidget</header>
<addpagemethod>%(addpagemethod)s</addpagemethod>
<propertyspecifications>
%(propertyspecs)s
</propertyspecifications>
</customwidget>
</customwidgets>
</ui>'''
class XViewWidgetPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent=None):
super(XViewWidgetPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, core):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return Base(parent)
def name(self):
return getattr(Base, '__designer_name__', Base.__name__)
def group(self):
return getattr(Base, '__designer_group__', 'ProjexUI')
def icon(self):
default = projex.resources.find('img/logo_16.png')
return QIcon(getattr(Base, '__designer_icon__', default))
def toolTip( self ):
docs = getattr(Base, '__doc__', '')
if docs is None:
docs = ''
return getattr(Base, '__designer_tooltip__', docs)
def whatsThis( self ):
return ''
def isContainer( self ):
return getattr(Base, '__designer_container__', False)
def includeFile( self ):
return 'projexui.widgets.xviewwidget'
def domXml( self ):
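        # Build the Qt Designer XML for the widget, expanding any property
        # specifications declared on the wrapped class.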
opts = {}
specs = []
for prop, info in getattr(Base, '__designer_propspecs__', {}).items():
xml = '<%spropertyspecification name="%s" type="%s"/>'
xml %= (info[0], prop, info[1])
specs.append(xml)
opts['addpagemethod'] = getattr(Base, '__designer_addpage__', '')
opts['propertyspecs'] = ''.join(specs)
default = DEFAULT_XML % opts
return getattr(Base, '__designer_xml__', default)
| bitesofcode/projexui | projexui/designer/build/xviewwidgetplugin.py | Python | lgpl-3.0 | 2,507 |
import enum
import inspect
import pydoc
import unittest
from collections import OrderedDict
from enum import Enum, IntEnum, EnumMeta, unique
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
# for pickle test and subclass tests
try:
class StrEnum(str, Enum):
'accepts only string values'
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
except Exception as exc:
Name = exc
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
tomato = 1
banana = 2
cherry = 3
except Exception:
pass
def test_pickle_dump_load(assertion, source, target=None,
*, protocol=(0, HIGHEST_PROTOCOL)):
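    # Round-trip `source` through every pickle protocol in the range and
    # assert it compares equal to `target`.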
start, stop = protocol
if target is None:
target = source
for protocol in range(start, stop+1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj,
*, protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
for protocol in range(start, stop+1):
with assertion(exception):
dumps(obj, protocol=protocol)
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.{0}: {1}>'.format(season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_contains(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
self.assertNotIn(3, Season)
val = Season(3)
self.assertIn(val, Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_format_enum_custom(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_exception(
self.assertRaises, PicklingError, self.NestedEnum.twigs,
protocol=(0, 3))
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs,
protocol=(4, HIGHEST_PROTOCOL))
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
        self.assertEqual(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5, protocol=(4, 4))
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y, protocol=(4, 4))
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class AlwaysEqual:
def __eq__(self, other):
return True
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m3 kg-1 s-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
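# Expected pydoc rendering of the TestStdLib.Color enum below; test_pydoc
# compares pydoc's output against this text verbatim.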
expected_help_output = """
Help on class Color in module %s:
class Color(enum.Enum)
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping.
""".strip()
class TestStdLib(unittest.TestCase):
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
expected_text = expected_help_output % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumMeta),
('__doc__', None),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(values.keys(), result.keys())
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumMeta),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object=None),
Attribute(name='__members__', kind='property',
defining_class=EnumMeta, object=EnumMeta.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
if __name__ == '__main__':
unittest.main()
| Orav/kbengine | kbe/src/lib/python/Lib/test/test_enum.py | Python | lgpl-3.0 | 58,305 |
# Copyright 2012-2013 Greg Horn
#
# This file is part of rawesome.
#
# rawesome is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rawesome is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with rawesome. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import time
#from pylab import *
import casadi as C
import nmheMaps
from ocputils import Constraints
from newton import Newton
from collocation import LagrangePoly
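# Nonlinear moving-horizon estimation (NMHE) problem builder: the decision
# vector holds the differential states at the nk+1 shooting nodes plus the
# model parameters, the control trajectory U is a known input, and the
# objective can mix an arbitrary term with Gauss-Newton residual terms
# registered through addGaussNewtonObjF().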
class Nmhe(object):
def __init__(self,dae,nk):
self.dae = dae
self.nk = nk
self._gaussNewtonObjF = []
mapSize = len(self.dae.xNames())*(self.nk+1) + len(self.dae.pNames())
V = C.msym('dvs',mapSize)
self._dvMap = nmheMaps.VectorizedReadOnlyNmheMap(self.dae,self.nk,V)
self._boundMap = nmheMaps.WriteableNmheMap(self.dae,self.nk)
self._guessMap = nmheMaps.WriteableNmheMap(self.dae,self.nk)
self._U = C.msym('u',self.nk,len(self.dae.uNames()))
self._outputMapGenerator = nmheMaps.NmheOutputMapGenerator(self,self._U)
self._outputMap = nmheMaps.NmheOutputMap(self._outputMapGenerator, self._dvMap.vectorize(), self._U)
self._constraints = Constraints()
def __call__(self,*args,**kwargs):
return self.lookup(*args,**kwargs)
def lookup(self,name,timestep=None):
try:
return self._dvMap.lookup(name,timestep=timestep)
except NameError:
pass
try:
return self._outputMap.lookup(name,timestep)
except NameError:
pass
raise NameError("unrecognized name \""+name+"\"")
def bound(self,name,(lb,ub),timestep=None):
self._boundMap.setVal(name,(lb,ub),timestep=timestep)
def guess(self,name,val,timestep=None):
self._guessMap.setVal(name,val,timestep=timestep)
def constrain(self,lhs,comparison,rhs,tag=('unnamed_constraint',None)):
self._constraints.add(lhs,comparison,rhs,tag)
def setObj(self,obj):
if hasattr(self,'_obj'):
raise ValueError("don't change the objective function")
self._obj = obj
def addGaussNewtonObjF(self,gnF):
self._gaussNewtonObjF.append(gnF)
def _setupDynamicsConstraints(self,endTime,traj):
# Todo: add parallelization
# Todo: get endTime right
g = []
nicp = 1
deg = 4
p = self._dvMap.pVec()
for k in range(self.nk):
newton = Newton(LagrangePoly,self.dae,1,nicp,deg,'RADAU')
newton.setupStuff(endTime)
X0_i = self._dvMap.xVec(k)
U_i = self._U[k,:].T
# guess
if traj is None:
newton.isolver.setOutput(1,0)
else:
X = C.DMatrix([[traj.lookup(name,timestep=k,degIdx=j) for j in range(1,traj.dvMap._deg+1)] \
for name in traj.dvMap._xNames])
Z = C.DMatrix([[traj.lookup(name,timestep=k,degIdx=j) for j in range(1,traj.dvMap._deg+1)] \
for name in traj.dvMap._zNames])
newton.isolver.setOutput(C.veccat([X,Z]),0)
_, Xf_i = newton.isolver.call([X0_i,U_i,p])
X0_i_plus = self._dvMap.xVec(k+1)
g.append(Xf_i-X0_i_plus)
return g
def makeSolver(self,endTime,traj=None):
# make sure all bounds are set
(xMissing,pMissing) = self._boundMap.getMissing()
msg = []
for name in xMissing:
msg.append("you forgot to set a bound on \""+name+"\" at timesteps: "+str(xMissing[name]))
for name in pMissing:
msg.append("you forgot to set a bound on \""+name+"\"")
if len(msg)>0:
raise ValueError('\n'.join(msg))
# constraints:
g = self._constraints.getG()
glb = self._constraints.getLb()
gub = self._constraints.getUb()
gDyn = self._setupDynamicsConstraints(endTime,traj)
gDynLb = gDynUb = [C.DMatrix.zeros(gg.shape) for gg in gDyn]
g = C.veccat([g]+gDyn)
glb = C.veccat([glb]+gDynLb)
gub = C.veccat([gub]+gDynUb)
self.glb = glb
self.gub = gub
# design vars
V = self._dvMap.vectorize()
# gradient of arbitraryObj
if hasattr(self,'_obj'):
arbitraryObj = self._obj
else:
arbitraryObj = 0
gradF = C.gradient(arbitraryObj,V)
# hessian of lagrangian:
Js = [C.jacobian(gnf,V) for gnf in self._gaussNewtonObjF]
gradFgns = [C.mul(J.T,F) for (F,J) in zip(self._gaussNewtonObjF, Js)]
gaussNewtonHess = sum([C.mul(J.T,J) for J in Js])
hessL = gaussNewtonHess + C.jacobian(gradF,V)
gradF += sum(gradFgns)
# equality/inequality constraint jacobian
gfcn = C.MXFunction([V,self._U],[g])
gfcn.init()
jacobG = gfcn.jacobian(0,0)
jacobG.init()
# function which generates everything needed
f = sum([f_*f_ for f_ in self._gaussNewtonObjF])
if hasattr(self,'_obj'):
f += self._obj
self.masterFun = C.MXFunction([V,self._U],[hessL, gradF, g, jacobG.call([V,self._U])[0], f])
self.masterFun.init()
# self.qp = C.CplexSolver(hessL.sparsity(),jacobG.output(0).sparsity())
self.qp = C.NLPQPSolver(hessL.sparsity(),jacobG.output(0).sparsity())
self.qp.setOption('nlp_solver',C.IpoptSolver)
self.qp.setOption('nlp_solver_options',{'print_level':0,'print_time':False})
self.qp.init()
def runSolver(self,U,trajTrue=None):
# make sure all bounds are set
(xMissing,pMissing) = self._guessMap.getMissing()
msg = []
for name in xMissing:
msg.append("you forgot to set a guess for \""+name+"\" at timesteps: "+str(xMissing[name]))
for name in pMissing:
msg.append("you forgot to set a guess for \""+name+"\"")
if len(msg)>0:
raise ValueError('\n'.join(msg))
lbx,ubx = zip(*(self._boundMap.vectorize()))
xk = C.DMatrix(list(self._guessMap.vectorize()))
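        # SQP-style loop with full steps: evaluate the Hessian approximation,
        # gradient, constraints and constraint Jacobian at xk, solve the QP
        # for a step deltaX, and apply it; currently runs a fixed 100 iterations.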
for k in range(100):
############# plot stuff ###############
print "iteration: ",k
# import nmheMaps
# xOpt = np.array(xk).squeeze()
# traj = nmheMaps.VectorizedReadOnlyNmheMap(self.dae,self.nk,xOpt)
#
# xsT = np.array([trajTrue.lookup('x',timestep=kk) for kk in range(self.nk+1)] )
# ysT = np.array([trajTrue.lookup('y',timestep=kk) for kk in range(self.nk+1)] )
# zsT = np.array([trajTrue.lookup('z',timestep=kk) for kk in range(self.nk+1)] )
#
# xs = np.array([traj.lookup('x',timestep=kk) for kk in range(self.nk+1)] )
# ys = np.array([traj.lookup('y',timestep=kk) for kk in range(self.nk+1)] )
# zs = np.array([traj.lookup('z',timestep=kk) for kk in range(self.nk+1)] )
#
# outputMap = nmheMaps.NmheOutputMap(self._outputMapGenerator, xOpt, U)
# c = np.array([outputMap.lookup('c',timestep=kk) for kk in range(self.nk)])
# cdot = np.array([outputMap.lookup('cdot',timestep=kk) for kk in range(self.nk)])
#
# figure()
# title(str(float(k)))
# subplot(3,2,1)
# plot(xs)
# plot(xsT)
# ylabel('x '+str(k))
#
# subplot(3,2,3)
# plot(ys)
# plot(ysT)
# ylabel('y '+str(k))
#
# subplot(3,2,5)
# plot(zs)
# plot(zsT)
# ylabel('z '+str(k))
#
## subplot(2,2,2)
## plot(dxs,-dzs)
## ylabel('vel')
## axis('equal')
#
# subplot(3,2,2)
# plot(c)
# ylabel('c')
#
# subplot(3,2,4)
# plot(cdot)
# ylabel('cdot')
# ##########################################
self.masterFun.setInput(xk,0)
self.masterFun.setInput(U,1)
t0 = time.time()
try:
self.masterFun.evaluate()
except RuntimeError as e:
print "ERRRRRRRRRRRRROR"
show()
raise e
t1 = time.time()
masterFunTime = (t1-t0)*1000
hessL = self.masterFun.output(0)
gradF = self.masterFun.output(1)
g = self.masterFun.output(2)
jacobG = self.masterFun.output(3)
f = self.masterFun.output(4)
self.qp.setInput(0, C.QP_X_INIT)
self.qp.setInput(hessL, C.QP_H)
self.qp.setInput(jacobG, C.QP_A)
self.qp.setInput(gradF, C.QP_G)
assert all((lbx-xk) <= 0), "lower bounds violation"
assert all((ubx-xk) >= 0), "upper bounds violation"
self.qp.setInput(lbx-xk,C.QP_LBX)
self.qp.setInput(ubx-xk,C.QP_UBX)
self.qp.setInput(self.glb-g, C.QP_LBA)
self.qp.setInput(self.gub-g, C.QP_UBA)
t0 = time.time()
self.qp.evaluate()
t1 = time.time()
# print "gradF: ",gradF
# print 'dim(jacobG): "gra
# print "rank: ",np.linalg.matrix_rank(jacobG)
print "masterFun delta time: %.3f ms" % masterFunTime
print "f: ",f,'\tmax constraint: ',max(C.fabs(g))
print "qp delta time: %.3f ms" % ((t1-t0)*1000)
print ""
deltaX = self.qp.output(C.QP_PRIMAL)
# import scipy.io
# scipy.io.savemat('hessL.mat',{'hessL':np.array(hessL),
# 'gradF':np.array(gradF),
# 'x0':0*np.array(deltaX),
# 'xopt':np.array(deltaX),
# 'lbx':np.array(lbx-xk),
# 'ubx':np.array(ubx-xk),
# 'jacobG':np.array(jacobG),
# 'lba':np.array(self.glb-g),
# 'uba':np.array(self.gub-g)})
# import sys; sys.exit()
# print deltaX
xk += deltaX
# show()
| ghorn/rawesome | rawe/newton/nmhe.py | Python | lgpl-3.0 | 10,700 |
#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
import nltk
import json
from nltk_contrib import timex
import time
import sys
import getopt
USAGE = """
nltk-rest [--port -p <port>] [--verbose -v] [--units -u] [--help -h]
Expose NLTK over REST as a server using Python Flask. Submit content to the
`/nltk` endpoint in the REST body request.
-h, --help Prints this message.
-p, --port Sets the port for the REST server, default is 8881.
    -u, --units Enable parser to extract measurements from text.
    -v, --verbose Print verbose output and run the server in debug mode.
"""
Verbose = 0
Port = 8881 #default port
Units = 0
def echo2(*s): sys.stderr.write('server.py [NLTK]: ' + ' '.join(map(str, s)) + '\n')
def die(*s): echo2(*s); sys.exit(1)  # report a fatal error and exit
app = Flask(__name__)
@app.route('/')
def status():
msg = '''
<html><head><title>NLTK REST Server</title></head><body><h3>NLTK REST server</h3>
<p>This app exposes the Python <a href="http://nltk.org/">Natural Language Toolkit (NLTK)</a>
as a REST server.</p>
<h2>Status: Running</h2>
<p>More apps from the <a href="//irds.usc.edu/">USC Information Retrieval & Data Science Group</a>.</p>
'''
return msg
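# /nltk endpoint: read the raw request body, run TIMEX date/time tagging,
# tokenization, POS tagging and binary named-entity chunking over it, and
# return the recognized names (plus optional unit phrases) as JSON.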
@app.route('/nltk', methods=["PUT", "POST"])
def namedEntityRecognizer():
echo2("Performing NER on incoming stream")
content = request.stream.read()
if Verbose:
echo2("Incoming content is "+content)
start = time.time()
date_time = timex.tag(content)
tokenized = nltk.word_tokenize(content.decode("utf-8"))
tagged = nltk.pos_tag(tokenized)
namedEnt = nltk.ne_chunk(tagged, binary=True)
names = extract_entity_names(namedEnt, 'NE')
names.extend(date_time)
result = {"result" : "success", "names" : names}
if Units:
grammar = '''unit: {<CD><NNS>?<NN.*>?},
unit: {<CD><JJ>?<NN.*>}
'''
parser = nltk.RegexpParser(grammar)
units = extract_entity_names(parser.parse(tagged),'unit')
result['units'] = units
jsonDoc = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
end = time.time()
print "NER took "+str(end - start)+" seconds"
return jsonDoc
# Based on example from:
# https://gist.github.com/onyxfish/322906
def extract_entity_names(t, label):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == label:
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child, label))
return entity_names
def main(argv=None):
"""Run NLTK REST server from command line according to USAGE."""
global Verbose
global Units
if argv is None:
argv = sys.argv
try:
opts, argv = getopt.getopt(argv[1:], 'hp:vu',
['help', 'port=', 'verbose', 'units'])
except getopt.GetoptError, (msg, bad_opt):
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
        elif opt in ('-p', '--port'): port = int(val)
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-u', '--units'): Units = 1
else: die(USAGE)
app.run(debug=Verbose, port=port)
if __name__ == '__main__':
main(sys.argv)
| chrismattmann/NLTKRest | nltkrest/nltkrest/server.py | Python | apache-2.0 | 4,079 |
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List Command."""
from __future__ import print_function
from biggraphite.cli import command
from biggraphite.glob_utils import graphite_glob
def list_metrics(accessor, pattern, graphite=True):
"""Return the list of metrics corresponding to pattern.
    Metric names that are None are skipped.
Args:
accessor: Accessor, a connected accessor
pattern: string, e.g. my.metric.a or my.metric.**.a
Optional Args:
graphite: bool, use graphite globbing if True.
Returns:
iterable(Metric)
"""
if not graphite:
metrics_names = accessor.glob_metric_names(pattern)
else:
metrics, _ = graphite_glob(
accessor, pattern, metrics=True, directories=False
)
metrics_names = [metric.name for metric in metrics]
for metric in metrics_names:
if metric is None:
continue
yield accessor.get_metric(metric)
class CommandList(command.BaseCommand):
"""List for metrics."""
NAME = "list"
HELP = "List metrics."
def add_arguments(self, parser):
"""Add custom arguments.
See command.CommandBase.
"""
parser.add_argument("glob", help="One metric name or globbing on metrics names")
parser.add_argument(
"--graphite",
default=False,
action="store_true",
help="Enable Graphite globbing",
)
def run(self, accessor, opts):
"""List metrics and directories.
See command.CommandBase.
"""
accessor.connect()
if not opts.graphite:
directories_names = accessor.glob_directory_names(opts.glob)
else:
_, directories_names = graphite_glob(
accessor, opts.glob, metrics=False, directories=True
)
for directory in directories_names:
print("d %s" % directory)
for metric in list_metrics(accessor, opts.glob, opts.graphite):
if metric:
print("m %s %s" % (metric.name, metric.metadata.as_string_dict()))
| Thib17/biggraphite | biggraphite/cli/command_list.py | Python | apache-2.0 | 2,668 |
from sys import maxsize
class Group:
def __init__(self, group_name=None, group_header=None, group_footer=None, id=None):
self.group_name = group_name
self.group_header = group_header
self.group_footer = group_footer
self.id = id
def __repr__(self):
return '%s:%s' % (self.id, self.group_name)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.group_name == other.group_name
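    # Returns the numeric id when it is known and sys.maxsize otherwise
    # (useful as a sort key so groups without an id sort last).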
def if_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
class GroupBase:
def __init__(self, app):
self.app = app
def open_group_page(self):
wd = self.app.wd
if not (wd.current_url.endswith('/group.php') and len(wd.find_elements_by_name('new')) > 0):
wd.find_element_by_link_text("groups").click()
def count(self):
wd = self.app.wd
self.open_group_page()
return len(wd.find_elements_by_name("selected[]"))
def validation_of_group_exist(self):
if self.count() == 0:
self.create(Group(group_name='test'))
self.click_group_page()
def group_line(self, field, text):
wd = self.app.wd
if text:
wd.find_element_by_name(field).click()
wd.find_element_by_name(field).clear()
wd.find_element_by_name(field).send_keys(text)
def create(self, Group):
wd = self.app.wd
self.open_group_page()
wd.find_element_by_name("new").click()
self.group_line('group_name', Group.group_name)
self.group_line('group_header', Group.group_header)
self.group_line('group_footer', Group.group_footer)
wd.find_element_by_name("submit").click()
self.group_cache = None
def delete_first_group(self):
self.delete_group_by_index(0)
def click_group_page(self):
wd = self.app.wd
wd.find_element_by_css_selector("div.msgbox").click()
wd.find_element_by_link_text("group page").click()
group_cache = None
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.open_group_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector('span.group'):
text = element.text
id = element.find_element_by_name('selected[]').get_attribute('value')
self.group_cache.append(Group(group_name=text, id=id))
return list(self.group_cache)
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def delete_group_by_index(self, index):
wd = self.app.wd
self.open_group_page()
self.select_group_by_index(index)
wd.find_element_by_name('delete').click()
self.click_group_page()
self.group_cache = None
def edit_group_by_index(self, Group, index):
wd = self.app.wd
self.open_group_page()
wd.find_elements_by_name("selected[]")[index].click()
wd.find_element_by_name("edit").click()
self.group_line('group_name', Group.group_name)
self.group_line('group_header', Group.group_header)
self.group_line('group_footer', Group.group_footer)
wd.find_element_by_name("update").click()
wd.find_element_by_link_text("groups").click()
self.group_cache = None
| werbk/task-5.14 | tests_group/group_lib.py | Python | apache-2.0 | 3,504 |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import gevent
import logging
import kazoo.client
import kazoo.exceptions
import kazoo.handlers.gevent
import kazoo.recipe.election
from kazoo.client import KazooState
from kazoo.retry import KazooRetry
from bitarray import bitarray
from cfgm_common.exceptions import ResourceExhaustionError, ResourceExistsError
from gevent.coros import BoundedSemaphore
import uuid
LOG_DIR = '/var/log/contrail/'
class IndexAllocator(object):
def __init__(self, zookeeper_client, path, size=0, start_idx=0,
reverse=False,alloc_list=None, max_alloc=0):
self._size = size
self._start_idx = start_idx
if alloc_list is None:
self._alloc_list = [{'start':start_idx, 'end':start_idx+size}]
else:
sorted_alloc_list = sorted(alloc_list, key=lambda k: k['start'])
self._alloc_list = sorted_alloc_list
alloc_count = len(self._alloc_list)
total_size = 0
size = 0
#check for overlap in alloc_list --TODO
for alloc_idx in range (0, alloc_count -1):
idx_start_addr = self._alloc_list[alloc_idx]['start']
idx_end_addr = self._alloc_list[alloc_idx]['end']
next_start_addr = self._alloc_list[alloc_idx+1]['start']
if next_start_addr <= idx_end_addr:
raise Exception(
'Allocation Lists Overlapping: %s' %(alloc_list))
size += idx_end_addr - idx_start_addr + 1
size += self._alloc_list[alloc_count-1]['end'] - self._alloc_list[alloc_count-1]['start'] + 1
if max_alloc == 0:
self._max_alloc = size
else:
self._max_alloc = max_alloc
self._zookeeper_client = zookeeper_client
self._path = path
self._in_use = bitarray('0')
self._reverse = reverse
for idx in self._zookeeper_client.get_children(path):
idx_int = self._get_bit_from_zk_index(int(idx))
if idx_int >= 0:
self._set_in_use(idx_int)
# end for idx
# end __init__
def _get_zk_index_from_bit(self, idx):
size = idx
if self._reverse:
for alloc in reversed(self._alloc_list):
size -= alloc['end'] - alloc['start'] + 1
if size < 0:
return alloc['start']-size - 1
else:
for alloc in self._alloc_list:
size -= alloc['end'] - alloc['start'] + 1
if size < 0:
return alloc['end']+size + 1
raise ResourceExhaustionError(
'Cannot get zk index from bit %s' %(idx))
# end _get_zk_index
def _get_bit_from_zk_index(self, idx):
size = 0
if self._reverse:
for alloc in reversed(self._alloc_list):
if alloc['start'] <= idx <= alloc['end']:
return alloc['end'] - idx + size
size += alloc['end'] - alloc['start'] + 1
pass
else:
for alloc in self._alloc_list:
if alloc['start'] <= idx <= alloc['end']:
return idx - alloc['start'] + size
size += alloc['end'] - alloc['start'] + 1
return -1
# end _get_bit_from_zk_index
def _set_in_use(self, bitnum):
# if the index is higher than _max_alloc, do not use the bitarray, in
# order to reduce the size of the bitarray. Otherwise, set the bit
# corresponding to idx to 1 and extend the _in_use bitarray if needed
if bitnum > self._max_alloc:
return
if bitnum >= self._in_use.length():
temp = bitarray(bitnum - self._in_use.length())
temp.setall(0)
temp.append('1')
self._in_use.extend(temp)
else:
self._in_use[bitnum] = 1
# end _set_in_use
    def _reset_in_use(self, bitnum):
        # if the index is higher than _max_alloc, the bitarray was never used
        # for it, so there is nothing to clear. Otherwise clear the bit
        # corresponding to bitnum if it falls inside the current bitarray
if bitnum > self._max_alloc:
return
if bitnum >= self._in_use.length():
return
else:
self._in_use[bitnum] = 0
# end _reset_in_use
def set_in_use(self, idx):
bit_idx = self._get_bit_from_zk_index(idx)
if bit_idx < 0:
return
self._set_in_use(bit_idx)
# end set_in_use
def reset_in_use(self, idx):
bit_idx = self._get_bit_from_zk_index(idx)
if bit_idx < 0:
return
self._reset_in_use(bit_idx)
# end reset_in_use
def get_alloc_count(self):
return self._in_use.count()
# end get_alloc_count
    def alloc(self, value=None):
        # Allocates an index from the allocation list
if self._in_use.all():
idx = self._in_use.length()
if idx > self._max_alloc:
raise ResourceExhaustionError()
self._in_use.append(1)
else:
idx = self._in_use.index(0)
self._in_use[idx] = 1
idx = self._get_zk_index_from_bit(idx)
try:
# Create a node at path and return its integer value
id_str = "%(#)010d" % {'#': idx}
self._zookeeper_client.create_node(self._path + id_str, value)
return idx
except ResourceExistsError:
return self.alloc(value)
# end alloc
def reserve(self, idx, value=None):
# Reserves the requested index if available
if not self._start_idx <= idx < self._start_idx + self._size:
return None
try:
# Create a node at path and return its integer value
id_str = "%(#)010d" % {'#': idx}
self._zookeeper_client.create_node(self._path + id_str, value)
self.set_in_use(idx)
return idx
except ResourceExistsError:
self.set_in_use(idx)
existing_value = self.read(idx)
if (value == existing_value):
# idempotent reserve
return idx
msg = 'For index %s reserve conflicts with existing value %s.' \
%(idx, existing_value)
self._zookeeper_client.syslog(msg, level='notice')
raise
# end reserve
def delete(self, idx):
id_str = "%(#)010d" % {'#': idx}
self._zookeeper_client.delete_node(self._path + id_str)
bit_idx = self._get_bit_from_zk_index(idx)
if 0 <= bit_idx < self._in_use.length():
self._in_use[bit_idx] = 0
# end delete
def read(self, idx):
id_str = "%(#)010d" % {'#': idx}
id_val = self._zookeeper_client.read_node(self._path+id_str)
if id_val is not None:
bit_idx = self._get_bit_from_zk_index(idx)
if bit_idx >= 0:
self._set_in_use(bit_idx)
return id_val
# end read
def empty(self):
return not self._in_use.any()
# end empty
@classmethod
def delete_all(cls, zookeeper_client, path):
try:
zookeeper_client.delete_node(path, recursive=True)
except kazoo.exceptions.NotEmptyError:
#TODO: Add retries for NotEmptyError
zookeeper_client.syslog("NotEmptyError while deleting %s" % path)
# end delete_all
#end class IndexAllocator
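# A minimal usage sketch of IndexAllocator, assuming a reachable ZooKeeper
# ensemble; the server address and znode path below are placeholders, not
# values taken from this module.
def _index_allocator_example():
    zk_client = ZookeeperClient("example-module", "127.0.0.1:2181")
    allocator = IndexAllocator(zk_client, "/example/id-alloc/", size=100)
    idx = allocator.alloc(value="owner-a")      # next free index, backed by a znode
    allocator.reserve(5, value="owner-b")       # claim a specific index (idempotent)
    value = allocator.read(idx)                 # reads back "owner-a"
    allocator.delete(idx)                       # free the index again
    return value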
class ZookeeperClient(object):
def __init__(self, module, server_list, logging_fn=None):
# logging
logger = logging.getLogger(module)
logger.setLevel(logging.DEBUG)
try:
handler = logging.handlers.RotatingFileHandler(
LOG_DIR + module + '-zk.log', maxBytes=10*1024*1024, backupCount=5)
except IOError:
print "Cannot open log file in %s" %(LOG_DIR)
else:
log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
handler.setFormatter(log_format)
logger.addHandler(handler)
if logging_fn:
self.log = logging_fn
else:
self.log = self.syslog
# KazooRetry to retry keeper CRUD operations
self._retry = KazooRetry(max_tries=None, max_delay=300,
sleep_func=gevent.sleep)
self._zk_client = kazoo.client.KazooClient(
server_list,
timeout=400,
handler=kazoo.handlers.gevent.SequentialGeventHandler(),
logger=logger,
connection_retry=self._retry,
command_retry=self._retry)
self._zk_client.add_listener(self._zk_listener)
self._logger = logger
self._election = None
self._server_list = server_list
self._conn_state = None
self._sandesh_connection_info_update(status='INIT', message='')
self._lost_cb = None
self._suspend_cb = None
self.connect()
# end __init__
# start
def connect(self):
while True:
try:
self._zk_client.start()
break
except gevent.event.Timeout as e:
# Update connection info
self._sandesh_connection_info_update(status='DOWN',
message=str(e))
gevent.sleep(1)
            # Zookeeper may also throw an exception due to a delay in master election
except Exception as e:
# Update connection info
self._sandesh_connection_info_update(status='DOWN',
message=str(e))
gevent.sleep(1)
# Update connection info
self._sandesh_connection_info_update(status='UP', message='')
# end
def is_connected(self):
return self._zk_client.state == KazooState.CONNECTED
# end is_connected
def syslog(self, msg, *args, **kwargs):
if not self._logger:
return
level = kwargs.get('level', 'info')
if isinstance(level, int):
from pysandesh.sandesh_logger import SandeshLogger
level = SandeshLogger.get_py_logger_level(level)
self._logger.log(level, msg)
return
log_method = getattr(self._logger, level, self._logger.info)
log_method(msg)
# end syslog
def set_lost_cb(self, lost_cb=None):
# set a callback to be called when kazoo state is lost
# set to None for default action
self._lost_cb = lost_cb
# end set_lost_cb
def set_suspend_cb(self, suspend_cb=None):
# set a callback to be called when kazoo state is suspend
# set to None for default action
self._suspend_cb = suspend_cb
# end set_suspend_cb
def _zk_listener(self, state):
if state == KazooState.CONNECTED:
if self._election:
self._election.cancel()
# Update connection info
self._sandesh_connection_info_update(status='UP', message='')
elif state == KazooState.LOST:
# Lost the session with ZooKeeper Server
# Best of option we have is to exit the process and restart all
# over again
self._sandesh_connection_info_update(status='DOWN',
message='Connection to Zookeeper lost')
if self._lost_cb:
self._lost_cb()
else:
os._exit(2)
elif state == KazooState.SUSPENDED:
# Update connection info
self._sandesh_connection_info_update(status='INIT',
message = 'Connection to zookeeper lost. Retrying')
if self._suspend_cb:
self._suspend_cb()
# end
def master_election(self, path, identifier, func, *args, **kwargs):
self._election = self._zk_client.Election(path, identifier)
self._election.run(func, *args, **kwargs)
# end master_election
def create_node(self, path, value=None):
try:
if value is None:
value = uuid.uuid4()
retry = self._retry.copy()
retry(self._zk_client.create, path, str(value), makepath=True)
except kazoo.exceptions.NodeExistsError:
current_value = self.read_node(path)
if current_value == value:
                return True
raise ResourceExistsError(path, str(current_value), 'zookeeper')
# end create_node
def delete_node(self, path, recursive=False):
try:
retry = self._retry.copy()
retry(self._zk_client.delete, path, recursive=recursive)
except kazoo.exceptions.NoNodeError:
pass
        except Exception:
            # re-raise with the original traceback preserved
            raise
# end delete_node
def read_node(self, path, include_timestamp=False):
try:
retry = self._retry.copy()
value = retry(self._zk_client.get, path)
if include_timestamp:
return value
return value[0]
except Exception:
return None
# end read_node
def get_children(self, path):
try:
retry = self._retry.copy()
return retry(self._zk_client.get_children, path)
except Exception:
return []
    # end get_children
def exists(self, path):
try:
retry = self._retry.copy()
return retry(self._zk_client.exists, path)
except Exception:
return []
# end exists
def _sandesh_connection_info_update(self, status, message):
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
from pysandesh.gen_py.process_info.ttypes import ConnectionType as ConnType
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
new_conn_state = getattr(ConnectionStatus, status)
ConnectionState.update(conn_type = ConnType.ZOOKEEPER,
name = 'Zookeeper', status = new_conn_state,
message = message,
server_addrs = self._server_list.split(','))
if (self._conn_state and self._conn_state != ConnectionStatus.DOWN and
new_conn_state == ConnectionStatus.DOWN):
msg = 'Connection to Zookeeper down: %s' %(message)
self.log(msg, level=SandeshLevel.SYS_ERR)
if (self._conn_state and self._conn_state != new_conn_state and
new_conn_state == ConnectionStatus.UP):
msg = 'Connection to Zookeeper ESTABLISHED'
self.log(msg, level=SandeshLevel.SYS_NOTICE)
self._conn_state = new_conn_state
# end _sandesh_connection_info_update
# end class ZookeeperClient
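# A minimal usage sketch of ZookeeperClient's node CRUD and master election
# helpers, assuming a reachable ZooKeeper ensemble; the connection string,
# paths and identifier below are placeholders.
def _zookeeper_client_example():
    zk_client = ZookeeperClient("example-module", "127.0.0.1:2181")
    zk_client.create_node("/example/config/node-1", value="hello")
    zk_client.syslog("read back: %s" % zk_client.read_node("/example/config/node-1"))
    zk_client.delete_node("/example/config", recursive=True)
    def _become_master():
        # Runs only while this process holds the election lock.
        zk_client.syslog("elected master")
    # Blocks and participates in the election rooted at the given path.
    zk_client.master_election("/example/election", "instance-1", _become_master)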
| tcpcloud/contrail-controller | src/config/common/zkclient.py | Python | apache-2.0 | 14,983 |
#==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
# GeodesicActiveContourImageFilter.py
# Translated by Charl P. Botha <http://cpbotha.net/> from the cxx original.
# $Id: GeodesicActiveContourImageFilter.py,v 1.1 2006/09/06 20:58:42 glehmann
# Exp $
# example runs:
# ------------
# 1. Left ventricle:
# python GeodesicActiveContourImageFilter.py \
# ../Data/BrainProtonDensitySlice.png lventricle.png \
# 81 114 5 1 -0.5 3 2
#
# 2. White matter:
# python GeodesicActiveContourImageFilter.py \
# ../Data/BrainProtonDensitySlice.png wmatter.png \
# 56 92 5 1 -0.3 2 10
#
# See the ITK Software Guide, section 9.3.3 "Geodesic Active Contours
# Segmentation" as well as the CXX example for more comments.
import itk
from sys import argv, stderr
import os
itk.auto_progress(2)
def main():
if len(argv) < 10:
errMsg = "Missing parameters\n" \
"Usage: %s\n" % (argv[0],) + \
" inputImage outputImage\n" \
" seedX seedY InitialDistance\n" \
" Sigma SigmoidAlpha SigmoidBeta\n" \
" PropagationScaling\n"
print(errMsg, file=stderr)
return
# We're going to build the following pipelines:
# 1. reader -> smoothing -> gradientMagnitude -> sigmoid -> FI
# 2. fastMarching -> geodesicActiveContour(FI) -> thresholder -> writer
# The output of pipeline 1 is a feature image that is used by the
# geodesicActiveContour object. Also see figure 9.18 in the ITK
# Software Guide.
    # we want to know what is happening
# itk.auto_progress(True)
InternalPixelType = itk.F
Dimension = 2
InternalImageType = itk.Image[InternalPixelType, Dimension]
OutputPixelType = itk.UC
OutputImageType = itk.Image[OutputPixelType, Dimension]
reader = itk.ImageFileReader[InternalImageType].New(FileName=argv[1])
# needed to give the size to the fastmarching filter
reader.Update()
outputDirectory = os.path.dirname(argv[2])
smoothing = itk.CurvatureAnisotropicDiffusionImageFilter[
InternalImageType,
InternalImageType].New(
reader,
TimeStep=0.125,
NumberOfIterations=5,
ConductanceParameter=9.0)
gradientMagnitude = itk.GradientMagnitudeRecursiveGaussianImageFilter[
InternalImageType,
InternalImageType].New(
smoothing,
Sigma=float(argv[6]))
sigmoid = itk.SigmoidImageFilter[InternalImageType, InternalImageType].New(
gradientMagnitude,
OutputMinimum=0.0,
OutputMaximum=1.1,
Alpha=float(argv[7]),
Beta=float(argv[8]))
seedPosition = itk.Index[2]()
seedPosition.SetElement(0, int(argv[3]))
seedPosition.SetElement(1, int(argv[4]))
node = itk.LevelSetNode[InternalPixelType, Dimension]()
node.SetValue(-float(argv[5]))
node.SetIndex(seedPosition)
seeds = itk.VectorContainer[
itk.UI, itk.LevelSetNode[InternalPixelType, Dimension]].New()
seeds.Initialize()
seeds.InsertElement(0, node)
fastMarching = itk.FastMarchingImageFilter[
InternalImageType,
InternalImageType].New(
sigmoid,
TrialPoints=seeds,
SpeedConstant=1.0,
OutputSize=reader.GetOutput().GetBufferedRegion().GetSize())
geodesicActiveContour = itk.GeodesicActiveContourLevelSetImageFilter[
InternalImageType,
InternalImageType,
InternalPixelType].New(
fastMarching,
        # it is required to set the FeatureImage explicitly
        # - itk segfaults without it :-(
FeatureImage=sigmoid.GetOutput(),
PropagationScaling=float(argv[9]),
CurvatureScaling=1.0,
AdvectionScaling=1.0,
MaximumRMSError=0.02,
NumberOfIterations=800)
thresholder = itk.BinaryThresholdImageFilter[
InternalImageType,
OutputImageType].New(
geodesicActiveContour,
LowerThreshold=-1000,
UpperThreshold=0,
OutsideValue=0,
InsideValue=255)
writer = itk.ImageFileWriter[OutputImageType].New(
thresholder,
FileName=argv[2])
def rescaleAndWrite(filter, fileName):
caster = itk.RescaleIntensityImageFilter[
InternalImageType,
OutputImageType].New(
filter,
OutputMinimum=0,
OutputMaximum=255)
itk.imwrite(caster, os.path.join(outputDirectory, fileName))
rescaleAndWrite(smoothing, "GeodesicActiveContourImageFilterOutput1.png")
rescaleAndWrite(
gradientMagnitude,
"GeodesicActiveContourImageFilterOutput2.png")
rescaleAndWrite(sigmoid, "GeodesicActiveContourImageFilterOutput3.png")
rescaleAndWrite(
fastMarching,
"GeodesicActiveContourImageFilterOutput4.png")
writer.Update()
print("")
print(
"Max. no. iterations: %d" %
(geodesicActiveContour.GetNumberOfIterations()))
print(
"Max. RMS error: %.3f" %
(geodesicActiveContour.GetMaximumRMSError()))
print("")
print(
"No. elapsed iterations: %d"
% (geodesicActiveContour.GetElapsedIterations()))
print("RMS change: %.3f" % (geodesicActiveContour.GetRMSChange()))
itk.imwrite(fastMarching, os.path.join(outputDirectory,
"GeodesicActiveContourImageFilterOutput4.mha"))
itk.imwrite(sigmoid, os.path.join(outputDirectory,
"GeodesicActiveContourImageFilterOutput3.mha"))
itk.imwrite(gradientMagnitude, os.path.join(outputDirectory,
"GeodesicActiveContourImageFilterOutput2.mha"))
if __name__ == "__main__":
main()
| malaterre/ITK | Modules/Segmentation/LevelSets/wrapping/test/GeodesicActiveContourImageFilterTest.py | Python | apache-2.0 | 6,301 |
# Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise exception when error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError(msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
            Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
            Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
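# A minimal sketch of the intended calling pattern: strings go through c_str()
# and every shared-library call is wrapped in _check_call(), which turns a
# non-zero return code into XLearnError. `XLearnCreate` is only a stand-in
# name for an exported C entry point, not something defined or guaranteed here.
def _example_api_call():
    out = XLearnHandle()
    _check_call(_LIB.XLearnCreate(c_str("linear"), ctypes.byref(out)))
    return out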
| PKU-Cloud-Lab/xLearn | python-package/xlearn/base.py | Python | apache-2.0 | 2,861 |
"""Cron job implementation of Zulip's incoming email gateway's helper
for forwarding emails into Zulip.
https://zulip.readthedocs.io/en/latest/production/email-gateway.html
The email gateway supports two major modes of operation: An email
server (using postfix) where the email address configured in
EMAIL_GATEWAY_PATTERN delivers emails directly to Zulip, and this, a
cron job that connects to an IMAP inbox (which receives the emails)
periodically.
Run this in a cronjob every N minutes if you have configured Zulip to
poll an external IMAP mailbox for messages. The script will then
connect to your IMAP server and batch-process all messages.
We extract and validate the target stream from information in the
recipient address and retrieve, forward, and archive the message.
"""
import email
import email.policy
import logging
from email.message import EmailMessage
from imaplib import IMAP4_SSL
from typing import Any, Generator
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from zerver.lib.email_mirror import logger, process_message
## Setup ##
log_format = "%(asctime)s: %(message)s"
logging.basicConfig(format=log_format)
formatter = logging.Formatter(log_format)
file_handler = logging.FileHandler(settings.EMAIL_MIRROR_LOG_PATH)
file_handler.setFormatter(formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
def get_imap_messages() -> Generator[EmailMessage, None, None]:
mbox = IMAP4_SSL(settings.EMAIL_GATEWAY_IMAP_SERVER, settings.EMAIL_GATEWAY_IMAP_PORT)
mbox.login(settings.EMAIL_GATEWAY_LOGIN, settings.EMAIL_GATEWAY_PASSWORD)
try:
mbox.select(settings.EMAIL_GATEWAY_IMAP_FOLDER)
try:
status, num_ids_data = mbox.search(None, 'ALL')
for message_id in num_ids_data[0].split():
status, msg_data = mbox.fetch(message_id, '(RFC822)')
assert isinstance(msg_data[0], tuple)
msg_as_bytes = msg_data[0][1]
message = email.message_from_bytes(msg_as_bytes, policy=email.policy.default)
# https://github.com/python/typeshed/issues/2417
assert isinstance(message, EmailMessage)
yield message
mbox.store(message_id, '+FLAGS', '\\Deleted')
mbox.expunge()
finally:
mbox.close()
finally:
mbox.logout()
class Command(BaseCommand):
help = __doc__
def handle(self, *args: Any, **options: str) -> None:
# We're probably running from cron, try to batch-process mail
if (not settings.EMAIL_GATEWAY_BOT or not settings.EMAIL_GATEWAY_LOGIN or
not settings.EMAIL_GATEWAY_PASSWORD or not settings.EMAIL_GATEWAY_IMAP_SERVER or
not settings.EMAIL_GATEWAY_IMAP_PORT or not settings.EMAIL_GATEWAY_IMAP_FOLDER):
raise CommandError("Please configure the email mirror gateway in /etc/zulip/, "
"or specify $ORIGINAL_RECIPIENT if piping a single mail.")
for message in get_imap_messages():
process_message(message)
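# A minimal sketch of the settings Command.handle() checks before polling the
# IMAP inbox; the values below are placeholders for illustration only.
_EXAMPLE_GATEWAY_SETTINGS = {
    "EMAIL_GATEWAY_BOT": "emailgateway@example.com",
    "EMAIL_GATEWAY_LOGIN": "gateway@example.com",
    "EMAIL_GATEWAY_PASSWORD": "********",
    "EMAIL_GATEWAY_IMAP_SERVER": "imap.example.com",
    "EMAIL_GATEWAY_IMAP_PORT": 993,
    "EMAIL_GATEWAY_IMAP_FOLDER": "INBOX",
}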
| showell/zulip | zerver/management/commands/email_mirror.py | Python | apache-2.0 | 3,125 |
"""
Provides functionality to emulate keyboard presses on host machine.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/keyboard/
"""
import voluptuous as vol
from homeassistant.const import (
SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK, SERVICE_VOLUME_DOWN, SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_UP)
REQUIREMENTS = ['pyuserinput==0.1.11']
DOMAIN = 'keyboard'
TAP_KEY_SCHEMA = vol.Schema({})
def setup(hass, config):
"""Listen for keyboard events."""
import pykeyboard # pylint: disable=import-error
keyboard = pykeyboard.PyKeyboard()
keyboard.special_key_assignment()
hass.services.register(DOMAIN, SERVICE_VOLUME_UP,
lambda service:
keyboard.tap_key(keyboard.volume_up_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_VOLUME_DOWN,
lambda service:
keyboard.tap_key(keyboard.volume_down_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_VOLUME_MUTE,
lambda service:
keyboard.tap_key(keyboard.volume_mute_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE,
lambda service:
keyboard.tap_key(keyboard.media_play_pause_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_MEDIA_NEXT_TRACK,
lambda service:
keyboard.tap_key(keyboard.media_next_track_key),
schema=TAP_KEY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK,
lambda service:
keyboard.tap_key(keyboard.media_prev_track_key),
schema=TAP_KEY_SCHEMA)
return True
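# A minimal sketch of invoking one of the services registered above through the
# service registry; in practice this is normally triggered by an automation
# rather than called directly from Python code.
def _example_service_call(hass):
    hass.services.call(DOMAIN, SERVICE_VOLUME_UP, {})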
| PetePriority/home-assistant | homeassistant/components/keyboard/__init__.py | Python | apache-2.0 | 2,078 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Metadata request handler."""
import hashlib
import hmac
import os
from oslo_config import cfg
from oslo_log import log as logging
import six
import webob.dec
import webob.exc
from nova.api.metadata import base
from nova import conductor
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import memorycache
from nova import utils
from nova import wsgi
CACHE_EXPIRATION = 15 # in seconds
CONF = cfg.CONF
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
metadata_proxy_opts = [
cfg.BoolOpt(
'service_metadata_proxy',
default=False,
help='Set flag to indicate Neutron will proxy metadata requests and '
'resolve instance ids.'),
cfg.StrOpt(
'metadata_proxy_shared_secret',
default='', secret=True,
help='Shared secret to validate proxies Neutron metadata requests'),
]
CONF.register_opts(metadata_proxy_opts, 'neutron')
LOG = logging.getLogger(__name__)
class MetadataRequestHandler(wsgi.Application):
"""Serve metadata."""
def __init__(self):
self._cache = memorycache.get_client()
self.conductor_api = conductor.API()
def get_metadata_by_remote_address(self, address):
if not address:
raise exception.FixedIpNotFoundForAddress(address=address)
cache_key = 'metadata-%s' % address
data = self._cache.get(cache_key)
if data:
return data
try:
data = base.get_metadata_by_address(self.conductor_api, address)
except exception.NotFound:
return None
self._cache.set(cache_key, data, CACHE_EXPIRATION)
return data
def get_metadata_by_instance_id(self, instance_id, address):
cache_key = 'metadata-%s' % instance_id
data = self._cache.get(cache_key)
if data:
return data
try:
data = base.get_metadata_by_instance_id(self.conductor_api,
instance_id, address)
except exception.NotFound:
return None
self._cache.set(cache_key, data, CACHE_EXPIRATION)
return data
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if os.path.normpath(req.path_info) == "/":
resp = base.ec2_md_print(base.VERSIONS + ["latest"])
req.response.body = resp
req.response.content_type = base.MIME_TYPE_TEXT_PLAIN
return req.response
if CONF.neutron.service_metadata_proxy:
meta_data = self._handle_instance_id_request(req)
else:
if req.headers.get('X-Instance-ID'):
LOG.warning(
_LW("X-Instance-ID present in request headers. The "
"'service_metadata_proxy' option must be "
"enabled to process this header."))
meta_data = self._handle_remote_ip_request(req)
if meta_data is None:
raise webob.exc.HTTPNotFound()
try:
data = meta_data.lookup(req.path_info)
except base.InvalidMetadataPath:
raise webob.exc.HTTPNotFound()
if callable(data):
return data(req, meta_data)
resp = base.ec2_md_print(data)
if isinstance(resp, six.text_type):
req.response.text = resp
else:
req.response.body = resp
req.response.content_type = meta_data.get_mimetype()
return req.response
def _handle_remote_ip_request(self, req):
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
try:
meta_data = self.get_metadata_by_remote_address(remote_address)
except Exception:
LOG.exception(_LE('Failed to get metadata for ip: %s'),
remote_address)
msg = _('An unknown error has occurred. '
'Please try your request again.')
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(msg))
if meta_data is None:
LOG.error(_LE('Failed to get metadata for ip: %s'),
remote_address)
return meta_data
def _handle_instance_id_request(self, req):
instance_id = req.headers.get('X-Instance-ID')
tenant_id = req.headers.get('X-Tenant-ID')
signature = req.headers.get('X-Instance-ID-Signature')
remote_address = req.headers.get('X-Forwarded-For')
        # Ensure each required header was passed, and passed only once
if instance_id is None:
msg = _('X-Instance-ID header is missing from request.')
elif signature is None:
msg = _('X-Instance-ID-Signature header is missing from request.')
elif tenant_id is None:
msg = _('X-Tenant-ID header is missing from request.')
elif not isinstance(instance_id, six.string_types):
msg = _('Multiple X-Instance-ID headers found within request.')
elif not isinstance(tenant_id, six.string_types):
msg = _('Multiple X-Tenant-ID headers found within request.')
else:
msg = None
if msg:
raise webob.exc.HTTPBadRequest(explanation=msg)
expected_signature = hmac.new(
CONF.neutron.metadata_proxy_shared_secret,
instance_id,
hashlib.sha256).hexdigest()
if not utils.constant_time_compare(expected_signature, signature):
if instance_id:
LOG.warning(_LW('X-Instance-ID-Signature: %(signature)s does '
'not match the expected value: '
'%(expected_signature)s for id: '
'%(instance_id)s. Request From: '
'%(remote_address)s'),
{'signature': signature,
'expected_signature': expected_signature,
'instance_id': instance_id,
'remote_address': remote_address})
msg = _('Invalid proxy request signature.')
raise webob.exc.HTTPForbidden(explanation=msg)
try:
meta_data = self.get_metadata_by_instance_id(instance_id,
remote_address)
except Exception:
LOG.exception(_LE('Failed to get metadata for instance id: %s'),
instance_id)
msg = _('An unknown error has occurred. '
'Please try your request again.')
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(msg))
if meta_data is None:
LOG.error(_LE('Failed to get metadata for instance id: %s'),
instance_id)
elif meta_data.instance.project_id != tenant_id:
LOG.warning(_LW("Tenant_id %(tenant_id)s does not match tenant_id "
"of instance %(instance_id)s."),
{'tenant_id': tenant_id, 'instance_id': instance_id})
# causes a 404 to be raised
meta_data = None
return meta_data
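# A minimal sketch of how a metadata proxy would compute the
# X-Instance-ID-Signature header, mirroring the expected_signature check above;
# the shared secret and instance id below are placeholders.
def _example_proxy_signature(shared_secret='s3cr3t',
                             instance_id='8c5e1c8f-1a2b-4c3d-9e8f-0123456789ab'):
    return hmac.new(shared_secret, instance_id, hashlib.sha256).hexdigest()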
| cloudbase/nova-virtualbox | nova/api/metadata/handler.py | Python | apache-2.0 | 8,152 |
""" Test for the remove.py module in the vcontrol/rest/providers directory """
from os import remove as delete_file
from web import threadeddict
from vcontrol.rest.providers import remove
PROVIDERS_FILE_PATH = "../vcontrol/rest/providers/providers.txt"
class ContextDummy():
env = threadeddict()
env['HTTP_HOST'] = 'localhost:8080'
class WebDummy():
# dummy class to emulate the web.ctx.env call in remove.py
ctx = ContextDummy()
def test_successful_provider_removal():
""" Here we give the module a text file with PROVIDER: written in it,
it should remove that line in the file """
remove_provider = remove.RemoveProviderR()
remove.web = WebDummy() # override the web variable in remove.py
test_provider = "PROV"
expected_providers_contents = ['What:\n', 'Test:'] # what we expect to see in providers.txt after we call GET
# create the file
with open(PROVIDERS_FILE_PATH, 'w') as f:
f.writelines([
"What:",
"\n",
test_provider + ":",
"\n",
"Test:"
])
assert remove_provider.GET(test_provider) == "removed " + test_provider
# read the file and see if it has removed the line with the test_provider
with open(PROVIDERS_FILE_PATH, 'r') as f:
provider_contents = f.readlines()
delete_file(PROVIDERS_FILE_PATH) # delete the file
assert provider_contents == expected_providers_contents
def test_unsuccessful_provider_removal():
""" Here we give the module a text file without the provider written in it,
it should tell us that it couldn't find the provider we gave it as an argument"""
remove_provider = remove.RemoveProviderR()
remove.web = WebDummy() # override the web variable in remove.py
test_provider = "PROV"
expected_providers_contents = ['What:\n', 'NOTPROV:\n', 'Test:'] # what we expect to see in providers.txt after GET
# create the file
with open(PROVIDERS_FILE_PATH, 'w') as f:
f.writelines([
"What:",
"\n",
"NOTPROV:",
"\n",
"Test:"
])
assert remove_provider.GET(test_provider) == test_provider + " not found, couldn't remove"
# read the file and see if it's the same
with open(PROVIDERS_FILE_PATH, 'r') as f:
provider_contents = f.readlines()
delete_file(PROVIDERS_FILE_PATH) # delete the file
assert provider_contents == expected_providers_contents
| CyberReboot/vcontrol | tests/test_rest_providers_remove.py | Python | apache-2.0 | 2,487 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=no-member, too-many-lines
"""Online evaluation metric module."""
from __future__ import absolute_import
import math
from collections import OrderedDict
import numpy
from .base import numeric_types, string_types
from . import ndarray
from . import registry
def check_label_shapes(labels, preds, wrap=False, shape=False):
"""Helper function for checking shape of label and prediction
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
wrap : boolean
If True, wrap labels/preds in a list if they are single NDArray
shape : boolean
If True, check the shape of labels and preds;
Otherwise only check their length.
"""
if not shape:
label_shape, pred_shape = len(labels), len(preds)
else:
label_shape, pred_shape = labels.shape, preds.shape
if label_shape != pred_shape:
raise ValueError("Shape of labels {} does not match shape of "
"predictions {}".format(label_shape, pred_shape))
if wrap:
if isinstance(labels, ndarray.ndarray.NDArray):
labels = [labels]
if isinstance(preds, ndarray.ndarray.NDArray):
preds = [preds]
return labels, preds
class EvalMetric(object):
"""Base class for all evaluation metrics.
.. note::
This is a base class that provides common metric interfaces.
One should not use this class directly, but instead create new metric
classes that extend it.
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self, name, output_names=None,
label_names=None, **kwargs):
self.name = str(name)
self.output_names = output_names
self.label_names = label_names
self._kwargs = kwargs
self.reset()
def __str__(self):
return "EvalMetric: {}".format(dict(self.get_name_value()))
def get_config(self):
"""Save configurations of metric. Can be recreated
from configs with metric.create(**config)
"""
config = self._kwargs.copy()
config.update({
'metric': self.__class__.__name__,
'name': self.name,
'output_names': self.output_names,
'label_names': self.label_names})
return config
def update_dict(self, label, pred):
"""Update the internal evaluation with named label and pred
Parameters
----------
labels : OrderedDict of str -> NDArray
name to array mapping for labels.
preds : OrderedDict of str -> NDArray
name to array mapping of predicted outputs.
"""
if self.output_names is not None:
pred = [pred[name] for name in self.output_names]
else:
pred = list(pred.values())
if self.label_names is not None:
label = [label[name] for name in self.label_names]
else:
label = list(label.values())
self.update(label, pred)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
raise NotImplementedError()
def reset(self):
"""Resets the internal evaluation result to initial state."""
self.num_inst = 0
self.sum_metric = 0.0
def get(self):
"""Gets the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.num_inst == 0:
return (self.name, float('nan'))
else:
return (self.name, self.sum_metric / self.num_inst)
def get_name_value(self):
"""Returns zipped name and value pairs.
Returns
-------
list of tuples
A (name, value) tuple list.
"""
name, value = self.get()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return list(zip(name, value))
# pylint: disable=invalid-name
register = registry.get_register_func(EvalMetric, 'metric')
alias = registry.get_alias_func(EvalMetric, 'metric')
_create = registry.get_create_func(EvalMetric, 'metric')
# pylint: enable=invalid-name
def create(metric, *args, **kwargs):
"""Creates evaluation metric from metric names or instances of EvalMetric
or a custom metric function.
Parameters
----------
metric : str or callable
Specifies the metric to create.
This argument must be one of the below:
- Name of a metric.
- An instance of `EvalMetric`.
- A list, each element of which is a metric or a metric name.
- An evaluation function that computes custom metric for a given batch of
labels and predictions.
*args : list
Additional arguments to metric constructor.
Only used when metric is str.
**kwargs : dict
Additional arguments to metric constructor.
Only used when metric is str
Examples
--------
>>> def custom_metric(label, pred):
... return np.mean(np.abs(label - pred))
...
>>> metric1 = mx.metric.create('acc')
>>> metric2 = mx.metric.create(custom_metric)
>>> metric3 = mx.metric.create([metric1, metric2, 'rmse'])
"""
if callable(metric):
return CustomMetric(metric, *args, **kwargs)
elif isinstance(metric, list):
composite_metric = CompositeEvalMetric()
for child_metric in metric:
composite_metric.add(create(child_metric, *args, **kwargs))
return composite_metric
return _create(metric, *args, **kwargs)
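# A minimal sketch of registering a custom metric: subclass EvalMetric and
# decorate it with @register (and optionally @alias), after which
# create('dummy_count') can construct it by name. The metric below is purely
# illustrative and simply counts evaluated samples.
@register
@alias('dummy_count')
class _DummyCount(EvalMetric):
    """Counts evaluated samples (illustration only)."""
    def __init__(self, name='dummy_count', output_names=None, label_names=None):
        super(_DummyCount, self).__init__(
            name, output_names=output_names, label_names=label_names)
    def update(self, labels, preds):
        labels, preds = check_label_shapes(labels, preds, True)
        for label in labels:
            self.sum_metric += label.size
            self.num_inst += 1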
@register
@alias('composite')
class CompositeEvalMetric(EvalMetric):
"""Manages multiple evaluation metrics.
Parameters
----------
metrics : list of EvalMetric
List of child metrics.
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> eval_metrics_1 = mx.metric.Accuracy()
>>> eval_metrics_2 = mx.metric.F1()
>>> eval_metrics = mx.metric.CompositeEvalMetric()
>>> for child_metric in [eval_metrics_1, eval_metrics_2]:
>>> eval_metrics.add(child_metric)
>>> eval_metrics.update(labels = labels, preds = predicts)
>>> print eval_metrics.get()
(['accuracy', 'f1'], [0.6666666666666666, 0.8])
"""
def __init__(self, metrics=None, name='composite',
output_names=None, label_names=None):
super(CompositeEvalMetric, self).__init__(
'composite', output_names=output_names, label_names=label_names)
if metrics is None:
metrics = []
self.metrics = [create(i) for i in metrics]
def add(self, metric):
"""Adds a child metric.
Parameters
----------
metric
A metric instance.
"""
self.metrics.append(create(metric))
def get_metric(self, index):
"""Returns a child metric.
Parameters
----------
index : int
Index of child metric in the list of metrics.
"""
        try:
            return self.metrics[index]
        except IndexError:
            raise ValueError("Metric index {} is out of range 0 to {}".format(
                index, len(self.metrics)))
def update_dict(self, labels, preds): # pylint: disable=arguments-differ
if self.label_names is not None:
labels = OrderedDict([i for i in labels.items()
if i[0] in self.label_names])
if self.output_names is not None:
preds = OrderedDict([i for i in preds.items()
if i[0] in self.output_names])
for metric in self.metrics:
metric.update_dict(labels, preds)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
for metric in self.metrics:
metric.update(labels, preds)
def reset(self):
"""Resets the internal evaluation result to initial state."""
try:
for metric in self.metrics:
metric.reset()
except AttributeError:
pass
def get(self):
"""Returns the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
names = []
values = []
for metric in self.metrics:
name, value = metric.get()
if isinstance(name, string_types):
name = [name]
if isinstance(value, numeric_types):
value = [value]
names.extend(name)
values.extend(value)
return (names, values)
def get_config(self):
config = super(CompositeEvalMetric, self).get_config()
config.update({'metrics': [i.get_config() for i in self.metrics]})
return config
########################
# CLASSIFICATION METRICS
########################
@register
@alias('acc')
class Accuracy(EvalMetric):
"""Computes accuracy classification score.
The accuracy score is defined as
.. math::
\\text{accuracy}(y, \\hat{y}) = \\frac{1}{n} \\sum_{i=0}^{n-1}
\\text{1}(\\hat{y_i} == y_i)
Parameters
----------
axis : int, default=1
The axis that represents classes
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> acc = mx.metric.Accuracy()
>>> acc.update(preds = predicts, labels = labels)
>>> print acc.get()
('accuracy', 0.6666666666666666)
"""
def __init__(self, axis=1, name='accuracy',
output_names=None, label_names=None):
super(Accuracy, self).__init__(
name, axis=axis,
output_names=output_names, label_names=label_names)
self.axis = axis
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data with class indices as values, one per sample.
preds : list of `NDArray`
Prediction values for samples. Each prediction value can either be the class index,
or a vector of likelihoods for all classes.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred_label in zip(labels, preds):
if pred_label.shape != label.shape:
pred_label = ndarray.argmax(pred_label, axis=self.axis)
pred_label = pred_label.asnumpy().astype('int32')
label = label.asnumpy().astype('int32')
            # flatten before checking shapes to avoid shape mismatch
label = label.flat
pred_label = pred_label.flat
labels, preds = check_label_shapes(label, pred_label)
self.sum_metric += (pred_label == label).sum()
self.num_inst += len(pred_label)
@register
@alias('top_k_accuracy', 'top_k_acc')
class TopKAccuracy(EvalMetric):
"""Computes top k predictions accuracy.
`TopKAccuracy` differs from Accuracy in that it considers the prediction
to be ``True`` as long as the ground truth label is in the top K
predicated labels.
If `top_k` = ``1``, then `TopKAccuracy` is identical to `Accuracy`.
Parameters
----------
top_k : int
Whether targets are in top k predictions.
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> np.random.seed(999)
>>> top_k = 3
>>> labels = [mx.nd.array([2, 6, 9, 2, 3, 4, 7, 8, 9, 6])]
>>> predicts = [mx.nd.array(np.random.rand(10, 10))]
>>> acc = mx.metric.TopKAccuracy(top_k=top_k)
>>> acc.update(labels, predicts)
>>> print acc.get()
('top_k_accuracy', 0.3)
"""
def __init__(self, top_k=1, name='top_k_accuracy',
output_names=None, label_names=None):
super(TopKAccuracy, self).__init__(
name, top_k=top_k,
output_names=output_names, label_names=label_names)
self.top_k = top_k
assert(self.top_k > 1), 'Please use Accuracy if top_k is no more than 1'
self.name += '_%d' % self.top_k
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred_label in zip(labels, preds):
assert(len(pred_label.shape) <= 2), 'Predictions should be no more than 2 dims'
pred_label = numpy.argsort(pred_label.asnumpy().astype('float32'), axis=1)
label = label.asnumpy().astype('int32')
check_label_shapes(label, pred_label)
num_samples = pred_label.shape[0]
num_dims = len(pred_label.shape)
if num_dims == 1:
self.sum_metric += (pred_label.flat == label.flat).sum()
elif num_dims == 2:
num_classes = pred_label.shape[1]
top_k = min(num_classes, self.top_k)
for j in range(top_k):
self.sum_metric += (pred_label[:, num_classes - 1 - j].flat == label.flat).sum()
self.num_inst += num_samples
class _BinaryClassificationMetrics(object):
"""
Private container class for classification metric statistics. True/false positive and
true/false negative counts are sufficient statistics for various classification metrics.
This class provides the machinery to track those statistics across mini-batches of
(label, prediction) pairs.
"""
def __init__(self):
self.true_positives = 0
self.false_negatives = 0
self.false_positives = 0
self.true_negatives = 0
def update_binary_stats(self, label, pred):
"""
Update various binary classification counts for a single (label, pred)
pair.
Parameters
----------
label : `NDArray`
The labels of the data.
pred : `NDArray`
Predicted values.
"""
pred = pred.asnumpy()
label = label.asnumpy().astype('int32')
pred_label = numpy.argmax(pred, axis=1)
check_label_shapes(label, pred)
if len(numpy.unique(label)) > 2:
raise ValueError("%s currently only supports binary classification."
% self.__class__.__name__)
pred_true = (pred_label == 1)
pred_false = 1 - pred_true
label_true = (label == 1)
label_false = 1 - label_true
self.true_positives += (pred_true * label_true).sum()
self.false_positives += (pred_true * label_false).sum()
self.false_negatives += (pred_false * label_true).sum()
self.true_negatives += (pred_false * label_false).sum()
@property
def precision(self):
if self.true_positives + self.false_positives > 0:
return float(self.true_positives) / (self.true_positives + self.false_positives)
else:
return 0.
@property
def recall(self):
if self.true_positives + self.false_negatives > 0:
return float(self.true_positives) / (self.true_positives + self.false_negatives)
else:
return 0.
@property
def fscore(self):
if self.precision + self.recall > 0:
return 2 * self.precision * self.recall / (self.precision + self.recall)
else:
return 0.
@property
def total_examples(self):
return self.false_negatives + self.false_positives + \
self.true_negatives + self.true_positives
def reset_stats(self):
self.false_positives = 0
self.false_negatives = 0
self.true_positives = 0
self.true_negatives = 0
@register
class F1(EvalMetric):
"""Computes the F1 score of a binary classification problem.
The F1 score is equivalent to weighted average of the precision and recall,
where the best value is 1.0 and the worst value is 0.0. The formula for F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
The formula for precision and recall is::
precision = true_positives / (true_positives + false_positives)
recall = true_positives / (true_positives + false_negatives)
.. note::
This F1 score only supports binary classification.
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
average : str, default 'macro'
Strategy to be used for aggregating across mini-batches.
"macro": average the F1 scores for each batch.
"micro": compute a single F1 score across all batches.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0., 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0., 1., 1.])]
>>> f1 = mx.metric.F1()
>>> f1.update(preds = predicts, labels = labels)
>>> print f1.get()
('f1', 0.8)
"""
def __init__(self, name='f1',
output_names=None, label_names=None, average="macro"):
self.average = average
self.metrics = _BinaryClassificationMetrics()
EvalMetric.__init__(self, name=name,
output_names=output_names, label_names=label_names)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
self.metrics.update_binary_stats(label, pred)
if self.average == "macro":
self.sum_metric += self.metrics.fscore
self.num_inst += 1
self.metrics.reset_stats()
else:
self.sum_metric = self.metrics.fscore * self.metrics.total_examples
self.num_inst = self.metrics.total_examples
def reset(self):
"""Resets the internal evaluation result to initial state."""
self.sum_metric = 0.
self.num_inst = 0.
self.metrics.reset_stats()
@register
class Perplexity(EvalMetric):
"""Computes perplexity.
Perplexity is a measurement of how well a probability distribution
or model predicts a sample. A low perplexity indicates the model
is good at predicting the sample.
The perplexity of a model q is defined as
.. math::
b^{\\big(-\\frac{1}{N} \\sum_{i=1}^N \\log_b q(x_i) \\big)}
= \\exp \\big(-\\frac{1}{N} \\sum_{i=1}^N \\log q(x_i)\\big)
where we let `b = e`.
:math:`q(x_i)` is the predicted value of its ground truth
label on sample :math:`x_i`.
For example, we have three samples :math:`x_1, x_2, x_3` and their labels
are :math:`[0, 1, 1]`.
Suppose our model predicts :math:`q(x_1) = p(y_1 = 0 | x_1) = 0.3`
and :math:`q(x_2) = 1.0`,
:math:`q(x_3) = 0.6`. The perplexity of model q is
:math:`exp\\big(-(\\log 0.3 + \\log 1.0 + \\log 0.6) / 3\\big) = 1.77109762852`.
Parameters
----------
ignore_label : int or None
Index of invalid label to ignore when
counting. By default, sets to -1.
If set to `None`, it will include all entries.
axis : int (default -1)
The axis from prediction that was used to
compute softmax. By default use the last
axis.
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> perp = mx.metric.Perplexity(ignore_label=None)
>>> perp.update(labels, predicts)
>>> print perp.get()
('Perplexity', 1.7710976285155853)
"""
def __init__(self, ignore_label, axis=-1, name='perplexity',
output_names=None, label_names=None):
super(Perplexity, self).__init__(
name, ignore_label=ignore_label,
output_names=output_names, label_names=label_names)
self.ignore_label = ignore_label
self.axis = axis
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
assert len(labels) == len(preds)
loss = 0.
num = 0
for label, pred in zip(labels, preds):
assert label.size == pred.size/pred.shape[-1], \
"shape mismatch: %s vs. %s"%(label.shape, pred.shape)
label = label.as_in_context(pred.context).reshape((label.size,))
pred = ndarray.pick(pred, label.astype(dtype='int32'), axis=self.axis)
if self.ignore_label is not None:
ignore = (label == self.ignore_label).astype(pred.dtype)
num -= ndarray.sum(ignore).asscalar()
pred = pred*(1-ignore) + ignore
loss -= ndarray.sum(ndarray.log(ndarray.maximum(1e-10, pred))).asscalar()
num += pred.size
self.sum_metric += loss
self.num_inst += num
def get(self):
"""Returns the current evaluation result.
Returns
-------
Tuple of (str, float)
Representing name of the metric and evaluation result.
"""
return (self.name, math.exp(self.sum_metric/self.num_inst))
####################
# REGRESSION METRICS
####################
@register
class MAE(EvalMetric):
"""Computes Mean Absolute Error (MAE) loss.
The mean absolute error is given by
.. math::
\\frac{\\sum_i^n |y_i - \\hat{y}_i|}{n}
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))]
>>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))]
>>> mean_absolute_error = mx.metric.MAE()
>>> mean_absolute_error.update(labels = labels, preds = predicts)
>>> print mean_absolute_error.get()
('mae', 0.5)
"""
def __init__(self, name='mae',
output_names=None, label_names=None):
super(MAE, self).__init__(
name, output_names=output_names, label_names=label_names)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
label = label.asnumpy()
pred = pred.asnumpy()
if len(label.shape) == 1:
label = label.reshape(label.shape[0], 1)
if len(pred.shape) == 1:
pred = pred.reshape(pred.shape[0], 1)
self.sum_metric += numpy.abs(label - pred).mean()
self.num_inst += 1 # numpy.prod(label.shape)
@register
class MSE(EvalMetric):
"""Computes Mean Squared Error (MSE) loss.
The mean squared error is given by
.. math::
\\frac{\\sum_i^n (y_i - \\hat{y}_i)^2}{n}
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))]
>>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))]
>>> mean_squared_error = mx.metric.MSE()
>>> mean_squared_error.update(labels = labels, preds = predicts)
>>> print mean_squared_error.get()
('mse', 0.375)
"""
def __init__(self, name='mse',
output_names=None, label_names=None):
super(MSE, self).__init__(
name, output_names=output_names, label_names=label_names)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
label = label.asnumpy()
pred = pred.asnumpy()
if len(label.shape) == 1:
label = label.reshape(label.shape[0], 1)
if len(pred.shape) == 1:
pred = pred.reshape(pred.shape[0], 1)
self.sum_metric += ((label - pred)**2.0).mean()
self.num_inst += 1 # numpy.prod(label.shape)
@register
class RMSE(EvalMetric):
"""Computes Root Mean Squred Error (RMSE) loss.
The root mean squared error is given by
.. math::
\\sqrt{\\frac{\\sum_i^n (y_i - \\hat{y}_i)^2}{n}}
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))]
>>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))]
>>> root_mean_squared_error = mx.metric.RMSE()
>>> root_mean_squared_error.update(labels = labels, preds = predicts)
>>> print root_mean_squared_error.get()
('rmse', 0.612372457981)
"""
def __init__(self, name='rmse',
output_names=None, label_names=None):
super(RMSE, self).__init__(
name, output_names=output_names, label_names=label_names)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
label = label.asnumpy()
pred = pred.asnumpy()
if len(label.shape) == 1:
label = label.reshape(label.shape[0], 1)
if len(pred.shape) == 1:
pred = pred.reshape(pred.shape[0], 1)
self.sum_metric += numpy.sqrt(((label - pred)**2.0).mean())
self.num_inst += 1
@register
@alias('ce')
class CrossEntropy(EvalMetric):
"""Computes Cross Entropy loss.
The cross entropy over a batch of sample size :math:`N` is given by
.. math::
-\\sum_{n=1}^{N}\\sum_{k=1}^{K}t_{nk}\\log (y_{nk}),
where :math:`t_{nk}=1` if and only if sample :math:`n` belongs to class :math:`k`.
:math:`y_{nk}` denotes the probability of sample :math:`n` belonging to
class :math:`k`.
Parameters
----------
eps : float
Cross Entropy loss is undefined when a predicted value is 0 or 1,
so the small constant `eps` is added to the predicted values.
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> ce = mx.metric.CrossEntropy()
>>> ce.update(labels, predicts)
>>> print ce.get()
('cross-entropy', 0.57159948348999023)
"""
def __init__(self, eps=1e-12, name='cross-entropy',
output_names=None, label_names=None):
super(CrossEntropy, self).__init__(
name, eps=eps,
output_names=output_names, label_names=label_names)
self.eps = eps
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
label = label.asnumpy()
pred = pred.asnumpy()
label = label.ravel()
assert label.shape[0] == pred.shape[0]
prob = pred[numpy.arange(label.shape[0]), numpy.int64(label)]
self.sum_metric += (-numpy.log(prob + self.eps)).sum()
self.num_inst += label.shape[0]
@register
@alias('nll_loss')
class NegativeLogLikelihood(EvalMetric):
"""Computes the negative log-likelihood loss.
The negative log-likelihood loss over a batch of sample size :math:`N` is given by
.. math::
-\\sum_{n=1}^{N}\\sum_{k=1}^{K}t_{nk}\\log (y_{nk}),
where :math:`K` is the number of classes, :math:`y_{nk}` is the predicted probability for
:math:`k`-th class for :math:`n`-th sample. :math:`t_{nk}=1` if and only if sample
:math:`n` belongs to class :math:`k`.
Parameters
----------
eps : float
Negative log-likelihood loss is undefined when a predicted value is 0,
so the small constant `eps` is added to the predicted values.
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> nll_loss = mx.metric.NegativeLogLikelihood()
>>> nll_loss.update(labels, predicts)
>>> print nll_loss.get()
('nll-loss', 0.57159948348999023)
"""
def __init__(self, eps=1e-12, name='nll-loss',
output_names=None, label_names=None):
super(NegativeLogLikelihood, self).__init__(
name, eps=eps,
output_names=output_names, label_names=label_names)
self.eps = eps
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
label = label.asnumpy()
pred = pred.asnumpy()
label = label.ravel()
num_examples = pred.shape[0]
assert label.shape[0] == num_examples, (label.shape[0], num_examples)
prob = pred[numpy.arange(num_examples, dtype=numpy.int64), numpy.int64(label)]
self.sum_metric += (-numpy.log(prob + self.eps)).sum()
self.num_inst += num_examples
@register
@alias('pearsonr')
class PearsonCorrelation(EvalMetric):
"""Computes Pearson correlation.
The Pearson correlation is given by
.. math::
\\frac{cov(y, \\hat{y})}{\\sigma_{y}\\sigma_{\\hat{y}}}
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([[1, 0], [0, 1], [0, 1]])]
>>> pr = mx.metric.PearsonCorrelation()
>>> pr.update(labels, predicts)
>>> print pr.get()
('pearson-correlation', 0.42163704544016178)
"""
def __init__(self, name='pearsonr',
output_names=None, label_names=None):
super(PearsonCorrelation, self).__init__(
name, output_names=output_names, label_names=label_names)
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
labels, preds = check_label_shapes(labels, preds, True)
for label, pred in zip(labels, preds):
check_label_shapes(label, pred, False, True)
label = label.asnumpy()
pred = pred.asnumpy()
self.sum_metric += numpy.corrcoef(pred.ravel(), label.ravel())[0, 1]
self.num_inst += 1
@register
class Loss(EvalMetric):
"""Dummy metric for directly printing loss.
Parameters
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self, name='loss',
output_names=None, label_names=None):
super(Loss, self).__init__(
name, output_names=output_names, label_names=label_names)
def update(self, _, preds):
for pred in preds:
self.sum_metric += ndarray.sum(pred).asscalar()
self.num_inst += pred.size
@register
class Torch(Loss):
"""Dummy metric for torch criterions."""
def __init__(self, name='torch',
output_names=None, label_names=None):
super(Torch, self).__init__(
name, output_names=output_names, label_names=label_names)
@register
class Caffe(Loss):
"""Dummy metric for caffe criterions."""
def __init__(self, name='caffe',
output_names=None, label_names=None):
super(Caffe, self).__init__(
name, output_names=output_names, label_names=label_names)
@register
class CustomMetric(EvalMetric):
"""Computes a customized evaluation metric.
The `feval` function can return a `tuple` of (sum_metric, num_inst) or return
an `int` sum_metric.
Parameters
----------
feval : callable(label, pred)
Customized evaluation function.
name : str, optional
The name of the metric. (the default is None).
allow_extra_outputs : bool, optional
If true, the prediction outputs can have extra outputs.
This is useful in RNN, where the states are also produced
in outputs for forwarding. (the default is False).
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
Examples
--------
>>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))]
>>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))]
>>> feval = lambda x, y : (x + y).mean()
>>> eval_metrics = mx.metric.CustomMetric(feval=feval)
>>> eval_metrics.update(labels, predicts)
>>> print eval_metrics.get()
('custom(<lambda>)', 6.0)
"""
def __init__(self, feval, name=None, allow_extra_outputs=False,
output_names=None, label_names=None):
if name is None:
name = feval.__name__
if name.find('<') != -1:
name = 'custom(%s)' % name
super(CustomMetric, self).__init__(
name, feval=feval,
allow_extra_outputs=allow_extra_outputs,
output_names=output_names, label_names=label_names)
self._feval = feval
self._allow_extra_outputs = allow_extra_outputs
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
if not self._allow_extra_outputs:
labels, preds = check_label_shapes(labels, preds, True)
for pred, label in zip(preds, labels):
label = label.asnumpy()
pred = pred.asnumpy()
reval = self._feval(label, pred)
if isinstance(reval, tuple):
(sum_metric, num_inst) = reval
self.sum_metric += sum_metric
self.num_inst += num_inst
else:
self.sum_metric += reval
self.num_inst += 1
def get_config(self):
raise NotImplementedError("CustomMetric cannot be serialized")
# pylint: disable=invalid-name
def np(numpy_feval, name=None, allow_extra_outputs=False):
"""Creates a custom evaluation metric that receives its inputs as numpy arrays.
Parameters
----------
numpy_feval : callable(label, pred)
Custom evaluation function that receives labels and predictions for a minibatch
as numpy arrays and returns the corresponding custom metric as a floating point number.
name : str, optional
Name of the custom metric.
allow_extra_outputs : bool, optional
Whether prediction output is allowed to have extra outputs. This is useful in cases
like RNN where states are also part of output which can then be fed back to the RNN
in the next step. By default, extra outputs are not allowed.
Returns
-------
float
Custom metric corresponding to the provided labels and predictions.
Example
-------
>>> def custom_metric(label, pred):
... return np.mean(np.abs(label-pred))
...
>>> metric = mx.metric.np(custom_metric)
"""
def feval(label, pred):
"""Internal eval function."""
return numpy_feval(label, pred)
feval.__name__ = numpy_feval.__name__
return CustomMetric(feval, name, allow_extra_outputs)
# pylint: enable=invalid-name
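# A minimal usage sketch for the `np` helper above, in the same doctest style
# as the class docstrings (it assumes `mx` is this package and `numpy` is
# imported as `np`, as in the earlier examples; the callable name `my_mae`
# is purely illustrative):
#
# >>> def my_mae(label, pred):
# ...     return np.abs(label - pred).mean()
# >>> metric = mx.metric.np(my_mae, name='my_mae')
# >>> metric.update(labels=[mx.nd.array([1, 2])], preds=[mx.nd.array([1.5, 2.5])])
# >>> print metric.get()
# ('my_mae', 0.5)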
| TuSimple/mxnet | python/mxnet/metric.py | Python | apache-2.0 | 43,258 |
import re
import sys
class URI():
def __init__( self, root_path ):
super().__init__()
if root_path[-1] != '/' or root_path[0] != '/':
raise ValueError( 'root_path must start and end with "/"' )
self.root_path = root_path
self.uri_regex = re.compile( r'^({0}|/)(([a-zA-Z0-9\-_.!~*<>]+/)*)([a-zA-Z0-9\-_.!~*<>]+)?(:([a-zA-Z0-9\-_.!~*\'<>]*:)*)?(\([a-zA-Z0-9\-_.!~*<>]+\))?$'.format( self.root_path ) )
def split( self, uri, root_optional=False ):
uri_match = self.uri_regex.match( uri )
if not uri_match:
raise ValueError( 'Unable to parse URI "{0}"'.format( uri ) )
( root, namespace, _, model, rec_id, _, action ) = uri_match.groups()
if root != self.root_path and not root_optional:
raise ValueError( 'URI does not start in the root_path' )
if namespace != '':
namespace_list = namespace.rstrip( '/' ).split( '/' )
else:
namespace_list = []
if rec_id is not None:
id_list = rec_id.strip( ':' ).split( ':' )
multi = len( id_list ) > 1
else:
id_list = None # id_list = [] is an empty list of ids, where None means the list is not even present
multi = False
if action is not None:
action = action[ 1:-1 ]
return ( namespace_list, model, action, id_list, multi )
def build( self, namespace=None, model=None, action=None, id_list=None, in_root=True ):
"""
Build a URI. NOTE: if model is None, id_list and action are skipped.
"""
if in_root:
result = self.root_path
else:
result = '/'
if namespace is not None:
if not isinstance( namespace, list ):
namespace = [ namespace ]
if len( namespace ) > 0:
result = '{0}{1}/'.format( result, '/'.join( namespace ) )
if model is None:
return result
result = '{0}{1}'.format( result, model )
if id_list is not None and id_list != []:
if not isinstance( id_list, list ):
id_list = [ id_list ]
result = '{0}:{1}:'.format( result, ':'.join( id_list ) )
if action is not None:
result = '{0}({1})'.format( result, action )
return result
def extractIds( self, uri_list ): # TODO: should we make sure the namespace/model do not change in the list?
"""
Extract the record IDs from the URIs in uri_list. Can handle some/all/none
of the URIs already having multiple IDs in them; does not force uniqueness.
Order should remain intact.
"""
if isinstance( uri_list, str ):
uri_list = [ uri_list ]
if not isinstance( uri_list, list ):
raise ValueError( 'uri_list must be string or list of strings' )
result = []
for uri in uri_list:
uri_match = self.uri_regex.match( uri )
if not uri_match:
raise ValueError( 'Unable to parse URI "{0}"'.format( uri ) )
( _, _, _, _, rec_id, _, _ ) = uri_match.groups()
if rec_id is None:
continue
result += rec_id.strip( ':' ).split( ':' )
return result
def uriListToMultiURI( self, uri_list ):
"""
Runs extractIds on the list, then takes the first URI and applies all
the IDs to it.
"""
if not uri_list:
return []
id_list = self.extractIds( uri_list )
if not id_list:
return []
( namespace_list, model, action, _, _ ) = self.split( uri_list[0] )
return self.build( namespace_list, model, action, id_list, True )
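# A brief usage sketch for the URI class above; the root path '/api/v1/' and
# the names 'ns1', 'model' and 'act' are made-up examples:
#
# uri = URI( '/api/v1/' )
# uri.split( '/api/v1/ns1/model:12:34:(act)' )
# # -> ( [ 'ns1' ], 'model', 'act', [ '12', '34' ], True )
# uri.build( namespace=[ 'ns1' ], model='model', action='act', id_list=[ '12', '34' ] )
# # -> '/api/v1/ns1/model:12:34:(act)'
# uri.extractIds( [ '/api/v1/ns1/model:1:', '/api/v1/ns1/model:2:3:' ] )
# # -> [ '1', '2', '3' ]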
# borrowed from https://www.python.org/dev/peps/pep-0257/
def doccstring_prep( docstring ):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[ 1: ]:
stripped = line.lstrip()
if stripped:
indent = min( indent, len( line ) - len( stripped ) )
# Remove indentation (first line is special):
trimmed = [ lines[0].strip() ]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append( line[ indent: ].rstrip() )
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop( 0 )
# Return a single string:
return '\n'.join( trimmed )
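# A small illustration of doccstring_prep; the input string is arbitrary:
#
# doccstring_prep( 'First line.\n\n    details one\n    details two\n    ' )
# # -> 'First line.\n\ndetails one\ndetails two'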
| cinp/python | cinp/common.py | Python | apache-2.0 | 4,279 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import bz2
import gzip
import os
import random
import shutil
import sys
from helpers import unittest
import mock
import luigi.format
from luigi import LocalTarget
from luigi.local_target import LocalFileSystem
from luigi.target import FileAlreadyExists, MissingParentDirectory
from target_test import FileSystemTargetTestMixin
import itertools
import io
from errno import EEXIST, EXDEV
class LocalTargetTest(unittest.TestCase, FileSystemTargetTestMixin):
PATH_PREFIX = '/tmp/test.txt'
def setUp(self):
self.path = self.PATH_PREFIX + '-' + str(self.id())
self.copy = self.PATH_PREFIX + '-copy-' + str(self.id())
if os.path.exists(self.path):
os.remove(self.path)
if os.path.exists(self.copy):
os.remove(self.copy)
def tearDown(self):
if os.path.exists(self.path):
os.remove(self.path)
if os.path.exists(self.copy):
os.remove(self.copy)
def create_target(self, format=None):
return LocalTarget(self.path, format=format)
def assertCleanUp(self, tmp_path=''):
self.assertFalse(os.path.exists(tmp_path))
def test_exists(self):
t = self.create_target()
p = t.open('w')
self.assertEqual(t.exists(), os.path.exists(self.path))
p.close()
self.assertEqual(t.exists(), os.path.exists(self.path))
@unittest.skipIf(tuple(sys.version_info) < (3, 4), 'only for Python>=3.4')
def test_pathlib(self):
"""Test work with pathlib.Path"""
import pathlib
path = pathlib.Path(self.path)
self.assertFalse(path.exists())
target = LocalTarget(path)
self.assertFalse(target.exists())
with path.open('w') as stream:
stream.write('test me')
self.assertTrue(target.exists())
def test_gzip_with_module(self):
t = LocalTarget(self.path, luigi.format.Gzip)
p = t.open('w')
test_data = b'test'
p.write(test_data)
print(self.path)
self.assertFalse(os.path.exists(self.path))
p.close()
self.assertTrue(os.path.exists(self.path))
# Using gzip module as validation
f = gzip.open(self.path, 'r')
self.assertTrue(test_data == f.read())
f.close()
# Verifying our own gzip reader
f = LocalTarget(self.path, luigi.format.Gzip).open('r')
self.assertTrue(test_data == f.read())
f.close()
def test_bzip2(self):
t = LocalTarget(self.path, luigi.format.Bzip2)
p = t.open('w')
test_data = b'test'
p.write(test_data)
print(self.path)
self.assertFalse(os.path.exists(self.path))
p.close()
self.assertTrue(os.path.exists(self.path))
# Using bzip module as validation
f = bz2.BZ2File(self.path, 'r')
self.assertTrue(test_data == f.read())
f.close()
# Verifying our own bzip2 reader
f = LocalTarget(self.path, luigi.format.Bzip2).open('r')
self.assertTrue(test_data == f.read())
f.close()
def test_copy(self):
t = LocalTarget(self.path)
f = t.open('w')
test_data = 'test'
f.write(test_data)
f.close()
self.assertTrue(os.path.exists(self.path))
self.assertFalse(os.path.exists(self.copy))
t.copy(self.copy)
self.assertTrue(os.path.exists(self.path))
self.assertTrue(os.path.exists(self.copy))
self.assertEqual(t.open('r').read(), LocalTarget(self.copy).open('r').read())
def test_move(self):
t = LocalTarget(self.path)
f = t.open('w')
test_data = 'test'
f.write(test_data)
f.close()
self.assertTrue(os.path.exists(self.path))
self.assertFalse(os.path.exists(self.copy))
t.move(self.copy)
self.assertFalse(os.path.exists(self.path))
self.assertTrue(os.path.exists(self.copy))
def test_move_across_filesystems(self):
t = LocalTarget(self.path)
with t.open('w') as f:
f.write('test_data')
def rename_across_filesystems(src, dst):
err = OSError()
err.errno = EXDEV
raise err
real_rename = os.rename
def mockrename(src, dst):
if '-across-fs' in src:
real_rename(src, dst)
else:
rename_across_filesystems(src, dst)
copy = '%s-across-fs' % self.copy
with mock.patch('os.rename', mockrename):
t.move(copy)
self.assertFalse(os.path.exists(self.path))
self.assertTrue(os.path.exists(copy))
self.assertEqual('test_data', LocalTarget(copy).open('r').read())
def test_format_chain(self):
UTF8WIN = luigi.format.TextFormat(encoding='utf8', newline='\r\n')
t = LocalTarget(self.path, UTF8WIN >> luigi.format.Gzip)
a = u'我é\nçф'
with t.open('w') as f:
f.write(a)
f = gzip.open(self.path, 'rb')
b = f.read()
f.close()
self.assertEqual(b'\xe6\x88\x91\xc3\xa9\r\n\xc3\xa7\xd1\x84', b)
def test_format_chain_reverse(self):
t = LocalTarget(self.path, luigi.format.UTF8 >> luigi.format.Gzip)
f = gzip.open(self.path, 'wb')
f.write(b'\xe6\x88\x91\xc3\xa9\r\n\xc3\xa7\xd1\x84')
f.close()
with t.open('r') as f:
b = f.read()
self.assertEqual(u'我é\nçф', b)
@mock.patch('os.linesep', '\r\n')
def test_format_newline(self):
t = LocalTarget(self.path, luigi.format.SysNewLine)
with t.open('w') as f:
f.write(b'a\rb\nc\r\nd')
with t.open('r') as f:
b = f.read()
with open(self.path, 'rb') as f:
c = f.read()
self.assertEqual(b'a\nb\nc\nd', b)
self.assertEqual(b'a\r\nb\r\nc\r\nd', c)
def theoretical_io_modes(
self,
rwax='rwax',
bt=['', 'b', 't'],
plus=['', '+']):
p = itertools.product(rwax, plus, bt)
return {''.join(c) for c in list(
itertools.chain.from_iterable(
[itertools.permutations(m) for m in p]))}
def valid_io_modes(self, *a, **kw):
modes = set()
t = LocalTarget(is_tmp=True)
t.open('w').close()
for mode in self.theoretical_io_modes(*a, **kw):
try:
io.FileIO(t.path, mode).close()
except ValueError:
pass
except IOError as err:
if err.errno == EEXIST:
modes.add(mode)
else:
raise
else:
modes.add(mode)
return modes
def valid_write_io_modes_for_luigi(self):
return self.valid_io_modes('w', plus=[''])
def valid_read_io_modes_for_luigi(self):
return self.valid_io_modes('r', plus=[''])
def invalid_io_modes_for_luigi(self):
return self.valid_io_modes().difference(
self.valid_write_io_modes_for_luigi(),
self.valid_read_io_modes_for_luigi())
def test_open_modes(self):
t = LocalTarget(is_tmp=True)
print('Valid write mode:', end=' ')
for mode in self.valid_write_io_modes_for_luigi():
print(mode, end=' ')
p = t.open(mode)
p.close()
print()
print('Valid read mode:', end=' ')
for mode in self.valid_read_io_modes_for_luigi():
print(mode, end=' ')
p = t.open(mode)
p.close()
print()
print('Invalid mode:', end=' ')
for mode in self.invalid_io_modes_for_luigi():
print(mode, end=' ')
self.assertRaises(Exception, t.open, mode)
print()
class LocalTargetCreateDirectoriesTest(LocalTargetTest):
path = '/tmp/%s/xyz/test.txt' % random.randint(0, 999999999)
copy = '/tmp/%s/xyz_2/copy.txt' % random.randint(0, 999999999)
class LocalTargetRelativeTest(LocalTargetTest):
# We had a bug that caused relative file paths to fail, adding test for it
path = 'test.txt'
copy = 'copy.txt'
class TmpFileTest(unittest.TestCase):
def test_tmp(self):
t = LocalTarget(is_tmp=True)
self.assertFalse(t.exists())
self.assertFalse(os.path.exists(t.path))
p = t.open('w')
print('test', file=p)
self.assertFalse(t.exists())
self.assertFalse(os.path.exists(t.path))
p.close()
self.assertTrue(t.exists())
self.assertTrue(os.path.exists(t.path))
q = t.open('r')
self.assertEqual(q.readline(), 'test\n')
q.close()
path = t.path
del t # should remove the underlying file
self.assertFalse(os.path.exists(path))
class FileSystemTest(unittest.TestCase):
path = '/tmp/luigi-test-dir'
fs = LocalFileSystem()
def setUp(self):
if os.path.exists(self.path):
shutil.rmtree(self.path)
def tearDown(self):
self.setUp()
def test_copy(self):
src = os.path.join(self.path, 'src.txt')
dest = os.path.join(self.path, 'newdir', 'dest.txt')
LocalTarget(src).open('w').close()
self.fs.copy(src, dest)
self.assertTrue(os.path.exists(src))
self.assertTrue(os.path.exists(dest))
def test_mkdir(self):
testpath = os.path.join(self.path, 'foo/bar')
self.assertRaises(MissingParentDirectory, self.fs.mkdir, testpath, parents=False)
self.fs.mkdir(testpath)
self.assertTrue(os.path.exists(testpath))
self.assertTrue(self.fs.isdir(testpath))
self.assertRaises(FileAlreadyExists, self.fs.mkdir, testpath, raise_if_exists=True)
def test_exists(self):
self.assertFalse(self.fs.exists(self.path))
os.mkdir(self.path)
self.assertTrue(self.fs.exists(self.path))
self.assertTrue(self.fs.isdir(self.path))
def test_listdir(self):
os.mkdir(self.path)
with open(self.path + '/file', 'w'):
pass
self.assertTrue([self.path + '/file'], list(self.fs.listdir(self.path + '/')))
def test_move_to_new_dir(self):
# Regression test for a bug in LocalFileSystem.move
src = os.path.join(self.path, 'src.txt')
dest = os.path.join(self.path, 'newdir', 'dest.txt')
LocalTarget(src).open('w').close()
self.fs.move(src, dest)
self.assertTrue(os.path.exists(dest))
| riga/luigi | test/local_target_test.py | Python | apache-2.0 | 11,097 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions call' command."""
from googlecloudsdk.api_lib.functions import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
class Call(base.Command):
"""Call function synchronously for testing."""
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'name', help='Name of the function to be called.',
type=util.ValidateFunctionNameOrRaise)
parser.add_argument(
'--data', default='',
help='Data passed to the function (JSON string)')
@util.CatchHTTPErrorRaiseHTTPException
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Function call results (error or result with execution id)
"""
project = properties.VALUES.core.project.Get(required=True)
registry = self.context['registry']
client = self.context['functions_client']
messages = self.context['functions_messages']
function_ref = registry.Parse(
args.name, params={'projectsId': project, 'locationsId': args.region},
collection='cloudfunctions.projects.locations.functions')
return client.projects_locations_functions.Call(
messages.CloudfunctionsProjectsLocationsFunctionsCallRequest(
name=function_ref.RelativeName(),
callFunctionRequest=messages.CallFunctionRequest(data=args.data)))
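# Example invocation of this command (the function name and payload are
# illustrative; --region is expected to be supplied by the surrounding
# command group, since Run() reads args.region):
#
# $ gcloud functions call my-function --data '{"message": "hello"}'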
| KaranToor/MA450 | google-cloud-sdk/lib/surface/functions/call.py | Python | apache-2.0 | 2,127 |
"""The ReCollect Waste integration."""
from __future__ import annotations
from datetime import date, timedelta
from aiorecollect.client import Client, PickupEvent
from aiorecollect.errors import RecollectError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import CONF_PLACE_ID, CONF_SERVICE_ID, DOMAIN, LOGGER
DEFAULT_NAME = "recollect_waste"
DEFAULT_UPDATE_INTERVAL = timedelta(days=1)
PLATFORMS = [Platform.SENSOR]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up RainMachine as config entry."""
session = aiohttp_client.async_get_clientsession(hass)
client = Client(
entry.data[CONF_PLACE_ID], entry.data[CONF_SERVICE_ID], session=session
)
async def async_get_pickup_events() -> list[PickupEvent]:
"""Get the next pickup."""
try:
return await client.async_get_pickup_events(
start_date=date.today(), end_date=date.today() + timedelta(weeks=4)
)
except RecollectError as err:
raise UpdateFailed(
f"Error while requesting data from ReCollect: {err}"
) from err
coordinator = DataUpdateCoordinator(
hass,
LOGGER,
name=f"Place {entry.data[CONF_PLACE_ID]}, Service {entry.data[CONF_SERVICE_ID]}",
update_interval=DEFAULT_UPDATE_INTERVAL,
update_method=async_get_pickup_events,
)
await coordinator.async_config_entry_first_refresh()
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
entry.async_on_unload(entry.add_update_listener(async_reload_entry))
return True
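# A rough sketch (not part of this module) of how a sensor platform could
# consume the coordinator stored above. CoordinatorEntity and SensorEntity
# are the standard Home Assistant helpers; the `date` attribute on
# PickupEvent is an assumption about aiorecollect's data model:
#
# from homeassistant.components.sensor import SensorEntity
# from homeassistant.helpers.update_coordinator import CoordinatorEntity
#
# class NextPickupSensor(CoordinatorEntity, SensorEntity):
#     @property
#     def native_value(self):
#         events = self.coordinator.data  # list of PickupEvent
#         return events[0].date if events else None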
async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle an options update."""
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload an RainMachine config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
| home-assistant/home-assistant | homeassistant/components/recollect_waste/__init__.py | Python | apache-2.0 | 2,425 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2002-2018, Neo4j
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from unittest import TestCase
from cypy.graph import Node, relationship_type, Path
from cypy.encoding import cypher_repr, cypher_escape
KNOWS = relationship_type("KNOWS")
LOVES = relationship_type("LOVES")
HATES = relationship_type("HATES")
KNOWS_FR = relationship_type(u"CONNAÎT")
class CypherEscapeTestCase(TestCase):
def test_can_write_simple_identifier(self):
escaped = cypher_escape("foo")
assert escaped == "foo"
def test_can_write_identifier_with_odd_chars(self):
escaped = cypher_escape("foo bar")
assert escaped == "`foo bar`"
def test_can_write_identifier_containing_back_ticks(self):
escaped = cypher_escape("foo `bar`")
assert escaped == "`foo ``bar```"
def test_cannot_write_empty_identifier(self):
with self.assertRaises(ValueError):
_ = cypher_escape("")
def test_cannot_write_none_identifier(self):
with self.assertRaises(TypeError):
_ = cypher_escape(None)
class CypherNoneRepresentationTestCase(TestCase):
def test_should_encode_none(self):
encoded = cypher_repr(None)
assert encoded == u"null"
class CypherBooleanRepresentationTestCase(TestCase):
def test_should_encode_true(self):
encoded = cypher_repr(True)
assert encoded == u"true"
def test_should_encode_false(self):
encoded = cypher_repr(False)
assert encoded == u"false"
class CypherIntegerRepresentationTestCase(TestCase):
def test_should_encode_zero(self):
encoded = cypher_repr(0)
assert encoded == u"0"
def test_should_encode_positive_integer(self):
encoded = cypher_repr(123)
assert encoded == u"123"
def test_should_encode_negative_integer(self):
encoded = cypher_repr(-123)
assert encoded == u"-123"
class CypherFloatRepresentationTestCase(TestCase):
def test_should_encode_zero(self):
encoded = cypher_repr(0.0)
assert encoded == u"0.0"
def test_should_encode_positive_float(self):
encoded = cypher_repr(123.456)
assert encoded == u"123.456"
def test_should_encode_negative_float(self):
encoded = cypher_repr(-123.456)
assert encoded == u"-123.456"
class CypherStringRepresentationTestCase(TestCase):
def test_should_encode_bytes(self):
encoded = cypher_repr(b"hello, world")
assert encoded == u"'hello, world'"
def test_should_encode_unicode(self):
encoded = cypher_repr(u"hello, world")
assert encoded == u"'hello, world'"
def test_should_encode_bytes_with_escaped_chars(self):
encoded = cypher_repr(b"hello, 'world'", quote=u"'")
assert encoded == u"'hello, \\'world\\''"
def test_should_encode_unicode_with_escaped_chars(self):
encoded = cypher_repr(u"hello, 'world'", quote=u"'")
assert encoded == u"'hello, \\'world\\''"
def test_should_encode_empty_string(self):
encoded = cypher_repr(u"")
assert encoded == u"''"
def test_should_encode_bell(self):
encoded = cypher_repr(u"\a")
assert encoded == u"'\\u0007'"
def test_should_encode_backspace(self):
encoded = cypher_repr(u"\b")
assert encoded == u"'\\b'"
def test_should_encode_form_feed(self):
encoded = cypher_repr(u"\f")
assert encoded == u"'\\f'"
def test_should_encode_new_line(self):
encoded = cypher_repr(u"\n")
assert encoded == u"'\\n'"
def test_should_encode_carriage_return(self):
encoded = cypher_repr(u"\r")
assert encoded == u"'\\r'"
def test_should_encode_horizontal_tab(self):
encoded = cypher_repr(u"\t")
assert encoded == u"'\\t'"
def test_should_encode_double_quote_when_single_quoted(self):
encoded = cypher_repr(u"\"")
assert encoded == u"'\"'"
def test_should_encode_single_quote_when_single_quoted(self):
encoded = cypher_repr(u"'", quote=u"'")
assert encoded == u"'\\''"
def test_should_encode_double_quote_when_double_quoted(self):
encoded = cypher_repr(u"\"", quote=u"\"")
assert encoded == u'"\\""'
def test_should_encode_single_quote_when_double_quoted(self):
encoded = cypher_repr(u"'", quote=u"\"")
assert encoded == u'"\'"'
def test_should_encode_2_byte_extended_character(self):
encoded = cypher_repr(u"\xAB")
assert encoded == u"'\\u00ab'"
def test_should_encode_4_byte_extended_character(self):
encoded = cypher_repr(u"\uABCD")
assert encoded == u"'\\uabcd'"
def test_should_encode_8_byte_extended_character(self):
encoded = cypher_repr(u"\U0010ABCD")
assert encoded == u"'\\U0010abcd'"
def test_should_encode_complex_sequence(self):
encoded = cypher_repr(u"' '' '''")
assert encoded == u"\"' '' '''\""
class CypherListRepresentationTestCase(TestCase):
def test_should_encode_list(self):
encoded = cypher_repr([1, 2.0, u"three"])
assert encoded == u"[1, 2.0, 'three']"
def test_should_encode_empty_list(self):
encoded = cypher_repr([])
assert encoded == u"[]"
class CypherMapRepresentationTestCase(TestCase):
def test_should_encode_map(self):
encoded = cypher_repr(OrderedDict([("one", 1), ("two", 2.0), ("number three", u"three")]))
assert encoded == u"{one: 1, two: 2.0, `number three`: 'three'}"
def test_should_encode_empty_map(self):
encoded = cypher_repr({})
assert encoded == u"{}"
class CypherNodeRepresentationTestCase(TestCase):
def test_should_encode_empty_node(self):
a = Node()
encoded = cypher_repr(a, node_template="{labels} {properties}")
assert encoded == u"({})"
def test_should_encode_node_with_property(self):
a = Node(name="Alice")
encoded = cypher_repr(a, node_template="{labels} {properties}")
assert encoded == u"({name: 'Alice'})"
def test_should_encode_node_with_label(self):
a = Node("Person")
encoded = cypher_repr(a, node_template="{labels} {properties}")
assert encoded == u"(:Person {})"
def test_should_encode_node_with_label_and_property(self):
a = Node("Person", name="Alice")
encoded = cypher_repr(a, node_template="{labels} {properties}")
assert encoded == u"(:Person {name: 'Alice'})"
class CypherRelationshipRepresentationTestCase(TestCase):
def test_can_encode_relationship(self):
a = Node(name="Alice")
b = Node(name="Bob")
ab = KNOWS(a, b)
encoded = cypher_repr(ab, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:KNOWS {}]->(Bob)", encoded)
def test_can_encode_relationship_with_names(self):
a = Node("Person", name="Alice")
b = Node("Person", name="Bob")
ab = KNOWS(a, b)
encoded = cypher_repr(ab, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:KNOWS {}]->(Bob)", encoded)
def test_can_encode_relationship_with_alternative_names(self):
a = Node("Person", nom=u"Aimée")
b = Node("Person", nom=u"Baptiste")
ab = KNOWS_FR(a, b)
encoded = cypher_repr(ab, related_node_template=u"{property.nom}")
self.assertEqual(u"(Aimée)-[:CONNAÎT {}]->(Baptiste)", encoded)
def test_can_encode_relationship_with_properties(self):
a = Node("Person", name="Alice")
b = Node("Person", name="Bob")
ab = KNOWS(a, b, since=1999)
encoded = cypher_repr(ab, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:KNOWS {since: 1999}]->(Bob)", encoded)
class CypherPathRepresentationTestCase(TestCase):
def test_can_write_path(self):
alice, bob, carol, dave = Node(name="Alice"), Node(name="Bob"), \
Node(name="Carol"), Node(name="Dave")
ab = LOVES(alice, bob)
cb = HATES(carol, bob)
cd = KNOWS(carol, dave)
path = Path(alice, ab, bob, cb, carol, cd, dave)
encoded = cypher_repr(path, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:LOVES {}]->(Bob)<-[:HATES {}]-(Carol)-[:KNOWS {}]->(Dave)", encoded)
| technige/cypy | test/test_encoding.py | Python | apache-2.0 | 8,939 |
# N-peg Tower of Hanoi problem. Use BFS instead of DFS, since there is
# no full analytical solution for the general case.
import sys
import copy
def solutionWorks(currentSolution, stacksAfterSolution, initialStacks, finalStacks):
for x in range(len(currentSolution)):
i, j = currentSolution[x]
stacksAfterSolution[j].append(stacksAfterSolution[i].pop())
if str(stacksAfterSolution) == str(finalStacks):
return True
else:
return False
def stepLegitimate(stacksAfterSolution, i, j):
if len(stacksAfterSolution[i]) == 0 or \
(len(stacksAfterSolution[j]) > 0 and stacksAfterSolution[i][-1] > stacksAfterSolution[j][-1]):
return False
return True
# DFS cannot work here (unbounded depth, no shortest-sequence guarantee), so use BFS
def moveDiscs(initialStacks, finalStacks, results):
import collections
solutions = collections.deque()
solutions.append([])
K = len(initialStacks) - 1
while len(solutions) > 0:
currentSolution = copy.deepcopy(solutions.popleft())
if len(currentSolution) > 7:
continue
stacksAfterSolution = copy.deepcopy(initialStacks)
if solutionWorks(currentSolution, stacksAfterSolution, initialStacks, finalStacks):
for x in range(len(currentSolution)):
results.append(list(currentSolution[x]))
return
# add other solutions in queue
for i in range(1, K + 1):
for j in range(1, K + 1):
if j != i and stepLegitimate(stacksAfterSolution, i, j):
currentSolution.append([i, j])
solutions.append(copy.deepcopy(currentSolution))
currentSolution.pop()
if __name__ == '__main__':
# N, K = [int(x) for x in sys.stdin.readline().split()]
N, K = 6, 4
initialStacks = [[] for x in range(K + 1)]
finalStacks = [[] for x in range(K + 1)]
# initial = [int(x) for x in sys.stdin.readline().split()]
# final = [int(x) for x in sys.stdin.readline().split()]
initial = [4, 2, 4, 3, 1, 1]
final = [1, 1, 1, 1, 1, 1]
for i in range(N - 1, -1, -1):
initialStacks[initial[i]].append(i + 1)
for i in range(N - 1, -1, -1):
finalStacks[final[i]].append(i + 1)
print(initialStacks)
print(finalStacks)
results = []
moveDiscs(initialStacks, finalStacks, results)
print(len(results))
for i in range(len(results)):
print(results[i][0], results[i][1])
| baiyubin/python_practice | pegs.py | Python | apache-2.0 | 2,445 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.common.storage_policies """
import contextlib
import six
import logging
import unittest
import os
import mock
from functools import partial
from six.moves.configparser import ConfigParser
from tempfile import NamedTemporaryFile
from test.unit import patch_policies, FakeRing, temptree, DEFAULT_TEST_EC_TYPE
import swift.common.storage_policy
from swift.common.storage_policy import (
StoragePolicyCollection, POLICIES, PolicyError, parse_storage_policies,
reload_storage_policies, get_policy_string, split_policy_string,
BaseStoragePolicy, StoragePolicy, ECStoragePolicy, REPL_POLICY, EC_POLICY,
VALID_EC_TYPES, DEFAULT_EC_OBJECT_SEGMENT_SIZE, BindPortsCache)
from swift.common.ring import RingData
from swift.common.exceptions import RingLoadError
from pyeclib.ec_iface import ECDriver
class CapturingHandler(logging.Handler):
def __init__(self):
super(CapturingHandler, self).__init__()
self._records = []
def emit(self, record):
self._records.append(record)
@contextlib.contextmanager
def capture_logging(log_name):
captured = CapturingHandler()
logger = logging.getLogger(log_name)
logger.addHandler(captured)
try:
yield captured._records
finally:
logger.removeHandler(captured)
@BaseStoragePolicy.register('fake')
class FakeStoragePolicy(BaseStoragePolicy):
"""
Test StoragePolicy class - the only user at the moment is
test_validate_policies_type_invalid()
"""
def __init__(self, idx, name='', is_default=False, is_deprecated=False,
object_ring=None):
super(FakeStoragePolicy, self).__init__(
idx, name, is_default, is_deprecated, object_ring)
class TestStoragePolicies(unittest.TestCase):
def _conf(self, conf_str):
conf_str = "\n".join(line.strip() for line in conf_str.split("\n"))
if six.PY2:
conf = ConfigParser()
else:
conf = ConfigParser(strict=False)
conf.readfp(six.StringIO(conf_str))
return conf
def assertRaisesWithMessage(self, exc_class, message, f, *args, **kwargs):
try:
f(*args, **kwargs)
except exc_class as err:
err_msg = str(err)
self.assertTrue(message in err_msg, 'Error message %r did not '
'have expected substring %r' % (err_msg, message))
else:
self.fail('%r did not raise %s' % (message, exc_class.__name__))
def test_policy_baseclass_instantiate(self):
self.assertRaisesWithMessage(TypeError,
"Can't instantiate BaseStoragePolicy",
BaseStoragePolicy, 1, 'one')
@patch_policies([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two'),
StoragePolicy(3, 'three', is_deprecated=True),
ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4),
])
def test_swift_info(self):
# the deprecated 'three' should not exist in expect
expect = [{'aliases': 'zero', 'default': True, 'name': 'zero', },
{'aliases': 'two', 'name': 'two'},
{'aliases': 'one', 'name': 'one'},
{'aliases': 'ten', 'name': 'ten'}]
swift_info = POLICIES.get_policy_info()
self.assertEqual(sorted(expect, key=lambda k: k['name']),
sorted(swift_info, key=lambda k: k['name']))
@patch_policies
def test_get_policy_string(self):
self.assertEqual(get_policy_string('something', 0), 'something')
self.assertEqual(get_policy_string('something', None), 'something')
self.assertEqual(get_policy_string('something', ''), 'something')
self.assertEqual(get_policy_string('something', 1),
'something' + '-1')
self.assertRaises(PolicyError, get_policy_string, 'something', 99)
@patch_policies
def test_split_policy_string(self):
expectations = {
'something': ('something', POLICIES[0]),
'something-1': ('something', POLICIES[1]),
'tmp': ('tmp', POLICIES[0]),
'objects': ('objects', POLICIES[0]),
'tmp-1': ('tmp', POLICIES[1]),
'objects-1': ('objects', POLICIES[1]),
'objects-': PolicyError,
'objects-0': PolicyError,
'objects--1': ('objects-', POLICIES[1]),
'objects-+1': PolicyError,
'objects--': PolicyError,
'objects-foo': PolicyError,
'objects--bar': PolicyError,
'objects-+bar': PolicyError,
# questionable, demonstrated as inverse of get_policy_string
'objects+0': ('objects+0', POLICIES[0]),
'': ('', POLICIES[0]),
'0': ('0', POLICIES[0]),
'-1': ('', POLICIES[1]),
}
for policy_string, expected in expectations.items():
if expected == PolicyError:
try:
invalid = split_policy_string(policy_string)
except PolicyError:
continue # good
else:
self.fail('The string %r returned %r '
'instead of raising a PolicyError' %
(policy_string, invalid))
self.assertEqual(expected, split_policy_string(policy_string))
# should be inverse of get_policy_string
self.assertEqual(policy_string, get_policy_string(*expected))
def test_defaults(self):
self.assertGreater(len(POLICIES), 0)
# test class functions
default_policy = POLICIES.default
self.assertTrue(default_policy.is_default)
zero_policy = POLICIES.get_by_index(0)
self.assertTrue(zero_policy.idx == 0)
zero_policy_by_name = POLICIES.get_by_name(zero_policy.name)
self.assertTrue(zero_policy_by_name.idx == 0)
def test_storage_policy_repr(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False),
ECStoragePolicy(10, 'ten',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
ECStoragePolicy(11, 'eleven',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3,
ec_duplication_factor=2)]
policies = StoragePolicyCollection(test_policies)
for policy in policies:
policy_repr = repr(policy)
self.assertTrue(policy.__class__.__name__ in policy_repr)
self.assertTrue('is_default=%s' % policy.is_default in policy_repr)
self.assertTrue('is_deprecated=%s' % policy.is_deprecated in
policy_repr)
self.assertTrue(policy.name in policy_repr)
if policy.policy_type == EC_POLICY:
self.assertTrue('ec_type=%s' % policy.ec_type in policy_repr)
self.assertTrue('ec_ndata=%s' % policy.ec_ndata in policy_repr)
self.assertTrue('ec_nparity=%s' %
policy.ec_nparity in policy_repr)
self.assertTrue('ec_segment_size=%s' %
policy.ec_segment_size in policy_repr)
if policy.ec_duplication_factor > 1:
self.assertTrue('ec_duplication_factor=%s' %
policy.ec_duplication_factor in
policy_repr)
collection_repr = repr(policies)
collection_repr_lines = collection_repr.splitlines()
self.assertTrue(
policies.__class__.__name__ in collection_repr_lines[0])
self.assertEqual(len(policies), len(collection_repr_lines[1:-1]))
for policy, line in zip(policies, collection_repr_lines[1:-1]):
self.assertTrue(repr(policy) in line)
with patch_policies(policies):
self.assertEqual(repr(POLICIES), collection_repr)
def test_validate_policies_defaults(self):
# 0 explicit default
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, test_policies[0])
self.assertEqual(policies.default.name, 'zero')
# non-zero explicit default
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', True)]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, test_policies[2])
self.assertEqual(policies.default.name, 'two')
# multiple defaults
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', True),
StoragePolicy(2, 'two', True)]
self.assertRaisesWithMessage(
PolicyError, 'Duplicate default', StoragePolicyCollection,
test_policies)
# nothing specified
test_policies = []
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, policies[0])
self.assertEqual(policies.default.name, 'Policy-0')
# no default specified with only policy index 0
test_policies = [StoragePolicy(0, 'zero')]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, policies[0])
# no default specified with multiple policies
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
self.assertRaisesWithMessage(
PolicyError, 'Unable to find default policy',
StoragePolicyCollection, test_policies)
def test_deprecate_policies(self):
# deprecation specified
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False, is_deprecated=True)]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, test_policies[0])
self.assertEqual(policies.default.name, 'zero')
self.assertEqual(len(policies), 3)
# multiple policies require a default
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', False, is_deprecated=True),
StoragePolicy(2, 'two', False)]
self.assertRaisesWithMessage(
PolicyError, 'Unable to find default policy',
StoragePolicyCollection, test_policies)
def test_validate_policies_indexes(self):
# duplicate indexes
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(1, 'two', False)]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
def test_validate_policy_params(self):
StoragePolicy(0, 'name') # sanity
# bogus indexes
self.assertRaises(PolicyError, FakeStoragePolicy, 'x', 'name')
self.assertRaises(PolicyError, FakeStoragePolicy, -1, 'name')
# non-zero Policy-0
self.assertRaisesWithMessage(PolicyError, 'reserved',
FakeStoragePolicy, 1, 'policy-0')
# deprecate default
self.assertRaisesWithMessage(
PolicyError, 'Deprecated policy can not be default',
FakeStoragePolicy, 1, 'Policy-1', is_default=True,
is_deprecated=True)
# weird names
names = (
'',
'name_foo',
'name\nfoo',
'name foo',
u'name \u062a',
'name \xd8\xaa',
)
for name in names:
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
FakeStoragePolicy, 1, name)
def test_validate_policies_names(self):
# duplicate names
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'zero', False),
StoragePolicy(2, 'two', False)]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
def test_validate_policies_type_default(self):
# no type specified - make sure the policy is initialized to
# DEFAULT_POLICY_TYPE
test_policy = FakeStoragePolicy(0, 'zero', True)
self.assertEqual(test_policy.policy_type, 'fake')
def test_validate_policies_type_invalid(self):
class BogusStoragePolicy(FakeStoragePolicy):
policy_type = 'bogus'
# unsupported policy type - initialization with FakeStoragePolicy
self.assertRaisesWithMessage(PolicyError, 'Invalid type',
BogusStoragePolicy, 1, 'one')
def test_policies_type_attribute(self):
test_policies = [
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two'),
StoragePolicy(3, 'three', is_deprecated=True),
ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.get_by_index(0).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(1).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(2).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(3).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(10).policy_type,
EC_POLICY)
def test_names_are_normalized(self):
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'ZERO', False)]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
policies = StoragePolicyCollection([StoragePolicy(0, 'zEro', True),
StoragePolicy(1, 'One', False)])
pol0 = policies[0]
pol1 = policies[1]
for name in ('zero', 'ZERO', 'zErO', 'ZeRo'):
self.assertEqual(pol0, policies.get_by_name(name))
self.assertEqual(policies.get_by_name(name).name, 'zEro')
for name in ('one', 'ONE', 'oNe', 'OnE'):
self.assertEqual(pol1, policies.get_by_name(name))
self.assertEqual(policies.get_by_name(name).name, 'One')
def test_multiple_names(self):
# checking duplicate on insert
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False, aliases='zero')]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
# checking correct retrieval using other names
test_policies = [StoragePolicy(0, 'zero', True, aliases='cero, kore'),
StoragePolicy(1, 'one', False, aliases='uno, tahi'),
StoragePolicy(2, 'two', False, aliases='dos, rua')]
policies = StoragePolicyCollection(test_policies)
for name in ('zero', 'cero', 'kore'):
self.assertEqual(policies.get_by_name(name), test_policies[0])
for name in ('two', 'dos', 'rua'):
self.assertEqual(policies.get_by_name(name), test_policies[2])
# Testing parsing of conf files/text
good_conf = self._conf("""
[storage-policy:0]
name = one
aliases = uno, tahi
default = yes
""")
policies = parse_storage_policies(good_conf)
self.assertEqual(policies.get_by_name('one'),
policies[0])
self.assertEqual(policies.get_by_name('one'),
policies.get_by_name('tahi'))
name_repeat_conf = self._conf("""
[storage-policy:0]
name = one
aliases = one
default = yes
""")
# Test on line below should not generate errors. Repeat of main
# name under aliases is permitted during construction
# but only because automated testing requires it.
policies = parse_storage_policies(name_repeat_conf)
extra_commas_conf = self._conf("""
[storage-policy:0]
name = one
aliases = ,,one, ,
default = yes
""")
# Extra blank entries should be silently dropped
policies = parse_storage_policies(extra_commas_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = one
aliases = uno, uno
default = yes
""")
self.assertRaisesWithMessage(PolicyError,
'is already assigned to this policy',
parse_storage_policies, bad_conf)
def test_multiple_names_EC(self):
# checking duplicate names on insert
test_policies_ec = [
ECStoragePolicy(
0, 'ec8-2',
aliases='zeus, jupiter',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=8),
is_default=True),
ECStoragePolicy(
1, 'ec10-4',
aliases='ec8-2',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
object_ring=FakeRing(replicas=10))]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies_ec)
# checking correct retrieval using other names
good_test_policies_EC = [
ECStoragePolicy(0, 'ec8-2', aliases='zeus, jupiter',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=10),
is_default=True),
ECStoragePolicy(1, 'ec10-4', aliases='athena, minerva',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
object_ring=FakeRing(replicas=14)),
ECStoragePolicy(2, 'ec4-2', aliases='poseidon, neptune',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
object_ring=FakeRing(replicas=6)),
ECStoragePolicy(3, 'ec4-2-dup', aliases='uzuki, rin',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
ec_duplication_factor=2,
object_ring=FakeRing(replicas=12)),
]
ec_policies = StoragePolicyCollection(good_test_policies_EC)
for name in ('ec8-2', 'zeus', 'jupiter'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[0])
for name in ('ec10-4', 'athena', 'minerva'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[1])
for name in ('ec4-2', 'poseidon', 'neptune'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[2])
for name in ('ec4-2-dup', 'uzuki', 'rin'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[3])
# Testing parsing of conf files/text
good_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = zeus, jupiter
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
[storage-policy:1]
name = ec10-4
aliases = poseidon, neptune
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
[storage-policy:2]
name = ec4-2-dup
aliases = uzuki, rin
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 4
ec_num_parity_fragments = 2
ec_duplication_factor = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
ec_policies = parse_storage_policies(good_ec_conf)
self.assertEqual(ec_policies.get_by_name('ec8-2'),
ec_policies[0])
self.assertEqual(ec_policies.get_by_name('ec10-4'),
ec_policies.get_by_name('poseidon'))
self.assertEqual(ec_policies.get_by_name('ec4-2-dup'),
ec_policies.get_by_name('uzuki'))
name_repeat_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = ec8-2
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
# Test on line below should not generate errors. Repeat of main
# name under aliases is permitted during construction
# but only because automated testing requires it.
ec_policies = parse_storage_policies(name_repeat_ec_conf)
bad_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = zeus, zeus
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'is already assigned to this policy',
parse_storage_policies, bad_ec_conf)
def test_add_remove_names(self):
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
policies = StoragePolicyCollection(test_policies)
# add names
policies.add_policy_alias(1, 'tahi')
self.assertEqual(policies.get_by_name('tahi'), test_policies[1])
policies.add_policy_alias(2, 'rua', 'dos')
self.assertEqual(policies.get_by_name('rua'), test_policies[2])
self.assertEqual(policies.get_by_name('dos'), test_policies[2])
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.add_policy_alias, 2, 'double\n')
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.add_policy_alias, 2, '')
# try to add existing name
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
policies.add_policy_alias, 2, 'two')
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
policies.add_policy_alias, 1, 'two')
# remove name
policies.remove_policy_alias('tahi')
self.assertIsNone(policies.get_by_name('tahi'))
# remove only name
self.assertRaisesWithMessage(PolicyError,
'Policies must have at least one name.',
policies.remove_policy_alias, 'zero')
# remove non-existent name
self.assertRaisesWithMessage(PolicyError,
'No policy with name',
policies.remove_policy_alias, 'three')
# remove default name
policies.remove_policy_alias('two')
self.assertIsNone(policies.get_by_name('two'))
self.assertEqual(policies.get_by_index(2).name, 'rua')
# change default name to a new name
policies.change_policy_primary_name(2, 'two')
self.assertEqual(policies.get_by_name('two'), test_policies[2])
self.assertEqual(policies.get_by_index(2).name, 'two')
# change default name to an existing alias
policies.change_policy_primary_name(2, 'dos')
self.assertEqual(policies.get_by_index(2).name, 'dos')
# change default name to a bad new name
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.change_policy_primary_name,
2, 'bad\nname')
# change default name to a name belonging to another policy
self.assertRaisesWithMessage(PolicyError,
'Other policy',
policies.change_policy_primary_name,
1, 'dos')
def test_deprecated_default(self):
bad_conf = self._conf("""
[storage-policy:1]
name = one
deprecated = yes
default = yes
""")
self.assertRaisesWithMessage(
PolicyError, "Deprecated policy can not be default",
parse_storage_policies, bad_conf)
def test_multiple_policies_with_no_policy_index_zero(self):
bad_conf = self._conf("""
[storage-policy:1]
name = one
default = yes
""")
# Policy-0 will not be implicitly added if other policies are defined
self.assertRaisesWithMessage(
PolicyError, "must specify a storage policy section "
"for policy index 0", parse_storage_policies, bad_conf)
@mock.patch.object(swift.common.storage_policy, 'VALID_EC_TYPES',
['isa_l_rs_vand', 'isa_l_rs_cauchy'])
@mock.patch('swift.common.storage_policy.ECDriver')
def test_known_bad_ec_config(self, mock_driver):
good_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_cauchy
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
""")
with capture_logging('swift.common.storage_policy') as records:
parse_storage_policies(good_conf)
mock_driver.assert_called_once()
mock_driver.reset_mock()
self.assertFalse([(r.levelname, r.msg) for r in records])
good_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
with capture_logging('swift.common.storage_policy') as records:
parse_storage_policies(good_conf)
mock_driver.assert_called_once()
mock_driver.reset_mock()
self.assertFalse([(r.levelname, r.msg) for r in records])
bad_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
""")
with capture_logging('swift.common.storage_policy') as records, \
self.assertRaises(PolicyError) as exc_mgr:
parse_storage_policies(bad_conf)
self.assertEqual(exc_mgr.exception.args[0],
'Storage policy bad-policy uses an EC '
'configuration known to harm data durability. This '
'policy MUST be deprecated.')
mock_driver.assert_not_called()
mock_driver.reset_mock()
self.assertEqual([r.levelname for r in records],
['WARNING'])
for msg in ('known to harm data durability',
'Any data in this policy should be migrated',
'https://bugs.launchpad.net/swift/+bug/1639691'):
self.assertIn(msg, records[0].msg)
slightly_less_bad_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
deprecated = true
[storage-policy:1]
name = good-policy
policy_type = erasure_coding
ec_type = isa_l_rs_cauchy
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
default = true
""")
with capture_logging('swift.common.storage_policy') as records:
parse_storage_policies(slightly_less_bad_conf)
self.assertEqual(2, mock_driver.call_count)
mock_driver.reset_mock()
self.assertEqual([r.levelname for r in records],
['WARNING'])
for msg in ('known to harm data durability',
'Any data in this policy should be migrated',
'https://bugs.launchpad.net/swift/+bug/1639691'):
self.assertIn(msg, records[0].msg)
def test_no_default(self):
orig_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = one
default = yes
""")
policies = parse_storage_policies(orig_conf)
self.assertEqual(policies.default, policies[1])
        self.assertEqual(policies[0].name, 'zero')
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = one
deprecated = yes
""")
        # multiple policies and no explicit default
self.assertRaisesWithMessage(
PolicyError, "Unable to find default",
parse_storage_policies, bad_conf)
good_conf = self._conf("""
[storage-policy:0]
name = Policy-0
default = yes
[storage-policy:1]
name = one
deprecated = yes
""")
policies = parse_storage_policies(good_conf)
self.assertEqual(policies.default, policies[0])
        self.assertTrue(policies[1].is_deprecated)
def test_parse_storage_policies(self):
# ValueError when deprecating policy 0
bad_conf = self._conf("""
[storage-policy:0]
name = zero
deprecated = yes
[storage-policy:1]
name = one
deprecated = yes
""")
self.assertRaisesWithMessage(
PolicyError, "Unable to find policy that's not deprecated",
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:-1]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x-1]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x:1]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:1]
name = zero
boo = berries
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid option',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:0]
name =
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:3]
name = Policy-0
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:1]
name = policY-0
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = one
[storage-policy:1]
name = ONE
""")
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = good_stuff
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
# policy_type = erasure_coding
# missing ec_type, ec_num_data_fragments and ec_num_parity_fragments
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
""")
self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
parse_storage_policies, bad_conf)
# missing ec_type, but other options valid...
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
parse_storage_policies, bad_conf)
# ec_type specified, but invalid...
bad_conf = self._conf("""
[storage-policy:0]
name = zero
default = yes
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = garbage_alg
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
self.assertRaisesWithMessage(PolicyError,
'Wrong ec_type garbage_alg for policy '
'ec10-4, should be one of "%s"' %
(', '.join(VALID_EC_TYPES)),
parse_storage_policies, bad_conf)
# missing and invalid ec_num_parity_fragments
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_parity_fragments',
parse_storage_policies, bad_conf)
for num_parity in ('-4', '0', 'x'):
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = %(num_parity)s
""" % {'ec_type': DEFAULT_TEST_EC_TYPE,
'num_parity': num_parity})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_parity_fragments',
parse_storage_policies, bad_conf)
# missing and invalid ec_num_data_fragments
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_parity_fragments = 4
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_data_fragments',
parse_storage_policies, bad_conf)
for num_data in ('-10', '0', 'x'):
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = %(num_data)s
ec_num_parity_fragments = 4
""" % {'num_data': num_data, 'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_data_fragments',
parse_storage_policies, bad_conf)
# invalid ec_object_segment_size
for segment_size in ('-4', '0', 'x'):
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_object_segment_size = %(segment_size)s
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""" % {'segment_size': segment_size,
'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_object_segment_size',
parse_storage_policies, bad_conf)
# Additional section added to ensure parser ignores other sections
conf = self._conf("""
[some-other-section]
foo = bar
[storage-policy:0]
name = zero
[storage-policy:5]
name = one
default = yes
[storage-policy:6]
name = duplicate-sections-are-ignored
[storage-policy:6]
name = apple
""")
policies = parse_storage_policies(conf)
self.assertEqual(True, policies.get_by_index(5).is_default)
self.assertEqual(False, policies.get_by_index(0).is_default)
self.assertEqual(False, policies.get_by_index(6).is_default)
self.assertEqual("object", policies.get_by_name("zero").ring_name)
self.assertEqual("object-5", policies.get_by_name("one").ring_name)
self.assertEqual("object-6", policies.get_by_name("apple").ring_name)
self.assertEqual(0, int(policies.get_by_name('zero')))
self.assertEqual(5, int(policies.get_by_name('one')))
self.assertEqual(6, int(policies.get_by_name('apple')))
self.assertEqual("zero", policies.get_by_index(0).name)
self.assertEqual("zero", policies.get_by_index("0").name)
self.assertEqual("one", policies.get_by_index(5).name)
self.assertEqual("apple", policies.get_by_index(6).name)
self.assertEqual("zero", policies.get_by_index(None).name)
self.assertEqual("zero", policies.get_by_index('').name)
self.assertEqual(policies.get_by_index(0), policies.legacy)
def test_reload_invalid_storage_policies(self):
conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:00]
name = double-zero
""")
with NamedTemporaryFile(mode='w+t') as f:
conf.write(f)
f.flush()
with mock.patch('swift.common.utils.SWIFT_CONF_FILE',
new=f.name):
try:
reload_storage_policies()
except SystemExit as e:
err_msg = str(e)
else:
self.fail('SystemExit not raised')
parts = [
'Invalid Storage Policy Configuration',
'Duplicate index',
]
for expected in parts:
self.assertTrue(
expected in err_msg, '%s was not in %s' % (expected,
err_msg))
def test_storage_policy_ordering(self):
test_policies = StoragePolicyCollection([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(503, 'error'),
StoragePolicy(204, 'empty'),
StoragePolicy(404, 'missing'),
])
self.assertEqual([0, 204, 404, 503], [int(p) for p in
sorted(list(test_policies))])
p503 = test_policies[503]
self.assertTrue(501 < p503 < 507)
def test_get_object_ring(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
policies = StoragePolicyCollection(test_policies)
class NamedFakeRing(FakeRing):
def __init__(self, swift_dir, ring_name=None):
self.ring_name = ring_name
super(NamedFakeRing, self).__init__()
with mock.patch('swift.common.storage_policy.Ring',
new=NamedFakeRing):
for policy in policies:
self.assertFalse(policy.object_ring)
ring = policies.get_object_ring(int(policy), '/path/not/used')
self.assertEqual(ring.ring_name, policy.ring_name)
self.assertTrue(policy.object_ring)
self.assertTrue(isinstance(policy.object_ring, NamedFakeRing))
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
with mock.patch('swift.common.storage_policy.Ring', new=blow_up):
for policy in policies:
policy.load_ring('/path/not/used')
expected = policies.get_object_ring(int(policy),
'/path/not/used')
self.assertEqual(policy.object_ring, expected)
# bad policy index
self.assertRaises(PolicyError, policies.get_object_ring, 99,
'/path/not/used')
def test_bind_ports_cache(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
my_ips = ['1.2.3.4', '2.3.4.5']
other_ips = ['3.4.5.6', '4.5.6.7']
bind_ip = my_ips[1]
devs_by_ring_name1 = {
'object': [ # 'aay'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6006},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
'port': 6007},
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6008},
None,
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6009}],
'object-1': [ # 'bee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6006}, # dupe
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
'port': 6010},
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6011},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6012}],
'object-2': [ # 'cee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6010}, # on our IP and a not-us IP
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
'port': 6013},
None,
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6014},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6015}],
}
devs_by_ring_name2 = {
'object': [ # 'aay'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6016},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6019}],
'object-1': [ # 'bee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6016}, # dupe
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6022}],
'object-2': [ # 'cee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6020},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6025}],
}
ring_files = [ring_name + '.ring.gz'
for ring_name in sorted(devs_by_ring_name1)]
def _fake_load(gz_path, stub_objs, metadata_only=False):
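            # serve stub devices keyed by ring name (basename minus '.ring.gz')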
return RingData(
devs=stub_objs[os.path.basename(gz_path)[:-8]],
replica2part2dev_id=[],
part_shift=24)
with mock.patch(
'swift.common.storage_policy.RingData.load'
) as mock_ld, \
patch_policies(test_policies), \
mock.patch('swift.common.storage_policy.whataremyips') \
as mock_whataremyips, \
temptree(ring_files) as tempdir:
mock_whataremyips.return_value = my_ips
cache = BindPortsCache(tempdir, bind_ip)
self.assertEqual([
mock.call(bind_ip),
], mock_whataremyips.mock_calls)
mock_whataremyips.reset_mock()
mock_ld.side_effect = partial(_fake_load,
stub_objs=devs_by_ring_name1)
self.assertEqual(set([
6006, 6008, 6011, 6010, 6014,
]), cache.all_bind_ports_for_node())
self.assertEqual([
mock.call(os.path.join(tempdir, ring_files[0]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[1]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[2]),
metadata_only=True),
], mock_ld.mock_calls)
mock_ld.reset_mock()
mock_ld.side_effect = partial(_fake_load,
stub_objs=devs_by_ring_name2)
self.assertEqual(set([
6006, 6008, 6011, 6010, 6014,
]), cache.all_bind_ports_for_node())
self.assertEqual([], mock_ld.mock_calls)
# but when all the file mtimes are made different, it'll
# reload
for gz_file in [os.path.join(tempdir, n)
for n in ring_files]:
os.utime(gz_file, (88, 88))
self.assertEqual(set([
6016, 6020,
]), cache.all_bind_ports_for_node())
self.assertEqual([
mock.call(os.path.join(tempdir, ring_files[0]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[1]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[2]),
metadata_only=True),
], mock_ld.mock_calls)
mock_ld.reset_mock()
# Don't do something stupid like crash if a ring file is missing.
os.unlink(os.path.join(tempdir, 'object-2.ring.gz'))
self.assertEqual(set([
6016, 6020,
]), cache.all_bind_ports_for_node())
self.assertEqual([], mock_ld.mock_calls)
# whataremyips() is only called in the constructor
self.assertEqual([], mock_whataremyips.mock_calls)
def test_singleton_passthrough(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
with patch_policies(test_policies):
for policy in POLICIES:
self.assertEqual(POLICIES[int(policy)], policy)
def test_quorum_size_replication(self):
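        # quorum for a replication policy of n replicas is (n + 1) // 2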
expected_sizes = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3}
for n, expected in expected_sizes.items():
policy = StoragePolicy(0, 'zero',
object_ring=FakeRing(replicas=n))
self.assertEqual(policy.quorum, expected)
def test_quorum_size_erasure_coding(self):
test_ec_policies = [
ECStoragePolicy(10, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2),
ECStoragePolicy(11, 'df10-6', ec_type='flat_xor_hd_4',
ec_ndata=10, ec_nparity=6),
ECStoragePolicy(12, 'ec4-2-dup', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2, ec_duplication_factor=2),
]
for ec_policy in test_ec_policies:
k = ec_policy.ec_ndata
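            # EC quorum: data fragments plus the backend's minimum required
            # parity fragments, scaled by the duplication factor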
expected_size = (
(k + ec_policy.pyeclib_driver.min_parity_fragments_needed())
* ec_policy.ec_duplication_factor
)
self.assertEqual(expected_size, ec_policy.quorum)
def test_validate_ring(self):
test_policies = [
ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
is_default=True),
ECStoragePolicy(1, 'ec10-4', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4),
ECStoragePolicy(2, 'ec4-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2),
ECStoragePolicy(3, 'ec4-2-2dup', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
ec_duplication_factor=2)
]
policies = StoragePolicyCollection(test_policies)
class MockRingData(object):
def __init__(self, num_replica):
self.replica_count = num_replica
def do_test(actual_load_ring_replicas):
for policy, ring_replicas in zip(policies,
actual_load_ring_replicas):
with mock.patch('swift.common.ring.ring.RingData.load',
return_value=MockRingData(ring_replicas)):
necessary_replica_num = (policy.ec_n_unique_fragments *
policy.ec_duplication_factor)
with mock.patch(
'swift.common.ring.ring.validate_configuration'):
msg = 'EC ring for policy %s needs to be configured ' \
'with exactly %d replicas.' % \
(policy.name, necessary_replica_num)
self.assertRaisesWithMessage(RingLoadError, msg,
policy.load_ring, 'mock')
        # first, do something completely different
do_test([8, 10, 7, 11])
# then again, closer to true, but fractional
do_test([9.9, 14.1, 5.99999, 12.000000001])
def test_storage_policy_get_info(self):
test_policies = [
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one', is_deprecated=True,
aliases='tahi, uno'),
ECStoragePolicy(10, 'ten',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
ECStoragePolicy(11, 'done', is_deprecated=True,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
]
policies = StoragePolicyCollection(test_policies)
expected = {
# default replication
(0, True): {
'name': 'zero',
'aliases': 'zero',
'default': True,
'deprecated': False,
'policy_type': REPL_POLICY
},
(0, False): {
'name': 'zero',
'aliases': 'zero',
'default': True,
},
# deprecated replication
(1, True): {
'name': 'one',
'aliases': 'one, tahi, uno',
'default': False,
'deprecated': True,
'policy_type': REPL_POLICY
},
(1, False): {
'name': 'one',
'aliases': 'one, tahi, uno',
'deprecated': True,
},
# enabled ec
(10, True): {
'name': 'ten',
'aliases': 'ten',
'default': False,
'deprecated': False,
'policy_type': EC_POLICY,
'ec_type': DEFAULT_TEST_EC_TYPE,
'ec_num_data_fragments': 10,
'ec_num_parity_fragments': 3,
'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
'ec_duplication_factor': 1,
},
(10, False): {
'name': 'ten',
'aliases': 'ten',
},
# deprecated ec
(11, True): {
'name': 'done',
'aliases': 'done',
'default': False,
'deprecated': True,
'policy_type': EC_POLICY,
'ec_type': DEFAULT_TEST_EC_TYPE,
'ec_num_data_fragments': 10,
'ec_num_parity_fragments': 3,
'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
'ec_duplication_factor': 1,
},
(11, False): {
'name': 'done',
'aliases': 'done',
'deprecated': True,
},
# enabled ec with ec_duplication
(12, True): {
'name': 'twelve',
'aliases': 'twelve',
'default': False,
'deprecated': False,
'policy_type': EC_POLICY,
'ec_type': DEFAULT_TEST_EC_TYPE,
'ec_num_data_fragments': 10,
'ec_num_parity_fragments': 3,
'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
'ec_duplication_factor': 2,
},
(12, False): {
'name': 'twelve',
'aliases': 'twelve',
},
}
self.maxDiff = None
for policy in policies:
expected_info = expected[(int(policy), True)]
self.assertEqual(policy.get_info(config=True), expected_info)
expected_info = expected[(int(policy), False)]
self.assertEqual(policy.get_info(config=False), expected_info)
def test_ec_fragment_size_cached(self):
policy = ECStoragePolicy(
0, 'ec2-1', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=2, ec_nparity=1, object_ring=FakeRing(replicas=3),
ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE, is_default=True)
ec_driver = ECDriver(ec_type=DEFAULT_TEST_EC_TYPE,
k=2, m=1)
expected_fragment_size = ec_driver.get_segment_info(
DEFAULT_EC_OBJECT_SEGMENT_SIZE,
DEFAULT_EC_OBJECT_SEGMENT_SIZE)['fragment_size']
with mock.patch.object(
policy.pyeclib_driver, 'get_segment_info') as fake:
fake.return_value = {
'fragment_size': expected_fragment_size}
for x in range(10):
self.assertEqual(expected_fragment_size,
policy.fragment_size)
# pyeclib_driver.get_segment_info is called only once
self.assertEqual(1, fake.call_count)
if __name__ == '__main__':
unittest.main()
| matthewoliver/swift | test/unit/common/test_storage_policy.py | Python | apache-2.0 | 57,607 |
"""Support for Switchbot bot."""
from __future__ import annotations
import logging
from typing import Any
from switchbot import Switchbot # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.switch import (
DEVICE_CLASS_SWITCH,
PLATFORM_SCHEMA,
SwitchEntity,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_MAC,
CONF_NAME,
CONF_PASSWORD,
CONF_SENSOR_TYPE,
STATE_ON,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import ATTR_BOT, CONF_RETRY_COUNT, DATA_COORDINATOR, DEFAULT_NAME, DOMAIN
from .coordinator import SwitchbotDataUpdateCoordinator
from .entity import SwitchbotEntity
# Initialize the logger
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 1
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
}
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: entity_platform.AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Import yaml config and initiates config flow for Switchbot devices."""
# Check if entry config exists and skips import if it does.
if hass.config_entries.async_entries(DOMAIN):
return
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_NAME: config[CONF_NAME],
CONF_PASSWORD: config.get(CONF_PASSWORD, None),
CONF_MAC: config[CONF_MAC].replace("-", ":").lower(),
CONF_SENSOR_TYPE: ATTR_BOT,
},
)
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: entity_platform.AddEntitiesCallback,
) -> None:
"""Set up Switchbot based on a config entry."""
coordinator: SwitchbotDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
DATA_COORDINATOR
]
async_add_entities(
[
SwitchBotBotEntity(
coordinator,
entry.unique_id,
entry.data[CONF_MAC],
entry.data[CONF_NAME],
coordinator.switchbot_api.Switchbot(
mac=entry.data[CONF_MAC],
password=entry.data.get(CONF_PASSWORD),
retry_count=entry.options[CONF_RETRY_COUNT],
),
)
]
)
class SwitchBotBotEntity(SwitchbotEntity, SwitchEntity, RestoreEntity):
"""Representation of a Switchbot."""
coordinator: SwitchbotDataUpdateCoordinator
_attr_device_class = DEVICE_CLASS_SWITCH
def __init__(
self,
coordinator: SwitchbotDataUpdateCoordinator,
idx: str | None,
mac: str,
name: str,
device: Switchbot,
) -> None:
"""Initialize the Switchbot."""
super().__init__(coordinator, idx, mac, name)
self._attr_unique_id = idx
self._device = device
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added."""
await super().async_added_to_hass()
last_state = await self.async_get_last_state()
if not last_state:
return
self._attr_is_on = last_state.state == STATE_ON
self._last_run_success = last_state.attributes["last_run_success"]
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn device on."""
_LOGGER.info("Turn Switchbot bot on %s", self._mac)
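        # the coordinator lock serializes commands so only one Bluetooth
        # call talks to the device at a time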
async with self.coordinator.api_lock:
self._last_run_success = bool(
await self.hass.async_add_executor_job(self._device.turn_on)
)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn device off."""
_LOGGER.info("Turn Switchbot bot off %s", self._mac)
async with self.coordinator.api_lock:
self._last_run_success = bool(
await self.hass.async_add_executor_job(self._device.turn_off)
)
    @property
    def assumed_state(self) -> bool:
        """Return true if unable to access real state of entity."""
        return not self.data["data"]["switchMode"]
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self.data["data"]["isOn"]
@property
def extra_state_attributes(self) -> dict:
"""Return the state attributes."""
return {
**super().extra_state_attributes,
"switch_mode": self.data["data"]["switchMode"],
}
| aronsky/home-assistant | homeassistant/components/switchbot/switch.py | Python | apache-2.0 | 5,033 |
"""Support for Zigbee switches."""
import voluptuous as vol
from homeassistant.components.switch import SwitchDevice
from homeassistant.components.zigbee import (
ZigBeeDigitalOut, ZigBeeDigitalOutConfig, PLATFORM_SCHEMA)
DEPENDENCIES = ['zigbee']
CONF_ON_STATE = 'on_state'
DEFAULT_ON_STATE = 'high'
STATES = ['high', 'low']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_ON_STATE): vol.In(STATES),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Zigbee switch platform."""
add_entities([ZigBeeSwitch(hass, ZigBeeDigitalOutConfig(config))])
class ZigBeeSwitch(ZigBeeDigitalOut, SwitchDevice):
"""Representation of a Zigbee Digital Out device."""
pass
| HydrelioxGitHub/home-assistant | homeassistant/components/zigbee/switch.py | Python | apache-2.0 | 765 |
# -*- coding: utf-8 -*-
# count how many times each action (rec|click|msg) occurred for each user
from math import sqrt
def getActionScore(action):
if action == "rec":
return 0
elif action == "click" :
return 1
else:
return 2
def compute_interaction(data):
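    # count how many log lines exist for each (userB, action) pair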
interaction = {}
for line in data:
(userA,userB,times,action) = line.split(' ')
action = action[:-1]
key = userB + " " + action
interaction.setdefault(key, 0)
interaction[key] += 1
return interaction
def compute_user_history_interaction(trainFile):
records = []
lineList = []
lineNum = 1
result = []
lineList = [line for line in file(trainFile)]
for line in lineList:
if lineNum == 1: #ignore the title in first line
lineNum += 1
continue
records.append(line)
lineNum += 1
interaction = compute_interaction(records)
out = file('user_interaction.txt', 'w')
for (key, times) in interaction.items():
out.write('%s %d' % (key, times))
out.write('\n')
for (key, times) in interaction.items():
user, action = key.split(' ');
result.append((user, action, times))
return result
#get the weight for each type of action
def get_action_weight(action):
pop = 0;
if action == "rec":
pop = 1
elif action == "click":
pop = 10
elif action == "msg":
pop = 100
return pop;
#trainFile line like: [userA, userB, action_times, action_type(rec|click|msg)]
def compute_user_popularity(trainFile, user_popularity_file):
popDict = {}
rankedscores = []
result = []
print "-----compute_user_history_interaction ... "
interaction = compute_user_history_interaction(trainFile)
print "-----compute_user_popularity ... "
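    # popularity = sum of action_weight(action) * count over all actions
    # recorded for the user (rec=1, click=10, msg=100)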
for (user, action, times) in interaction[0:len(interaction)]:
popDict.setdefault(user, 0)
popDict[user] += get_action_weight(action) * times
ranked_popularity = [(popularity, user) for (user, popularity) in popDict.items()]
ranked_popularity.sort()
ranked_popularity.reverse()
print "-----ranking_user_popularity ... "
result = [(user, popularity) for (popularity, user) in ranked_popularity[0:len(ranked_popularity)]]
print "-----output user_popularity ... "
out = file(user_popularity_file, 'w')
for (user, popularity) in result[0:len(result)]:
out.write('%s %d\n' % (user, popularity))
print "-----Ending ... "
return result | bingtianbaihua/MachineLearning | 世纪佳缘会员推荐之投票加权/compute_user_popularity.py | Python | apache-2.0 | 2,399 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def make_name(name: str) -> str:
# Sample function parameter name in delete_specialist_pool_sample
name = name
return name
| googleapis/python-aiplatform | .sample_configs/param_handlers/delete_specialist_pool_sample.py | Python | apache-2.0 | 714 |
import subprocess
import pytest
from utils import *
@all_available_simulators()
def test_filter(tmp_path, simulator):
unit_test = tmp_path.joinpath('some_unit_test.sv')
unit_test.write_text('''
module some_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVTEST(some_failing_test)
`FAIL_IF(1)
`SVTEST_END
`SVTEST(some_passing_test)
`FAIL_IF(0)
`SVTEST_END
`SVUNIT_TESTS_END
endmodule
''')
log = tmp_path.joinpath('run.log')
print('Filtering only the passing test should block the fail')
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', 'some_ut.some_passing_test'], cwd=tmp_path)
assert 'FAILED' not in log.read_text()
print('No explicit filter should cause both tests to run, hence trigger the fail')
subprocess.check_call(['runSVUnit', '-s', simulator], cwd=tmp_path)
assert 'FAILED' in log.read_text()
@all_available_simulators()
def test_filter_wildcards(tmp_path, simulator):
failing_unit_test = tmp_path.joinpath('some_failing_unit_test.sv')
failing_unit_test.write_text('''
module some_failing_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_failing_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVTEST(some_test)
`FAIL_IF(1)
`SVTEST_END
`SVUNIT_TESTS_END
endmodule
''')
passing_unit_test = tmp_path.joinpath('some_passing_unit_test.sv')
passing_unit_test.write_text('''
module some_passing_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_passing_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVTEST(some_test)
`FAIL_IF(0)
`SVTEST_END
`SVUNIT_TESTS_END
endmodule
''')
log = tmp_path.joinpath('run.log')
print('Filtering only the passing testcase should block the fail')
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', 'some_passing_ut.*'], cwd=tmp_path)
assert 'FAILED' not in log.read_text()
assert 'some_test' in log.read_text()
print('Filtering only for the test should cause both tests to run, hence trigger the fail')
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', "*.some_test"], cwd=tmp_path)
assert 'FAILED' in log.read_text()
@all_available_simulators()
def test_filter_without_dot(tmp_path, simulator):
dummy_unit_test = tmp_path.joinpath('dummy_unit_test.sv')
dummy_unit_test.write_text('''
module dummy_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_passing_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVUNIT_TESTS_END
endmodule
''')
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', 'some_string'], cwd=tmp_path)
log = tmp_path.joinpath('run.log')
assert 'fatal' in log.read_text().lower()
@all_available_simulators()
def test_filter_with_extra_dot(tmp_path, simulator):
dummy_unit_test = tmp_path.joinpath('dummy_unit_test.sv')
dummy_unit_test.write_text('''
module dummy_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_passing_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVUNIT_TESTS_END
endmodule
''')
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', 'a.b.c'], cwd=tmp_path)
log = tmp_path.joinpath('run.log')
assert 'fatal' in log.read_text().lower()
@all_available_simulators()
def test_filter_with_partial_wildcard(tmp_path, simulator):
dummy_unit_test = tmp_path.joinpath('dummy_unit_test.sv')
dummy_unit_test.write_text('''
module dummy_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_passing_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVUNIT_TESTS_END
endmodule
''')
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', 'foo*.bar'], cwd=tmp_path)
log = tmp_path.joinpath('run.log')
assert 'fatal' in log.read_text().lower()
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', 'foo.bar*'], cwd=tmp_path)
log = tmp_path.joinpath('run.log')
assert 'fatal' in log.read_text().lower()
subprocess.check_call(['runSVUnit', '-s', simulator, '--filter', '*foo.bar'], cwd=tmp_path)
log = tmp_path.joinpath('run.log')
assert 'fatal' in log.read_text().lower()
@all_available_simulators()
def test_multiple_filter_expressions(tmp_path, simulator):
unit_test = tmp_path.joinpath('some_unit_test.sv')
unit_test.write_text('''
module some_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVTEST(some_failing_test)
`FAIL_IF(1)
`SVTEST_END
`SVTEST(some_passing_test)
`FAIL_IF(0)
`SVTEST_END
`SVTEST(some_other_passing_test)
`FAIL_IF(0)
`SVTEST_END
`SVTEST(yet_another_passing_test)
`FAIL_IF(0)
`SVTEST_END
`SVUNIT_TESTS_END
endmodule
''')
log = tmp_path.joinpath('run.log')
print('Filtering only the passing testcases should block the fail')
subprocess.check_call(
[
'runSVUnit',
'-s', simulator,
'--filter', '*.some_passing_test:*.some_other_passing_test:*.yet_another_passing_test',
],
cwd=tmp_path)
assert 'FAILED' not in log.read_text()
assert 'some_passing_test' in log.read_text()
assert 'some_other_passing_test' in log.read_text()
assert 'yet_another_passing_test' in log.read_text()
@all_available_simulators()
def test_negative_filter(tmp_path, simulator):
unit_test = tmp_path.joinpath('some_unit_test.sv')
unit_test.write_text('''
module some_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVTEST(some_failing_test)
`FAIL_IF(1)
`SVTEST_END
`SVTEST(some_other_failing_test)
`FAIL_IF(1)
`SVTEST_END
`SVTEST(some_passing_test)
`FAIL_IF(0)
`SVTEST_END
`SVUNIT_TESTS_END
endmodule
''')
log = tmp_path.joinpath('run.log')
print('Filtering out the failing tests should block the fail')
subprocess.check_call(
['runSVUnit',
'-s', simulator,
'--filter', '-some_ut.some_failing_test:some_ut.some_other_failing_test',
],
cwd=tmp_path)
assert 'FAILED' not in log.read_text()
assert 'some_passing_test' in log.read_text()
@all_available_simulators()
def test_positive_and_negative_filter(tmp_path, simulator):
unit_test = tmp_path.joinpath('some_unit_test.sv')
unit_test.write_text('''
module some_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVTEST(some_failing_test)
`FAIL_IF(1)
`SVTEST_END
`SVTEST(some_passing_test)
`FAIL_IF(0)
`SVTEST_END
`SVUNIT_TESTS_END
endmodule
''')
other_unit_test = tmp_path.joinpath('some_other_unit_test.sv')
other_unit_test.write_text('''
module some_other_unit_test;
import svunit_pkg::*;
`include "svunit_defines.svh"
string name = "some_other_ut";
svunit_testcase svunit_ut;
function void build();
svunit_ut = new(name);
endfunction
task setup();
svunit_ut.setup();
endtask
task teardown();
svunit_ut.teardown();
endtask
`SVUNIT_TESTS_BEGIN
`SVTEST(some_other_failing_test)
`FAIL_IF(1)
`SVTEST_END
`SVUNIT_TESTS_END
endmodule
''')
log = tmp_path.joinpath('run.log')
print('Filtering only tests from the first unit test'
+ ' and then filtering out the failing test should block the fail')
subprocess.check_call(
['runSVUnit',
'-s', simulator,
'--filter', 'some_ut.*-some_ut.some_failing_test',
],
cwd=tmp_path)
assert 'FAILED' not in log.read_text()
assert 'some_passing_test' in log.read_text()
| svunit/svunit | test/test_run_script.py | Python | apache-2.0 | 9,783 |
# import asyncio
#
# async def compute(x, y):
# print("Compute %s + %s ..." % (x, y))
# await asyncio.sleep(1.0)
# return x + y
#
# async def print_sum(x, y):
# for i in range(10):
# result = await compute(x, y)
# print("%s + %s = %s" % (x, y, result))
#
# loop = asyncio.get_event_loop()
# loop.run_until_complete(print_sum(1,2))
# asyncio.ensure_future(print_sum(1, 2))
# asyncio.ensure_future(print_sum(3, 4))
# asyncio.ensure_future(print_sum(5, 6))
# loop.run_forever()
import asyncio
async def display_date(who, num):
i = 0
while True:
if i > num:
return
print('{}: Before loop {}'.format(who, i))
await asyncio.sleep(1)
i += 1
loop = asyncio.get_event_loop()
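# schedule both coroutines on the same loop; they interleave at each await
# note: run_forever() keeps the loop alive even after both coroutines return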
asyncio.ensure_future(display_date('AAA', 4))
asyncio.ensure_future(display_date('BBB', 6))
loop.run_forever()
| fs714/concurrency-example | asynchronous/py36/asyncio/async_test.py | Python | apache-2.0 | 868 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from contextlib import contextmanager
import os
import unittest2 as unittest
from pants.fs.fs import expand_path
from pants.util.contextutil import environment_as, pushd, temporary_dir
class ExpandPathTest(unittest.TestCase):
def test_pure_relative(self):
with self.root() as root:
self.assertEquals(os.path.join(root, 'a'), expand_path('a'))
def test_dot_relative(self):
with self.root() as root:
self.assertEquals(os.path.join(root, 'a'), expand_path('./a'))
def test_absolute(self):
self.assertEquals('/tmp/jake/bob', expand_path('/tmp/jake/bob'))
def test_user_expansion(self):
with environment_as(HOME='/tmp/jake'):
self.assertEquals('/tmp/jake/bob', expand_path('~/bob'))
def test_env_var_expansion(self):
with self.root() as root:
with environment_as(A='B', C='D'):
self.assertEquals(os.path.join(root, 'B/D/E'), expand_path('$A/${C}/E'))
@contextmanager
def root(self):
with temporary_dir() as root:
# Avoid OSX issues where tmp dirs are reported as symlinks.
real_root = os.path.realpath(root)
with pushd(real_root):
yield real_root
| square/pants | tests/python/pants_test/fs/test_expand_path.py | Python | apache-2.0 | 1,446 |
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import socket
import base64
import time
from threading import Lock
import six
import dns
import dns.exception
import dns.zone
import eventlet
from dns import rdatatype
from oslo_log import log as logging
from oslo_config import cfg
from designate import context
from designate import exceptions
from designate import objects
from designate.i18n import _LE
from designate.i18n import _LI
LOG = logging.getLogger(__name__)
util_opts = [
cfg.IntOpt('xfr_timeout', help="Timeout in seconds for XFR's.", default=10)
]
class DNSMiddleware(object):
"""Base DNS Middleware class with some utility methods"""
def __init__(self, application):
self.application = application
def process_request(self, request):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
def __call__(self, request):
response = self.process_request(request)
if response:
return response
response = self.application(request)
return self.process_response(response)
def _build_error_response(self):
response = dns.message.make_response(
dns.message.make_query('unknown', dns.rdatatype.A))
response.set_rcode(dns.rcode.FORMERR)
return response
class SerializationMiddleware(DNSMiddleware):
"""DNS Middleware to serialize/deserialize DNS Packets"""
def __init__(self, application, tsig_keyring=None):
self.application = application
self.tsig_keyring = tsig_keyring
def __call__(self, request):
# Generate the initial context. This may be updated by other middleware
# as we learn more information about the Request.
ctxt = context.DesignateContext.get_admin_context(all_tenants=True)
try:
message = dns.message.from_wire(request['payload'],
self.tsig_keyring)
if message.had_tsig:
LOG.debug('Request signed with TSIG key: %s', message.keyname)
# Create + Attach the initial "environ" dict. This is similar to
# the environ dict used in typical WSGI middleware.
message.environ = {
'context': ctxt,
'addr': request['addr'],
}
except dns.message.UnknownTSIGKey:
LOG.error(_LE("Unknown TSIG key from %(host)s:"
"%(port)d") % {'host': request['addr'][0],
'port': request['addr'][1]})
response = self._build_error_response()
except dns.tsig.BadSignature:
LOG.error(_LE("Invalid TSIG signature from %(host)s:"
"%(port)d") % {'host': request['addr'][0],
'port': request['addr'][1]})
response = self._build_error_response()
except dns.exception.DNSException:
LOG.error(_LE("Failed to deserialize packet from %(host)s:"
"%(port)d") % {'host': request['addr'][0],
'port': request['addr'][1]})
response = self._build_error_response()
except Exception:
LOG.exception(_LE("Unknown exception deserializing packet "
"from %(host)s %(port)d") %
{'host': request['addr'][0],
'port': request['addr'][1]})
response = self._build_error_response()
else:
# Hand the Deserialized packet onto the Application
for response in self.application(message):
# Serialize and return the response if present
if isinstance(response, dns.message.Message):
yield response.to_wire(max_size=65535)
elif isinstance(response, dns.renderer.Renderer):
yield response.get_wire()
class TsigInfoMiddleware(DNSMiddleware):
"""Middleware which looks up the information available for a TsigKey"""
def __init__(self, application, storage):
super(TsigInfoMiddleware, self).__init__(application)
self.storage = storage
def process_request(self, request):
if not request.had_tsig:
return None
try:
criterion = {'name': request.keyname.to_text(True)}
tsigkey = self.storage.find_tsigkey(
context.get_current(), criterion)
request.environ['tsigkey'] = tsigkey
request.environ['context'].tsigkey_id = tsigkey.id
except exceptions.TsigKeyNotFound:
# This should never happen, as we just validated the key.. Except
# for race conditions..
return self._build_error_response()
return None
class TsigKeyring(object):
"""Implements the DNSPython KeyRing API, backed by the Designate DB"""
def __init__(self, storage):
self.storage = storage
def __getitem__(self, key):
return self.get(key)
def get(self, key, default=None):
try:
criterion = {'name': key.to_text(True)}
tsigkey = self.storage.find_tsigkey(
context.get_current(), criterion)
return base64.decodestring(tsigkey.secret)
except exceptions.TsigKeyNotFound:
return default
class ZoneLock(object):
"""A Lock across all zones that enforces a rate limit on NOTIFYs"""
def __init__(self, delay):
self.lock = Lock()
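        # data maps zone name -> time the zone's lock was granted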
self.data = {}
self.delay = delay
def acquire(self, zone):
with self.lock:
# If no one holds the lock for the zone, grant it
if zone not in self.data:
self.data[zone] = time.time()
return True
# Otherwise, get the time that it was locked
locktime = self.data[zone]
now = time.time()
period = now - locktime
# If it has been locked for longer than the allowed period
# give the lock to the new requester
if period > self.delay:
self.data[zone] = now
return True
            LOG.debug('Lock for %(zone)s can\'t be released for %(period)s '
                      'seconds' % {'zone': zone,
'period': str(self.delay - period)})
# Don't grant the lock for the zone
return False
def release(self, zone):
# Release the lock
with self.lock:
try:
self.data.pop(zone)
except KeyError:
pass
class LimitNotifyMiddleware(DNSMiddleware):
"""Middleware that rate limits NOTIFYs to the Agent"""
def __init__(self, application):
super(LimitNotifyMiddleware, self).__init__(application)
self.delay = cfg.CONF['service:agent'].notify_delay
self.locker = ZoneLock(self.delay)
def process_request(self, request):
opcode = request.opcode()
if opcode != dns.opcode.NOTIFY:
return None
zone_name = request.question[0].name.to_text()
if self.locker.acquire(zone_name):
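            # wait out the rate-limit window before passing the NOTIFY on;
            # duplicates arriving meanwhile are acknowledged in the else
            # branch without triggering another update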
time.sleep(self.delay)
self.locker.release(zone_name)
return None
else:
LOG.debug('Threw away NOTIFY for %(zone)s, already '
'working on an update.' % {'zone': zone_name})
response = dns.message.make_response(request)
# Provide an authoritative answer
response.flags |= dns.flags.AA
return (response,)
def from_dnspython_zone(dnspython_zone):
# dnspython never builds a zone with more than one SOA, even if we give
# it a zonefile that contains more than one
soa = dnspython_zone.get_rdataset(dnspython_zone.origin, 'SOA')
if soa is None:
raise exceptions.BadRequest('An SOA record is required')
email = soa[0].rname.to_text().rstrip('.')
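    # the SOA RNAME encodes the contact email with the first label as the
    # local part, e.g. 'admin.example.org.' -> 'admin@example.org'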
email = email.replace('.', '@', 1)
values = {
'name': dnspython_zone.origin.to_text(),
'email': email,
'ttl': soa.ttl,
'serial': soa[0].serial,
'retry': soa[0].retry,
'expire': soa[0].expire
}
zone = objects.Domain(**values)
rrsets = dnspyrecords_to_recordsetlist(dnspython_zone.nodes)
zone.recordsets = rrsets
return zone
def dnspyrecords_to_recordsetlist(dnspython_records):
rrsets = objects.RecordList()
for rname in six.iterkeys(dnspython_records):
for rdataset in dnspython_records[rname]:
rrset = dnspythonrecord_to_recordset(rname, rdataset)
if rrset is None:
continue
rrsets.append(rrset)
return rrsets
def dnspythonrecord_to_recordset(rname, rdataset):
record_type = rdatatype.to_text(rdataset.rdtype)
# Create the other recordsets
values = {
'name': rname.to_text(),
'type': record_type
}
if rdataset.ttl != 0:
values['ttl'] = rdataset.ttl
rrset = objects.RecordSet(**values)
rrset.records = objects.RecordList()
for rdata in rdataset:
rr = objects.Record(data=rdata.to_text())
rrset.records.append(rr)
return rrset
def bind_tcp(host, port, tcp_backlog):
# Bind to the TCP port
LOG.info(_LI('Opening TCP Listening Socket on %(host)s:%(port)d') %
{'host': host, 'port': port})
sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# NOTE: Linux supports socket.SO_REUSEPORT only in 3.9 and later releases.
try:
sock_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except Exception:
pass
sock_tcp.setblocking(True)
sock_tcp.bind((host, port))
sock_tcp.listen(tcp_backlog)
return sock_tcp
def bind_udp(host, port):
# Bind to the UDP port
LOG.info(_LI('Opening UDP Listening Socket on %(host)s:%(port)d') %
{'host': host, 'port': port})
sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock_udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# NOTE: Linux supports socket.SO_REUSEPORT only in 3.9 and later releases.
try:
sock_udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except Exception:
pass
sock_udp.setblocking(True)
sock_udp.bind((host, port))
return sock_udp
def do_axfr(zone_name, servers, timeout=None, source=None):
"""
Performs an AXFR for a given zone name
"""
random.shuffle(servers)
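    # servers are tried in this randomized order so repeated AXFRs are
    # spread across the available masters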
timeout = timeout or cfg.CONF["service:mdns"].xfr_timeout
xfr = None
for srv in servers:
to = eventlet.Timeout(timeout)
log_info = {'name': zone_name, 'host': srv}
try:
LOG.info(_LI("Doing AXFR for %(name)s from %(host)s") % log_info)
xfr = dns.query.xfr(srv['host'], zone_name, relativize=False,
timeout=1, port=srv['port'], source=source)
raw_zone = dns.zone.from_xfr(xfr, relativize=False)
break
except eventlet.Timeout as t:
if t == to:
msg = _LE("AXFR timed out for %(name)s from %(host)s")
LOG.error(msg % log_info)
continue
except dns.exception.FormError:
msg = _LE("Domain %(name)s is not present on %(host)s."
"Trying next server.")
LOG.error(msg % log_info)
except socket.error:
msg = _LE("Connection error when doing AXFR for %(name)s from "
"%(host)s")
LOG.error(msg % log_info)
except Exception:
msg = _LE("Problem doing AXFR %(name)s from %(host)s. "
"Trying next server.")
LOG.exception(msg % log_info)
finally:
to.cancel()
continue
else:
msg = _LE("XFR failed for %(name)s. No servers in %(servers)s was "
"reached.")
raise exceptions.XFRFailure(
msg % {"name": zone_name, "servers": servers})
LOG.debug("AXFR Successful for %s" % raw_zone.origin.to_text())
return raw_zone
| tonyli71/designate | designate/dnsutils.py | Python | apache-2.0 | 13,265 |
#!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines input readers for MapReduce."""
__all__ = [
"AbstractDatastoreInputReader",
"ALLOW_CHECKPOINT",
"BadReaderParamsError",
"BlobstoreLineInputReader",
"BlobstoreZipInputReader",
"BlobstoreZipLineInputReader",
"COUNTER_IO_READ_BYTES",
"COUNTER_IO_READ_MSEC",
"DatastoreEntityInputReader",
"DatastoreInputReader",
"DatastoreKeyInputReader",
"GoogleCloudStorageInputReader",
"GoogleCloudStorageRecordInputReader",
"RandomStringInputReader",
"RawDatastoreInputReader",
"Error",
"InputReader",
"LogInputReader",
"NamespaceInputReader",
"GoogleCloudStorageLineInputReader",
"GoogleCloudStorageZipInputReader",
"GoogleCloudStorageZipLineInputReader"
]
# pylint: disable=protected-access
import base64
import copy
import logging
import pickle
import random
import string
import StringIO
import time
import zipfile
from google.net.proto import ProtocolBuffer
from google.appengine.ext import ndb
from google.appengine.api import datastore
from google.appengine.api import logservice
from google.appengine.api.logservice import log_service_pb
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import key_range
from google.appengine.ext.db import metadata
from mapreduce import context
from mapreduce import datastore_range_iterators as db_iters
from mapreduce import errors
from mapreduce import json_util
from mapreduce import key_ranges
from mapreduce import kv_pb
from mapreduce import model
from mapreduce import namespace_range
from mapreduce import operation
from mapreduce import property_range
from mapreduce import records
from mapreduce import util
# TODO(user): Cleanup imports if/when cloudstorage becomes part of runtime.
try:
# Check if the full cloudstorage package exists. The stub part is in runtime.
cloudstorage = None
import cloudstorage
if hasattr(cloudstorage, "_STUB"):
cloudstorage = None
except ImportError:
pass # CloudStorage library not available
# Attempt to load cloudstorage from the bundle (available in some tests)
if cloudstorage is None:
try:
import cloudstorage
except ImportError:
pass # CloudStorage library really not available
# Classes moved to errors module. Copied here for compatibility.
Error = errors.Error
BadReaderParamsError = errors.BadReaderParamsError
# Counter name for number of bytes read.
COUNTER_IO_READ_BYTES = "io-read-bytes"
# Counter name for milliseconds spent reading data.
COUNTER_IO_READ_MSEC = "io-read-msec"
# Special value that can be yielded by InputReaders if they want to give the
# framework an opportunity to save the state of the mapreduce without having
# to yield an actual value to the handler.
ALLOW_CHECKPOINT = object()
"""
InputReader's lifecycle is the following:
0) validate called to validate mapper specification.
1) split_input splits the input for each shard.
2) __init__ is called for each shard. It takes the input, including ranges,
sent by the split_input.
3) from_json()/to_json() are used to persist writer's state across
multiple slices.
4) __str__ is the string representation of the reader.
5) next is called to send one piece of data to the user defined mapper.
It will continue to return data until it reaches the end of the range
specified in the split_input
"""
class InputReader(json_util.JsonMixin):
"""Abstract base class for input readers.
InputReaders have the following properties:
* They are created by using the split_input method to generate a set of
InputReaders from a MapperSpec.
* They generate inputs to the mapper via the iterator interface.
* After creation, they can be serialized and resumed using the JsonMixin
interface.
* They are cast to string for a user-readable description; it may be
valuable to implement __str__.
"""
# When expand_parameters is False, then value yielded by reader is passed
# to handler as is. If it's true, then *value is passed, expanding arguments
# and letting handler be a multi-parameter function.
expand_parameters = False
# Mapreduce parameters.
_APP_PARAM = "_app"
NAMESPACE_PARAM = "namespace"
NAMESPACES_PARAM = "namespaces" # Obsolete.
def __iter__(self):
return self
def next(self):
"""Returns the next input from this input reader as a key, value pair.
Returns:
The next input from this input reader.
"""
raise NotImplementedError("next() not implemented in %s" % self.__class__)
@classmethod
def from_json(cls, input_shard_state):
"""Creates an instance of the InputReader for the given input shard state.
Args:
input_shard_state: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
raise NotImplementedError("from_json() not implemented in %s" % cls)
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
raise NotImplementedError("to_json() not implemented in %s" %
self.__class__)
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of input readers.
This method creates a list of input readers, each for one shard.
It attempts to split inputs among readers evenly.
Args:
mapper_spec: model.MapperSpec specifies the inputs and additional
parameters to define the behavior of input readers.
Returns:
A list of InputReaders. None or [] when no input data can be found.
"""
raise NotImplementedError("split_input() not implemented in %s" % cls)
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Input reader parameters are expected to be passed as "input_reader"
subdictionary in mapper_spec.params.
The pre-1.6.4 API mixes input reader parameters with all other parameters. To
stay compatible, input readers check mapper_spec.params as well and issue a
warning if the "input_reader" subdictionary is not present.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
def _get_params(mapper_spec, allowed_keys=None, allow_old=True):
"""Obtain input reader parameters.
Utility function for input readers implementation. Fetches parameters
from mapreduce specification giving appropriate usage warnings.
Args:
mapper_spec: The MapperSpec for the job
allowed_keys: set of all allowed keys in parameters as strings. If it is not
None, then parameters are expected to be in a separate "input_reader"
subdictionary of mapper_spec parameters.
allow_old: Allow parameters to exist outside of the input_reader
subdictionary for compatibility.
Returns:
mapper parameters as dict
Raises:
BadReaderParamsError: if parameters are invalid/missing or not allowed.
"""
if "input_reader" not in mapper_spec.params:
message = ("Input reader's parameters should be specified in "
"input_reader subdictionary.")
if not allow_old or allowed_keys:
raise errors.BadReaderParamsError(message)
params = mapper_spec.params
params = dict((str(n), v) for n, v in params.iteritems())
else:
if not isinstance(mapper_spec.params.get("input_reader"), dict):
raise errors.BadReaderParamsError(
"Input reader parameters should be a dictionary")
params = mapper_spec.params.get("input_reader")
params = dict((str(n), v) for n, v in params.iteritems())
if allowed_keys:
params_diff = set(params.keys()) - allowed_keys
if params_diff:
raise errors.BadReaderParamsError(
"Invalid input_reader parameters: %s" % ",".join(params_diff))
return params
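# Illustrative sketch (not part of this module): the two parameter layouts
# accepted by _get_params. `_DummySpec` is a hypothetical stand-in for
# model.MapperSpec used purely for illustration.
def _example_get_params_layouts():
  """Shows the legacy flat layout vs. the "input_reader" subdictionary."""
  class _DummySpec(object):
    def __init__(self, params):
      self.params = params
  # Legacy layout: reader parameters mixed with all other mapper parameters.
  legacy = _DummySpec({"entity_kind": "Foo", "batch_size": 10})
  # Preferred layout: reader parameters nested under "input_reader".
  nested = _DummySpec({"input_reader": {"entity_kind": "Foo", "batch_size": 10}})
  return _get_params(legacy), _get_params(nested)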
class AbstractDatastoreInputReader(InputReader):
"""Abstract class for datastore input readers."""
# Number of entities to fetch at once while doing scanning.
_BATCH_SIZE = 50
# Maximum number of shards we'll create.
_MAX_SHARD_COUNT = 256
# The maximum number of namespaces that will be sharded by datastore key
# before switching to a strategy where sharding is done lexicographically by
# namespace.
MAX_NAMESPACES_FOR_KEY_SHARD = 10
# reader parameters.
ENTITY_KIND_PARAM = "entity_kind"
KEYS_ONLY_PARAM = "keys_only"
BATCH_SIZE_PARAM = "batch_size"
KEY_RANGE_PARAM = "key_range"
FILTERS_PARAM = "filters"
_KEY_RANGE_ITER_CLS = db_iters.AbstractKeyRangeIterator
def __init__(self, iterator):
"""Create new DatastoreInputReader object.
This is internal constructor. Use split_input to create readers instead.
Args:
iterator: an iterator that generates objects for this input reader.
"""
self._iter = iterator
def __iter__(self):
"""Yields whatever internal iterator yields."""
for o in self._iter:
yield o
def __str__(self):
"""Returns the string representation of this InputReader."""
return repr(self._iter)
def to_json(self):
"""Serializes input reader to json compatible format.
Returns:
all the data in json-compatible map.
"""
return self._iter.to_json()
@classmethod
def from_json(cls, json):
"""Create new DatastoreInputReader from json, encoded by to_json.
Args:
json: json representation of DatastoreInputReader.
Returns:
an instance of DatastoreInputReader with all data deserialized from json.
"""
return cls(db_iters.RangeIteratorFactory.from_json(json))
@classmethod
def _get_query_spec(cls, mapper_spec):
"""Construct a model.QuerySpec from model.MapperSpec."""
params = _get_params(mapper_spec)
entity_kind = params[cls.ENTITY_KIND_PARAM]
filters = params.get(cls.FILTERS_PARAM)
app = params.get(cls._APP_PARAM)
ns = params.get(cls.NAMESPACE_PARAM)
return model.QuerySpec(
entity_kind=cls._get_raw_entity_kind(entity_kind),
keys_only=bool(params.get(cls.KEYS_ONLY_PARAM, False)),
filters=filters,
batch_size=int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE)),
model_class_path=entity_kind,
app=app,
ns=ns)
@classmethod
def split_input(cls, mapper_spec):
"""Inherit doc."""
shard_count = mapper_spec.shard_count
query_spec = cls._get_query_spec(mapper_spec)
namespaces = None
if query_spec.ns is not None:
k_ranges = cls._to_key_ranges_by_shard(
query_spec.app, [query_spec.ns], shard_count, query_spec)
else:
ns_keys = namespace_range.get_namespace_keys(
query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD + 1)
# No namespace keys means the app may have some data but that data is not
# visible yet. Just return.
if not ns_keys:
return
# If the number of ns is small, we shard each ns by key and assign each
# shard a piece of a ns.
elif len(ns_keys) <= cls.MAX_NAMESPACES_FOR_KEY_SHARD:
namespaces = [ns_key.name() or "" for ns_key in ns_keys]
k_ranges = cls._to_key_ranges_by_shard(
query_spec.app, namespaces, shard_count, query_spec)
# When number of ns is large, we can only split lexicographically by ns.
else:
ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,
contiguous=False,
can_query=lambda: True,
_app=query_spec.app)
k_ranges = [key_ranges.KeyRangesFactory.create_from_ns_range(ns_range)
for ns_range in ns_ranges]
iters = [db_iters.RangeIteratorFactory.create_key_ranges_iterator(
r, query_spec, cls._KEY_RANGE_ITER_CLS) for r in k_ranges]
return [cls(i) for i in iters]
@classmethod
def _to_key_ranges_by_shard(cls, app, namespaces, shard_count, query_spec):
"""Get a list of key_ranges.KeyRanges objects, one for each shard.
This method uses scatter index to split each namespace into pieces
and assign those pieces to shards.
Args:
app: app_id in str.
namespaces: a list of namespaces in str.
shard_count: number of shards to split.
query_spec: model.QuerySpec.
Returns:
a list of key_ranges.KeyRanges objects.
"""
key_ranges_by_ns = []
# Split each ns into n splits. If a ns doesn't have enough scatter to
# split into n, the last few splits are None.
for namespace in namespaces:
ranges = cls._split_ns_by_scatter(
shard_count,
namespace,
query_spec.entity_kind,
app)
# The nth split of each ns will be assigned to the nth shard.
# Shuffle so that Nones are not all at the end.
random.shuffle(ranges)
key_ranges_by_ns.append(ranges)
# KeyRanges from different namespaces might be very different in size.
# Use round robin to make sure each shard can have at most one split
# or a None from a ns.
ranges_by_shard = [[] for _ in range(shard_count)]
for ranges in key_ranges_by_ns:
for i, k_range in enumerate(ranges):
if k_range:
ranges_by_shard[i].append(k_range)
key_ranges_by_shard = []
for ranges in ranges_by_shard:
if ranges:
key_ranges_by_shard.append(key_ranges.KeyRangesFactory.create_from_list(
ranges))
return key_ranges_by_shard
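# Illustrative sketch: with namespaces ns1 and ns2 and shard_count=3,
# _split_ns_by_scatter returns 3 splits per namespace (some possibly None).
# After shuffling, shard i receives the i-th split of ns1 and the i-th split
# of ns2, so every shard holds at most one split from each namespace.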
@classmethod
def _split_ns_by_scatter(cls,
shard_count,
namespace,
raw_entity_kind,
app):
"""Split a namespace by scatter index into key_range.KeyRange.
TODO(user): Power this with key_range.KeyRange.compute_split_points.
Args:
shard_count: number of shards.
namespace: namespace name to split. str.
raw_entity_kind: low level datastore API entity kind.
app: app id in str.
Returns:
A list of key_range.KeyRange objects. If there are not enough entities to
split into the requested shards, the returned list will contain KeyRanges
ordered lexicographically with any Nones appearing at the end.
"""
if shard_count == 1:
# With one shard we don't need to calculate any split points at all.
return [key_range.KeyRange(namespace=namespace, _app=app)]
ds_query = datastore.Query(kind=raw_entity_kind,
namespace=namespace,
_app=app,
keys_only=True)
ds_query.Order("__scatter__")
oversampling_factor = 32
random_keys = ds_query.Get(shard_count * oversampling_factor)
if not random_keys:
# There are no entities with scatter property. We have no idea
# how to split.
return ([key_range.KeyRange(namespace=namespace, _app=app)] +
[None] * (shard_count - 1))
random_keys.sort()
if len(random_keys) >= shard_count:
# We've got a lot of scatter values. Sample them down.
random_keys = cls._choose_split_points(random_keys, shard_count)
k_ranges = []
k_ranges.append(key_range.KeyRange(
key_start=None,
key_end=random_keys[0],
direction=key_range.KeyRange.ASC,
include_start=False,
include_end=False,
namespace=namespace,
_app=app))
for i in range(0, len(random_keys) - 1):
k_ranges.append(key_range.KeyRange(
key_start=random_keys[i],
key_end=random_keys[i + 1],
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
k_ranges.append(key_range.KeyRange(
key_start=random_keys[-1],
key_end=None,
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
if len(k_ranges) < shard_count:
# We need to have as many shards as it was requested. Add some Nones.
k_ranges += [None] * (shard_count - len(k_ranges))
return k_ranges
@classmethod
def _choose_split_points(cls, sorted_keys, shard_count):
"""Returns the best split points given a random set of datastore.Keys."""
assert len(sorted_keys) >= shard_count
index_stride = len(sorted_keys) / float(shard_count)
return [sorted_keys[int(round(index_stride * i))]
for i in range(1, shard_count)]
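# Illustrative sketch: with 9 sorted scatter keys and shard_count=3,
# index_stride is 3.0, so the keys at indexes round(3.0 * 1) = 3 and
# round(3.0 * 2) = 6 are chosen, i.e. 2 split points dividing the keys into
# 3 roughly equal ranges.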
@classmethod
def validate(cls, mapper_spec):
"""Inherit docs."""
params = _get_params(mapper_spec)
if cls.ENTITY_KIND_PARAM not in params:
raise BadReaderParamsError("Missing input reader parameter 'entity_kind'")
if cls.BATCH_SIZE_PARAM in params:
try:
batch_size = int(params[cls.BATCH_SIZE_PARAM])
if batch_size < 1:
raise BadReaderParamsError("Bad batch size: %s" % batch_size)
except ValueError, e:
raise BadReaderParamsError("Bad batch size: %s" % e)
try:
bool(params.get(cls.KEYS_ONLY_PARAM, False))
except:
raise BadReaderParamsError("keys_only expects a boolean value but got %s",
params[cls.KEYS_ONLY_PARAM])
if cls.NAMESPACE_PARAM in params:
if not isinstance(params[cls.NAMESPACE_PARAM],
(str, unicode, type(None))):
raise BadReaderParamsError(
"Expected a single namespace string")
if cls.NAMESPACES_PARAM in params:
raise BadReaderParamsError("Multiple namespaces are no longer supported")
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
if not isinstance(filters, list):
raise BadReaderParamsError("Expected list for filters parameter")
for f in filters:
if not isinstance(f, (tuple, list)):
raise BadReaderParamsError("Filter should be a tuple or list: %s", f)
if len(f) != 3:
raise BadReaderParamsError("Filter should be a 3-tuple: %s", f)
prop, op, _ = f
if not isinstance(prop, basestring):
raise BadReaderParamsError("Property should be string: %s", prop)
if not isinstance(op, basestring):
raise BadReaderParamsError("Operator should be string: %s", op)
@classmethod
def _get_raw_entity_kind(cls, entity_kind_or_model_classpath):
"""Returns the entity kind to use with low level datastore calls.
Args:
entity_kind_or_model_classpath: user specified entity kind or model
classpath.
Returns:
the entity kind in str to use with low level datastore calls.
"""
return entity_kind_or_model_classpath
class RawDatastoreInputReader(AbstractDatastoreInputReader):
"""Iterates over an entity kind and yields datastore.Entity."""
_KEY_RANGE_ITER_CLS = db_iters.KeyRangeEntityIterator
@classmethod
def validate(cls, mapper_spec):
"""Inherit docs."""
super(RawDatastoreInputReader, cls).validate(mapper_spec)
params = _get_params(mapper_spec)
entity_kind = params[cls.ENTITY_KIND_PARAM]
if "." in entity_kind:
logging.warning(
". detected in entity kind %s specified for reader %s."
"Assuming entity kind contains the dot.",
entity_kind, cls.__name__)
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
for f in filters:
if f[1] != "=":
raise BadReaderParamsError(
"Only equality filters are supported: %s", f)
class DatastoreInputReader(AbstractDatastoreInputReader):
"""Iterates over a Model and yields model instances.
Supports both db.model and ndb.model.
"""
_KEY_RANGE_ITER_CLS = db_iters.KeyRangeModelIterator
@classmethod
def _get_raw_entity_kind(cls, model_classpath):
entity_type = util.for_name(model_classpath)
if isinstance(entity_type, db.Model):
return entity_type.kind()
elif isinstance(entity_type, (ndb.Model, ndb.MetaModel)):
# pylint: disable=protected-access
return entity_type._get_kind()
else:
return util.get_short_name(model_classpath)
@classmethod
def validate(cls, mapper_spec):
"""Inherit docs."""
super(DatastoreInputReader, cls).validate(mapper_spec)
params = _get_params(mapper_spec)
entity_kind = params[cls.ENTITY_KIND_PARAM]
# Fail fast if Model cannot be located.
try:
model_class = util.for_name(entity_kind)
except ImportError, e:
raise BadReaderParamsError("Bad entity kind: %s" % e)
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
if issubclass(model_class, db.Model):
cls._validate_filters(filters, model_class)
else:
cls._validate_filters_ndb(filters, model_class)
property_range.PropertyRange(filters, entity_kind)
@classmethod
def _validate_filters(cls, filters, model_class):
"""Validate user supplied filters.
Validate filters are on existing properties and filter values
have valid semantics.
Args:
filters: user supplied filters. Each filter should be a list or tuple of
format (<property_name_as_str>, <query_operator_as_str>,
<value_of_certain_type>). Value type is up to the property's type.
model_class: the db.Model class for the entity type to apply filters on.
Raises:
BadReaderParamsError: if any filter is invalid in any way.
"""
if not filters:
return
properties = model_class.properties()
for f in filters:
prop, _, val = f
if prop not in properties:
raise errors.BadReaderParamsError(
"Property %s is not defined for entity type %s",
prop, model_class.kind())
# Validate the value of each filter. We need to know filters have
# valid value to carry out splits.
try:
properties[prop].validate(val)
except db.BadValueError, e:
raise errors.BadReaderParamsError(e)
@classmethod
# pylint: disable=protected-access
def _validate_filters_ndb(cls, filters, model_class):
"""Validate ndb.Model filters."""
if not filters:
return
properties = model_class._properties
for f in filters:
prop, _, val = f
if prop not in properties:
raise errors.BadReaderParamsError(
"Property %s is not defined for entity type %s",
prop, model_class._get_kind())
# Validate the value of each filter. We need to know filters have
# valid value to carry out splits.
try:
properties[prop]._do_validate(val)
except db.BadValueError, e:
raise errors.BadReaderParamsError(e)
@classmethod
def split_input(cls, mapper_spec):
"""Inherit docs."""
shard_count = mapper_spec.shard_count
query_spec = cls._get_query_spec(mapper_spec)
if not property_range.should_shard_by_property_range(query_spec.filters):
return super(DatastoreInputReader, cls).split_input(mapper_spec)
p_range = property_range.PropertyRange(query_spec.filters,
query_spec.model_class_path)
p_ranges = p_range.split(shard_count)
# User specified a namespace.
if query_spec.ns:
ns_range = namespace_range.NamespaceRange(
namespace_start=query_spec.ns,
namespace_end=query_spec.ns,
_app=query_spec.app)
ns_ranges = [copy.copy(ns_range) for _ in p_ranges]
else:
ns_keys = namespace_range.get_namespace_keys(
query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD + 1)
if not ns_keys:
return
# User doesn't specify ns but the number of ns is small.
# We still split by property range.
if len(ns_keys) <= cls.MAX_NAMESPACES_FOR_KEY_SHARD:
ns_ranges = [namespace_range.NamespaceRange(_app=query_spec.app)
for _ in p_ranges]
# Lots of namespaces. Split by ns.
else:
ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,
contiguous=False,
can_query=lambda: True,
_app=query_spec.app)
p_ranges = [copy.copy(p_range) for _ in ns_ranges]
assert len(p_ranges) == len(ns_ranges)
iters = [
db_iters.RangeIteratorFactory.create_property_range_iterator(
p, ns, query_spec) for p, ns in zip(p_ranges, ns_ranges)]
return [cls(i) for i in iters]
class DatastoreKeyInputReader(RawDatastoreInputReader):
"""Iterate over an entity kind and yields datastore.Key."""
_KEY_RANGE_ITER_CLS = db_iters.KeyRangeKeyIterator
# For backward compatibility.
DatastoreEntityInputReader = RawDatastoreInputReader
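# Illustrative sketch (not part of this module): typical parameters for the
# datastore readers above. The model path "main.models.Guestbook" and the
# filter values are hypothetical examples.
_EXAMPLE_DATASTORE_READER_PARAMS = {
    "input_reader": {
        "entity_kind": "main.models.Guestbook",  # Model classpath (or raw kind).
        "batch_size": 50,
        "filters": [("author", "=", "alice")],   # (property, operator, value).
    },
}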
# TODO(user): Remove this after the only dependency GroomerMarkReader is
class _OldAbstractDatastoreInputReader(InputReader):
"""Abstract base class for classes that iterate over datastore entities.
Concrete subclasses must implement _iter_key_range(self, k_range). See the
docstring for that method for details.
"""
# Number of entities to fetch at once while doing scanning.
_BATCH_SIZE = 50
# Maximum number of shards we'll create.
_MAX_SHARD_COUNT = 256
# __scatter__ oversampling factor
_OVERSAMPLING_FACTOR = 32
# The maximum number of namespaces that will be sharded by datastore key
# before switching to a strategy where sharding is done lexographically by
# namespace.
MAX_NAMESPACES_FOR_KEY_SHARD = 10
# Mapreduce parameters.
ENTITY_KIND_PARAM = "entity_kind"
KEYS_ONLY_PARAM = "keys_only"
BATCH_SIZE_PARAM = "batch_size"
KEY_RANGE_PARAM = "key_range"
NAMESPACE_RANGE_PARAM = "namespace_range"
CURRENT_KEY_RANGE_PARAM = "current_key_range"
FILTERS_PARAM = "filters"
# TODO(user): Add support for arbitrary queries. It's not possible to
# support them without cursors since right now you can't even serialize query
# definition.
# pylint: disable=redefined-outer-name
def __init__(self,
entity_kind,
key_ranges=None,
ns_range=None,
batch_size=_BATCH_SIZE,
current_key_range=None,
filters=None):
"""Create new AbstractDatastoreInputReader object.
This is internal constructor. Use split_query in a concrete class instead.
Args:
entity_kind: entity kind as string.
key_ranges: a sequence of key_range.KeyRange instances to process. Only
one of key_ranges or ns_range can be non-None.
ns_range: a namespace_range.NamespaceRange to process. Only one of
key_ranges or ns_range can be non-None.
batch_size: size of read batch as int.
current_key_range: the current key_range.KeyRange being processed.
filters: optional list of filters to apply to the query. Each filter is
a tuple: (<property_name_as_str>, <query_operation_as_str>, <value>).
User filters are applied first.
"""
assert key_ranges is not None or ns_range is not None, (
"must specify one of 'key_ranges' or 'ns_range'")
assert key_ranges is None or ns_range is None, (
"can't specify both 'key_ranges ' and 'ns_range'")
self._entity_kind = entity_kind
# Reverse the KeyRanges so they can be processed in order as a stack of
# work items.
self._key_ranges = key_ranges and list(reversed(key_ranges))
self._ns_range = ns_range
self._batch_size = int(batch_size)
self._current_key_range = current_key_range
self._filters = filters
@classmethod
def _get_raw_entity_kind(cls, entity_kind):
if "." in entity_kind:
logging.warning(
". detected in entity kind %s specified for reader %s."
"Assuming entity kind contains the dot.",
entity_kind, cls.__name__)
return entity_kind
def __iter__(self):
"""Iterates over the given KeyRanges or NamespaceRange.
This method iterates over the given KeyRanges or NamespaceRange and sets
the self._current_key_range to the KeyRange currently being processed. It
then delegates to the _iter_key_range method to yield the actual
results.
Yields:
Forwards the objects yielded by the subclasses concrete _iter_key_range()
method. The caller must consume the result yielded because self.to_json()
will not include it.
"""
if self._key_ranges is not None:
for o in self._iter_key_ranges():
yield o
elif self._ns_range is not None:
for o in self._iter_ns_range():
yield o
else:
assert False, "self._key_ranges and self._ns_range are both None"
def _iter_key_ranges(self):
"""Iterates over self._key_ranges, delegating to self._iter_key_range()."""
while True:
if self._current_key_range is None:
if self._key_ranges:
self._current_key_range = self._key_ranges.pop()
# The most recently popped key_range may be None, so continue here
# to find the next keyrange that's valid.
continue
else:
break
for key, o in self._iter_key_range(
copy.deepcopy(self._current_key_range)):
# The caller must consume yielded values so advancing the KeyRange
# before yielding is safe.
self._current_key_range.advance(key)
yield o
self._current_key_range = None
def _iter_ns_range(self):
"""Iterates over self._ns_range, delegating to self._iter_key_range()."""
while True:
if self._current_key_range is None:
query = self._ns_range.make_datastore_query()
namespace_result = query.Get(1)
if not namespace_result:
break
namespace = namespace_result[0].name() or ""
self._current_key_range = key_range.KeyRange(
namespace=namespace, _app=self._ns_range.app)
yield ALLOW_CHECKPOINT
for key, o in self._iter_key_range(
copy.deepcopy(self._current_key_range)):
# The caller must consume yielded values so advancing the KeyRange
# before yielding is safe.
self._current_key_range.advance(key)
yield o
if (self._ns_range.is_single_namespace or
self._current_key_range.namespace == self._ns_range.namespace_end):
break
self._ns_range = self._ns_range.with_start_after(
self._current_key_range.namespace)
self._current_key_range = None
def _iter_key_range(self, k_range):
"""Yields a db.Key and the value that should be yielded by self.__iter__().
Args:
k_range: The key_range.KeyRange to iterate over.
Yields:
A 2-tuple containing the last db.Key processed and the value that should
be yielded by __iter__. The returned db.Key will be used to determine the
InputReader's current position in self._current_key_range.
"""
raise NotImplementedError("_iter_key_range() not implemented in %s" %
self.__class__)
def __str__(self):
"""Returns the string representation of this InputReader."""
if self._ns_range is None:
return repr(self._key_ranges)
else:
return repr(self._ns_range)
@classmethod
def _choose_split_points(cls, sorted_keys, shard_count):
"""Returns the best split points given a random set of db.Keys."""
assert len(sorted_keys) >= shard_count
index_stride = len(sorted_keys) / float(shard_count)
return [sorted_keys[int(round(index_stride * i))]
for i in range(1, shard_count)]
# TODO(user): use query splitting functionality when it becomes available
# instead.
@classmethod
def _split_input_from_namespace(cls, app, namespace, entity_kind,
shard_count):
"""Helper for _split_input_from_params.
If there are not enough Entities to make all of the given shards, the
returned list of KeyRanges will include Nones. The returned list will
contain KeyRanges ordered lexographically with any Nones appearing at the
end.
Args:
app: the app.
namespace: the namespace.
entity_kind: entity kind as string.
shard_count: the number of shards.
Returns:
A list of KeyRange objects, possibly padded with Nones.
"""
raw_entity_kind = cls._get_raw_entity_kind(entity_kind)
if shard_count == 1:
# With one shard we don't need to calculate any split points at all.
return [key_range.KeyRange(namespace=namespace, _app=app)]
ds_query = datastore.Query(kind=raw_entity_kind,
namespace=namespace,
_app=app,
keys_only=True)
ds_query.Order("__scatter__")
random_keys = ds_query.Get(shard_count * cls._OVERSAMPLING_FACTOR)
if not random_keys:
# There are no entities with scatter property. We have no idea
# how to split.
return ([key_range.KeyRange(namespace=namespace, _app=app)] +
[None] * (shard_count - 1))
random_keys.sort()
if len(random_keys) >= shard_count:
# We've got a lot of scatter values. Sample them down.
random_keys = cls._choose_split_points(random_keys, shard_count)
# pylint: disable=redefined-outer-name
key_ranges = []
key_ranges.append(key_range.KeyRange(
key_start=None,
key_end=random_keys[0],
direction=key_range.KeyRange.ASC,
include_start=False,
include_end=False,
namespace=namespace,
_app=app))
for i in range(0, len(random_keys) - 1):
key_ranges.append(key_range.KeyRange(
key_start=random_keys[i],
key_end=random_keys[i + 1],
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
key_ranges.append(key_range.KeyRange(
key_start=random_keys[-1],
key_end=None,
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
if len(key_ranges) < shard_count:
# We need to have as many shards as it was requested. Add some Nones.
key_ranges += [None] * (shard_count - len(key_ranges))
return key_ranges
@classmethod
def _split_input_from_params(cls, app, namespaces, entity_kind_name,
params, shard_count):
"""Return input reader objects. Helper for split_input."""
# pylint: disable=redefined-outer-name
key_ranges = [] # KeyRanges for all namespaces
for namespace in namespaces:
key_ranges.extend(
cls._split_input_from_namespace(app,
namespace,
entity_kind_name,
shard_count))
# Divide the KeyRanges into shard_count shards. The KeyRanges for different
# namespaces might be very different in size so the assignment of KeyRanges
# to shards is done round-robin.
shared_ranges = [[] for _ in range(shard_count)]
for i, k_range in enumerate(key_ranges):
shared_ranges[i % shard_count].append(k_range)
batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
return [cls(entity_kind_name,
key_ranges=key_ranges,
ns_range=None,
batch_size=batch_size)
for key_ranges in shared_ranges if key_ranges]
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = _get_params(mapper_spec)
if cls.ENTITY_KIND_PARAM not in params:
raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")
if cls.BATCH_SIZE_PARAM in params:
try:
batch_size = int(params[cls.BATCH_SIZE_PARAM])
if batch_size < 1:
raise BadReaderParamsError("Bad batch size: %s" % batch_size)
except ValueError, e:
raise BadReaderParamsError("Bad batch size: %s" % e)
if cls.NAMESPACE_PARAM in params:
if not isinstance(params[cls.NAMESPACE_PARAM],
(str, unicode, type(None))):
raise BadReaderParamsError(
"Expected a single namespace string")
if cls.NAMESPACES_PARAM in params:
raise BadReaderParamsError("Multiple namespaces are no longer supported")
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
if not isinstance(filters, list):
raise BadReaderParamsError("Expected list for filters parameter")
for f in filters:
if not isinstance(f, (tuple, list)):
raise BadReaderParamsError("Filter should be a tuple or list: %s", f)
if len(f) != 3:
raise BadReaderParamsError("Filter should be a 3-tuple: %s", f)
if not isinstance(f[0], basestring):
raise BadReaderParamsError("First element should be string: %s", f)
if f[1] != "=":
raise BadReaderParamsError(
"Only equality filters are supported: %s", f)
@classmethod
def split_input(cls, mapper_spec):
"""Splits query into shards without fetching query results.
Tries as best as it can to split the whole query result set into equal
shards. Due to the difficulty of making a perfect split, the resulting shards'
sizes might differ significantly from each other.
Args:
mapper_spec: MapperSpec with params containing 'entity_kind'.
May have 'namespace' in the params as a string containing a single
namespace. If specified then the input reader will only yield values
in the given namespace. If 'namespace' is not given then values from
all namespaces will be yielded. May also have 'batch_size' in the params
to specify the number of entities to process in each batch.
Returns:
A list of InputReader objects. If the query results are empty then the
empty list will be returned. Otherwise, the list will always have a length
equal to number_of_shards but may be padded with Nones if there are too
few results for effective sharding.
"""
params = _get_params(mapper_spec)
entity_kind_name = params[cls.ENTITY_KIND_PARAM]
batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
shard_count = mapper_spec.shard_count
namespace = params.get(cls.NAMESPACE_PARAM)
app = params.get(cls._APP_PARAM)
filters = params.get(cls.FILTERS_PARAM)
if namespace is None:
# It is difficult to efficiently shard large numbers of namespaces because
# there can be an arbitrary number of them. So the strategy is:
# 1. if there are a small number of namespaces in the datastore then
# generate one KeyRange per namespace per shard and assign each shard a
# KeyRange for every namespace. This should lead to nearly perfect
# sharding.
# 2. if there are a large number of namespaces in the datastore then
# generate one NamespaceRange per worker. This can lead to very bad
# sharding because namespaces can contain very different numbers of
# entities and each NamespaceRange may contain very different numbers
# of namespaces.
namespace_query = datastore.Query("__namespace__",
keys_only=True,
_app=app)
namespace_keys = namespace_query.Get(
limit=cls.MAX_NAMESPACES_FOR_KEY_SHARD + 1)
if len(namespace_keys) > cls.MAX_NAMESPACES_FOR_KEY_SHARD:
ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,
contiguous=True,
_app=app)
return [cls(entity_kind_name,
key_ranges=None,
ns_range=ns_range,
batch_size=batch_size,
filters=filters)
for ns_range in ns_ranges]
elif not namespace_keys:
return [cls(entity_kind_name,
key_ranges=None,
ns_range=namespace_range.NamespaceRange(_app=app),
batch_size=shard_count,
filters=filters)]
else:
namespaces = [namespace_key.name() or ""
for namespace_key in namespace_keys]
else:
namespaces = [namespace]
readers = cls._split_input_from_params(
app, namespaces, entity_kind_name, params, shard_count)
if filters:
for reader in readers:
reader._filters = filters
return readers
def to_json(self):
"""Serializes all the data in this query range into json form.
Returns:
all the data in json-compatible map.
"""
if self._key_ranges is None:
key_ranges_json = None
else:
key_ranges_json = []
for k in self._key_ranges:
if k:
key_ranges_json.append(k.to_json())
else:
key_ranges_json.append(None)
if self._ns_range is None:
namespace_range_json = None
else:
namespace_range_json = self._ns_range.to_json_object()
if self._current_key_range is None:
current_key_range_json = None
else:
current_key_range_json = self._current_key_range.to_json()
json_dict = {self.KEY_RANGE_PARAM: key_ranges_json,
self.NAMESPACE_RANGE_PARAM: namespace_range_json,
self.CURRENT_KEY_RANGE_PARAM: current_key_range_json,
self.ENTITY_KIND_PARAM: self._entity_kind,
self.BATCH_SIZE_PARAM: self._batch_size,
self.FILTERS_PARAM: self._filters}
return json_dict
@classmethod
def from_json(cls, json):
"""Create new DatastoreInputReader from the json, encoded by to_json.
Args:
json: json map representation of DatastoreInputReader.
Returns:
an instance of DatastoreInputReader with all data deserialized from json.
"""
if json[cls.KEY_RANGE_PARAM] is None:
# pylint: disable=redefined-outer-name
key_ranges = None
else:
key_ranges = []
for k in json[cls.KEY_RANGE_PARAM]:
if k:
key_ranges.append(key_range.KeyRange.from_json(k))
else:
key_ranges.append(None)
if json[cls.NAMESPACE_RANGE_PARAM] is None:
ns_range = None
else:
ns_range = namespace_range.NamespaceRange.from_json_object(
json[cls.NAMESPACE_RANGE_PARAM])
if json[cls.CURRENT_KEY_RANGE_PARAM] is None:
current_key_range = None
else:
current_key_range = key_range.KeyRange.from_json(
json[cls.CURRENT_KEY_RANGE_PARAM])
return cls(
json[cls.ENTITY_KIND_PARAM],
key_ranges,
ns_range,
json[cls.BATCH_SIZE_PARAM],
current_key_range,
filters=json.get(cls.FILTERS_PARAM))
class BlobstoreLineInputReader(InputReader):
"""Input reader for a newline delimited blob in Blobstore."""
# TODO(user): Should we set this based on MAX_BLOB_FETCH_SIZE?
_BLOB_BUFFER_SIZE = 64000
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Maximum number of blobs to allow.
_MAX_BLOB_KEYS_COUNT = 246
# Mapreduce parameters.
BLOB_KEYS_PARAM = "blob_keys"
# Serialization parameters.
INITIAL_POSITION_PARAM = "initial_position"
END_POSITION_PARAM = "end_position"
BLOB_KEY_PARAM = "blob_key"
def __init__(self, blob_key, start_position, end_position):
"""Initializes this instance with the given blob key and character range.
This BlobstoreLineInputReader will read from the first record starting
strictly after start_position until the first record ending at or after
end_position (exclusive). As an exception, if start_position is 0, then
this InputReader starts reading at the first record.
Args:
blob_key: the BlobKey that this input reader is processing.
start_position: the position to start reading at.
end_position: a position in the last record to read.
"""
self._blob_key = blob_key
self._blob_reader = blobstore.BlobReader(blob_key,
self._BLOB_BUFFER_SIZE,
start_position)
self._end_position = end_position
self._has_iterated = False
self._read_before_start = bool(start_position)
def next(self):
"""Returns the next input from as an (offset, line) tuple."""
self._has_iterated = True
if self._read_before_start:
self._blob_reader.readline()
self._read_before_start = False
start_position = self._blob_reader.tell()
if start_position > self._end_position:
raise StopIteration()
line = self._blob_reader.readline()
if not line:
raise StopIteration()
return start_position, line.rstrip("\n")
def to_json(self):
"""Returns an json-compatible input shard spec for remaining inputs."""
new_pos = self._blob_reader.tell()
if self._has_iterated:
new_pos -= 1
return {self.BLOB_KEY_PARAM: self._blob_key,
self.INITIAL_POSITION_PARAM: new_pos,
self.END_POSITION_PARAM: self._end_position}
def __str__(self):
"""Returns the string representation of this BlobstoreLineInputReader."""
return "blobstore.BlobKey(%r):[%d, %d]" % (
self._blob_key, self._blob_reader.tell(), self._end_position)
@classmethod
def from_json(cls, json):
"""Instantiates an instance of this InputReader for the given shard spec."""
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.INITIAL_POSITION_PARAM],
json[cls.END_POSITION_PARAM])
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_keys' for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
if not blob_keys:
raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
if not blob_info:
raise BadReaderParamsError("Could not find blobinfo for key %s" %
blob_key)
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of shard_count input_spec_shards for input_spec.
Args:
mapper_spec: The mapper specification to split from. Must contain
'blob_keys' parameter with one or more blob keys.
Returns:
A list of BlobstoreInputReaders corresponding to the specified shards.
"""
params = _get_params(mapper_spec)
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
blob_sizes = {}
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
blob_sizes[blob_key] = blob_info.size
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
shards_per_blob = shard_count // len(blob_keys)
if shards_per_blob == 0:
shards_per_blob = 1
chunks = []
for blob_key, blob_size in blob_sizes.items():
blob_chunk_size = blob_size // shards_per_blob
for i in xrange(shards_per_blob - 1):
chunks.append(BlobstoreLineInputReader.from_json(
{cls.BLOB_KEY_PARAM: blob_key,
cls.INITIAL_POSITION_PARAM: blob_chunk_size * i,
cls.END_POSITION_PARAM: blob_chunk_size * (i + 1)}))
chunks.append(BlobstoreLineInputReader.from_json(
{cls.BLOB_KEY_PARAM: blob_key,
cls.INITIAL_POSITION_PARAM: blob_chunk_size * (shards_per_blob - 1),
cls.END_POSITION_PARAM: blob_size}))
return chunks
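# Illustrative sketch: for a single 1000-byte blob and shard_count=4,
# split_input above produces readers over byte positions 0-250, 250-500,
# 500-750 and 750-1000; each reader except the first skips the partial line
# at its start position, and reads up to and including the line that spans
# its end position.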
class BlobstoreZipInputReader(InputReader):
"""Input reader for files from a zip archive stored in the Blobstore.
Each instance of the reader will read the TOC, from the end of the zip file,
and then only the contained files which it is responsible for.
"""
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Mapreduce parameters.
BLOB_KEY_PARAM = "blob_key"
START_INDEX_PARAM = "start_index"
END_INDEX_PARAM = "end_index"
def __init__(self, blob_key, start_index, end_index,
_reader=blobstore.BlobReader):
"""Initializes this instance with the given blob key and file range.
This BlobstoreZipInputReader will read from the file with index start_index
up to but not including the file with index end_index.
Args:
blob_key: the BlobKey that this input reader is processing.
start_index: the index of the first file to read.
end_index: the index of the first file that will not be read.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
"""
self._blob_key = blob_key
self._start_index = start_index
self._end_index = end_index
self._reader = _reader
self._zip = None
self._entries = None
def next(self):
"""Returns the next input from this input reader as (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file.
"""
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_index:self._end_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
self._start_index += 1
return (entry, lambda: self._read(entry))
def _read(self, entry):
"""Read entry content.
Args:
entry: zip file entry as zipfile.ZipInfo.
Returns:
Entry content as string.
"""
start_time = time.time()
content = self._zip.read(entry.filename)
ctx = context.get()
if ctx:
operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return content
@classmethod
def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.START_INDEX_PARAM],
json[cls.END_INDEX_PARAM])
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_INDEX_PARAM: self._start_index,
self.END_INDEX_PARAM: self._end_index}
def __str__(self):
"""Returns the string representation of this BlobstoreZipInputReader."""
return "blobstore.BlobKey(%r):[%d, %d]" % (
self._blob_key, self._start_index, self._end_index)
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BLOB_KEY_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_key' for mapper input")
blob_key = params[cls.BLOB_KEY_PARAM]
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
if not blob_info:
raise BadReaderParamsError("Could not find blobinfo for key %s" %
blob_key)
@classmethod
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
"""Returns a list of input shard states for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader. Must contain
'blob_key' parameter with one blob key.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
Returns:
A list of InputReaders spanning files within the zip.
"""
params = _get_params(mapper_spec)
blob_key = params[cls.BLOB_KEY_PARAM]
zip_input = zipfile.ZipFile(_reader(blob_key))
zfiles = zip_input.infolist()
total_size = sum(x.file_size for x in zfiles)
num_shards = min(mapper_spec.shard_count, cls._MAX_SHARD_COUNT)
size_per_shard = total_size // num_shards
# Break the list of files into sublists, each of approximately
# size_per_shard bytes.
shard_start_indexes = [0]
current_shard_size = 0
for i, fileinfo in enumerate(zfiles):
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
shard_start_indexes.append(i + 1)
current_shard_size = 0
if shard_start_indexes[-1] != len(zfiles):
shard_start_indexes.append(len(zfiles))
return [cls(blob_key, start_index, end_index, _reader)
for start_index, end_index
in zip(shard_start_indexes, shard_start_indexes[1:])]
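# Illustrative sketch: for a zip whose files are 60, 50, 40, 30 and 20 bytes
# and 2 shards, size_per_shard is 100, so split_input above yields one reader
# over files 0-1 and one over files 2-4 (the running size reaches 110 after
# the second file, which closes the first shard).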
class BlobstoreZipLineInputReader(InputReader):
"""Input reader for newline delimited files in zip archives from Blobstore.
This has the same external interface as the BlobstoreLineInputReader, in that
it takes a list of blobs as its input and yields lines to the mapper.
However, the blobs themselves are expected to be zip archives of line-delimited
files instead of the files themselves.
This is useful because many line-delimited files benefit greatly from compression.
"""
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Maximum number of blobs to allow.
_MAX_BLOB_KEYS_COUNT = 246
# Mapreduce parameters.
BLOB_KEYS_PARAM = "blob_keys"
# Serialization parameters.
BLOB_KEY_PARAM = "blob_key"
START_FILE_INDEX_PARAM = "start_file_index"
END_FILE_INDEX_PARAM = "end_file_index"
OFFSET_PARAM = "offset"
def __init__(self, blob_key, start_file_index, end_file_index, offset,
_reader=blobstore.BlobReader):
"""Initializes this instance with the given blob key and file range.
This BlobstoreZipLineInputReader will read from the file with index
start_file_index up to but not including the file with index end_file_index.
It will return lines starting at offset within file[start_file_index].
Args:
blob_key: the BlobKey that this input reader is processing.
start_file_index: the index of the first file to read within the zip.
end_file_index: the index of the first file that will not be read.
offset: the byte offset within blob_key.zip[start_file_index] to start
reading. The reader will continue to the end of the file.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
"""
self._blob_key = blob_key
self._start_file_index = start_file_index
self._end_file_index = end_file_index
self._initial_offset = offset
self._reader = _reader
self._zip = None
self._entries = None
self._filestream = None
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_keys' for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
if not blob_keys:
raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
if not blob_info:
raise BadReaderParamsError("Could not find blobinfo for key %s" %
blob_key)
@classmethod
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
"""Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader. Must contain
'blob_keys' parameter with one or more blob keys.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
Returns:
A list of InputReaders spanning the subfiles within the blobs.
There will be at least one reader per blob, but it will otherwise
attempt to keep the expanded size even.
"""
params = _get_params(mapper_spec)
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
blob_files = {}
total_size = 0
for blob_key in blob_keys:
zip_input = zipfile.ZipFile(_reader(blob_key))
blob_files[blob_key] = zip_input.infolist()
total_size += sum(x.file_size for x in blob_files[blob_key])
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
# We can break on both blob key and file-within-zip boundaries.
# A shard will span at minimum a single blob key, but may only
# handle a few files within a blob.
size_per_shard = total_size // shard_count
readers = []
for blob_key in blob_keys:
bfiles = blob_files[blob_key]
current_shard_size = 0
start_file_index = 0
next_file_index = 0
for fileinfo in bfiles:
next_file_index += 1
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
readers.append(cls(blob_key, start_file_index, next_file_index, 0,
_reader))
current_shard_size = 0
start_file_index = next_file_index
if current_shard_size != 0:
readers.append(cls(blob_key, start_file_index, next_file_index, 0,
_reader))
return readers
def next(self):
"""Returns the next line from this input reader as (lineinfo, line) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple describes the source, it is itself
a tuple (blobkey, filenumber, byteoffset).
The second element of the tuple is the line found at that offset.
"""
if not self._filestream:
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_file_index:
self._end_file_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
value = self._zip.read(entry.filename)
self._filestream = StringIO.StringIO(value)
if self._initial_offset:
self._filestream.seek(self._initial_offset)
self._filestream.readline()
start_position = self._filestream.tell()
line = self._filestream.readline()
if not line:
# Done with this file in the zip. Move on to the next file.
self._filestream.close()
self._filestream = None
self._start_file_index += 1
self._initial_offset = 0
return self.next()
return ((self._blob_key, self._start_file_index, start_position),
line.rstrip("\n"))
def _next_offset(self):
"""Return the offset of the next line to read."""
if self._filestream:
offset = self._filestream.tell()
if offset:
offset -= 1
else:
offset = self._initial_offset
return offset
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_FILE_INDEX_PARAM: self._start_file_index,
self.END_FILE_INDEX_PARAM: self._end_file_index,
self.OFFSET_PARAM: self._next_offset()}
@classmethod
def from_json(cls, json, _reader=blobstore.BlobReader):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
_reader: For dependency injection.
Returns:
An instance of the InputReader configured using the values of json.
"""
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.START_FILE_INDEX_PARAM],
json[cls.END_FILE_INDEX_PARAM],
json[cls.OFFSET_PARAM],
_reader)
def __str__(self):
"""Returns the string representation of this reader.
Returns:
string blobkey:[start file num, end file num]:current offset.
"""
return "blobstore.BlobKey(%r):[%d, %d]:%d" % (
self._blob_key, self._start_file_index, self._end_file_index,
self._next_offset())
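# Illustrative sketch: this reader yields tuples such as
# (("<blob key>", 3, 127), "a line of text"), i.e. the line starting at byte
# offset 127 inside the file at index 3 of the zip stored under that blob key.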
class RandomStringInputReader(InputReader):
"""RandomStringInputReader generates random strings as output.
Primary usage is to populate output with testing entries.
"""
# Total number of entries this reader should generate.
COUNT = "count"
# Length of the generated strings.
STRING_LENGTH = "string_length"
DEFAULT_STRING_LENGTH = 10
def __init__(self, count, string_length):
"""Initialize input reader.
Args:
count: number of entries this shard should generate.
string_length: the length of generated random strings.
"""
self._count = count
self._string_length = string_length
def __iter__(self):
ctx = context.get()
while self._count:
self._count -= 1
start_time = time.time()
content = "".join(random.choice(string.ascii_lowercase)
for _ in range(self._string_length))
if ctx:
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
yield content
@classmethod
def split_input(cls, mapper_spec):
params = _get_params(mapper_spec)
count = params[cls.COUNT]
string_length = cls.DEFAULT_STRING_LENGTH
if cls.STRING_LENGTH in params:
string_length = params[cls.STRING_LENGTH]
shard_count = mapper_spec.shard_count
count_per_shard = count // shard_count
mr_input_readers = [
cls(count_per_shard, string_length) for _ in range(shard_count)]
left = count - count_per_shard * shard_count
if left > 0:
mr_input_readers.append(cls(left, string_length))
return mr_input_readers
@classmethod
def validate(cls, mapper_spec):
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.COUNT not in params:
raise BadReaderParamsError("Must specify %s" % cls.COUNT)
if not isinstance(params[cls.COUNT], int):
raise BadReaderParamsError("%s should be an int but is %s" %
(cls.COUNT, type(params[cls.COUNT])))
if params[cls.COUNT] <= 0:
raise BadReaderParamsError("%s should be a positive int")
if cls.STRING_LENGTH in params and not (
isinstance(params[cls.STRING_LENGTH], int) and
params[cls.STRING_LENGTH] > 0):
raise BadReaderParamsError("%s should be a positive int but is %s" %
(cls.STRING_LENGTH, params[cls.STRING_LENGTH]))
if (not isinstance(mapper_spec.shard_count, int) or
mapper_spec.shard_count <= 0):
raise BadReaderParamsError(
"shard_count should be a positive int but is %s" %
mapper_spec.shard_count)
@classmethod
def from_json(cls, json):
return cls(json[cls.COUNT], json[cls.STRING_LENGTH])
def to_json(self):
return {self.COUNT: self._count, self.STRING_LENGTH: self._string_length}
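# Illustrative sketch (not part of this module): mirrors the per-shard
# arithmetic used by RandomStringInputReader.split_input above.
def _example_random_string_split(count, shard_count):
  """Returns the number of strings each generated reader would produce."""
  count_per_shard = count // shard_count
  counts = [count_per_shard] * shard_count
  left = count - count_per_shard * shard_count
  if left > 0:
    counts.append(left)
  return counts  # e.g. _example_random_string_split(10, 3) == [3, 3, 3, 1]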
# TODO(user): This reader always produces only one shard, because
# namespace entities use a mix of ids/names, and KeyRange-based splitting
# doesn't work satisfactorily in this case.
# It's possible to implement specific splitting functionality for the reader
# instead of reusing the generic one. Meanwhile, 1 shard is enough for our
# applications.
class NamespaceInputReader(InputReader):
"""An input reader to iterate over namespaces.
This reader yields namespace names as string.
It will always produce only one shard.
"""
NAMESPACE_RANGE_PARAM = "namespace_range"
BATCH_SIZE_PARAM = "batch_size"
_BATCH_SIZE = 10
def __init__(self, ns_range, batch_size=_BATCH_SIZE):
self.ns_range = ns_range
self._batch_size = batch_size
def to_json(self):
"""Serializes all the data in this query range into json form.
Returns:
all the data in json-compatible map.
"""
return {self.NAMESPACE_RANGE_PARAM: self.ns_range.to_json_object(),
self.BATCH_SIZE_PARAM: self._batch_size}
@classmethod
def from_json(cls, json):
"""Create new DatastoreInputReader from the json, encoded by to_json.
Args:
json: json map representation of DatastoreInputReader.
Returns:
an instance of DatastoreInputReader with all data deserialized from json.
"""
return cls(
namespace_range.NamespaceRange.from_json_object(
json[cls.NAMESPACE_RANGE_PARAM]),
json[cls.BATCH_SIZE_PARAM])
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BATCH_SIZE_PARAM in params:
try:
batch_size = int(params[cls.BATCH_SIZE_PARAM])
if batch_size < 1:
raise BadReaderParamsError("Bad batch size: %s" % batch_size)
except ValueError, e:
raise BadReaderParamsError("Bad batch size: %s" % e)
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader.
Returns:
A list of InputReaders.
"""
batch_size = int(_get_params(mapper_spec).get(
cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
shard_count = mapper_spec.shard_count
namespace_ranges = namespace_range.NamespaceRange.split(shard_count,
contiguous=True)
return [NamespaceInputReader(ns_range, batch_size)
for ns_range in namespace_ranges]
def __iter__(self):
while True:
keys = self.ns_range.make_datastore_query().Get(limit=self._batch_size)
if not keys:
break
for key in keys:
namespace = metadata.Namespace.key_to_namespace(key)
self.ns_range = self.ns_range.with_start_after(namespace)
yield namespace
def __str__(self):
return repr(self.ns_range)
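# Illustrative sketch: NamespaceInputReader yields namespace names as plain
# strings, e.g. "" (the default namespace), "tenant-a", "tenant-b", advancing
# its NamespaceRange past each name so iteration can resume from serialized
# state. The tenant names are hypothetical examples.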
class LogInputReader(InputReader):
"""Input reader for a time range of logs via the Logs Reader API.
The number of input shards may be specified by the SHARDS_PARAM mapper
parameter. A starting and ending time (in seconds since the Unix epoch) are
required to generate time ranges over which to shard the input.
"""
# Parameters directly mapping to those available via logservice.fetch().
START_TIME_PARAM = "start_time"
END_TIME_PARAM = "end_time"
MINIMUM_LOG_LEVEL_PARAM = "minimum_log_level"
INCLUDE_INCOMPLETE_PARAM = "include_incomplete"
INCLUDE_APP_LOGS_PARAM = "include_app_logs"
VERSION_IDS_PARAM = "version_ids"
MODULE_VERSIONS_PARAM = "module_versions"
# Semi-hidden parameters used only internally or for privileged applications.
_OFFSET_PARAM = "offset"
_PROTOTYPE_REQUEST_PARAM = "prototype_request"
_PARAMS = frozenset([START_TIME_PARAM, END_TIME_PARAM, _OFFSET_PARAM,
MINIMUM_LOG_LEVEL_PARAM, INCLUDE_INCOMPLETE_PARAM,
INCLUDE_APP_LOGS_PARAM, VERSION_IDS_PARAM,
MODULE_VERSIONS_PARAM, _PROTOTYPE_REQUEST_PARAM])
_KWARGS = frozenset([_OFFSET_PARAM, _PROTOTYPE_REQUEST_PARAM])
def __init__(self,
start_time=None,
end_time=None,
minimum_log_level=None,
include_incomplete=False,
include_app_logs=False,
version_ids=None,
module_versions=None,
**kwargs):
"""Constructor.
Args:
start_time: The earliest request completion or last-update time of logs
that should be mapped over, in seconds since the Unix epoch.
end_time: The latest request completion or last-update time that logs
should be mapped over, in seconds since the Unix epoch.
minimum_log_level: An application log level which serves as a filter on
the requests mapped over--requests with no application log at or above
the specified level will be omitted, even if include_app_logs is False.
include_incomplete: Whether or not to include requests that have started
but not yet finished, as a boolean. Defaults to False.
include_app_logs: Whether or not to include application level logs in the
mapped logs, as a boolean. Defaults to False.
version_ids: A list of version ids whose logs should be read. This cannot
be used with module_versions.
module_versions: A list of tuples containing a module and version id
whose logs should be read. This cannot be used with version_ids.
**kwargs: A dictionary of keywords associated with this input reader.
"""
InputReader.__init__(self) # pylint: disable=non-parent-init-called
# The rule for __params is that its contents will always be suitable as
# input to logservice.fetch().
self.__params = dict(kwargs)
if start_time is not None:
self.__params[self.START_TIME_PARAM] = start_time
if end_time is not None:
self.__params[self.END_TIME_PARAM] = end_time
if minimum_log_level is not None:
self.__params[self.MINIMUM_LOG_LEVEL_PARAM] = minimum_log_level
if include_incomplete is not None:
self.__params[self.INCLUDE_INCOMPLETE_PARAM] = include_incomplete
if include_app_logs is not None:
self.__params[self.INCLUDE_APP_LOGS_PARAM] = include_app_logs
if version_ids:
self.__params[self.VERSION_IDS_PARAM] = version_ids
if module_versions:
self.__params[self.MODULE_VERSIONS_PARAM] = module_versions
# Any submitted prototype_request will be in encoded form.
if self._PROTOTYPE_REQUEST_PARAM in self.__params:
prototype_request = log_service_pb.LogReadRequest(
self.__params[self._PROTOTYPE_REQUEST_PARAM])
self.__params[self._PROTOTYPE_REQUEST_PARAM] = prototype_request
def __iter__(self):
"""Iterates over logs in a given range of time.
Yields:
A RequestLog containing all the information for a single request.
"""
for log in logservice.fetch(**self.__params):
self.__params[self._OFFSET_PARAM] = log.offset
yield log
@classmethod
def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard's state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the given JSON parameters.
"""
# Strip out unrecognized parameters, as introduced by b/5960884.
params = dict((str(k), v) for k, v in json.iteritems()
if k in cls._PARAMS)
# This is not symmetric with to_json() wrt. PROTOTYPE_REQUEST_PARAM because
# the constructor parameters need to be JSON-encodable, so the decoding
# needs to happen there anyways.
if cls._OFFSET_PARAM in params:
params[cls._OFFSET_PARAM] = base64.b64decode(params[cls._OFFSET_PARAM])
return cls(**params)
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A JSON serializable version of the remaining input to read.
"""
params = dict(self.__params) # Shallow copy.
if self._PROTOTYPE_REQUEST_PARAM in params:
prototype_request = params[self._PROTOTYPE_REQUEST_PARAM]
params[self._PROTOTYPE_REQUEST_PARAM] = prototype_request.Encode()
if self._OFFSET_PARAM in params:
params[self._OFFSET_PARAM] = base64.b64encode(params[self._OFFSET_PARAM])
return params
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of input readers for the given input specification.
Args:
mapper_spec: The MapperSpec for this InputReader.
Returns:
A list of InputReaders.
"""
params = _get_params(mapper_spec)
shard_count = mapper_spec.shard_count
# Pick out the overall start and end times and time step per shard.
start_time = params[cls.START_TIME_PARAM]
end_time = params[cls.END_TIME_PARAM]
seconds_per_shard = (end_time - start_time) / shard_count
# Create a LogInputReader for each shard, modulating the params as we go.
shards = []
for _ in xrange(shard_count - 1):
params[cls.END_TIME_PARAM] = (params[cls.START_TIME_PARAM] +
seconds_per_shard)
shards.append(LogInputReader(**params))
params[cls.START_TIME_PARAM] = params[cls.END_TIME_PARAM]
# Create a final shard to complete the time range.
params[cls.END_TIME_PARAM] = end_time
return shards + [LogInputReader(**params)]
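  # Illustrative note (editorial sketch, not part of the original API): with
  # start_time=0, end_time=100 and shard_count=4, the loop above yields
  # readers covering roughly [0, 25), [25, 50), [50, 75) and [75, 100], with
  # the final reader pinned to the exact end_time.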
@classmethod
def validate(cls, mapper_spec):
"""Validates the mapper's specification and all necessary parameters.
Args:
mapper_spec: The MapperSpec to be used with this InputReader.
Raises:
BadReaderParamsError: If the user fails to specify both a starting time
and an ending time, or if the starting time is later than the ending
time.
"""
if mapper_spec.input_reader_class() != cls:
raise errors.BadReaderParamsError("Input reader class mismatch")
params = _get_params(mapper_spec, allowed_keys=cls._PARAMS)
if (cls.VERSION_IDS_PARAM not in params and
cls.MODULE_VERSIONS_PARAM not in params):
raise errors.BadReaderParamsError("Must specify a list of version ids or "
"module/version ids for mapper input")
if (cls.VERSION_IDS_PARAM in params and
cls.MODULE_VERSIONS_PARAM in params):
raise errors.BadReaderParamsError("Can not supply both version ids or "
"module/version ids. Use only one.")
if (cls.START_TIME_PARAM not in params or
params[cls.START_TIME_PARAM] is None):
raise errors.BadReaderParamsError("Must specify a starting time for "
"mapper input")
if cls.END_TIME_PARAM not in params or params[cls.END_TIME_PARAM] is None:
params[cls.END_TIME_PARAM] = time.time()
if params[cls.START_TIME_PARAM] >= params[cls.END_TIME_PARAM]:
raise errors.BadReaderParamsError("The starting time cannot be later "
"than or the same as the ending time.")
if cls._PROTOTYPE_REQUEST_PARAM in params:
try:
params[cls._PROTOTYPE_REQUEST_PARAM] = log_service_pb.LogReadRequest(
params[cls._PROTOTYPE_REQUEST_PARAM])
except (TypeError, ProtocolBuffer.ProtocolBufferDecodeError):
raise errors.BadReaderParamsError("The prototype request must be "
"parseable as a LogReadRequest.")
# Pass the parameters to logservice.fetch() to verify any underlying
# constraints on types or values. This only constructs an iterator, it
# doesn't trigger any requests for actual log records.
try:
logservice.fetch(**params)
except logservice.InvalidArgumentError, e:
raise errors.BadReaderParamsError("One or more parameters are not valid "
"inputs to logservice.fetch(): %s" % e)
def __str__(self):
"""Returns the string representation of this LogInputReader."""
params = []
for key in sorted(self.__params.keys()):
value = self.__params[key]
if key is self._PROTOTYPE_REQUEST_PARAM:
params.append("%s='%s'" % (key, value))
elif key is self._OFFSET_PARAM:
params.append("%s='%s'" % (key, value))
else:
params.append("%s=%s" % (key, value))
return "LogInputReader(%s)" % ", ".join(params)
# pylint: disable=too-many-instance-attributes
class _GoogleCloudStorageInputReader(InputReader):
"""Input reader from Google Cloud Storage using the cloudstorage library.
This class is expected to be subclassed with a reader that understands
user-level records.
Required configuration in the mapper_spec.input_reader dictionary.
    BUCKET_NAME_PARAM: name of the bucket to use (with no extra delimiters or
      suffixes such as directories).
OBJECT_NAMES_PARAM: a list of object names or prefixes. All objects must be
in the BUCKET_NAME_PARAM bucket. If the name ends with a * it will be
      treated as a prefix and all objects with matching names will be read.
Entries should not start with a slash unless that is part of the object's
name. An example list could be:
["my-1st-input-file", "directory/my-2nd-file", "some/other/dir/input-*"]
To retrieve all files "*" will match every object in the bucket. If a file
is listed twice or is covered by multiple prefixes it will be read twice,
there is no deduplication.
Optional configuration in the mapper_sec.input_reader dictionary.
BUFFER_SIZE_PARAM: the size of the read buffer for each file handle.
DELIMITER_PARAM: if specified, turn on the shallow splitting mode.
The delimiter is used as a path separator to designate directory
      hierarchy. Matching of prefixes from OBJECT_NAMES_PARAM
will stop at the first directory instead of matching
      all files under the directory. This allows MR to process a bucket with
hundreds of thousands of files.
FAIL_ON_MISSING_INPUT: if specified and True, the MR will fail if any of
the input files are missing. Missing files will be skipped otherwise.
"""
# Supported parameters
BUCKET_NAME_PARAM = "bucket_name"
OBJECT_NAMES_PARAM = "objects"
BUFFER_SIZE_PARAM = "buffer_size"
DELIMITER_PARAM = "delimiter"
FAIL_ON_MISSING_INPUT = "fail_on_missing_input"
# Internal parameters
_ACCOUNT_ID_PARAM = "account_id"
# Other internal configuration constants
_JSON_PICKLE = "pickle"
_JSON_FAIL_ON_MISSING_INPUT = "fail_on_missing_input"
_STRING_MAX_FILES_LISTED = 10 # Max files shown in the str representation
# Input reader can also take in start and end filenames and do
# listbucket. This saves space but has two cons.
# 1. Files to read are less well defined: files can be added or removed over
# the lifetime of the MR job.
# 2. A shard has to process files from a contiguous namespace.
# May introduce staggering shard.
def __init__(self, filenames, index=0, buffer_size=None, _account_id=None,
delimiter=None):
"""Initialize a GoogleCloudStorageInputReader instance.
Args:
filenames: A list of Google Cloud Storage filenames of the form
'/bucket/objectname'.
index: Index of the next filename to read.
buffer_size: The size of the read buffer, None to use default.
_account_id: Internal use only. See cloudstorage documentation.
delimiter: Delimiter used as path separator. See class doc for details.
"""
self._filenames = filenames
self._index = index
self._buffer_size = buffer_size
self._account_id = _account_id
self._delimiter = delimiter
self._bucket = None
self._bucket_iter = None
# True iff we should fail on missing input (see class doc above). Set to
# None in constructor and overwritten in split_input and from_json.
# fail_on_missing_input is not parameter of the constructor to avoid
# breaking classes inheriting from _GoogleCloudStorageInputReader and
# overriding the constructor.
self._fail_on_missing_input = None
def _next_file(self):
"""Find next filename.
self._filenames may need to be expanded via listbucket.
Returns:
None if no more file is left. Filename otherwise.
"""
while True:
if self._bucket_iter:
try:
return self._bucket_iter.next().filename
except StopIteration:
self._bucket_iter = None
self._bucket = None
if self._index >= len(self._filenames):
return
filename = self._filenames[self._index]
self._index += 1
if self._delimiter is None or not filename.endswith(self._delimiter):
return filename
self._bucket = cloudstorage.listbucket(filename,
delimiter=self._delimiter)
self._bucket_iter = iter(self._bucket)
@classmethod
def get_params(cls, mapper_spec, allowed_keys=None, allow_old=True):
"""Extracts the parameters from the mapper_spec.
    Extends _get_params() by falling back to a bucket_name defined in the
    top-level mapper_spec params when the input_reader params omit it.
    Returns:
      A dictionary with all the mapper parameters.
"""
params = _get_params(mapper_spec, allowed_keys, allow_old)
# Use the bucket_name defined in mapper_spec params if one was not defined
# specifically in the input_reader params.
if (mapper_spec.params.get(cls.BUCKET_NAME_PARAM) is not None and
params.get(cls.BUCKET_NAME_PARAM) is None):
params[cls.BUCKET_NAME_PARAM] = mapper_spec.params[cls.BUCKET_NAME_PARAM]
return params
@classmethod
def validate(cls, mapper_spec):
"""Validate mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec
Raises:
BadReaderParamsError: if the specification is invalid for any reason such
as missing the bucket name or providing an invalid bucket name.
"""
reader_spec = cls.get_params(mapper_spec, allow_old=False)
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in reader_spec:
raise errors.BadReaderParamsError(
"%s is required for Google Cloud Storage" %
cls.BUCKET_NAME_PARAM)
try:
cloudstorage.validate_bucket_name(
reader_spec[cls.BUCKET_NAME_PARAM])
except ValueError, error:
raise errors.BadReaderParamsError("Bad bucket name, %s" % (error))
# Object Name(s) are required
if cls.OBJECT_NAMES_PARAM not in reader_spec:
raise errors.BadReaderParamsError(
"%s is required for Google Cloud Storage" %
cls.OBJECT_NAMES_PARAM)
filenames = reader_spec[cls.OBJECT_NAMES_PARAM]
if not isinstance(filenames, list):
raise errors.BadReaderParamsError(
"Object name list is not a list but a %s" %
filenames.__class__.__name__)
for filename in filenames:
if not isinstance(filename, basestring):
raise errors.BadReaderParamsError(
"Object name is not a string but a %s" %
filename.__class__.__name__)
if cls.DELIMITER_PARAM in reader_spec:
delimiter = reader_spec[cls.DELIMITER_PARAM]
if not isinstance(delimiter, basestring):
raise errors.BadReaderParamsError(
"%s is not a string but a %s" %
(cls.DELIMITER_PARAM, type(delimiter)))
#pylint: disable=too-many-locals
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of input readers.
An equal number of input files are assigned to each shard (+/- 1). If there
are fewer files than shards, fewer than the requested number of shards will
be used. Input files are currently never split (although for some formats
    they could be, and may be split in a future implementation).
Args:
mapper_spec: an instance of model.MapperSpec.
Returns:
A list of InputReaders. None when no input data can be found.
"""
reader_spec = cls.get_params(mapper_spec, allow_old=False)
bucket = reader_spec[cls.BUCKET_NAME_PARAM]
filenames = reader_spec[cls.OBJECT_NAMES_PARAM]
delimiter = reader_spec.get(cls.DELIMITER_PARAM)
account_id = reader_spec.get(cls._ACCOUNT_ID_PARAM)
buffer_size = reader_spec.get(cls.BUFFER_SIZE_PARAM)
fail_on_missing_input = reader_spec.get(cls.FAIL_ON_MISSING_INPUT)
# Gather the complete list of files (expanding wildcards)
all_filenames = []
for filename in filenames:
if filename.endswith("*"):
all_filenames.extend(
[file_stat.filename for file_stat in cloudstorage.listbucket(
"/" + bucket + "/" + filename[:-1], delimiter=delimiter,
_account_id=account_id)])
else:
all_filenames.append("/%s/%s" % (bucket, filename))
# Split into shards
readers = []
for shard in range(0, mapper_spec.shard_count):
shard_filenames = all_filenames[shard::mapper_spec.shard_count]
if shard_filenames:
reader = cls(
shard_filenames, buffer_size=buffer_size, _account_id=account_id,
delimiter=delimiter)
reader._fail_on_missing_input = fail_on_missing_input
readers.append(reader)
return readers
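  # Illustrative note: the slice all_filenames[shard::shard_count] above deals
  # files out round-robin.  For example, with five expanded filenames f0..f4
  # and shard_count=2, shard 0 reads [f0, f2, f4] and shard 1 reads [f1, f3].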
@classmethod
def from_json(cls, state):
obj = pickle.loads(state[cls._JSON_PICKLE])
# fail_on_missing_input might not be set - default to False.
obj._fail_on_missing_input = state.get(
cls._JSON_FAIL_ON_MISSING_INPUT, False)
if obj._bucket:
obj._bucket_iter = iter(obj._bucket)
return obj
def to_json(self):
before_iter = self._bucket_iter
self._bucket_iter = None
try:
return {
self._JSON_PICKLE: pickle.dumps(self),
# self._fail_on_missing_input gets pickled but we save it separately
# and override it in from_json to deal with version flipping.
self._JSON_FAIL_ON_MISSING_INPUT:
getattr(self, "_fail_on_missing_input", False)
}
    finally:
      self._bucket_iter = before_iter
def next(self):
"""Returns the next input from this input reader, a block of bytes.
    Non-existent files will be logged and skipped. The file might have been
removed after input splitting.
Returns:
The next input from this input reader in the form of a cloudstorage
ReadBuffer that supports a File-like interface (read, readline, seek,
tell, and close). An error may be raised if the file can not be opened.
Raises:
StopIteration: The list of files has been exhausted.
"""
options = {}
if self._buffer_size:
options["read_buffer_size"] = self._buffer_size
if self._account_id:
options["_account_id"] = self._account_id
while True:
filename = self._next_file()
if filename is None:
raise StopIteration()
try:
start_time = time.time()
handle = cloudstorage.open(filename, **options)
ctx = context.get()
if ctx:
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return handle
except cloudstorage.NotFoundError:
# Fail the job if we're strict on missing input.
if getattr(self, "_fail_on_missing_input", False):
raise errors.FailJobError(
"File missing in GCS, aborting: %s" % filename)
# Move on otherwise.
logging.warning("File %s may have been removed. Skipping file.",
filename)
def __str__(self):
# Only show a limited number of files individually for readability
num_files = len(self._filenames)
if num_files > self._STRING_MAX_FILES_LISTED:
names = "%s...%s + %d not shown" % (
",".join(self._filenames[0:self._STRING_MAX_FILES_LISTED - 1]),
self._filenames[-1],
num_files - self._STRING_MAX_FILES_LISTED)
else:
names = ",".join(self._filenames)
    if self._index >= num_files:
status = "EOF"
else:
status = "Next %s (%d of %d)" % (
self._filenames[self._index],
self._index + 1, # +1 for human 1-indexing
num_files)
return "CloudStorage [%s, %s]" % (status, names)
GoogleCloudStorageInputReader = _GoogleCloudStorageInputReader
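# Editorial sketch of how a mapper typically consumes this reader (the handler
# name is hypothetical): each value produced by next() is an open cloudstorage
# read buffer, so the map function receives a file-like object rather than raw
# bytes.
#
#   def my_map(file_buf):
#       for line in file_buf:
#           yield (line.strip(), 1)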
class _GoogleCloudStorageRecordInputReader(_GoogleCloudStorageInputReader):
"""Read data from a Google Cloud Storage file using LevelDB format.
See the _GoogleCloudStorageOutputWriter for additional configuration options.
"""
def __getstate__(self):
result = self.__dict__.copy()
# record reader may not exist if reader has not been used
if "_record_reader" in result:
# RecordsReader has no buffering, it can safely be reconstructed after
# deserialization
result.pop("_record_reader")
return result
def next(self):
"""Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
      a LevelDB file.
    Raises:
      StopIteration: The ordered set of records has been exhausted.
"""
while True:
if not hasattr(self, "_cur_handle") or self._cur_handle is None:
# If there are no more files, StopIteration is raised here
self._cur_handle = super(_GoogleCloudStorageRecordInputReader,
self).next()
if not hasattr(self, "_record_reader") or self._record_reader is None:
self._record_reader = records.RecordsReader(self._cur_handle)
try:
start_time = time.time()
content = self._record_reader.read()
ctx = context.get()
if ctx:
operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return content
except EOFError:
self._cur_handle = None
self._record_reader = None
GoogleCloudStorageRecordInputReader = _GoogleCloudStorageRecordInputReader
class _ReducerReader(_GoogleCloudStorageRecordInputReader):
"""Reader to read KeyValues records from GCS."""
expand_parameters = True
def __init__(self, filenames, index=0, buffer_size=None, _account_id=None,
delimiter=None):
super(_ReducerReader, self).__init__(filenames, index, buffer_size,
_account_id, delimiter)
self.current_key = None
self.current_values = None
def __iter__(self):
ctx = context.get()
combiner = None
if ctx:
combiner_spec = ctx.mapreduce_spec.mapper.params.get("combiner_spec")
if combiner_spec:
combiner = util.handler_for_name(combiner_spec)
try:
while True:
binary_record = super(_ReducerReader, self).next()
proto = kv_pb.KeyValues()
proto.ParseFromString(binary_record)
to_yield = None
if self.current_key is not None and self.current_key != proto.key():
to_yield = (self.current_key, self.current_values)
self.current_key = None
self.current_values = None
if self.current_key is None:
self.current_key = proto.key()
self.current_values = []
if combiner:
combiner_result = combiner(
self.current_key, proto.value_list(), self.current_values)
if not util.is_generator(combiner_result):
raise errors.BadCombinerOutputError(
"Combiner %s should yield values instead of returning them "
"(%s)" % (combiner, combiner_result))
self.current_values = []
for value in combiner_result:
if isinstance(value, operation.Operation):
value(ctx)
else:
# With combiner the current values always come from the combiner.
self.current_values.append(value)
# Check-point after each combiner call is run only when there's
# nothing that needs to be yielded below. Otherwise allowing a
# check-point here would cause the current to_yield data to be lost.
if not to_yield:
yield ALLOW_CHECKPOINT
else:
# Without combiner we just accumulate values.
self.current_values.extend(proto.value_list())
if to_yield:
yield to_yield
# Check-point after each key is yielded.
yield ALLOW_CHECKPOINT
except StopIteration:
pass
# There may be some accumulated values left at the end of an input file
# so be sure to yield those too.
if self.current_key is not None:
to_yield = (self.current_key, self.current_values)
self.current_key = None
self.current_values = None
yield to_yield
@staticmethod
def encode_data(data):
"""Encodes the given data, which may have include raw bytes.
Works around limitations in JSON encoding, which cannot handle raw bytes.
Args:
data: the data to encode.
Returns:
The data encoded.
"""
return base64.b64encode(pickle.dumps(data))
@staticmethod
def decode_data(data):
"""Decodes data encoded with the encode_data function."""
return pickle.loads(base64.b64decode(data))
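  # Editorial example of the round trip defined by the two helpers above
  # (values are illustrative):
  #
  #   token = _ReducerReader.encode_data(("key", ["v1", "v2"]))
  #   assert _ReducerReader.decode_data(token) == ("key", ["v1", "v2"])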
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
result = super(_ReducerReader, self).to_json()
result["current_key"] = self.encode_data(self.current_key)
result["current_values"] = self.encode_data(self.current_values)
return result
@classmethod
def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
result = super(_ReducerReader, cls).from_json(json)
result.current_key = _ReducerReader.decode_data(json["current_key"])
result.current_values = _ReducerReader.decode_data(json["current_values"])
return result
# pylint: disable=too-many-instance-attributes
class GoogleCloudStorageLineInputReader(InputReader):
"""Input reader for a newline delimited file in Google Cloud Storage.
Required configuration in the mapper_spec.input_reader dictionary.
  bucket_name : name of the bucket to use (with no extra delimiters or
    suffixes such as directories).
objects : a list of object names or prefixes. All objects must be
in the bucket_name. If the name ends with a * it will be
    treated as a prefix and all objects with matching names will be read.
Entries should not start with a slash unless that is part of the object's
name. An example list could be:
['my-1st-input-file', 'directory/my-2nd-file', 'some/other/dir/input-*']
    To retrieve all files, '*' will match every object in the bucket. If a
    file is listed twice or is covered by multiple prefixes it will be read
    twice; there is no deduplication.
  Optional configuration in the mapper_spec.input_reader dictionary.
buffer_size : the size of the read buffer for each file handle.
delimiter : if specified, turn on the shallow splitting mode.
The delimiter is used as a path separator to designate directory
hierarchy. Matching of prefixes from objects
will stop at the first directory instead of matching
    all files under the directory. This allows MR to process a bucket with
hundreds of thousands of files.
Outputs:
    A tuple containing another tuple and the line
    ((File name, start position), line)
    File name : Name of the file the data came from
    start position : The file's byte offset at the start of the data
    line : The data read until a '\n' was reached
"""
# Supported parameters
BUCKET_NAME_PARAM = 'bucket_name'
OBJECT_NAMES_PARAM = 'objects'
BUFFER_SIZE_PARAM = 'buffer_size'
DELIMITER_PARAM = 'delimiter'
# Internal parameters
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
_ACCOUNT_ID_PARAM = 'account_id'
# Serialization parameters.
INITIAL_POSITION_PARAM = 'initial_position'
END_POSITION_PARAM = 'end_position'
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
# Reads the parameters sent to the mapper
reader_spec = _get_params(mapper_spec, allow_old=False)
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in reader_spec:
raise errors.BadReaderParamsError(
'%s is required for Google Cloud Storage' %
cls.BUCKET_NAME_PARAM)
try:
cloudstorage.validate_bucket_name(
reader_spec[cls.BUCKET_NAME_PARAM])
except ValueError, error:
raise errors.BadReaderParamsError('Bad bucket name, %s' % (error))
# Object Name(s) are required
if cls.OBJECT_NAMES_PARAM not in reader_spec:
raise errors.BadReaderParamsError(
'%s is required for Google Cloud Storage' %
cls.OBJECT_NAMES_PARAM)
file_names = reader_spec[cls.OBJECT_NAMES_PARAM]
if not isinstance(file_names, list):
raise errors.BadReaderParamsError(
'Object name list is not a list but a %s' %
file_names.__class__.__name__)
for file_name in file_names:
if not isinstance(file_name, basestring):
raise errors.BadReaderParamsError(
'Object name is not a string but a %s' %
file_name.__class__.__name__)
if cls.DELIMITER_PARAM in reader_spec:
delimiter = reader_spec[cls.DELIMITER_PARAM]
if not isinstance(delimiter, basestring):
raise errors.BadReaderParamsError(
'%s is not a string but a %s' %
(cls.DELIMITER_PARAM, type(delimiter)))
# pylint: disable=too-many-locals
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of shard_count input_spec_shards for input_spec.
Args:
mapper_spec: The mapper specification to split from. Must contain
'objects' parameter with one or more file_names.
Returns:
A list of GCSInputReaders corresponding to the specified shards.
"""
reader_spec = _get_params(mapper_spec, allow_old=False)
bucket = reader_spec[cls.BUCKET_NAME_PARAM]
file_names = reader_spec[cls.OBJECT_NAMES_PARAM]
delimiter = reader_spec.get(cls.DELIMITER_PARAM)
account_id = reader_spec.get(cls._ACCOUNT_ID_PARAM)
buffer_size = reader_spec.get(cls.BUFFER_SIZE_PARAM)
# Gather the complete list of files (expanding wildcards)
all_file_names = []
for file_name in file_names:
if file_name.endswith('*'):
all_file_names.extend(
[file_stat for file_stat in cloudstorage.listbucket(
'/' + bucket + '/' + file_name[:-1], delimiter=delimiter,
_account_id=account_id)])
else:
try:
all_file_names.append(cloudstorage
.stat(('/%s/%s') % (bucket, file_name)))
except cloudstorage.NotFoundError:
logging.warning('File /%s/%s may have been removed. Skipping file.',
bucket, file_name)
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
try:
shards_per_file = shard_count // len(all_file_names)
except ZeroDivisionError:
shards_per_file = 1
if shards_per_file == 0:
shards_per_file = 1
chunks = []
for file_stats in all_file_names:
file_name = file_stats.filename
file_size = file_stats.st_size
file_chunk_size = file_size // shards_per_file
for i in xrange(shards_per_file - 1):
chunks.append(GoogleCloudStorageLineInputReader.from_json(
{cls.OBJECT_NAMES_PARAM: file_name,
cls.INITIAL_POSITION_PARAM: file_chunk_size * i,
cls.END_POSITION_PARAM: file_chunk_size * (i + 1),
cls.BUFFER_SIZE_PARAM : buffer_size,
cls.DELIMITER_PARAM: delimiter,
cls._ACCOUNT_ID_PARAM : account_id
}))
chunks.append(GoogleCloudStorageLineInputReader.from_json(
{cls.OBJECT_NAMES_PARAM: file_name,
cls.INITIAL_POSITION_PARAM: file_chunk_size * (shards_per_file - 1),
cls.END_POSITION_PARAM: file_size,
cls.BUFFER_SIZE_PARAM : buffer_size,
cls.DELIMITER_PARAM: delimiter,
cls._ACCOUNT_ID_PARAM : account_id}))
return chunks
def to_json(self):
"""Returns an json-compatible input shard spec for remaining inputs."""
new_pos = self._file_reader.tell()
if self._has_iterated:
new_pos -= 1
return {self.OBJECT_NAMES_PARAM: self._file_name,
self.INITIAL_POSITION_PARAM: new_pos,
self.END_POSITION_PARAM: self._end_position}
def __str__(self):
"""Returns the string representation of this LineInputReader."""
return 'File Name(%r):[%d, %d]' % (
self._file_name, self._file_reader.tell(), self._end_position)
@classmethod
def from_json(cls, json):
"""Instantiates an instance of this InputReader for the given shard spec."""
return cls(json[cls.OBJECT_NAMES_PARAM],
json[cls.INITIAL_POSITION_PARAM],
json[cls.END_POSITION_PARAM])
# pylint: disable=too-many-arguments
def __init__(self, file_name, start_position, end_position,
buffer_size=None, delimiter=None, account_id=None):
"""Initializes this instance with the given file name and character range.
This GoogleCloudStorageLineInputReader will read from the first record
starting strictly after start_position until the first record ending at or
after end_position (exclusive). As an exception, if start_position is 0,
then this InputReader starts reading at the first record.
Args:
file_name: the file name that this input reader is processing.
start_position: the position to start reading at.
end_position: a position in the last record to read.
buffer_size: Used by the GCS reader to read data.
delimiter: The delimiter is used as a path separator to designate
directory hierarchy.
account_id: internal use
"""
self._file_name = file_name
self._buffer_size = buffer_size
self._account_id = account_id
self._delimiter = delimiter
self._start_position = start_position
options = {}
if self._buffer_size:
options['read_buffer_size'] = self._buffer_size
if self._account_id:
options['_account_id'] = self._account_id
try:
# pylint: disable=star-args
self._file_reader = cloudstorage.open(file_name, **options)
self._file_reader.seek(start_position)
except cloudstorage.NotFoundError:
logging.warning('File %s may have been removed. Skipping file.',
file_name)
raise StopIteration()
self._end_position = end_position
self._has_iterated = False
self._read_before_start = bool(start_position)
def next(self):
"""Returns the next input from as an (( file_name, offset), line) tuple."""
self._has_iterated = True
if self._read_before_start:
self._file_reader.readline()
self._read_before_start = False
start_position = self._file_reader.tell()
if start_position > self._end_position:
raise StopIteration()
line = self._file_reader.readline()
if not line:
raise StopIteration()
return (self._file_name, start_position), line.rstrip('\n')
class GoogleCloudStorageZipInputReader(InputReader):
"""Input reader for files from a zip archive stored in the GCS.
Each instance of the reader will read the TOC, from the end of the zip file,
and then only the contained files which it is responsible for.
Required configuration in the mapper_spec.input_reader dictionary.
  bucket_name : name of the bucket to use (with no extra delimiters or
    suffixes such as directories).
objects : a list of object names or prefixes. All objects must be
in the bucket_name. They all must be zip files. If the name ends with
a * it will be
    treated as a prefix and all objects with matching names will be read.
Entries should not start with a slash unless that is part of the object's
name. An example list could be:
['my-1st-input-file', 'directory/my-2nd-file', 'some/other/dir/input-*']
    To retrieve all files, '*' will match every object in the bucket. If a
    file is listed twice or is covered by multiple prefixes it will be read
    twice; there is no deduplication.
  Optional configuration in the mapper_spec.input_reader dictionary.
buffer_size : the size of the read buffer for each file handle.
delimiter : if specified, turn on the shallow splitting mode.
The delimiter is used as a path separator to designate directory
hierarchy. Matching of prefixes from objects
will stop at the first directory instead of matching
    all files under the directory. This allows MR to process a bucket with
hundreds of thousands of files.
Outputs:
    A tuple containing another tuple and the file contents
    ((Zip file name, text file), file data)
    Zip file name : Name of the zip file being processed
    text file : Current file being output
    data : contents of the file
"""
# Mapreduce parameters.
OBJECT_NAMES_PARAM = 'objects'
START_INDEX_PARAM = 'start_index'
END_INDEX_PARAM = 'end_index'
BUFFER_SIZE_PARAM = 'buffer_size'
DELIMITER_PARAM = 'delimiter'
BUCKET_NAME_PARAM = 'bucket_name'
_ACCOUNT_ID_PARAM = 'account_id'
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
reader_spec = _get_params(mapper_spec, allow_old=False)
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in reader_spec:
raise errors.BadReaderParamsError(
'%s is required for Google Cloud Storage' %
cls.BUCKET_NAME_PARAM)
try:
cloudstorage.validate_bucket_name(
reader_spec[cls.BUCKET_NAME_PARAM])
except ValueError, error:
raise errors.BadReaderParamsError('Bad bucket name, %s' % (error))
# Object Name(s) are required
if cls.OBJECT_NAMES_PARAM not in reader_spec:
raise errors.BadReaderParamsError(
'%s is required for Google Cloud Storage' %
cls.OBJECT_NAMES_PARAM)
file_names = reader_spec[cls.OBJECT_NAMES_PARAM]
if not isinstance(file_names, list):
raise errors.BadReaderParamsError(
'Object name list is not a list but a %s' %
file_names.__class__.__name__)
for file_name in file_names:
if not isinstance(file_name, basestring):
raise errors.BadReaderParamsError(
'Object name is not a string but a %s' %
file_name.__class__.__name__)
if cls.DELIMITER_PARAM in reader_spec:
delimiter = reader_spec[cls.DELIMITER_PARAM]
if not isinstance(delimiter, basestring):
raise errors.BadReaderParamsError(
'%s is not a string but a %s' %
(cls.DELIMITER_PARAM, type(delimiter)))
@classmethod
def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
return cls(json[cls.OBJECT_NAMES_PARAM],
json[cls.START_INDEX_PARAM],
json[cls.END_INDEX_PARAM])
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.OBJECT_NAMES_PARAM: self._file_name,
self.START_INDEX_PARAM: self._start_index,
self.END_INDEX_PARAM: self._end_index}
def __str__(self):
"""Returns the string representation of this GCSZipInputReader."""
return 'File Name(%r):[%d, %d]' % (
self._file_name, self._start_index, self._end_index)
# pylint: disable=too-many-locals
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of shard_count input_spec_shards for input_spec.
Args:
mapper_spec: The mapper specification to split from. Must contain
'objects' parameter with one or more file_names.
Returns:
A list of GCSInputReaders corresponding to the specified shards.
"""
reader_spec = _get_params(mapper_spec, allow_old=False)
bucket = reader_spec[cls.BUCKET_NAME_PARAM]
file_names = reader_spec[cls.OBJECT_NAMES_PARAM]
delimiter = reader_spec.get(cls.DELIMITER_PARAM)
account_id = reader_spec.get(cls._ACCOUNT_ID_PARAM)
buffer_size = reader_spec.get(cls.BUFFER_SIZE_PARAM)
all_file_names = []
for file_name in file_names:
if file_name.endswith('*'):
all_file_names.extend(
[file_stat for file_stat in cloudstorage.listbucket(
'/' + bucket + '/' + file_name[:-1], delimiter=delimiter,
_account_id=account_id)])
else:
try:
all_file_names.append(cloudstorage
.stat(('/%s/%s') % (bucket, file_name)))
except cloudstorage.NotFoundError:
logging.warning('File /%s/%s may have been removed. Skipping file.',
bucket, file_name)
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
try:
shards_per_file = shard_count // len(all_file_names)
except ZeroDivisionError:
shards_per_file = 1
if shards_per_file == 0:
shards_per_file = 1
sub_files = {}
total_size = 0
for file_name in all_file_names:
logging.info(file_name.filename)
zip_input = zipfile.ZipFile(cloudstorage.open(file_name.filename))
sub_files[file_name] = zip_input.infolist()
total_size += sum(x.file_size for x in sub_files[file_name])
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
size_per_shard = total_size // shard_count
readers = []
for file_name in all_file_names:
bfiles = sub_files[file_name]
current_shard_size = 0
start_file_index = 0
next_file_index = 0
for fileinfo in bfiles:
next_file_index += 1
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
readers.append(cls(file_name.filename, start_file_index,
next_file_index, buffer_size=buffer_size,
delimiter=delimiter, account_id=account_id))
current_shard_size = 0
start_file_index = next_file_index
if current_shard_size != 0:
readers.append(cls(file_name.filename, start_file_index,
next_file_index, buffer_size=buffer_size,
delimiter=delimiter, account_id=account_id))
return readers
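  # Illustrative note: zip members are packed greedily against size_per_shard.
  # For example, member sizes [60, 50, 30, 80] with size_per_shard=100 produce
  # readers over member index ranges [0, 2) and [2, 4).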
# pylint: disable=too-many-arguments
def __init__(self, file_name, start_index, end_index,
buffer_size=None, delimiter=None, account_id=None):
"""Initializes this instance with the given file and file range.
This GCSZipInputReader will read from the file with index start_index
up to but not including the file with index end_index.
Args:
file_name: the file name that this input reader is processing.
start_index: the index of the first file to read.
end_index: the index of the first file that will not be read.
buffer_size: Used by the GCS reader to read data.
delimiter: The delimiter is used as a path separator to designate
directory hierarchy.
account_id: internal use
"""
self._file_name = file_name
self._start_index = start_index
self._end_index = end_index
self._buffer_size = buffer_size
self._account_id = account_id
self._delimiter = delimiter
options = {}
if self._buffer_size:
options['read_buffer_size'] = self._buffer_size
if self._account_id:
options['_account_id'] = self._account_id
try:
# pylint: disable=star-args
self._reader = cloudstorage.open(file_name, **options)
except cloudstorage.NotFoundError:
logging.warning('File /%s may have been removed. Skipping file.',
file_name)
self._zip = None
self._entries = None
def next(self):
"""Returns the next input from this input reader as
((ZipInfo, Current file name), full file contents) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is another tuple (Zip file name,
text file name).
      The second element of the tuple is the complete body of the file.
"""
if not self._zip:
self._zip = zipfile.ZipFile(self._reader)
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_index:self._end_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
self._start_index += 1
    return ((self._file_name, entry.filename), self._read(entry))
def _read(self, entry):
"""Read entry content.
Args:
entry: zip file entry as zipfile.ZipInfo.
Returns:
Entry content as string.
"""
start_time = time.time()
content = self._zip.read(entry.filename)
ctx = context.get()
if ctx:
operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return content
class GoogleCloudStorageZipLineInputReader(InputReader):
"""Input reader for newline delimited files in zip archives from GCS.
This has the same external interface as the GoogleCloudStorageZipInputReader,
in that it takes a list of files as its input and yields lines to the reader.
  However, the input files are expected to be zip archives of line-delimited
  files rather than the plain files themselves.
This is useful as many line delimited files gain greatly from compression.
Required configuration in the mapper_spec.input_reader dictionary.
  bucket_name : name of the bucket to use (with no extra delimiters or
    suffixes such as directories).
objects : a list of object names or prefixes. All objects must be
in the bucket_name. They all must be zip files. If the name ends with
    a * it will be treated as a prefix and all objects with matching names will
be read. Entries should not start with a slash unless that is part of the
object's name. An example list could be:
['my-1st-input-file', 'directory/my-2nd-file', 'some/other/dir/input-*']
    To retrieve all files, '*' will match every object in the bucket. If a
    file is listed twice or is covered by multiple prefixes it will be read
    twice; there is no deduplication.
  Optional configuration in the mapper_spec.input_reader dictionary.
buffer_size : the size of the read buffer for each file handle.
delimiter : if specified, turn on the shallow splitting mode.
The delimiter is used as a path separator to designate directory
hierarchy. Matching of prefixes from objects
will stop at the first directory instead of matching
    all files under the directory. This allows MR to process a bucket with
hundreds of thousands of files.
Outputs:
    A tuple containing another tuple and the line
    ((Zip file name, text file, start position), line)
    Zip file name : Name of the zip file being processed
    text file : Current file being output
    start position : The file's byte offset at the start of the data
    line : The data read until a '\n' was reached
"""
# Mapreduce parameters.
OBJECT_NAMES_PARAM = 'objects'
BUFFER_SIZE_PARAM = 'buffer_size'
DELIMITER_PARAM = 'delimiter'
BUCKET_NAME_PARAM = 'bucket_name'
_ACCOUNT_ID_PARAM = 'account_id'
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Serialization parameters.
START_FILE_INDEX_PARAM = 'start_file_index'
END_FILE_INDEX_PARAM = 'end_file_index'
OFFSET_PARAM = 'offset'
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
reader_spec = _get_params(mapper_spec, allow_old=False)
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in reader_spec:
raise errors.BadReaderParamsError(
'%s is required for Google Cloud Storage' %
cls.BUCKET_NAME_PARAM)
try:
cloudstorage.validate_bucket_name(
reader_spec[cls.BUCKET_NAME_PARAM])
except ValueError, error:
raise errors.BadReaderParamsError('Bad bucket name, %s' % (error))
# Object Name(s) are required
if cls.OBJECT_NAMES_PARAM not in reader_spec:
raise errors.BadReaderParamsError(
'%s is required for Google Cloud Storage' %
cls.OBJECT_NAMES_PARAM)
file_names = reader_spec[cls.OBJECT_NAMES_PARAM]
if not isinstance(file_names, list):
raise errors.BadReaderParamsError(
'Object name list is not a list but a %s' %
file_names.__class__.__name__)
for file_name in file_names:
if not isinstance(file_name, basestring):
raise errors.BadReaderParamsError(
'Object name is not a string but a %s' %
file_name.__class__.__name__)
if cls.DELIMITER_PARAM in reader_spec:
delimiter = reader_spec[cls.DELIMITER_PARAM]
if not isinstance(delimiter, basestring):
raise errors.BadReaderParamsError(
'%s is not a string but a %s' %
(cls.DELIMITER_PARAM, type(delimiter)))
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.OBJECT_NAMES_PARAM: self._file_name,
self.START_FILE_INDEX_PARAM: self._start_file_index,
self.END_FILE_INDEX_PARAM: self._end_file_index,
self.OFFSET_PARAM: self._next_offset()}
@classmethod
def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
return cls(json[cls.OBJECT_NAMES_PARAM],
json[cls.START_FILE_INDEX_PARAM],
json[cls.END_FILE_INDEX_PARAM],
json[cls.OFFSET_PARAM])
def __str__(self):
"""Returns the string representation of this reader.
Returns:
string file_name:[start file num, end file num]:current offset.
"""
return 'file_name(%r):[%d, %d]:%d' % (
self._file_name, self._start_file_index, self._end_file_index,
self._next_offset())
# pylint: disable=too-many-locals
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of shard_count input_spec_shards for input_spec.
Args:
mapper_spec: The mapper specification to split from. Must contain
'objects' parameter with one or more file_names.
Returns:
A list of GCSInputReaders corresponding to the specified shards.
"""
reader_spec = _get_params(mapper_spec, allow_old=False)
bucket = reader_spec[cls.BUCKET_NAME_PARAM]
file_names = reader_spec[cls.OBJECT_NAMES_PARAM]
delimiter = reader_spec.get(cls.DELIMITER_PARAM)
account_id = reader_spec.get(cls._ACCOUNT_ID_PARAM)
buffer_size = reader_spec.get(cls.BUFFER_SIZE_PARAM)
all_file_names = []
for file_name in file_names:
if file_name.endswith('*'):
all_file_names.extend(
[file_stat for file_stat in cloudstorage.listbucket(
'/' + bucket + '/' + file_name[:-1], delimiter=delimiter,
_account_id=account_id)])
else:
try:
all_file_names.append(cloudstorage.stat(('/%s/%s') %
(bucket, file_name)))
except cloudstorage.NotFoundError:
logging.warning('File /%s/%s may have been removed. Skipping file.',
bucket, file_name)
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
try:
shards_per_file = shard_count // len(all_file_names)
except ZeroDivisionError:
shards_per_file = 1
if shards_per_file == 0:
shards_per_file = 1
sub_files = {}
total_size = 0
for file_name in all_file_names:
zip_input = zipfile.ZipFile(cloudstorage.open(file_name.filename))
sub_files[file_name] = zip_input.infolist()
total_size += sum(x.file_size for x in sub_files[file_name])
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
size_per_shard = total_size // shard_count
readers = []
for file_name in all_file_names:
bfiles = sub_files[file_name]
current_shard_size = 0
start_file_index = 0
next_file_index = 0
for fileinfo in bfiles:
next_file_index += 1
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
readers.append(cls(file_name.filename, start_file_index,
next_file_index, buffer_size=buffer_size,
delimiter=delimiter, account_id=account_id))
current_shard_size = 0
start_file_index = next_file_index
if current_shard_size != 0:
readers.append(cls(file_name.filename, start_file_index,
next_file_index, buffer_size=buffer_size,
delimiter=delimiter, account_id=account_id))
return readers
def next(self):
"""Returns the next line from this input reader as
((ZipInfo, file_name, Start Position), line) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple describes the source, it is itself
a tuple (Zip file name, Text file name, byteoffset).
The second element of the tuple is the line found at that offset.
"""
if not self._filestream:
if not self._zip:
self._zip = zipfile.ZipFile(self._reader)
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_file_index:
self._end_file_index]
self._entries.reverse()
if not self._entries and not self._entry:
raise StopIteration()
if not self._entry:
self._entry = self._entries.pop()
file_name = self._entry.filename
value = self._zip.read(file_name)
self._filestream = StringIO.StringIO(value)
if self._initial_offset:
self._filestream.seek(self._initial_offset)
start_position = self._filestream.tell()
line = self._filestream.readline()
self._initial_offset = self._filestream.tell()
if not line:
# Done with this file in the zip. Move on to the next file.
self._filestream.close()
self._filestream = None
self._start_file_index += 1
self._initial_offset = 0
self._entry = None
return self.next()
return((self._file_name, file_name, start_position), line.rstrip('\n'))
def _next_offset(self):
"""Return the offset of the next line to read."""
if self._filestream:
offset = self._filestream.tell()
if offset:
offset -= 1
else:
offset = self._initial_offset
return offset
# pylint: disable=too-many-arguments
def __init__(self, file_name, start_file_index, end_file_index, offset=0,
buffer_size=None, delimiter=None, account_id=None):
"""Initializes this instance with the given file name and file range.
This GoogleCloudStorageZipInputReader will read from the file with index
start_file_index up to but not including the file with index end_file_index.
It will return lines starting at offset within file[start_file_index]
Args:
file_name: the file name that this input reader is processing.
start_file_index: the index of the first file to read within the zip.
end_file_index: the index of the first file that will not be read.
      offset: the byte offset within the file at start_file_index at which to
        start reading. The reader will continue to the end of the file.
"""
self._file_name = file_name
self._start_file_index = start_file_index
self._end_file_index = end_file_index
self._initial_offset = offset
self._buffer_size = buffer_size
self._account_id = account_id
self._delimiter = delimiter
options = {}
if self._buffer_size:
options['read_buffer_size'] = self._buffer_size
if self._account_id:
options['_account_id'] = self._account_id
try:
# pylint: disable=star-args
self._reader = cloudstorage.open(file_name, **options)
except cloudstorage.NotFoundError:
logging.warning('File /%s may have been removed. Skipping file.',
file_name)
self._zip = None
self._entries = None
self._entry = None
self._filestream = None
| rbruyere/appengine-mapreduce | python/test/mapreduce gcs/mapreduce/input_readers.py | Python | apache-2.0 | 128,812 |
# Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import parametrize
from ducktape.utils.util import wait_until
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int
from kafkatest.version import LATEST_0_9, LATEST_0_10, TRUNK, KafkaVersion
class MessageFormatChangeTest(ProduceConsumeValidateTest):
def __init__(self, test_context):
super(MessageFormatChangeTest, self).__init__(test_context=test_context)
def setUp(self):
self.topic = "test_topic"
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.zk.start()
# Producer and consumer
self.producer_throughput = 10000
self.num_producers = 1
self.num_consumers = 1
self.messages_per_producer = 100
def produce_and_consume(self, producer_version, consumer_version, group):
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic,
throughput=self.producer_throughput,
message_validator=is_int,
version=KafkaVersion(producer_version))
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
self.topic, new_consumer=False, consumer_timeout_ms=30000,
message_validator=is_int, version=KafkaVersion(consumer_version))
self.consumer.group_id = group
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
@parametrize(producer_version=str(TRUNK), consumer_version=str(TRUNK))
@parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9))
def test_compatibility(self, producer_version, consumer_version):
""" This tests performs the following checks:
The workload is a mix of 0.9.x and 0.10.x producers and consumers
that produce to and consume from a 0.10.x cluster
1. initially the topic is using message format 0.9.0
2. change the message format version for topic to 0.10.0 on the fly.
3. change the message format version for topic back to 0.9.0 on the fly.
- The producers and consumers should not have any issue.
- Note that for 0.9.x consumers/producers we only do steps 1 and 2
"""
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=TRUNK, topics={self.topic: {
"partitions": 3,
"replication-factor": 3,
'configs': {"min.insync.replicas": 2}}})
self.kafka.start()
self.logger.info("First format change to 0.9.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
self.produce_and_consume(producer_version, consumer_version, "group1")
self.logger.info("Second format change to 0.10.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
self.produce_and_consume(producer_version, consumer_version, "group2")
if producer_version == str(TRUNK) and consumer_version == str(TRUNK):
self.logger.info("Third format change back to 0.9.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
self.produce_and_consume(producer_version, consumer_version, "group3")
| geeag/kafka | tests/kafkatest/tests/client/message_format_change_test.py | Python | apache-2.0 | 4,644 |
import shelve
import os
import re
from resource_api.interfaces import Resource as BaseResource, Link as BaseLink, AbstractUriPolicy
from resource_api.schema import StringField, DateTimeField, IntegerField
from resource_api.service import Service
from resource_api.errors import ValidationError
RE_SHA1 = re.compile("^[a-f0-9]{40}$")
SHELVE_PATH = "/tmp/school.shelve.db"
class ShelveService(Service):
def __init__(self):
super(ShelveService, self).__init__()
self._storage = shelve.open(SHELVE_PATH, writeback=True)
def _get_context(self):
return {"storage": self._storage}
def _get_user(self, data):
return None
def __del__(self):
self._storage.close()
class Resource(BaseResource):
def __init__(self, context):
super(Resource, self).__init__(context)
self._storage = context["storage"]
def exists(self, user, pk):
return pk in self._storage.get(self.get_name(), {})
def get_data(self, user, pk):
return self._storage.get(self.get_name(), {}).get(pk)
def delete(self, user, pk):
self._storage.get(self.get_name(), {}).pop(pk)
self._storage.sync()
def create(self, user, pk, data):
if self.get_name() not in self._storage:
self._storage[self.get_name()] = {}
self._storage[self.get_name()][pk] = data
self._storage.sync()
def update(self, user, pk, data):
self._storage[self.get_name()][pk].update(data)
self._storage.sync()
def get_uris(self, user, params=None):
return self._storage.get(self.get_name(), {}).keys()
def get_count(self, user, params=None):
        return len(self.get_uris(user, params))
class Link(BaseLink):
def __init__(self, context):
super(Link, self).__init__(context)
self._storage = context["storage"]
def exists(self, user, pk, rel_pk):
return rel_pk in self._storage.get((pk, self.get_name()), {})
def get_data(self, user, pk, rel_pk):
return self._storage.get((pk, self.get_name()), {}).get(rel_pk)
def create(self, user, pk, rel_pk, data=None):
key = (pk, self.get_name())
if key not in self._storage:
self._storage[key] = {}
self._storage[key][rel_pk] = data
self._storage.sync()
def update(self, user, pk, rel_pk, data):
        key = (pk, self.get_name())
        self._storage[key][rel_pk].update(data)
self._storage.sync()
def delete(self, user, pk, rel_pk):
self._storage.get((pk, self.get_name()), {}).pop(rel_pk)
self._storage.sync()
def get_uris(self, user, pk, params=None):
return self._storage.get((pk, self.get_name()), {}).keys()
def get_count(self, user, pk, params=None):
        return len(self.get_uris(user, pk, params))
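# Editorial sketch of the shelve layout implied by the two classes above (keys
# and values are illustrative): resource data is stored under the resource
# name, link data under a (pk, link_name) tuple, e.g.
#
#   storage["Student"] = {"[email protected]": {"first_name": "Jane", ...}}
#   storage[("[email protected]", "courses")] = {"maths": {"grade": 5}}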
class Student(Resource):
""" A pupil """
class Schema:
email = StringField(regex="[^@]+@[^@]+\.[^@]+", pk=True,
description="Addess to which the notifications shall be sent")
first_name = StringField(description="Given name(s)")
last_name = StringField(description="Family name(s)")
birthday = DateTimeField()
class Links:
class courses(Link):
""" Courses the student has ever attended """
class Schema:
grade = IntegerField(min_val=1, max_val=5)
target = "Course"
related_name = "students"
master = True
class comments(Link):
""" Comments made by the student """
target = "Comment"
related_name = "student"
class ratings(Link):
""" Ratings given by the student """
target = "TeacherRating"
related_name = "student"
class Teacher(Resource):
""" A lecturer """
class Schema:
email = StringField(regex="[^@]+@[^@]+\.[^@]+", pk=True,
description="Addess to which the notifications shall be sent")
first_name = StringField(description="Given name(s)")
last_name = StringField(description="Family name(s)")
category = StringField(description="TQS Category", choices=["four", "five", "five plus", "six"])
class Links:
class ratings(Link):
""" Ratings given to the teacher """
target = "TeacherRating"
related_name = "teacher"
class courses(Link):
""" Courses the teacher is responsible for """
target = "Course"
related_name = "teacher"
class Course(Resource):
""" An educational unit represinting the lessons for a specific set of topics """
class Schema:
name = StringField(pk=True, description="Name of the course. E.g. physics, maths.")
duration = IntegerField(description="Length of the course in weeks")
class Links:
class teacher(Link):
""" The lecturer of the course """
target = "Teacher"
related_name = "courses"
cardinality = Link.cardinalities.ONE
master = True
required = True
class comments(Link):
""" All comments made about the course """
target = "Comment"
related_name = "course"
class ratings(Link):
""" All ratings that were given to the teachers of the specific course """
target = "TeacherRating"
related_name = "course"
class students(Link):
""" All pupils who attend the course """
target = "Student"
related_name = "courses"
class AutoGenSha1UriPolicy(AbstractUriPolicy):
""" Uses a randomly generated sha1 as a primary key """
@property
def type(self):
return "autogen_policy"
def generate_pk(self, data):
return os.urandom(16).encode('hex')
def serialize(self, pk):
return pk
def deserialize(self, pk):
if not isinstance(pk, basestring):
raise ValidationError("Has to be string")
        if not RE_SHA1.match(pk):
raise ValidationError("PK is not a valid SHA1")
return pk
class Comment(Resource):
""" Student's comment about the course """
UriPolicy = AutoGenSha1UriPolicy
class Schema:
pk = StringField(pk=True, description="Identifier of the resource")
value = StringField(description="Text of the comment")
creation_time = DateTimeField(description="Time when the comment was added (for sorting purpose)")
class Links:
class student(Link):
""" The pupil who made the comment """
target = "Student"
related_name = "comments"
cardinality = Link.cardinalities.ONE
master = True
required = True
class course(Link):
""" The subject the comment was made about """
target = "Course"
related_name = "comments"
cardinality = Link.cardinalities.ONE
master = True
required = True
class TeacherRating(Resource):
""" Student's rating about teacher's performance """
UriPolicy = AutoGenSha1UriPolicy
class Schema:
pk = StringField(pk=True, description="Identifier of the resource")
value = IntegerField(min_val=0, max_val=100, description="Lecturer's performance identifier ")
creation_time = DateTimeField(description="Time when the rating was added (for sorting purpose)")
class Links:
class student(Link):
""" The pupil who gave the rating to the teacher """
target = "Student"
related_name = "ratings"
cardinality = Link.cardinalities.ONE
master = True
required = True
class course(Link):
""" The subject with respect to which the rating was given """
target = "Course"
related_name = "ratings"
cardinality = Link.cardinalities.ONE
master = True
required = True
class teacher(Link):
""" The lecturer to whom the rating is related """
target = "Teacher"
related_name = "ratings"
cardinality = Link.cardinalities.ONE
master = True
required = True
srv = ShelveService()
srv.register(Student)
srv.register(Teacher)
srv.register(Course)
srv.register(Comment)
srv.register(TeacherRating)
srv.setup()
| gurunars/resource-api | documentation/tutorial/service_v4.py | Python | apache-2.0 | 8,456 |
'''
Integration Test for scheduler reboot VM in HA mode.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.scheduler_operations as schd_ops
import test_stub
import time
import os
vm = None
node1_ip = None
node2_ip = None
def test():
global vm
global node1_ip
vm = test_stub.create_basic_vm()
vm.check()
start_date = int(time.time())
schd = vm_ops.reboot_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_reboot_vm_scheduler', start_date+60, 30)
node1_ip = os.environ.get('node1Ip')
node2_ip = os.environ.get('node2Ip')
test_util.test_logger("shutdown node: %s" % (node1_ip))
cmd = "init 0"
host_username = os.environ.get('nodeUserName')
host_password = os.environ.get('nodePassword')
rsp = test_lib.lib_execute_ssh_cmd(node1_ip, host_username, host_password, cmd, 180)
test_util.test_logger("wait for 2 minutes to see if http api still works well")
time.sleep(180)
test_stub.exercise_connection(600)
time.sleep(180)
scheduler_execution_count = 0
for i in range(0, 30):
for j in range(0, 6):
if test_lib.lib_find_in_remote_management_server_log(node1_ip, host_username, host_password, start_date+60+30*i+j, '[msg received]: {"org.zstack.header.vm.RebootVmInstanceMsg', vm.get_vm().uuid):
scheduler_execution_count += 1
if test_lib.lib_find_in_remote_management_server_log(node2_ip, host_username, host_password, start_date+60+30*i+j, '[msg received]: {"org.zstack.header.vm.RebootVmInstanceMsg', vm.get_vm().uuid):
scheduler_execution_count -= 1
if abs(scheduler_execution_count) < 5:
        test_util.test_fail('VM reboot scheduler is expected to execute more than 5 times, but it only executed %s times' % (scheduler_execution_count))
schd_ops.delete_scheduler(schd.uuid)
vm.destroy()
test_util.test_logger("recover node: %s" % (node1_ip))
os.system('bash -ex %s %s' % (os.environ.get('nodeRecoverScript'), node1_ip))
time.sleep(180)
test_stub.exercise_connection(600)
test_util.test_pass('Scheduler Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm
global node1_ip
if vm:
try:
vm.destroy()
except:
pass
test_util.test_logger("recover node: %s" % (node1_ip))
os.system('bash -ex %s %s' % (os.environ.get('nodeRecoverScript'), node1_ip))
time.sleep(180)
test_stub.exercise_connection(600)
| zstackio/zstack-woodpecker | integrationtest/vm/ha/test_one_node_shutdown_with_scheduler.py | Python | apache-2.0 | 2,920 |
# coding=utf-8
import os.path
import sys
import types
import getopt
from getopt import GetoptError
import text_file
import regex_utils
import string_utils as str_utils
def grep(target, pattern, number = False, model = 'e'):
    '''
    grep: print lines matching a pattern
    @param target: string list or text file name
    @param pattern: regex pattern or line number pattern or reduce function: bool=action(str)
    @param number: with line number
    @param model: s: substring model, e: regex model, n: line number model, a: action model
    @summary: lines = ['1:huiyugeng:male', '2:zhuzhu:male', '3:maomao:female']
              print grep.grep(lines, '^(?!.*female).*$')
              output: ['1:huiyugeng:male', '2:zhuzhu:male']
    '''
if isinstance(target, basestring):
text = text_file.read_file(target)
elif isinstance(target, list):
text = target
else:
text = None
if not text:
return None
    line_num = 1
result = []
for line_text in text:
line_text = str(line_text)
if __match(line_num, line_text, model, pattern):
line_text = __print(line_num, line_text, number)
if line_text != None:
result.append(line_text)
line_num = line_num + 1
return result
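# Added note: illustrative calls to grep(), not part of the original module. The regex
# example assumes regex_utils.check_line() applies a standard `re` match anchored by the
# pattern itself, and 'students.txt' is a hypothetical input file:
#   grep(['1:a:male', '2:b:female'], 'male', model='s')       -> both lines (substring match)
#   grep(['1:a:male', '2:b:female'], '^(?!.*female).*$')      -> ['1:a:male']
#   grep('students.txt', '[1, 10]', number=True, model='n')   -> lines 1-10, prefixed with "n:"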
def __match(line_num, line_text, model, pattern):
if str_utils.is_blank(line_text):
return False
if str_utils.is_blank(pattern):
return True
patterns = []
if type(pattern) == types.ListType:
patterns = pattern
elif type(pattern) == types.FunctionType:
patterns = [pattern]
else:
patterns = [str(pattern)]
    if str_utils.is_empty(model):
model = 's'
model = model.lower()
for match_pattern in patterns:
if model == 's':
if match_pattern in line_text:
return True
elif model == 'n':
_min, _max = __split_region(match_pattern)
if line_num >= _min and line_num <= _max:
return True
elif model == 'e':
if regex_utils.check_line(match_pattern, line_text):
return True
elif model == 'a':
if type(pattern) == types.FunctionType:
if pattern(line_text):
return True
return False
def __split_region(pattern):
if pattern.startswith('[') and pattern.endswith(']') and ',' in pattern:
region = pattern[1: len(pattern) - 1].split(',')
if region != None and len(region) == 2:
_min = int(region[0].strip())
_max = int(region[1].strip())
return _min, _max
return 0, 0
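# Added note: __split_region() parses the region pattern used by the 'n' (line number)
# model, e.g. __split_region('[3, 7]') returns (3, 7); any other input yields (0, 0).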
def __print(line, text, number):
if number:
return str(line) + ':' + text.strip()
else:
return text.strip()
def exec_cmd(argv):
try:
filename = None
pattern = None
number = False
model = 'e'
if len(argv) > 2:
opts, _ = getopt.getopt(argv[2:],'hf:p:nm:', ['help', '--file', '--pattern', '--number', '--model'])
for name, value in opts:
if name in ('-h', '--help'):
show_help()
if name in ('-f', '--file'):
filename = value
if name in ('-p', '--pattern'):
pattern = value
if name in ('-n', '--number'):
number = True
if name in ('-m', '--model'):
model = value
if str_utils.is_empty(filename) or not os.path.exists(filename):
                print 'error : could not find file : ' + str(filename)
sys.exit()
if str_utils.is_empty(pattern):
print 'error : pattern is empty'
sys.exit()
result = grep(filename, pattern, number, model)
if result and isinstance(result, list):
for line in result:
print line
else:
show_help()
except GetoptError, e:
print 'error : ' + e.msg
except Exception, e:
print 'error : ' + e.message
def show_help():
    pass
| interhui/py-text | text/grep.py | Python | apache-2.0 | 4,283 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = log.getLogger(__name__)
class TestVolumeBootPattern(manager.ScenarioTest):
"""
This test case attempts to reproduce the following steps:
* Create in Cinder some bootable volume importing a Glance image
* Boot an instance from the bootable volume
* Write content to the volume
* Delete an instance and Boot a new instance from the volume
* Check written content in the instance
* Create a volume snapshot while the instance is running
* Boot an additional instance from the new snapshot based volume
* Check written content in the instance booted from snapshot
"""
@classmethod
def resource_setup(cls):
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
super(TestVolumeBootPattern, cls).resource_setup()
def _create_volume_from_image(self):
img_uuid = CONF.compute.image_ref
vol_name = data_utils.rand_name('volume-origin')
return self.create_volume(name=vol_name, imageRef=img_uuid)
def _boot_instance_from_volume(self, vol_id, keypair):
# NOTE(gfidente): the syntax for block_device_mapping is
# dev_name=id:type:size:delete_on_terminate
# where type needs to be "snap" if the server is booted
# from a snapshot, size instead can be safely left empty
bd_map = [{
'device_name': 'vda',
'volume_id': vol_id,
'delete_on_termination': '0'}]
self.security_group = self._create_security_group()
security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'block_device_mapping': bd_map,
'key_name': keypair['name'],
'security_groups': security_groups
}
return self.create_server(image='', create_kwargs=create_kwargs)
def _create_snapshot_from_volume(self, vol_id):
snap_name = data_utils.rand_name('snapshot')
snap = self.snapshots_client.create_snapshot(
volume_id=vol_id,
force=True,
display_name=snap_name)
self.addCleanup_with_wait(
waiter_callable=self.snapshots_client.wait_for_resource_deletion,
thing_id=snap['id'], thing_id_param='id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[self.snapshots_client.delete_snapshot, snap['id']])
self.snapshots_client.wait_for_snapshot_status(snap['id'], 'available')
self.assertEqual(snap_name, snap['display_name'])
return snap
def _create_volume_from_snapshot(self, snap_id):
vol_name = data_utils.rand_name('volume')
return self.create_volume(name=vol_name, snapshot_id=snap_id)
def _stop_instances(self, instances):
# NOTE(gfidente): two loops so we do not wait for the status twice
for i in instances:
self.servers_client.stop(i['id'])
for i in instances:
self.servers_client.wait_for_server_status(i['id'], 'SHUTOFF')
def _detach_volumes(self, volumes):
# NOTE(gfidente): two loops so we do not wait for the status twice
for v in volumes:
self.volumes_client.detach_volume(v['id'])
for v in volumes:
self.volumes_client.wait_for_volume_status(v['id'], 'available')
def _ssh_to_server(self, server, keypair):
if CONF.compute.use_floatingip_for_ssh:
_, floating_ip = self.floating_ips_client.create_floating_ip()
self.addCleanup(self.delete_wrapper,
self.floating_ips_client.delete_floating_ip,
floating_ip['id'])
self.floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], server['id'])
ip = floating_ip['ip']
else:
network_name_for_ssh = CONF.compute.network_for_ssh
ip = server.networks[network_name_for_ssh][0]
return self.get_remote_client(ip, private_key=keypair['private_key'],
log_console_of_servers=[server])
def _get_content(self, ssh_client):
return ssh_client.exec_command('cat /tmp/text')
def _write_text(self, ssh_client):
text = data_utils.rand_name('text-')
ssh_client.exec_command('echo "%s" > /tmp/text; sync' % (text))
return self._get_content(ssh_client)
def _delete_server(self, server):
self.servers_client.delete_server(server['id'])
self.servers_client.wait_for_server_termination(server['id'])
def _check_content_of_written_file(self, ssh_client, expected):
actual = self._get_content(ssh_client)
self.assertEqual(expected, actual)
@test.skip_because(bug='1373513')
@test.services('compute', 'volume', 'image')
def test_volume_boot_pattern(self):
keypair = self.create_keypair()
self.security_group = self._create_security_group()
# create an instance from volume
volume_origin = self._create_volume_from_image()
instance_1st = self._boot_instance_from_volume(volume_origin['id'],
keypair)
# write content to volume on instance
ssh_client_for_instance_1st = self._ssh_to_server(instance_1st,
keypair)
text = self._write_text(ssh_client_for_instance_1st)
# delete instance
self._delete_server(instance_1st)
# create a 2nd instance from volume
instance_2nd = self._boot_instance_from_volume(volume_origin['id'],
keypair)
# check the content of written file
ssh_client_for_instance_2nd = self._ssh_to_server(instance_2nd,
keypair)
self._check_content_of_written_file(ssh_client_for_instance_2nd, text)
# snapshot a volume
snapshot = self._create_snapshot_from_volume(volume_origin['id'])
# create a 3rd instance from snapshot
volume = self._create_volume_from_snapshot(snapshot['id'])
instance_from_snapshot = self._boot_instance_from_volume(volume['id'],
keypair)
# check the content of written file
ssh_client = self._ssh_to_server(instance_from_snapshot, keypair)
self._check_content_of_written_file(ssh_client, text)
# NOTE(gfidente): ensure resources are in clean state for
# deletion operations to succeed
self._stop_instances([instance_2nd, instance_from_snapshot])
self._detach_volumes([volume_origin, volume])
class TestVolumeBootPatternV2(TestVolumeBootPattern):
def _boot_instance_from_volume(self, vol_id, keypair):
bdms = [{'uuid': vol_id, 'source_type': 'volume',
'destination_type': 'volume', 'boot_index': 0,
'delete_on_termination': False}]
self.security_group = self._create_security_group()
security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'block_device_mapping_v2': bdms,
'key_name': keypair['name'],
'security_groups': security_groups
}
return self.create_server(image='', create_kwargs=create_kwargs)
| jamielennox/tempest | tempest/scenario/test_volume_boot_pattern.py | Python | apache-2.0 | 8,184 |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Log entries within the Google Stackdriver Logging API."""
import collections
import json
import re
from google.protobuf.any_pb2 import Any
from google.protobuf.json_format import MessageToDict
from google.protobuf.json_format import Parse
from google.cloud.logging.resource import Resource
from google.cloud._helpers import _name_from_project_path
from google.cloud._helpers import _rfc3339_nanos_to_datetime
from google.cloud._helpers import _datetime_to_rfc3339
_GLOBAL_RESOURCE = Resource(type='global', labels={})
_LOGGER_TEMPLATE = re.compile(r"""
projects/ # static prefix
    (?P<project>[^/]+) # project id: any characters except '/'
/logs/ # static midfix
    (?P<name>[^/]+) # logger name: any characters except '/'
""", re.VERBOSE)
def logger_name_from_path(path):
"""Validate a logger URI path and get the logger name.
:type path: str
:param path: URI path for a logger API request.
:rtype: str
:returns: Logger name parsed from ``path``.
    :raises: :class:`ValueError` if the ``path`` is ill-formed.
"""
return _name_from_project_path(path, None, _LOGGER_TEMPLATE)
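# Added example (illustrative): with the template above,
#   logger_name_from_path('projects/my-project/logs/my-log')
# is expected to return 'my-log', assuming _name_from_project_path returns the
# template's 'name' group on a successful match.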
def _int_or_none(value):
"""Helper: return an integer or ``None``."""
if value is not None:
value = int(value)
return value
_LOG_ENTRY_FIELDS = ( # (name, default)
('log_name', None),
('labels', None),
('insert_id', None),
('severity', None),
('http_request', None),
('timestamp', None),
('resource', _GLOBAL_RESOURCE),
('trace', None),
('span_id', None),
('trace_sampled', None),
('source_location', None),
('operation', None),
('logger', None),
('payload', None),
)
_LogEntryTuple = collections.namedtuple(
'LogEntry', (field for field, _ in _LOG_ENTRY_FIELDS))
_LogEntryTuple.__new__.__defaults__ = tuple(
default for _, default in _LOG_ENTRY_FIELDS)
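# Added note: assigning __new__.__defaults__ gives every namedtuple field the default
# declared in _LOG_ENTRY_FIELDS, so _LogEntryTuple() can be constructed without
# arguments (resource defaults to _GLOBAL_RESOURCE, all other fields to None).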
_LOG_ENTRY_PARAM_DOCSTRING = """\
:type log_name: str
:param log_name: the name of the logger used to post the entry.
:type labels: dict
:param labels: (optional) mapping of labels for the entry
:type insert_id: text
:param insert_id: (optional) the ID used to identify an entry uniquely.
:type severity: str
:param severity: (optional) severity of event being logged.
:type http_request: dict
:param http_request: (optional) info about HTTP request associated with
the entry.
:type timestamp: :class:`datetime.datetime`
:param timestamp: (optional) timestamp for the entry
:type resource: :class:`~google.cloud.logging.resource.Resource`
:param resource: (Optional) Monitored resource of the entry
:type trace: str
:param trace: (optional) traceid to apply to the entry.
:type span_id: str
:param span_id: (optional) span_id within the trace for the log entry.
Specify the trace parameter if span_id is set.
:type trace_sampled: bool
:param trace_sampled: (optional) the sampling decision of the trace
associated with the log entry.
:type source_location: dict
:param source_location: (optional) location in source code from which
the entry was emitted.
:type operation: dict
:param operation: (optional) additional information about a potentially
long-running operation associated with the log entry.
:type logger: :class:`google.cloud.logging.logger.Logger`
:param logger: the logger used to write the entry.
"""
_LOG_ENTRY_SEE_ALSO_DOCSTRING = """\
See:
https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
"""
class LogEntry(_LogEntryTuple):
__doc__ = """
Log entry.
""" + _LOG_ENTRY_PARAM_DOCSTRING + _LOG_ENTRY_SEE_ALSO_DOCSTRING
received_timestamp = None
@classmethod
def _extract_payload(cls, resource):
"""Helper for :meth:`from_api_repr`"""
return None
@classmethod
def from_api_repr(cls, resource, client, loggers=None):
"""Factory: construct an entry given its API representation
:type resource: dict
:param resource: text entry resource representation returned from
the API
:type client: :class:`google.cloud.logging.client.Client`
:param client: Client which holds credentials and project
configuration.
:type loggers: dict
:param loggers:
(Optional) A mapping of logger fullnames -> loggers. If not
passed, the entry will have a newly-created logger.
:rtype: :class:`google.cloud.logging.entries.LogEntry`
:returns: Log entry parsed from ``resource``.
"""
if loggers is None:
loggers = {}
logger_fullname = resource['logName']
logger = loggers.get(logger_fullname)
if logger is None:
logger_name = logger_name_from_path(logger_fullname)
logger = loggers[logger_fullname] = client.logger(logger_name)
payload = cls._extract_payload(resource)
insert_id = resource.get('insertId')
timestamp = resource.get('timestamp')
if timestamp is not None:
timestamp = _rfc3339_nanos_to_datetime(timestamp)
labels = resource.get('labels')
severity = resource.get('severity')
http_request = resource.get('httpRequest')
trace = resource.get('trace')
span_id = resource.get('spanId')
trace_sampled = resource.get('traceSampled')
source_location = resource.get('sourceLocation')
if source_location is not None:
line = source_location.pop('line', None)
source_location['line'] = _int_or_none(line)
operation = resource.get('operation')
monitored_resource_dict = resource.get('resource')
monitored_resource = None
if monitored_resource_dict is not None:
monitored_resource = Resource._from_dict(monitored_resource_dict)
inst = cls(
log_name=logger_fullname,
insert_id=insert_id,
timestamp=timestamp,
labels=labels,
severity=severity,
http_request=http_request,
resource=monitored_resource,
trace=trace,
span_id=span_id,
trace_sampled=trace_sampled,
source_location=source_location,
operation=operation,
logger=logger,
payload=payload,
)
received = resource.get('receiveTimestamp')
if received is not None:
inst.received_timestamp = _rfc3339_nanos_to_datetime(received)
return inst
def to_api_repr(self):
"""API repr (JSON format) for entry.
"""
info = {}
if self.log_name is not None:
info['logName'] = self.log_name
if self.resource is not None:
info['resource'] = self.resource._to_dict()
if self.labels is not None:
info['labels'] = self.labels
if self.insert_id is not None:
info['insertId'] = self.insert_id
if self.severity is not None:
info['severity'] = self.severity
if self.http_request is not None:
info['httpRequest'] = self.http_request
if self.timestamp is not None:
info['timestamp'] = _datetime_to_rfc3339(self.timestamp)
if self.trace is not None:
info['trace'] = self.trace
if self.span_id is not None:
info['spanId'] = self.span_id
if self.trace_sampled is not None:
info['traceSampled'] = self.trace_sampled
if self.source_location is not None:
source_location = self.source_location.copy()
source_location['line'] = str(source_location.pop('line', 0))
info['sourceLocation'] = source_location
if self.operation is not None:
info['operation'] = self.operation
return info
class TextEntry(LogEntry):
__doc__ = """
Log entry with text payload.
""" + _LOG_ENTRY_PARAM_DOCSTRING + """
:type payload: str | unicode
:param payload: payload for the log entry.
""" + _LOG_ENTRY_SEE_ALSO_DOCSTRING
@classmethod
def _extract_payload(cls, resource):
"""Helper for :meth:`from_api_repr`"""
return resource['textPayload']
def to_api_repr(self):
"""API repr (JSON format) for entry.
"""
info = super(TextEntry, self).to_api_repr()
info['textPayload'] = self.payload
return info
class StructEntry(LogEntry):
__doc__ = """
Log entry with JSON payload.
""" + _LOG_ENTRY_PARAM_DOCSTRING + """
:type payload: dict
:param payload: payload for the log entry.
""" + _LOG_ENTRY_SEE_ALSO_DOCSTRING
@classmethod
def _extract_payload(cls, resource):
"""Helper for :meth:`from_api_repr`"""
return resource['jsonPayload']
def to_api_repr(self):
"""API repr (JSON format) for entry.
"""
info = super(StructEntry, self).to_api_repr()
info['jsonPayload'] = self.payload
return info
class ProtobufEntry(LogEntry):
__doc__ = """
Log entry with protobuf message payload.
""" + _LOG_ENTRY_PARAM_DOCSTRING + """
:type payload: protobuf message
:param payload: payload for the log entry.
""" + _LOG_ENTRY_SEE_ALSO_DOCSTRING
@classmethod
def _extract_payload(cls, resource):
"""Helper for :meth:`from_api_repr`"""
return resource['protoPayload']
@property
def payload_pb(self):
if isinstance(self.payload, Any):
return self.payload
@property
def payload_json(self):
if not isinstance(self.payload, Any):
return self.payload
def to_api_repr(self):
"""API repr (JSON format) for entry.
"""
info = super(ProtobufEntry, self).to_api_repr()
info['protoPayload'] = MessageToDict(self.payload)
return info
def parse_message(self, message):
"""Parse payload into a protobuf message.
Mutates the passed-in ``message`` in place.
:type message: Protobuf message
:param message: the message to be logged
"""
# NOTE: This assumes that ``payload`` is already a deserialized
# ``Any`` field and ``message`` has come from an imported
# ``pb2`` module with the relevant protobuf message type.
Parse(json.dumps(self.payload), message)
| jonparrott/google-cloud-python | logging/google/cloud/logging/entries.py | Python | apache-2.0 | 11,279 |
# Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import glob
import subprocess
import pdb
"""
This is a script to convert jp2 to png for Mitra's Data. \
We use Kakadu software for this script. Kakadu only runs on Ubuntu \
and has to have the library added to shared path.
"""
def main():
parser = argparse.ArgumentParser(description='Convert JP2 to PNG')
parser.add_argument('path', action="store", help='Directory with JP2 Files')
parser.add_argument('location', action="store", help='Directory to write to')
result = parser.parse_args()
# Reading all the jp2 files in that directory
filelist = glob.glob(result.path+'*.jp2')
for name in filelist:
print "Opening: {}".format( name )
# Identifying the subdirectory to place the data under
if name.find('F') != -1:
subfile = 'F/'
elif name.find('IHC') != -1:
subfile = 'IHC/'
elif name.find('N') != -1:
subfile = 'N/'
# Determine the write location of the file. This was /mnt on datascopes
writelocation = result.location+subfile+name.split(result.path)[1].split('_')[3].split('.')[0]
# Call kakadu expand from the command line, specify the input and the output filenames
subprocess.call( [ './kdu_expand' ,'-i', '{}'.format(name), '-o', '{}.tiff'.format(writelocation) ] )
if __name__ == "__main__":
main()
| neurodata/ndstore | scripts/ingest/mitra/jp2kakadu.py | Python | apache-2.0 | 1,931 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from keystone.common import json_home
from keystone.common import wsgi
from keystone.contrib.federation import controllers
build_resource_relation = functools.partial(
json_home.build_v3_extension_resource_relation,
extension_name='OS-FEDERATION', extension_version='1.0')
build_parameter_relation = functools.partial(
json_home.build_v3_extension_parameter_relation,
extension_name='OS-FEDERATION', extension_version='1.0')
IDP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='idp_id')
PROTOCOL_ID_PARAMETER_RELATION = build_parameter_relation(
parameter_name='protocol_id')
SP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='sp_id')
class FederationExtension(wsgi.V3ExtensionRouter):
"""API Endpoints for the Federation extension.
The API looks like::
PUT /OS-FEDERATION/identity_providers/$identity_provider
GET /OS-FEDERATION/identity_providers
GET /OS-FEDERATION/identity_providers/$identity_provider
DELETE /OS-FEDERATION/identity_providers/$identity_provider
PATCH /OS-FEDERATION/identity_providers/$identity_provider
PUT /OS-FEDERATION/identity_providers/
$identity_provider/protocols/$protocol
GET /OS-FEDERATION/identity_providers/
$identity_provider/protocols
GET /OS-FEDERATION/identity_providers/
$identity_provider/protocols/$protocol
PATCH /OS-FEDERATION/identity_providers/
$identity_provider/protocols/$protocol
DELETE /OS-FEDERATION/identity_providers/
$identity_provider/protocols/$protocol
PUT /OS-FEDERATION/mappings
GET /OS-FEDERATION/mappings
PATCH /OS-FEDERATION/mappings/$mapping_id
GET /OS-FEDERATION/mappings/$mapping_id
DELETE /OS-FEDERATION/mappings/$mapping_id
GET /OS-FEDERATION/projects
GET /OS-FEDERATION/domains
PUT /OS-FEDERATION/service_providers/$service_provider
GET /OS-FEDERATION/service_providers
GET /OS-FEDERATION/service_providers/$service_provider
DELETE /OS-FEDERATION/service_providers/$service_provider
PATCH /OS-FEDERATION/service_providers/$service_provider
GET /OS-FEDERATION/identity_providers/$identity_provider/
protocols/$protocol/auth
POST /OS-FEDERATION/identity_providers/$identity_provider/
protocols/$protocol/auth
POST /auth/OS-FEDERATION/saml2
GET /OS-FEDERATION/saml2/metadata
GET /auth/OS-FEDERATION/websso/{protocol_id}
?origin=https%3A//horizon.example.com
POST /auth/OS-FEDERATION/websso/{protocol_id}
?origin=https%3A//horizon.example.com
"""
def _construct_url(self, suffix):
return "/OS-FEDERATION/%s" % suffix
def add_routes(self, mapper):
auth_controller = controllers.Auth()
idp_controller = controllers.IdentityProvider()
protocol_controller = controllers.FederationProtocol()
mapping_controller = controllers.MappingController()
project_controller = controllers.ProjectAssignmentV3()
domain_controller = controllers.DomainV3()
saml_metadata_controller = controllers.SAMLMetadataV3()
sp_controller = controllers.ServiceProvider()
# Identity Provider CRUD operations
self._add_resource(
mapper, idp_controller,
path=self._construct_url('identity_providers/{idp_id}'),
get_action='get_identity_provider',
put_action='create_identity_provider',
patch_action='update_identity_provider',
delete_action='delete_identity_provider',
rel=build_resource_relation(resource_name='identity_provider'),
path_vars={
'idp_id': IDP_ID_PARAMETER_RELATION,
})
self._add_resource(
mapper, idp_controller,
path=self._construct_url('identity_providers'),
get_action='list_identity_providers',
rel=build_resource_relation(resource_name='identity_providers'))
# Protocol CRUD operations
self._add_resource(
mapper, protocol_controller,
path=self._construct_url('identity_providers/{idp_id}/protocols/'
'{protocol_id}'),
get_action='get_protocol',
put_action='create_protocol',
patch_action='update_protocol',
delete_action='delete_protocol',
rel=build_resource_relation(
resource_name='identity_provider_protocol'),
path_vars={
'idp_id': IDP_ID_PARAMETER_RELATION,
'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
})
self._add_resource(
mapper, protocol_controller,
path=self._construct_url('identity_providers/{idp_id}/protocols'),
get_action='list_protocols',
rel=build_resource_relation(
resource_name='identity_provider_protocols'),
path_vars={
'idp_id': IDP_ID_PARAMETER_RELATION,
})
# Mapping CRUD operations
self._add_resource(
mapper, mapping_controller,
path=self._construct_url('mappings/{mapping_id}'),
get_action='get_mapping',
put_action='create_mapping',
patch_action='update_mapping',
delete_action='delete_mapping',
rel=build_resource_relation(resource_name='mapping'),
path_vars={
'mapping_id': build_parameter_relation(
parameter_name='mapping_id'),
})
self._add_resource(
mapper, mapping_controller,
path=self._construct_url('mappings'),
get_action='list_mappings',
rel=build_resource_relation(resource_name='mappings'))
# Service Providers CRUD operations
self._add_resource(
mapper, sp_controller,
path=self._construct_url('service_providers/{sp_id}'),
get_action='get_service_provider',
put_action='create_service_provider',
patch_action='update_service_provider',
delete_action='delete_service_provider',
rel=build_resource_relation(resource_name='service_provider'),
path_vars={
'sp_id': SP_ID_PARAMETER_RELATION,
})
self._add_resource(
mapper, sp_controller,
path=self._construct_url('service_providers'),
get_action='list_service_providers',
rel=build_resource_relation(resource_name='service_providers'))
self._add_resource(
mapper, domain_controller,
path=self._construct_url('domains'),
get_action='list_domains_for_groups',
rel=build_resource_relation(resource_name='domains'))
self._add_resource(
mapper, project_controller,
path=self._construct_url('projects'),
get_action='list_projects_for_groups',
rel=build_resource_relation(resource_name='projects'))
self._add_resource(
mapper, auth_controller,
path=self._construct_url('identity_providers/{identity_provider}/'
'protocols/{protocol}/auth'),
get_post_action='federated_authentication',
rel=build_resource_relation(
resource_name='identity_provider_protocol_auth'),
path_vars={
'identity_provider': IDP_ID_PARAMETER_RELATION,
'protocol': PROTOCOL_ID_PARAMETER_RELATION,
})
# Auth operations
self._add_resource(
mapper, auth_controller,
path='/auth' + self._construct_url('saml2'),
post_action='create_saml_assertion',
rel=build_resource_relation(resource_name='saml2'))
self._add_resource(
mapper, auth_controller,
path='/auth' + self._construct_url('websso/{protocol_id}'),
get_post_action='federated_sso_auth',
rel=build_resource_relation(resource_name='websso'),
path_vars={
'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
})
# Keystone-Identity-Provider metadata endpoint
self._add_resource(
mapper, saml_metadata_controller,
path=self._construct_url('saml2/metadata'),
get_action='get_metadata',
rel=build_resource_relation(resource_name='metadata'))
| rushiagr/keystone | keystone/contrib/federation/routers.py | Python | apache-2.0 | 9,192 |
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Command-line interface to the OpenStack APIs"""
import getpass
import logging
import sys
import traceback
from cliff import app
from cliff import command
from cliff import complete
from cliff import help
import openstackclient
from openstackclient.common import clientmanager
from openstackclient.common import commandmanager
from openstackclient.common import exceptions as exc
from openstackclient.common import timing
from openstackclient.common import utils
DEFAULT_DOMAIN = 'default'
def prompt_for_password(prompt=None):
"""Prompt user for a password
    Prompt for a password if stdin is a tty.
"""
if not prompt:
prompt = 'Password: '
pw = None
# If stdin is a tty, try prompting for the password
if hasattr(sys.stdin, 'isatty') and sys.stdin.isatty():
        # Check for Ctrl-D
try:
pw = getpass.getpass(prompt)
except EOFError:
pass
    # No password because we didn't have a tty or nothing was entered
if not pw:
raise exc.CommandError(
"No password entered, or found via --os-password or OS_PASSWORD",
)
return pw
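# Added note (illustrative): prompt_for_password('OpenStack password: ') returns the
# string typed at the terminal; it raises exc.CommandError when stdin is not a tty
# or nothing is entered.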
class OpenStackShell(app.App):
CONSOLE_MESSAGE_FORMAT = '%(levelname)s: %(name)s %(message)s'
log = logging.getLogger(__name__)
timing_data = []
def __init__(self):
# Patch command.Command to add a default auth_required = True
command.Command.auth_required = True
command.Command.best_effort = False
# But not help
help.HelpCommand.auth_required = False
complete.CompleteCommand.best_effort = True
super(OpenStackShell, self).__init__(
description=__doc__.strip(),
version=openstackclient.__version__,
command_manager=commandmanager.CommandManager('openstack.cli'))
self.api_version = {}
# Until we have command line arguments parsed, dump any stack traces
self.dump_stack_trace = True
# This is instantiated in initialize_app() only when using
# password flow auth
self.auth_client = None
# Assume TLS host certificate verification is enabled
self.verify = True
self.client_manager = None
# NOTE(dtroyer): This hack changes the help action that Cliff
# automatically adds to the parser so we can defer
# its execution until after the api-versioned commands
# have been loaded. There doesn't seem to be a
# way to edit/remove anything from an existing parser.
# Replace the cliff-added help.HelpAction to defer its execution
self.DeferredHelpAction = None
for a in self.parser._actions:
if type(a) == help.HelpAction:
# Found it, save and replace it
self.DeferredHelpAction = a
# These steps are argparse-implementation-dependent
self.parser._actions.remove(a)
if self.parser._option_string_actions['-h']:
del self.parser._option_string_actions['-h']
if self.parser._option_string_actions['--help']:
del self.parser._option_string_actions['--help']
# Make a new help option to just set a flag
self.parser.add_argument(
'-h', '--help',
action='store_true',
dest='deferred_help',
default=False,
help="Show this help message and exit",
)
def configure_logging(self):
"""Configure logging for the app
Cliff sets some defaults we don't want so re-work it a bit
"""
if self.options.debug:
# --debug forces verbose_level 3
# Set this here so cliff.app.configure_logging() can work
self.options.verbose_level = 3
super(OpenStackShell, self).configure_logging()
root_logger = logging.getLogger('')
# Set logging to the requested level
if self.options.verbose_level == 0:
# --quiet
root_logger.setLevel(logging.ERROR)
elif self.options.verbose_level == 1:
# This is the default case, no --debug, --verbose or --quiet
root_logger.setLevel(logging.WARNING)
elif self.options.verbose_level == 2:
# One --verbose
root_logger.setLevel(logging.INFO)
elif self.options.verbose_level >= 3:
# Two or more --verbose
root_logger.setLevel(logging.DEBUG)
# Requests logs some stuff at INFO that we don't want
# unless we have DEBUG
requests_log = logging.getLogger("requests")
# Other modules we don't want DEBUG output for
cliff_log = logging.getLogger('cliff')
stevedore_log = logging.getLogger('stevedore')
iso8601_log = logging.getLogger("iso8601")
if self.options.debug:
# --debug forces traceback
self.dump_stack_trace = True
requests_log.setLevel(logging.DEBUG)
cliff_log.setLevel(logging.DEBUG)
else:
self.dump_stack_trace = False
requests_log.setLevel(logging.ERROR)
cliff_log.setLevel(logging.ERROR)
stevedore_log.setLevel(logging.ERROR)
iso8601_log.setLevel(logging.ERROR)
def run(self, argv):
try:
return super(OpenStackShell, self).run(argv)
except Exception as e:
if not logging.getLogger('').handlers:
logging.basicConfig()
if self.dump_stack_trace:
self.log.error(traceback.format_exc(e))
else:
self.log.error('Exception raised: ' + str(e))
return 1
def build_option_parser(self, description, version):
parser = super(OpenStackShell, self).build_option_parser(
description,
version)
# service token auth argument
parser.add_argument(
'--os-url',
metavar='<url>',
default=utils.env('OS_URL'),
help='Defaults to env[OS_URL]')
# Global arguments
parser.add_argument(
'--os-region-name',
metavar='<auth-region-name>',
default=utils.env('OS_REGION_NAME'),
help='Authentication region name (Env: OS_REGION_NAME)')
parser.add_argument(
'--os-cacert',
metavar='<ca-bundle-file>',
default=utils.env('OS_CACERT'),
help='CA certificate bundle file (Env: OS_CACERT)')
verify_group = parser.add_mutually_exclusive_group()
verify_group.add_argument(
'--verify',
action='store_true',
help='Verify server certificate (default)',
)
verify_group.add_argument(
'--insecure',
action='store_true',
help='Disable server certificate verification',
)
parser.add_argument(
'--os-default-domain',
metavar='<auth-domain>',
default=utils.env(
'OS_DEFAULT_DOMAIN',
default=DEFAULT_DOMAIN),
help='Default domain ID, default=' +
DEFAULT_DOMAIN +
' (Env: OS_DEFAULT_DOMAIN)')
parser.add_argument(
'--timing',
default=False,
action='store_true',
help="Print API call timing info",
)
return clientmanager.build_plugin_option_parser(parser)
def initialize_app(self, argv):
"""Global app init bits:
* set up API versions
* validate authentication info
* authenticate against Identity if requested
"""
super(OpenStackShell, self).initialize_app(argv)
# Save default domain
self.default_domain = self.options.os_default_domain
# Loop through extensions to get API versions
for mod in clientmanager.PLUGIN_MODULES:
version_opt = getattr(self.options, mod.API_VERSION_OPTION, None)
if version_opt:
api = mod.API_NAME
self.api_version[api] = version_opt
version = '.v' + version_opt.replace('.', '_')
cmd_group = 'openstack.' + api.replace('-', '_') + version
self.command_manager.add_command_group(cmd_group)
self.log.debug(
'%(name)s API version %(version)s, cmd group %(group)s',
{'name': api, 'version': version_opt, 'group': cmd_group}
)
# Commands that span multiple APIs
self.command_manager.add_command_group(
'openstack.common')
# This is the naive extension implementation referred to in
# blueprint 'client-extensions'
# Extension modules can register their commands in an
# 'openstack.extension' entry point group:
# entry_points={
# 'openstack.extension': [
# 'list_repo=qaz.github.repo:ListRepo',
# 'show_repo=qaz.github.repo:ShowRepo',
# ],
# }
self.command_manager.add_command_group(
'openstack.extension')
# call InitializeXxx() here
# set up additional clients to stuff in to client_manager??
# Handle deferred help and exit
if self.options.deferred_help:
self.DeferredHelpAction(self.parser, self.parser, None, None)
# Set up common client session
if self.options.os_cacert:
self.verify = self.options.os_cacert
else:
self.verify = not self.options.insecure
self.client_manager = clientmanager.ClientManager(
auth_options=self.options,
verify=self.verify,
api_version=self.api_version,
pw_func=prompt_for_password,
)
def prepare_to_run_command(self, cmd):
"""Set up auth and API versions"""
self.log.info(
'command: %s.%s',
cmd.__class__.__module__,
cmd.__class__.__name__,
)
if cmd.auth_required and cmd.best_effort:
try:
# Trigger the Identity client to initialize
self.client_manager.auth_ref
except Exception:
pass
return
def clean_up(self, cmd, result, err):
self.log.debug('clean_up %s', cmd.__class__.__name__)
if err:
self.log.debug('got an error: %s', err)
# Process collected timing data
if self.options.timing:
# Loop through extensions
for mod in self.ext_modules:
client = getattr(self.client_manager, mod.API_NAME)
if hasattr(client, 'get_timings'):
self.timing_data.extend(client.get_timings())
# Use the Timing pseudo-command to generate the output
tcmd = timing.Timing(self, self.options)
tparser = tcmd.get_parser('Timing')
# If anything other than prettytable is specified, force csv
format = 'table'
# Check the formatter used in the actual command
if hasattr(cmd, 'formatter') \
and cmd.formatter != cmd._formatter_plugins['table'].obj:
format = 'csv'
sys.stdout.write('\n')
targs = tparser.parse_args(['-f', format])
tcmd.run(targs)
def main(argv=sys.argv[1:]):
return OpenStackShell().run(argv)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| varunarya10/python-openstackclient | openstackclient/shell.py | Python | apache-2.0 | 12,311 |
from model.project import Project
def test_add_project(app):
project=Project(name="students_project", description="about Project")
try:
ind = app.project.get_project_list().index(project)
app.project.delete_named_project(project)
except ValueError:
pass
old_projects = app.project.get_project_list()
app.project.create(project)
new_projects = app.project.get_project_list()
assert len(old_projects) + 1 == len(new_projects)
old_projects.append(project)
assert sorted(old_projects,key=Project.id_or_max) == sorted(new_projects,key=Project.id_or_max)
| Manolaru/Python_Mantis | Working version/test/test_add_project.py | Python | apache-2.0 | 614 |
#!/usr/bin/env python3
import random
import unittest
import networkx
from mininet.topo import Topo
from clib.mininet_test_watcher import TopologyWatcher
from clib.mininet_test_base_topo import FaucetTopoTestBase
class FaucetFaultToleranceBaseTest(FaucetTopoTestBase):
"""
Generate a topology of the given parameters (using build_net & TopoBaseTest)
    and then call the network function to test the network, then slowly tear out bits
until the expected host connectivity does not match the real host connectivity.
===============================================================================================
INSTANT_FAIL:
        When set, the test fails as soon as a pair of hosts cannot establish a connection.
        Leave unset to let the test suite keep probing connectivity after a fault, so that
            the full connectivity graph for the current fault is still built.
ASSUME_SYMMETRIC_PING:
A simplification can assume that (h1 -> h2) implies (h2 -> h1).
Set to true to assume that host connectivity is symmetric.
INTERVLAN_ONLY:
Set to true to test only the inter-VLAN connectivity; ignore connections between hosts on
the same VLAN. Speed up the inter-VLAN testing by ignoring the intra-VLAN cases for
tests that inherit from a intra-VLAN test. This creates that assumption that inter-VLAN
does not disrupt the intra-VLAN.
===============================================================================================
TODO: Add the following options
PROTECTED_NODES/EDGES: Prevent desired nodes/edges from being destroyed
ASSUME_TRANSITIVE_PING: Assume for (h1 -> h2) & (h2 -> h3) then (h1 -> h3)
IGNORE_SUBGRAPH: Assume for a topology with subgraphs, the subgraphs do not need to be tested
(if they have already been tested)
"""
INSTANT_FAIL = True
ASSUME_SYMMETRIC_PING = True
INTERVLAN_ONLY = False
    # Watches the faults and host connectivity
topo_watcher = None
# List of fault events
fault_events = None
# Number of faults to occur before recalculating connectivity
num_faults = 1
# Fault-tolerance tests will only work in software
SOFTWARE_ONLY = True
# Randomization variables
seed = 1
rng = None
# Number of VLANs to create, if >= 2 then routing will be applied
NUM_VLANS = None
# Number of DPs in the network
NUM_DPS = None
# Number of links between switches
N_DP_LINKS = None
host_links = None
switch_links = None
routers = None
stack_roots = None
def setUp(self):
pass
def set_up(self, network_graph, stack_roots, host_links=None, host_vlans=None):
"""
Args:
network_graph (networkx.MultiGraph): Network topology for the test
stack_roots (dict): The priority values for the stack roots
host_links (dict): Links for each host to switches
host_vlans (dict): VLAN for each host
"""
super().setUp()
switch_links = list(network_graph.edges()) * self.N_DP_LINKS
link_vlans = {edge: None for edge in switch_links}
if not host_links or not host_vlans:
# Setup normal host links & vlans
host_links = {}
host_vlans = {}
host_n = 0
for dp_i in network_graph.nodes():
for v in range(self.NUM_VLANS):
host_links[host_n] = [dp_i]
host_vlans[host_n] = v
host_n += 1
dp_options = {}
for i in network_graph.nodes():
dp_options.setdefault(i, {
'group_table': self.GROUP_TABLE,
'ofchannel_log': self.debug_log_path + str(i) if self.debug_log_path else None,
'hardware': 'Open vSwitch'
})
if i in stack_roots:
dp_options[i]['stack'] = {'priority': stack_roots[i]}
vlan_options = {}
routers = {}
if self.NUM_VLANS >= 2:
# Setup options for routing
routers = {0: list(range(self.NUM_VLANS))}
for i in range(self.NUM_VLANS):
vlan_options[i] = {
'faucet_mac': self.faucet_mac(i),
'faucet_vips': [self.faucet_vip(i)],
'targeted_gw_resolution': False
}
for i in network_graph.nodes():
dp_options[i]['arp_neighbor_timeout'] = 2
dp_options[i]['max_resolve_backoff_time'] = 2
dp_options[i]['proactive_learn_v4'] = True
self.host_links = host_links
self.switch_links = switch_links
self.routers = routers
self.stack_roots = stack_roots
self.build_net(
host_links=host_links,
host_vlans=host_vlans,
switch_links=switch_links,
link_vlans=link_vlans,
n_vlans=self.NUM_VLANS,
dp_options=dp_options,
vlan_options=vlan_options,
routers=routers
)
self.start_net()
def host_connectivity(self, host, dst):
"""Ping to a destination, return True if the ping was successful"""
try:
self._ip_ping(host, dst, 5, timeout=50, count=5, require_host_learned=False)
except AssertionError:
return False
return True
def calculate_connectivity(self):
"""Ping between each set of host pairs to calculate host connectivity"""
connected_hosts = self.topo_watcher.get_connected_hosts(
two_way=not self.ASSUME_SYMMETRIC_PING, strictly_intervlan=self.INTERVLAN_ONLY)
for src, dsts in connected_hosts.items():
src_host = self.host_information[src]['host']
for dst in dsts:
dst_host = self.host_information[dst]['host']
dst_ip = self.host_information[dst]['ip']
result = self.host_connectivity(src_host, dst_ip.ip)
self.topo_watcher.add_network_info(src_host.name, dst_host.name, result)
self.assertTrue(not self.INSTANT_FAIL or result, 'Pair connection failed')
def create_controller_fault(self, *args):
"""
Set controller down (disconnects all switches from the controller)
Args:
index: The index to the controller to take down
"""
index = args[0]
controller = self.net.controllers[index]
controller.stop()
self.net.controllers.remove(controller)
self.topo_watcher.add_fault('Controller %s DOWN' % controller.name)
def create_random_controller_fault(self, *args):
"""Randomly create a fault for a controller"""
controllers = [c for c in self.net.controllers if c.name != 'gauge']
i = random.randrange(len(controllers))
c_name = controllers[i].name
controller = next((cont for cont in self.net.controllers if cont.name == c_name), None)
if controller is None:
return
self.create_controller_fault(self.net.controllers.index(controller))
def create_switch_fault(self, *args):
"""
Set switch down (Deletes the OVS switch bridge)
Args:
index: Index of the switch dpid to take out
"""
index = args[0]
dpid = self.dpids[index]
switch_name = self.topo.switches_by_id[index]
switch = next((switch for switch in self.net.switches if switch.name == switch_name), None)
if switch is None:
return
self.dump_switch_flows(switch)
name = '%s:%s DOWN' % (self.topo.switches_by_id[index], self.dpids[index])
self.topo_watcher.add_switch_fault(index, name)
switch.stop()
switch.cmd(self.VSCTL, 'del-controller', switch.name, '|| true')
self.assertTrue(
self.wait_for_prometheus_var(
'of_dp_disconnections_total', 1, dpid=dpid), 'DP %s not detected as DOWN' % dpid)
self.net.switches.remove(switch)
def random_switch_fault(self, *args):
"""Randomly take out an available switch"""
dpid_list = self.topo_watcher.get_eligable_switch_events()
if len(self.stack_roots.keys()) <= 1:
# Prevent only root from being destroyed
sorted_roots = dict(sorted(self.stack_roots.items(), key=lambda item: item[1]))
for root_index in sorted_roots.keys():
root_dpid = self.dpids[root_index]
if root_dpid in dpid_list:
dpid_list.remove(root_dpid)
break
if not dpid_list:
return
dpid_item_index = self.rng.randrange(len(dpid_list))
dpid_item = dpid_list[dpid_item_index]
dpid_index = self.dpids.index(dpid_item)
self.create_switch_fault(dpid_index)
def dp_link_fault(self, *args):
"""
Create a fault/tear down the stack link between two switches
Args:
src_dp_index: Index of the source DP of the stack link
dst_dp_index: Index of the destination DP of the stack
"""
src_i = args[0]
dst_i = args[1]
src_dpid = self.dpids[src_i]
dst_dpid = self.dpids[dst_i]
s1_name = self.topo.switches_by_id[src_i]
s2_name = self.topo.switches_by_id[dst_i]
for port, link in self.topo.ports[s1_name].items():
status = self.stack_port_status(src_dpid, s1_name, port)
if link[0] == s2_name and status == 3:
peer_port = link[1]
self.set_port_down(port, src_dpid)
self.set_port_down(peer_port, dst_dpid)
self.wait_for_stack_port_status(src_dpid, s1_name, port, 4)
self.wait_for_stack_port_status(dst_dpid, s2_name, peer_port, 4)
name = 'Link %s[%s]:%s-%s[%s]:%s DOWN' % (
s1_name, src_dpid, port, s2_name, dst_dpid, peer_port)
self.topo_watcher.add_link_fault(src_i, dst_i, name)
return
def random_dp_link_fault(self, *args):
"""Randomly create a fault for a DP link"""
link_list = self.topo_watcher.get_eligable_link_events()
if not link_list:
return
index = self.rng.randrange(len(link_list))
dplink = link_list[index]
srcdp = self.dpids.index(dplink[0])
dstdp = self.dpids.index(dplink[1])
self.dp_link_fault(srcdp, dstdp)
def create_proportional_random_fault_event(self):
"""Create a fault-event randomly based on the number of link and switch events available"""
funcs = []
for _ in self.topo_watcher.get_eligable_link_events():
funcs.append(self.random_dp_link_fault)
for _ in self.topo_watcher.get_eligable_switch_events():
funcs.append(self.random_switch_fault)
i = self.rng.randrange(len(funcs))
funcs[i]()
def create_random_fault_event(self):
"""Randomly choose an event type to fault on"""
funcs = []
if self.topo_watcher.get_eligable_link_events():
funcs.append(self.random_dp_link_fault)
if self.topo_watcher.get_eligable_switch_events():
funcs.append(self.random_switch_fault)
if not funcs:
return
i = self.rng.randrange(len(funcs))
funcs[i]()
def network_function(self, fault_events=None, num_faults=1):
"""
Test the network by slowly tearing it down different ways
Args:
fault_events: (optional) list of tuples of fault event functions and the parameters to
use in the given order; instead of randomly choosing parts of the network to break
num_faults: (optional) number of faults to cause before each evaluation is made
"""
self.verify_stack_up()
self.fault_events = fault_events
self.num_faults = num_faults
self.rng = random.Random(self.seed)
self.topo_watcher = TopologyWatcher(
self.dpids, self.switch_links, self.host_links,
self.NUM_VLANS, self.host_information, self.routers)
# Calculate stats (before any tear downs)
self.calculate_connectivity()
self.assertTrue(self.topo_watcher.is_connected(), (
'Host connectivity does not match predicted'))
# Start tearing down the network
if self.fault_events:
# Do Specified list of faults (in order) until failure or fault list completed
fault_index = 0
while fault_index < len(self.fault_events):
for _ in range(self.num_faults):
event_func, params = self.fault_events[fault_index]
fault_index += 1
event_func(*params)
self.calculate_connectivity()
self.assertTrue(self.topo_watcher.is_connected(), (
'Host connectivity does not match predicted'))
else:
# Continue creating fault until none are available or expected connectivity does not
# match real connectivity
while self.topo_watcher.continue_faults():
for _ in range(self.num_faults):
self.create_proportional_random_fault_event()
self.calculate_connectivity()
self.assertTrue(self.topo_watcher.is_connected(), (
'Host connectivity does not match predicted'))
def tearDown(self, ignore_oferrors=False):
"""Make sure to dump the watcher information too"""
if self.topo_watcher:
self.topo_watcher.dump_info(self.tmpdir)
super(FaucetFaultToleranceBaseTest, self).tearDown(ignore_oferrors=ignore_oferrors)
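# Added note (illustrative): fault_events passed to network_function() is a list of
# (fault_method, args) tuples applied in order, e.g. for the 4-DP cycle topology used
# by the tests below:
#   fault_events = [(self.dp_link_fault, (0, 3)), (self.random_switch_fault, (None,))]
#   self.network_function(fault_events=fault_events, num_faults=1)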
class FaucetSingleFaultTolerance2DPTest(FaucetFaultToleranceBaseTest):
"""Run a range of fault-tolerance tests for topologies on 2 DPs"""
NUM_DPS = 2
NUM_HOSTS = 4
NUM_VLANS = 2
N_DP_LINKS = 1
STACK_ROOTS = {0: 1}
ASSUME_SYMMETRIC_PING = False
class FaucetSingleFaultTolerance3DPTest(FaucetFaultToleranceBaseTest):
"""Run a range of fault-tolerance tests for topologies on 3 DPs"""
NUM_DPS = 3
NUM_HOSTS = 6
NUM_VLANS = 2
N_DP_LINKS = 1
STACK_ROOTS = {0: 1}
class FaucetSingleFaultTolerance4DPTest(FaucetFaultToleranceBaseTest):
"""Run a range of fault-tolerance tests for topologies on 4 DPs"""
NUM_DPS = 4
NUM_HOSTS = 4
NUM_VLANS = 1
N_DP_LINKS = 1
STACK_ROOTS = {0: 1}
def test_ftp2_all_random_switch_failures(self):
"""Test fat-tree-pod-2 randomly tearing down only switches"""
fault_events = [(self.random_switch_fault, (None,)) for _ in range(self.NUM_DPS)]
stack_roots = {2*i: 1 for i in range(self.NUM_DPS//2)}
self.set_up(networkx.cycle_graph(self.NUM_DPS), stack_roots)
self.network_function(fault_events=fault_events)
def test_ftp2_all_random_link_failures(self):
"""Test fat-tree-pod-2 randomly tearing down only switch-switch links"""
network_graph = networkx.cycle_graph(self.NUM_DPS)
fault_events = [(self.random_dp_link_fault, (None,)) for _ in range(len(network_graph.edges()))]
stack_roots = {2*i: 1 for i in range(self.NUM_DPS//2)}
self.set_up(network_graph, stack_roots)
self.network_function(fault_events=fault_events)
def test_ftp2_edge_root_link_fault(self):
"""Test breaking a link between a edge switch to the root aggregation switch"""
fault_events = [(self.dp_link_fault, (0, 3))]
stack_roots = {2*i: i+1 for i in range(self.NUM_DPS//2)}
self.set_up(networkx.cycle_graph(self.NUM_DPS), stack_roots)
self.network_function(fault_events=fault_events)
def test_ftp2_destroying_one_of_each_link(self):
"""Test tearing down one of each link for a fat-tree-pod-2 with redundant edges"""
self.N_DP_LINKS = 2
fault_events = []
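        # Queue one link fault per ring edge (i -> i+1, wrapping back to DP 0).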
for i in range(self.NUM_DPS):
j = i+1 if i+1 < self.NUM_DPS else 0
fault_events.append((self.dp_link_fault, (i, j)))
num_faults = len(fault_events)
stack_roots = {2*i: 1 for i in range(self.NUM_DPS//2)}
self.set_up(networkx.cycle_graph(self.NUM_DPS), stack_roots)
self.network_function(fault_events=fault_events, num_faults=num_faults)
self.N_DP_LINKS = 1


class FaucetSingleFaultTolerance5DPTest(FaucetFaultToleranceBaseTest):
"""Run a range of fault-tolerance tests for topologies on 5 DPs"""
NUM_DPS = 5
NUM_HOSTS = 5
NUM_VLANS = 1
N_DP_LINKS = 1
STACK_ROOTS = {0: 1}


@unittest.skip('Too computationally complex')
class FaucetSingleFaultTolerance6DPTest(FaucetFaultToleranceBaseTest):
    """Run a range of fault-tolerance tests for topologies on 6 DPs"""
NUM_DPS = 6
NUM_HOSTS = 6
NUM_VLANS = 1
N_DP_LINKS = 1
STACK_ROOTS = {0: 1}


@unittest.skip('Too computationally complex')
class FaucetSingleFaultTolerance7DPTest(FaucetFaultToleranceBaseTest):
    """Run a range of fault-tolerance tests for topologies on 7 DPs"""
NUM_DPS = 7
NUM_HOSTS = 7
NUM_VLANS = 1
N_DP_LINKS = 1
STACK_ROOTS = {0: 1}


TEST_CLASS_LIST = [
FaucetSingleFaultTolerance2DPTest,
FaucetSingleFaultTolerance3DPTest,
FaucetSingleFaultTolerance4DPTest,
FaucetSingleFaultTolerance5DPTest,
FaucetSingleFaultTolerance6DPTest,
FaucetSingleFaultTolerance7DPTest
]
MIN_NODES = min([c.NUM_DPS for c in TEST_CLASS_LIST])
MAX_NODES = max([c.NUM_DPS for c in TEST_CLASS_LIST])
| trungdtbk/faucet | tests/generative/integration/mininet_tests.py | Python | apache-2.0 | 17,678 |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from common_openstack import OpenStackTest


class ServerTest(OpenStackTest):
def test_server_query(self):
factory = self.replay_flight_data()
p = self.load_policy({
'name': 'all-servers',
'resource': 'openstack.server'},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_server_filter_name(self):
factory = self.replay_flight_data()
policy = {
'name': 'get-server-c7n-test-1',
'resource': 'openstack.server',
'filters': [
{
"type": "value",
"key": "name",
"value": "c7n-test-1",
},
],
}
p = self.load_policy(policy, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].name, "c7n-test-1")
def test_server_filter_flavor(self):
factory = self.replay_flight_data()
policy = {
'name': 'get-server-c7n-test-1',
'resource': 'openstack.server',
'filters': [
{
"type": "flavor",
"flavor_name": "m1.tiny",
},
],
}
p = self.load_policy(policy, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].name, "c7n-test-1")
def test_server_filter_tags(self):
factory = self.replay_flight_data()
policy = {
            'name': 'get-server-c7n-test-2',
'resource': 'openstack.server',
'filters': [
{
"type": "tags",
"tags": [
{
"key": "a",
"value": "a",
},
{
"key": "b",
"value": "b",
},
],
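                    # "op": "all" means a server must carry every tag listed above.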
"op": "all",
},
],
}
p = self.load_policy(policy, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].name, "c7n-test-2")
| thisisshi/cloud-custodian | tools/c7n_openstack/tests/test_server.py | Python | apache-2.0 | 2,461 |
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova.db import api as db
from nova import exception
from nova.objects import base
from nova.objects import fields


@base.NovaObjectRegistry.register
class MigrationContext(base.NovaPersistentObject, base.NovaObject):
"""Data representing additional resources related to a migration.

    Some resources cannot be calculated from the flavor alone for resource
    tracking purposes; they need to be persisted at the time the claim is
    made so that subsequent resource tracking runs stay consistent.

    MigrationContext objects are created when the claim is made and exist to
    facilitate resource tracking and the final provisioning of the instance
    on the destination host.
"""
# Version 1.0: Initial version
# Version 1.1: Add old/new pci_devices and pci_requests
VERSION = '1.1'
fields = {
'instance_uuid': fields.UUIDField(),
'migration_id': fields.IntegerField(),
'new_numa_topology': fields.ObjectField('InstanceNUMATopology',
nullable=True),
'old_numa_topology': fields.ObjectField('InstanceNUMATopology',
nullable=True),
'new_pci_devices': fields.ObjectField('PciDeviceList',
nullable=True),
'old_pci_devices': fields.ObjectField('PciDeviceList',
nullable=True),
'new_pci_requests': fields.ObjectField('InstancePCIRequests',
nullable=True),
'old_pci_requests': fields.ObjectField('InstancePCIRequests',
nullable=True),
}
@classmethod
def obj_make_compatible(cls, primitive, target_version):
target_version = versionutils.convert_version_to_tuple(target_version)
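        # The PCI device/request fields only exist from version 1.1 onwards;
        # strip them when backporting the primitive for an older consumer.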
if target_version < (1, 1):
primitive.pop('old_pci_devices', None)
primitive.pop('new_pci_devices', None)
primitive.pop('old_pci_requests', None)
primitive.pop('new_pci_requests', None)
@classmethod
def obj_from_db_obj(cls, db_obj):
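        # db_obj is the JSON blob stored in the instance_extra.migration_context
        # column (see get_by_instance_uuid below).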
primitive = jsonutils.loads(db_obj)
return cls.obj_from_primitive(primitive)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_extra = db.instance_extra_get_by_instance_uuid(
context, instance_uuid, columns=['migration_context'])
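        # A missing instance_extra row is an error, while a NULL
        # migration_context column simply means no migration is in progress.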
if not db_extra:
raise exception.MigrationContextNotFound(
instance_uuid=instance_uuid)
if db_extra['migration_context'] is None:
return None
return cls.obj_from_db_obj(db_extra['migration_context'])
| mikalstill/nova | nova/objects/migration_context.py | Python | apache-2.0 | 3,456 |