from __future__ import print_function
__author__ = """Alex "O." Holcombe""" ## double-quotes will be silently removed, single quotes will be left, eg, O'Connor
import numpy as np
import itertools #to calculate all subsets
from copy import deepcopy
from math import atan, pi, cos, sin, sqrt, ceil
import time, sys, platform, os, StringIO, gc
from functools import reduce  # harmless on Python 2, required for LCM() below on Python 3
from psychopy import visual, core
#BEGIN helper functions from primes.py
def gcd(a,b):
"""Return greatest common divisor using Euclid's Algorithm."""
while b:
a, b = b, a % b
return a
def lcm(a,b):
"""Return lowest common multiple."""
return (a*b)/gcd(a,b)
def LCM(terms):
"Return lcm of a list of numbers."
return reduce(lambda a,b: lcm(a,b), terms)
#END helper functions from primes.py
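# Quick sanity check for the helpers above (values follow directly from the definitions):
#   gcd(12, 18) -> 6,  lcm(12, 18) -> 36,  LCM([3, 4, 6]) -> 12
# Note that LCM() relies on reduce(), which on Python 3 must be imported from functools.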
def calcCondsPerNumTargets(numRings,numTargets):
#numRings is number of rings, each of which can have up to one target
#numTargets is list or array of numTarget conditions, e.g. 1,2,3 means the experiment includes 1, 2, and 3 targets
#Each target can be placed randomly in any of the rings.
#Want all possibilities to be covered equally often. That means each target number condition has to include all the combinations
# of places that number of targets can go.
#So that some targetNum conditions don't have more trials than others, have to scale up each targetNum condition to the worst case.
#Actually it's worse than that. To make them fit evenly, have to use least common multiple
#3 rings choose 2 for targets, 3 rings choose 1 for target, have to have as many conditions as the maximum.
#To find maximum, determine length of each.
ringNums = np.arange(numRings)
numPossibilitiesEach = list()
for k in numTargets:
numPossibilitiesCouldPutKtargets = len( list(itertools.combinations(ringNums,k)) )
#print(numPossibilitiesCouldPutKtargets)
numPossibilitiesEach.append( numPossibilitiesCouldPutKtargets )
m = max( numPossibilitiesEach ) #because the worst case (number of targets) requires this many, have to have this many for all. Actually,
leastCommonMultiple = LCM( numPossibilitiesEach ) #to have equal number of trials per numtargets, would have to use this figure for each
#print('biggest=',m, ' Least common multiple=', leastCommonMultiple)
return leastCommonMultiple
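# Worked example (numbers follow from itertools.combinations):
#   calcCondsPerNumTargets(numRings=3, numTargets=[1,2,3])
#   possibilities per condition are C(3,1)=3, C(3,2)=3, C(3,3)=1, so the least common
#   multiple is 3, i.e. each target-number condition must be scaled up to 3 placements.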
def accelerateComputer(slowFast, process_priority, disable_gc):
# process_priority = 'normal' 'high' or 'realtime'
if slowFast:
if process_priority == 'normal':
pass
elif process_priority == 'high':
core.rush(True)
elif process_priority == 'realtime': # Only makes a diff compared to 'high' on Windows.
core.rush(True, realtime = True)
else:
print('Invalid process priority:',process_priority,"Process running at normal.")
process_priority = 'normal'
if disable_gc:
gc.disable()
if slowFast==0: #turn off the speed-up
if disable_gc:
gc.enable()
core.rush(False)
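# Example usage (a sketch, not taken from the original experiment scripts): speed up before
# the trial loop, then restore normal scheduling and garbage collection afterwards.
#   accelerateComputer(1, 'high', disable_gc=True)   # core.rush(True), gc.disable()
#   ...run trials...
#   accelerateComputer(0, 'high', disable_gc=True)   # gc.enable(), core.rush(False)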
def openMyStimWindow(monitorSpec,widthPix,heightPix,bgColor,allowGUI,units,fullscr,scrn,waitBlank): #make it a function because have to do it several times, want to be sure is identical each time
myWin = visual.Window(monitor=monitorSpec,size=(widthPix,heightPix),allowGUI=allowGUI,units=units,color=bgColor,colorSpace='rgb',fullscr=fullscr,screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor
if myWin is None:
print('ERROR: Failed to open window in openMyStimWindow!')
core.quit()
return myWin
def constructRingsAsGratings(myWin,numRings,radii,ringRadialMaskEachRing,numObjects,patchAngle,colors,stimColorIdxsOrder,gratingTexPix,blobToCueEachRing,ppLog):
#Originally to construct a grating formed of the colors in order of stimColorIdxsOrder
antialiasGrating = True
autoLogging = False
texEachRing=list() #texture which will draw the ring of objects via openGL texture on grating
cueTexEachRing=list() #making a separate grating for the cue, wherein everything background color except the location of the cue
ringsRadial=list(); #after making the rings of object, put them in this list
cueRings=list() #after making grating for each cue, put it in this cue
stimColorIdxsOrder= stimColorIdxsOrder[::-1] #reverse order of indices, because grating texture is rendered in reverse order than is blobs version
radialMaskEachRing=[[0,0,0,1,1,] ,[0,0,0,0,0,0,1,1,],[0,0,0,0,0,0,0,0,0,0,1,1,]]
numUniquePatches= len( max(stimColorIdxsOrder,key=len) )
numCycles =(1.0*numObjects) / numUniquePatches
angleSegment = 360./(numUniquePatches*numCycles)
if gratingTexPix % numUniquePatches >0: #gratingTexPix contains numUniquePatches. numCycles will control how many total objects there are around circle
ppLog.warn('Warning: could not exactly render a '+str(numUniquePatches)+'-segment pattern radially, will be off by '+str( (gratingTexPix%numUniquePatches)*1.0 /gratingTexPix ) )
if numObjects % numUniquePatches >0:
msg= 'Warning: numUniquePatches ('+str(numUniquePatches)+') not go evenly into numObjects'; ppLog.warn(msg)
#create texture for red-green-blue-red-green-blue etc. radial grating
for i in range(numRings):
#myTex.append(np.zeros([gratingTexPix,gratingTexPix,3])+[1,-1,1])
texEachRing.append( np.zeros([gratingTexPix,gratingTexPix,3])+bgColor[0] ) #start with all channels in all locs = bgColor
cueTexEachRing.append( np.ones([gratingTexPix,gratingTexPix,3])*bgColor[0] )
if patchAngle > angleSegment:
msg='Error: patchAngle requested ('+str(patchAngle)+') bigger than maximum possible ('+str(angleSegment)+') numUniquePatches='+str(numUniquePatches)+' numCycles='+str(numCycles);
print(msg); ppLog.error(msg)
oneCycleAngle = 360./numCycles
segmentSizeTexture = angleSegment/oneCycleAngle *gratingTexPix #I call it segment because includes spaces in between, that I'll write over subsequently
patchSizeTexture = patchAngle/oneCycleAngle *gratingTexPix
patchSizeTexture = round(patchSizeTexture) #best is odd number, even space on either size
patchFlankSize = (segmentSizeTexture-patchSizeTexture)/2.
patchAngleActual = patchSizeTexture / gratingTexPix * oneCycleAngle
if abs(patchAngleActual - patchAngle) > .04:
msg = 'Desired patchAngle = '+str(patchAngle)+' but closest can get with '+str(gratingTexPix)+' gratingTexPix is '+str(patchAngleActual);
ppLog.warn(msg)
for colrI in range(numUniquePatches): #for that portion of texture, set color
start = colrI*segmentSizeTexture
end = start + segmentSizeTexture
start = round(start) #don't round until after do addition, otherwise can fall short
end = round(end)
ringColr=list();
for i in range(numRings):
ringColr.append(colors[ stimColorIdxsOrder[i][colrI] ])
for colorChannel in range(3):
for i in range(numRings):
texEachRing[i][:, start:end, colorChannel] = ringColr[i][colorChannel];
for cycle in range(int(round(numCycles))):
base = cycle*gratingTexPix/numCycles
for i in range(numRings):
cueTexEachRing[i][:, base+start/numCycles:base+end/numCycles, colorChannel] = ringColr[1][colorChannel]
#draw bgColor area (emptySizeEitherSideOfPatch) by overwriting first and last entries of segment
for i in range(numRings):
texEachRing[i][:, start:start+patchFlankSize, :] = bgColor[0]; #one flank
texEachRing[i][:, end-1-patchFlankSize:end, :] = bgColor[0]; #other flank
for cycle in range(int(round(numCycles))):
base = cycle*gratingTexPix/numCycles
for i in range(numRings):
cueTexEachRing[i][:,base+start/numCycles:base+(start+patchFlankSize)/numCycles,:] =bgColor[0];
cueTexEachRing[i][:,base+(end-1-patchFlankSize)/numCycles:base+end/numCycles,:] =bgColor[0]
#color the segment to be cued white. First, figure out cue segment len
segmentLen = gratingTexPix/numCycles*1/numUniquePatches
WhiteCueSizeAdj=0 # adust the white cue marker wingAdd 20110923
if numObjects==3:WhiteCueSizeAdj=110
elif numObjects==6:WhiteCueSizeAdj=25
elif numObjects==12:WhiteCueSizeAdj=-15
elif numObjects==2:WhiteCueSizeAdj=200
for i in range(numRings): #color cue position white
if blobToCueEachRing[i] >=0: #-999 means dont cue anything
blobToCueCorrectForRingReversal = numObjects-1 - blobToCueEachRing[i] #grating seems to be laid out in opposite direction than blobs, this fixes postCueNumBlobsAway so positive is in direction of motion
if blobToCueCorrectForRingReversal==0 and numObjects==12: WhiteCueSizeAdj=0
cueStartEntry = blobToCueCorrectForRingReversal*segmentLen+WhiteCueSizeAdj
cueEndEntry = cueStartEntry + segmentLen-2*WhiteCueSizeAdj
cueTexEachRing[i][:, cueStartEntry:cueEndEntry, :] = -1*bgColor[0] #-1*bgColor is that what makes it white?
blackGrains = round( .25*(cueEndEntry-cueStartEntry) )#number of "pixels" of texture at either end of cue sector to make black. Need to update this to reflect patchAngle
cueTexEachRing[i][:, cueStartEntry:cueStartEntry+blackGrains, :] = bgColor[0]; #this one doesn't seem to do anything?
cueTexEachRing[i][:, cueEndEntry-1-blackGrains:cueEndEntry, :] = bgColor[0];
angRes = 100 #100 is default. I have not seen any effect. This is currently not printed to log file!
for i in range(numRings):
ringsRadial.append(visual.RadialStim(myWin, tex=texEachRing[i], color=[1,1,1],size=radii[i],#myTexInner is the actual colored pattern. radial grating used to make it an annulus
mask=ringRadialMaskEachRing[i], # this is a 1-D mask dictating the behaviour from the centre of the stimulus to the surround.
radialCycles=0, angularCycles=numObjects*1.0/numUniquePatches,
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging))
#the mask is radial and indicates that only the .3-.4 band should be shown as one moves radially, creating an annulus
#end preparation of colored rings
#draw cueing grating for tracking task. Have entire grating be empty except for one white sector
cueRings.append(visual.RadialStim(myWin, tex=cueTexEachRing[i], color=[1,1,1],size=radii[i], #cueTexInner is white. Only one sector of it shown by mask
mask = radialMaskEachRing[i], radialCycles=0, angularCycles=1, #only one cycle because no pattern actually repeats- trying to highlight only one sector
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging) )#depth doesn't seem to work, just always makes it invisible?
currentlyCuedBlobEachRing = blobToCueEachRing #this will mean that don't have to redraw
return ringsRadial,cueRings,currentlyCuedBlobEachRing
######### End constructRingAsGrating ###########################################################
#########################################
def constructThickThinWedgeRingsTargetAndCue(myWin,radius,radialMask,cueRadialMask,visibleWedge,numObjects,patchAngleThick,patchAngleThin,bgColor,
thickWedgeColor,thinWedgeColor,targetAngleOffset,gratingTexPix,cueColor,objToCue,ppLog):
#Construct a grating formed of the colors in order of stimColorIdxsOrder
#Also construct a similar cueRing grating with same colors, but one blob potentially highlighted.
#cueRing has different spacing than ringRadial; not sure why, but I think it's because the calculations tend to be off as it's
#always one cycle.
#radialMask doesn't seem to eliminate very-central part, bizarre
antialiasGrating = False #Don't set this to true because in present context, it's like imposing a radial Gaussian ramp on each object
autoLogging = False
numCycles = numObjects
segmentAngle = 360./numCycles
#create texture for red-green-blue-red-green-blue etc. radial grating
#2-D texture which will draw the ring of objects via openGL texture on grating
ringTex = np.zeros([gratingTexPix,gratingTexPix,3])+bgColor[0] #start with all channels in all locs = bgColor
cueTex = np.zeros([gratingTexPix,gratingTexPix,3])+bgColor[0] #start with all channels in all locs = bgColor
oneCycleAngle = 360./numCycles
def patchSizeForTexture(segmentAngle, patchAngle, oneCycleAngle, gratingTexPix):
segmentSizeTexture = segmentAngle/oneCycleAngle *gratingTexPix #I call it segment because includes spaces between objects, that I'll write over subsequently
if patchAngle > segmentAngle:
msg='Error: patchAngle requested ('+str(patchAngle)+') bigger than maximum possible ('+str(segmentAngle)+') numCycles='+str(numCycles)
print(msg); ppLog.error(msg)
patchSizeTexture = patchAngle*1.0/oneCycleAngle *gratingTexPix
patchSizeTexture = round(patchSizeTexture) #best is odd number, even space on either size
patchFlankSize = (segmentSizeTexture-patchSizeTexture)/2. #this area will be drawn in bgColor
patchAngleActual = patchSizeTexture*1.0 / gratingTexPix * oneCycleAngle
if abs(patchAngleActual - patchAngle) > .04:
msg = 'Desired patchAngle = '+str(patchAngle)+' but closest can get with '+str(gratingTexPix)+' gratingTexPix is '+str(patchAngleActual);
ppLog.warn(msg)
return segmentSizeTexture, patchSizeTexture, patchFlankSize
#thick wedges. Create texture for visual.radialStim
segmentSizeTexture, patchSizeTexture, patchFlankSize = patchSizeForTexture(segmentAngle, patchAngleThick, oneCycleAngle, gratingTexPix)
start = round( 0 ) #identify starting texture position for this segment
end = round( start + segmentSizeTexture ) #don't round until after do addition, otherwise can fall short
#First draw the entire segment in the patch color, then erase the sides (flanks), leaving only the patchAngle
ringTex[:, start:end, :] = thickWedgeColor[:]
#the spaces in between objects are termed the flanks and should be bgColor
ringTex[:, start:start+patchFlankSize, :] = bgColor[:] #one flank
ringTex[:, end-1-patchFlankSize:end, :] = bgColor[:] #other flank
#thin wedges. Create texture for visual.radialStim
segmentSizeTexture, thinWedgeSizeTexture, patchFlankSize = patchSizeForTexture(segmentAngle, patchAngleThin, oneCycleAngle, gratingTexPix)
#First draw the entire segment in the patch color, then erase the sides (flanks), leaving only the patchAngle
start = patchFlankSize #identify starting texture position for this segment
end = round( start + thinWedgeSizeTexture ) #don't round until after do addition, otherwise can fall short
ringTex[:, start:end, :] = thinWedgeColor[:]
angRes = 200 #100 is default. I have not seen an artifact at present when set to 100, two things drawn don't overlap exactly
ringRadial= visual.RadialStim(myWin, tex=ringTex, color=[1,1,1],size=radius,#ringTex is the actual colored pattern. radial grating used to make it an annulus
visibleWedge=visibleWedge,
mask=radialMask, # this is a 1-D mask masking the centre, to create an annulus
radialCycles=0, angularCycles=numObjects,
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging)
#Draw target (task is to judge offset of thin wedge relative to thick wedge).
#So, overdraw a single segment of the grating by using visibleWedge
#angularPhase =
#I need to not show the part of the thick wedge that will be displaced, while showing enough of thick wedge to overdraw previous location of thin wedge
targetCorrectedForRingReversal = numObjects-1 - objToCue #grating seems to be laid out in opposite direction than blobs, this fixes postCueNumBlobsAway so positive is in direction of motion
visibleAngleStart = targetCorrectedForRingReversal*segmentAngle + (segmentAngle-patchAngleThick)/2
visibleAngleEnd = visibleAngleStart + patchAngleThick
#print('targetCorrectedForRingReversal = ',targetCorrectedForRingReversal,' visibleAngleStart=',visibleAngleStart,' visibleAngleEnd=',visibleAngleEnd)
if targetAngleOffset >= 0:
visibleAngleEnd -= targetAngleOffset #don't show the part of the thick wedge that would be displaced
else: #shifted the other way, towards the start, so spillover on that side needs to be avoided by not drawing it
visibleAngleStart -= targetAngleOffset
#Below call is identical to ringRadial except ori
targetRadial= visual.RadialStim(myWin, tex=ringTex, color=[1,1,1],size=radius,#ringTex is the actual colored pattern. radial grating used to make it an annulus
visibleWedge=[visibleAngleStart,visibleAngleEnd],
ori = targetAngleOffset,
mask=radialMask, # this is a 1-D mask masking the centre, to create an annulus
radialCycles=0, angularCycles=numObjects,
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging)
#Creating cue texture
#Both inner and outer cue arcs can be drawn in one go via a radial mask
#use visibleWedge so it only highlights a single thick wedge
#draw texture for cueRing
start = round( 0 ) #identify starting texture position for this segment
start = round( start+patchFlankSize )
end = round(start + segmentSizeTexture - patchFlankSize) #don't round until after do addition, otherwise can fall short
cueTex[:, start:end, :] = cueColor[:]
#Actually because I'm only showing a tiny sliver via visibleAngle, could color the whole thing
cueTex[:, :, :] = cueColor[:]
#draw cue
visibleAngleStart = 0; visibleAngleEnd=360
if objToCue>=0:
objToCueCorrectdForRingReversal = numObjects-1 - objToCue #grating seems to be laid out in opposite direction than blobs, this fixes postCueNumBlobsAway so positive is in direction of motion
visibleAngleStart = objToCueCorrectdForRingReversal*segmentAngle + (segmentAngle-patchAngleThick)/2
visibleAngleEnd = visibleAngleStart + patchAngleThick
#print('objToCueCorrectdForRingReversal = ',objToCueCorrectdForRingReversal,' visibleAngleStart=',visibleAngleStart,' visibleAngleEnd=',visibleAngleEnd)
cueRing = visual.RadialStim(myWin, tex=cueTex, color=[1,1,1],size=radius, #cueTexInner is white. Only one sector of it shown by mask
visibleWedge=[visibleAngleStart,visibleAngleEnd],
mask = cueRadialMask, radialCycles=0, angularCycles=1, #only one cycle because no pattern actually repeats- trying to highlight only one sector
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging)
return ringRadial,targetRadial,cueRing
######### End constructRingAsGrating ###########################################################
if __name__ == "__main__": #do self-tests
from psychopy import *
from psychopy import monitors, logging
monitorwidth = 38.5 #28.5 #monitor width in centimeters
viewdist = 57.; #cm
mon = monitors.Monitor("testMonitor",width=monitorwidth, distance=viewdist) #fetch the most recent calib for this monitor
bgColor = [-1,-1,-1]; allowGUI = True; units='deg'; fullscr=0; scrn=0; waitBlank=False
#mon.setSizePix( (widthPix,heightPix) )
widthPix = 800; heightPix = 600
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscr,scrn,waitBlank)
widthPix = myWin.size[0]; heightPix = myWin.size[1]
# radius= 22
# ringRadialMask=[0,0,0,0,1] #to mask off center part of circle, all a part of creating arc
#
# numObjects = 4
# blobToCue = 2
# patchAngle = 30
# gratingTexPix=1024#nump
# ring,cueRing,currentlyCuedBlob = constructMulticolorRingAsGrating(myWin,
# radius,ringRadialMask,numObjects,patchAngle,colors=[[1,0,0],[0,0,1]],stimColorIdxsOrder=[0,1],\
# gratingTexPix=gratingTexPix,blobToCue=blobToCue,ppLog=logging)
#Task will be to judge which thick wedge has the thin wedge offset within it
numObjects = 6
gratingTexPix= 1024
objToCue= 3
radius = 25
visibleWedge = [0,360]
patchAngleThickWedges = 360/numObjects/2
thickWedgeColor = [1,-1,-1]
thinWedgeColor=[0,0,1]
cueColor=[0,1,1]
radialMask = np.array( [0,0,0,0,0,0,0,1,0,0,0] )
wedgeRadiusFraction = np.where(radialMask)[0][0]*1.0 / len(radialMask)
print('wedgeRadiusFraction = ',wedgeRadiusFraction)
wedgeThicknessFraction = len( np.where(radialMask)[0] )*1.0 / len(radialMask)
print('wedgeThickness = ',wedgeThicknessFraction*radius)
wedgeCenterFraction = wedgeRadiusFraction + wedgeThicknessFraction/2.
desiredArcDistanceFractionRadius = .23
cueInnerArcDesiredFraction = wedgeCenterFraction - desiredArcDistanceFractionRadius
cueOuterArcDesiredFraction = wedgeCenterFraction + desiredArcDistanceFractionRadius
if cueOuterArcDesiredFraction > 1:
msg='Can"t start outer arc at fraction='+str(cueOuterArcDesiredFraction)
logging.error(msg); print(msg)
fractionResolution = .02 #Quantisation of possible positions of cue arc
binsNeeded = 1.0 / fractionResolution
cueRadialMask = np.zeros( binsNeeded )
#For the cueRadialMask, want everything zero except just inside and outside of the wedges.
innerArcCenterPos = round( binsNeeded*cueInnerArcDesiredFraction )
outerArcCenterPos = round( binsNeeded*cueOuterArcDesiredFraction )
cueRadialMask[ innerArcCenterPos ] = 1
cueRadialMask[ outerArcCenterPos ] = 1
print('cueInnerArcDesiredFraction = ',cueInnerArcDesiredFraction, ' actual = ', innerArcCenterPos*1.0/len(cueRadialMask) )
print('cueOuterArcDesiredFraction = ',cueOuterArcDesiredFraction, ' actual = ', outerArcCenterPos*1.0/len(cueRadialMask) )
targetAngleOffset = -6
thickThinWedgesRing, targetRing, cueRing = \
constructThickThinWedgeRingsTargetAndCue(myWin,radius,radialMask,cueRadialMask,visibleWedge,numObjects,patchAngleThickWedges,5,
bgColor,thickWedgeColor,thinWedgeColor,targetAngleOffset,gratingTexPix,cueColor,objToCue,ppLog=logging)
keepGoing = True
while keepGoing:
thickThinWedgesRing.draw()
cueRing.draw()
#Draw thin wedges at same time as thick wedges. But when time to draw target, draw over old position of target thin wedge and draw displaced version
#Now program the cue arcs and the target-displaced ring
myWin.flip()
for key in event.getKeys(): #check if pressed abort-type key
if key in ['escape','q']:
keepGoing = False
respcount = 1
else: #key in [
print('key =', key)
keepGoing = True #draw target superposed
while keepGoing:
thickThinWedgesRing.draw()
targetRing.draw()
#Draw thin wedges at same time as thick wedges. But when time to draw target, draw over old position of target thin wedge and draw displaced version
#Now program the cue arcs and the target-displaced ring
myWin.flip()
for key in event.getKeys(): #check if pressed abort-type key
if key in ['escape','q']:
keepGoing = False
respcount = 1
else: #key in [
print('key =', key)
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Jul 16, 2012
"""
__author__ = "Shyue Ping Ong, Stephen Dacek"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Jul 16, 2012"
import unittest
import os
import json
import numpy as np
import warnings
from pymatgen.io.vaspio.vasp_output import Chgcar, Locpot, Oszicar, Outcar, \
Vasprun, Procar, Xdatcar, Dynmat
from pymatgen import Spin, Orbital, Lattice, Structure
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class VasprunTest(unittest.TestCase):
def test_properties(self):
filepath = os.path.join(test_dir, 'vasprun.xml.nonlm')
vasprun = Vasprun(filepath, parse_potcar_file=False)
orbs = list(vasprun.complete_dos.pdos[vasprun.final_structure[
0]].keys())
self.assertIn("S", orbs)
filepath = os.path.join(test_dir, 'vasprun.xml')
vasprun = Vasprun(filepath, parse_potcar_file=False)
#test pdos parsing
pdos0 = vasprun.complete_dos.pdos[vasprun.final_structure[0]]
self.assertAlmostEqual(pdos0[Orbital.s][1][16], 0.0026)
self.assertAlmostEqual(pdos0[Orbital.pz][-1][16], 0.0012)
self.assertEqual(pdos0[Orbital.s][1].shape, (301, ))
filepath2 = os.path.join(test_dir, 'lifepo4.xml')
vasprun_ggau = Vasprun(filepath2, parse_projected_eigen=True,
parse_potcar_file=False)
totalscsteps = sum([len(i['electronic_steps'])
for i in vasprun.ionic_steps])
self.assertEqual(29, len(vasprun.ionic_steps))
self.assertEqual(len(vasprun.structures), len(vasprun.ionic_steps))
self.assertEqual(vasprun.lattice,
vasprun.lattice_rec.reciprocal_lattice)
for i, step in enumerate(vasprun.ionic_steps):
self.assertEqual(vasprun.structures[i], step["structure"])
self.assertTrue(all([vasprun.structures[i] == vasprun.ionic_steps[i][
"structure"] for i in range(len(vasprun.ionic_steps))]))
self.assertEqual(308, totalscsteps,
"Incorrect number of energies read from vasprun.xml")
self.assertEqual(['Li'] + 4 * ['Fe'] + 4 * ['P'] + 16 * ["O"],
vasprun.atomic_symbols)
self.assertEqual(vasprun.final_structure.composition.reduced_formula,
"LiFe4(PO4)4")
self.assertIsNotNone(vasprun.incar, "Incar cannot be read")
self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
self.assertIsNotNone(vasprun.eigenvalues, "Eigenvalues cannot be read")
self.assertAlmostEqual(vasprun.final_energy, -269.38319884, 7)
self.assertAlmostEqual(vasprun.tdos.get_gap(), 2.0589, 4)
expectedans = (2.539, 4.0906, 1.5516, False)
(gap, cbm, vbm, direct) = vasprun.eigenvalue_band_properties
self.assertAlmostEqual(gap, expectedans[0])
self.assertAlmostEqual(cbm, expectedans[1])
self.assertAlmostEqual(vbm, expectedans[2])
self.assertEqual(direct, expectedans[3])
self.assertFalse(vasprun.is_hubbard)
self.assertEqual(vasprun.potcar_symbols,
['PAW_PBE Li 17Jan2003', 'PAW_PBE Fe 06Sep2000',
'PAW_PBE Fe 06Sep2000', 'PAW_PBE P 17Jan2003',
'PAW_PBE O 08Apr2002'])
self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
self.assertIsNotNone(vasprun.actual_kpoints,
"Actual kpoints cannot be read")
self.assertIsNotNone(vasprun.actual_kpoints_weights,
"Actual kpoints weights cannot be read")
for atomdoses in vasprun.pdos:
for orbitaldos in atomdoses:
self.assertIsNotNone(orbitaldos, "Partial Dos cannot be read")
#test skipping ionic steps.
vasprun_skip = Vasprun(filepath, 3, parse_potcar_file=False)
self.assertEqual(vasprun_skip.nionic_steps, 29)
self.assertEqual(len(vasprun_skip.ionic_steps),
int(vasprun.nionic_steps / 3) + 1)
self.assertEqual(len(vasprun_skip.ionic_steps),
len(vasprun_skip.structures))
self.assertEqual(len(vasprun_skip.ionic_steps),
int(vasprun.nionic_steps / 3) + 1)
#Check that nionic_steps is preserved no matter what.
self.assertEqual(vasprun_skip.nionic_steps,
vasprun.nionic_steps)
self.assertNotAlmostEqual(vasprun_skip.final_energy,
vasprun.final_energy)
#Test with ionic_step_offset
vasprun_offset = Vasprun(filepath, 3, 6, parse_potcar_file=False)
self.assertEqual(len(vasprun_offset.ionic_steps),
int(len(vasprun.ionic_steps) / 3) - 1)
self.assertEqual(vasprun_offset.structures[0],
vasprun_skip.structures[2])
self.assertTrue(vasprun_ggau.is_hubbard)
self.assertEqual(vasprun_ggau.hubbards["Fe"], 4.3)
self.assertAlmostEqual(vasprun_ggau.projected_eigenvalues[(Spin.up, 0,
0, 96,
Orbital.s)],
0.0032)
d = vasprun_ggau.as_dict()
self.assertEqual(d["elements"], ["Fe", "Li", "O", "P"])
self.assertEqual(d["nelements"], 4)
filepath = os.path.join(test_dir, 'vasprun.xml.unconverged')
vasprun_unconverged = Vasprun(filepath, parse_potcar_file=False)
self.assertTrue(vasprun_unconverged.converged_ionic)
self.assertFalse(vasprun_unconverged.converged_electronic)
self.assertFalse(vasprun_unconverged.converged)
filepath = os.path.join(test_dir, 'vasprun.xml.dfpt')
vasprun_dfpt = Vasprun(filepath, parse_potcar_file=False)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][0], 3.26105533)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][1], -0.00459066)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[2][2], 3.24330517)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][0], 3.33402531)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][1], -0.00559998)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[2][2], 3.31237357)
self.assertTrue(vasprun_dfpt.converged)
entry = vasprun_dfpt.get_computed_entry()
entry = MaterialsProjectCompatibility(check_potcar_hash=False).process_entry(entry)
self.assertAlmostEqual(entry.uncorrected_energy + entry.correction,
entry.energy)
filepath = os.path.join(test_dir, 'vasprun.xml.dfpt.ionic')
vasprun_dfpt_ionic = Vasprun(filepath, parse_potcar_file=False)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][0], 515.73485838)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][1], -0.00263523)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[2][2], 19.02110169)
filepath = os.path.join(test_dir, 'vasprun.xml.dfpt.unconverged')
vasprun_dfpt_unconv = Vasprun(filepath, parse_potcar_file=False)
self.assertFalse(vasprun_dfpt_unconv.converged_electronic)
self.assertTrue(vasprun_dfpt_unconv.converged_ionic)
self.assertFalse(vasprun_dfpt_unconv.converged)
vasprun_uniform = Vasprun(os.path.join(test_dir, "vasprun.xml.uniform"),
parse_potcar_file=False)
self.assertEqual(vasprun_uniform.kpoints.style, "Reciprocal")
vasprun_no_pdos = Vasprun(os.path.join(test_dir, "Li_no_projected.xml"),
parse_potcar_file=False)
self.assertIsNotNone(vasprun_no_pdos.complete_dos)
self.assertFalse(vasprun_no_pdos.dos_has_errors)
vasprun_diel = Vasprun(os.path.join(test_dir, "vasprun.xml.dielectric"),
parse_potcar_file=False)
self.assertAlmostEqual(0.4294,vasprun_diel.dielectric[0][10])
self.assertAlmostEqual(19.941,vasprun_diel.dielectric[1][51][0])
self.assertAlmostEqual(19.941,vasprun_diel.dielectric[1][51][1])
self.assertAlmostEqual(19.941,vasprun_diel.dielectric[1][51][2])
self.assertAlmostEqual(0.0,vasprun_diel.dielectric[1][51][3])
self.assertAlmostEqual(34.186,vasprun_diel.dielectric[2][85][0])
self.assertAlmostEqual(34.186,vasprun_diel.dielectric[2][85][1])
self.assertAlmostEqual(34.186,vasprun_diel.dielectric[2][85][2])
self.assertAlmostEqual(0.0,vasprun_diel.dielectric[2][85][3])
def test_Xe(self):
vr = Vasprun(os.path.join(test_dir, 'vasprun.xml.xe'), parse_potcar_file=False)
self.assertEquals(vr.atomic_symbols, ['Xe'])
def test_invalid_element(self):
self.assertRaises(KeyError, Vasprun, os.path.join(test_dir, 'vasprun.xml.wrong_sp'))
def test_as_dict(self):
filepath = os.path.join(test_dir, 'vasprun.xml')
vasprun = Vasprun(filepath,
parse_potcar_file=False)
#Test that as_dict() is json-serializable
self.assertIsNotNone(json.dumps(vasprun.as_dict()))
self.assertEqual(
vasprun.as_dict()["input"]["potcar_type"],
['PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE'])
def test_get_band_structure(self):
filepath = os.path.join(test_dir, 'vasprun_Si_bands.xml')
vasprun = Vasprun(filepath, parse_potcar_file=False)
bs = vasprun.get_band_structure(kpoints_filename=
os.path.join(test_dir,
'KPOINTS_Si_bands'))
cbm = bs.get_cbm()
vbm = bs.get_vbm()
self.assertEqual(cbm['kpoint_index'], [13], "wrong cbm kpoint index")
self.assertAlmostEqual(cbm['energy'], 6.2301, "wrong cbm energy")
self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
"wrong cbm bands")
self.assertEqual(vbm['kpoint_index'], [0, 63, 64],
"wrong vbm kpoint index")
self.assertAlmostEqual(vbm['energy'], 5.6158, "wrong vbm energy")
self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
Spin.down: [1, 2, 3]},
"wrong vbm bands")
self.assertEqual(vbm['kpoint'].label, "\Gamma", "wrong vbm label")
self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
def test_sc_step_overflow(self):
filepath = os.path.join(test_dir, 'vasprun.xml.sc_overflow')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
vasprun = Vasprun(filepath)
self.assertEqual(len(w), 3)
estep = vasprun.ionic_steps[0]['electronic_steps'][29]
self.assertTrue(np.isnan(estep['e_wo_entrp']))
def test_update_potcar(self):
filepath = os.path.join(test_dir, 'vasprun.xml')
potcar_path = os.path.join(test_dir, 'POTCAR.LiFePO4.gz')
potcar_path2 = os.path.join(test_dir, 'POTCAR2.LiFePO4.gz')
vasprun = Vasprun(filepath, parse_potcar_file=False)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE P 17Jan2003", "hash": None},
{"titel": "PAW_PBE O 08Apr2002", "hash": None}])
vasprun.update_potcar_spec(potcar_path)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f"},
{"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982"}])
vasprun2 = Vasprun(filepath, parse_potcar_file=False)
self.assertRaises(ValueError, vasprun2.update_potcar_spec, potcar_path2)
vasprun = Vasprun(filepath, parse_potcar_file=potcar_path)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f"},
{"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982"}])
self.assertRaises(ValueError, Vasprun, filepath, parse_potcar_file=potcar_path2)
def test_search_for_potcar(self):
filepath = os.path.join(test_dir, 'vasprun.xml')
vasprun = Vasprun(filepath, parse_potcar_file=True)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f"},
{"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982"}])
def test_potcar_not_found(self):
filepath = os.path.join(test_dir, 'vasprun.xml')
#Ensure no potcar is found and nothing is updated
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
vasprun = Vasprun(filepath, parse_potcar_file='.')
self.assertEqual(len(w), 1)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE P 17Jan2003", "hash": None},
{"titel": "PAW_PBE O 08Apr2002", "hash": None}])
class OutcarTest(unittest.TestCase):
def test_init(self):
for f in ['OUTCAR', 'OUTCAR.gz']:
filepath = os.path.join(test_dir, f)
outcar = Outcar(filepath)
expected_mag = ({'d': 0.0, 'p': 0.003, 's': 0.002, 'tot': 0.005},
{'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
{'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
{'d': 0.0, 'p':-0.117, 's': 0.005, 'tot':-0.112},
{'d': 0.0, 'p':-0.165, 's': 0.004, 'tot':-0.162},
{'d': 0.0, 'p':-0.117, 's': 0.005, 'tot':-0.112},
{'d': 0.0, 'p':-0.165, 's': 0.004, 'tot':-0.162})
expected_chg = ({'p': 0.154, 's': 0.078, 'd': 0.0, 'tot': 0.232},
{'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
{'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
{'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
{'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947},
{'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
{'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947})
self.assertAlmostEqual(outcar.magnetization, expected_mag, 5,
"Wrong magnetization read from Outcar")
self.assertAlmostEqual(outcar.charge, expected_chg, 5,
"Wrong charge read from Outcar")
self.assertFalse(outcar.is_stopped)
self.assertEqual(outcar.run_stats, {'System time (sec)': 0.938,
'Total CPU time used (sec)': 545.142,
'Elapsed time (sec)': 546.709,
'Maximum memory used (kb)': 0.0,
'Average memory used (kb)': 0.0,
'User time (sec)': 544.204,
'cores': '8'})
self.assertAlmostEqual(outcar.efermi, 2.0112)
self.assertAlmostEqual(outcar.nelect, 44.9999991)
self.assertAlmostEqual(outcar.total_mag, 0.9999998)
self.assertIsNotNone(outcar.as_dict())
filepath = os.path.join(test_dir, 'OUTCAR.stopped')
outcar = Outcar(filepath)
self.assertTrue(outcar.is_stopped)
for f in ['OUTCAR.lepsilon', 'OUTCAR.lepsilon.gz']:
filepath = os.path.join(test_dir, f)
outcar = Outcar(filepath)
outcar.read_lepsilon()
outcar.read_lepsilon_ionic()
self.assertAlmostEqual(outcar.dielectric_tensor[0][0], 3.716432)
self.assertAlmostEqual(outcar.dielectric_tensor[0][1], -0.20464)
self.assertAlmostEqual(outcar.dielectric_tensor[1][2], -0.20464)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][0], 0.001419)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][2], 0.001419)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[2][2], 0.001419)
self.assertAlmostEqual(outcar.piezo_tensor[0][0], 0.52799)
self.assertAlmostEqual(outcar.piezo_tensor[1][3], 0.35998)
self.assertAlmostEqual(outcar.piezo_tensor[2][5], 0.35997)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[0][0], 0.05868)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[1][3], 0.06241)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[2][5], 0.06242)
self.assertAlmostEqual(outcar.born[0][1][2], -0.385)
self.assertAlmostEqual(outcar.born[1][2][0], 0.36465)
def test_core_state_eigen(self):
filepath = os.path.join(test_dir, "OUTCAR.CL")
cl = Outcar(filepath).read_core_state_eigen()
self.assertAlmostEqual(cl[6]["2s"][-1], -174.4779)
def test_single_atom(self):
filepath = os.path.join(test_dir, "OUTCAR.Al")
outcar = Outcar(filepath)
expected_mag = ({u'p': 0.0, u's': 0.0, u'd': 0.0, u'tot': 0.0},)
expected_chg = ({u'p': 0.343, u's': 0.425, u'd': 0.0, u'tot': 0.768},)
self.assertAlmostEqual(outcar.magnetization, expected_mag)
self.assertAlmostEqual(outcar.charge, expected_chg)
self.assertFalse(outcar.is_stopped)
self.assertEqual(outcar.run_stats, {'System time (sec)': 0.592,
'Total CPU time used (sec)': 50.194,
'Elapsed time (sec)': 52.337,
'Maximum memory used (kb)': 62900.0,
'Average memory used (kb)': 0.0,
'User time (sec)': 49.602,
'cores': '32'})
self.assertAlmostEqual(outcar.efermi, 8.0942)
self.assertAlmostEqual(outcar.nelect, 3)
self.assertAlmostEqual(outcar.total_mag, 8.2e-06)
self.assertIsNotNone(outcar.as_dict())
class OszicarTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'OSZICAR')
oszicar = Oszicar(filepath)
self.assertEqual(len(oszicar.electronic_steps),
len(oszicar.ionic_steps))
self.assertEqual(len(oszicar.all_energies), 60)
self.assertAlmostEqual(oszicar.final_energy, -526.63928)
class LocpotTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'LOCPOT')
locpot = Locpot.from_file(filepath)
self.assertAlmostEqual(-217.05226954,
sum(locpot.get_average_along_axis(0)))
self.assertAlmostEqual(locpot.get_axis_grid(0)[-1], 2.87629, 2)
self.assertAlmostEqual(locpot.get_axis_grid(1)[-1], 2.87629, 2)
self.assertAlmostEqual(locpot.get_axis_grid(2)[-1], 2.87629, 2)
class ChgcarTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'CHGCAR.nospin')
chg = Chgcar.from_file(filepath)
self.assertAlmostEqual(chg.get_integrated_diff(0, 2)[0, 1], 0)
filepath = os.path.join(test_dir, 'CHGCAR.spin')
chg = Chgcar.from_file(filepath)
self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
-0.0043896932237534022)
#test sum
chg += chg
self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
-0.0043896932237534022 * 2)
filepath = os.path.join(test_dir, 'CHGCAR.Fe3O4')
chg = Chgcar.from_file(filepath)
ans = [1.93313368, 3.91201473, 4.11858277, 4.1240093, 4.10634989,
3.38864822]
myans = chg.get_integrated_diff(0, 3, 6)
self.assertTrue(np.allclose(myans[:, 1], ans))
class ProcarTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'PROCAR.simple')
p = Procar(filepath)
self.assertAlmostEqual(p.get_occupation(1, 'd'), 0)
self.assertAlmostEqual(p.get_occupation(1, 's'), 0.3538125)
self.assertAlmostEqual(p.get_occupation(1, 'p'), 1.19540625)
self.assertRaises(ValueError, p.get_occupation, 1, 'm')
self.assertEqual(p.nb_bands, 10)
self.assertEqual(p.nb_kpoints, 10)
lat = Lattice.cubic(3.)
s = Structure(lat, ["Li", "Na", "K"], [[0., 0., 0.],
[0.25, 0.25, 0.25],
[0.75, 0.75, 0.75]])
d = p.get_projection_on_elements(s)
self.assertAlmostEqual(d[1][2][2], {'Na': 0.042, 'K': 0.646, 'Li': 0.042})
filepath = os.path.join(test_dir, 'PROCAR')
p = Procar(filepath)
self.assertAlmostEqual(p.get_occupation(0, 'd'), 4.3698147704200059)
self.assertAlmostEqual(p.get_occupation(0, 'dxy'), 0.85796295426000124)
class XdatcarTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'XDATCAR_4')
x = Xdatcar(filepath)
structures = x.structures
self.assertEqual(len(structures), 3)
for s in structures:
self.assertEqual(s.formula, "Li2 O1")
filepath = os.path.join(test_dir, 'XDATCAR_5')
x = Xdatcar(filepath)
structures = x.structures
self.assertEqual(len(structures), 3)
for s in structures:
self.assertEqual(s.formula, "Li2 O1")
class DynmatTest(unittest.TestCase):
def test_init(self):
# nosetests pymatgen/io/vaspio/tests/test_vasp_output.py:DynmatTest.test_init
filepath = os.path.join(test_dir, 'DYNMAT')
d = Dynmat(filepath)
self.assertEqual(d.nspecs, 2)
self.assertEqual(d.natoms, 6)
self.assertEqual(d.ndisps, 3)
self.assertTrue(np.allclose(d.masses, [63.546, 196.966]))
self.assertTrue(4 in d.data)
self.assertTrue(2 in d.data[4])
self.assertTrue(np.allclose(
d.data[4][2]['dispvec'], [0., 0.05, 0.]
))
self.assertTrue(np.allclose(
d.data[4][2]['dynmat'][3], [0.055046, -0.298080, 0.]
))
# TODO: test get_phonon_frequencies once cross-checked
if __name__ == "__main__":
unittest.main()
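# The module can also be run on its own (python test_vasp_output.py) or, as noted in
# DynmatTest above, via nose: nosetests pymatgen/io/vaspio/tests/test_vasp_output.py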
import numpy
import array
import copy
import re,os,sys,copy
from glob import glob
from scipy.interpolate import griddata
from scipy.integrate import simps,quad
from scipy.optimize import leastsq, fsolve
#from sm_functions import read_ised,read_ised2,calc_lyman,calc_beta
from astropy import units as U
from astropy import constants as C
from astropy import cosmology as cos
cosmo = cos.FlatLambdaCDM(H0=70,Om0=0.3)
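# Standard astropy.cosmology usage (not specific to this script):
#   cosmo.luminosity_distance(4.0)  # Quantity in Mpc for the H0=70, Om0=0.3 cosmology above
#   cosmo.age(0)                    # present-day age of the universe as a Quantity in Gyr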
f = open("error.log", "w")
original_stderr = sys.stderr
sys.stderr = f
class ised(object):
def __init__(self,path):
self.file = path
self.read_ised(self.file)
def read_ised(self,filename):
"""
This function reads data from Bruzual & Charlot binary-format
SSP files and returns the necessary data in an array. The input files
should be '.ised' files, either 2003 or 2007.
'ks' in the binary files is slightly different between the 03/07 files,
so the read length and index must be set appropriately; the function
therefore tries the '03 format first and retries with the '07 format
if the returned number of ages isn't as expected (e.g. 221 ages).
"""
with open(filename,'rb') as f:
check = array.array('i')
check.fromfile(f,2)
if check[1] == 221:
ksl, ksi = 2, 1
F_l, F_i = 3, 2
else:
ksl, ksi = 3, 2
F_l, F_i = 5, 4
with open(filename,'rb') as f:
ks = array.array('i')
ks.fromfile(f,ksl)
ta = array.array('f')
ta.fromfile(f,ks[ksi])
self.ta = numpy.array(ta)
tmp = array.array('i')
tmp.fromfile(f,3)
self.ml,self.mul,iseg = tmp
if iseg > 0:
tmp = array.array('f')
tmp.fromfile(f,iseg*6)
tmp = array.array('f')
tmp.fromfile(f,5)
self.totm, self.totn, self.avs, self.jo, self.tauo = tmp
self.ids= array.array('c')
self.ids.fromfile(f,80)
tmp = array.array('f')
tmp.fromfile(f,4)
self.tcut = tmp[0]
self.ttt = tmp[1:]
ids = array.array('c')
ids.fromfile(f,80)
self.ids = array.array('c')
self.ids.fromfile(f,80)
self.igw = array.array('i')
self.igw.fromfile(f,1)
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.iw = array.array('i')
self.iw.fromfile(f,1)
wave = array.array('f')
wave.fromfile(f,self.iw[0])
self.wave = numpy.array(wave)
#SED Section
self.F = array.array('i')
self.F.fromfile(f,F_l)
self.iw = self.F[F_i] #Number of wavelength elements
self.sed = numpy.zeros((self.iw,ks[ksi]),dtype=numpy.float32)
G = array.array('f')
G.fromfile(f,self.iw)
self.sed[:,0] = G
ik = array.array('i')
ik.fromfile(f,1)
self.h = numpy.empty((ik[0],ks[ksi]),'f')
H = array.array('f')
H.fromfile(f,ik[0])
self.h[:,0] = H
for i in range(1,ks[ksi]): #Fill rest of array with SEDs
F = array.array('i')
F.fromfile(f,F_l)
iw = F[F_i]
G = array.array('f')
G.fromfile(f,iw)
self.sed[:,i] = G
ik = array.array('i')
ik.fromfile(f,1)
H = array.array('f')
H.fromfile(f,ik[0])
self.h[:,i] = H
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.bflx = array.array('f')
self.bflx.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
strm = array.array('f')
strm.fromfile(f,tmp[F_i])
self.strm = numpy.array(strm)
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.evf = array.array('f')
self.evf.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.evf = array.array('f')
self.evf.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.snr = array.array('f')
self.snr.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.pnr = array.array('f')
self.pnr.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.sn = array.array('f')
self.sn.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.bh = array.array('f')
self.bh.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.wd = array.array('f')
self.wd.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
rmtm = array.array('f')
rmtm.fromfile(f,tmp[F_i])
self.rmtm = numpy.array(rmtm)
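# Example usage of the reader above (a sketch; the path is hypothetical and must point at a
# BC03/BC07 '.ised' binary):
#   ssp = ised('../ssp/bc03/salpeter/lr/some_model.ised')
#   ssp.ta    # array of SSP ages
#   ssp.wave  # wavelength grid
#   ssp.sed   # flux array of shape (n_wavelengths, n_ages)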
class CSP:
def __init__(self,SSPpath = '../ssp/bc03/salpeter/lr/',
age=None,sfh=None,dust=None,metal_ind=None,fesc=None,
sfh_law='exp',dustmodel = 'calzetti',neb_cont=True,neb_met=True):
self.SSPpath = SSPpath
self.files = glob(self.SSPpath + '*.ised')
self.files.sort()
self.iseds = []
self.ta_arr = []
self.metal_arr = []
self.iw_arr = []
self.wave_arr = []
self.sed_arr = []
self.strm_arr = []
self.rmtm_arr = []
#Set up
for file in self.files:
ised_binary = ised(file)
self.ta_arr.append(ised_binary.ta)
self.metal_arr.append(ised_binary.ids)
self.iw_arr.append(ised_binary.iw)
self.wave_arr.append(ised_binary.wave)
self.sed_arr.append(ised_binary.sed)
self.strm_arr.append(ised_binary.strm)
self.rmtm_arr.append(ised_binary.rmtm)
self.iseds.append(ised_binary)
#Find closest match for each tg value in ta - set tg to these values
nebular = numpy.loadtxt('nebular_emission.dat',skiprows=1)
self.neb_cont = nebular[:,1]
self.neb_hlines = nebular[:,2]
self.neb_metal = nebular[:,3:]
self.neb_wave = nebular[:,0]
if None not in (age,sfh,dust,metal_ind):
if fesc == None:
self.build(age,sfh,dust,metal_ind,sfh_law=sfh_law,dustmodel=dustmodel,
neb_cont=neb_cont,neb_met=neb_met)
else:
self.build(age,sfh,dust,metal_ind,fesc,sfh_law,dustmodel,neb_cont,neb_met)
def _sfh_exp(self,t,tau):
sfh = numpy.exp(-1*t/tau)/abs(tau)
return sfh
def _sfh_pow(self,t,alpha):
sfh = numpy.power(t/1.e9,alpha)
return sfh
def _sfh_del(self,t,tau):
sfh = t/(tau**2)*numpy.exp(-t/tau)
return sfh
def _sfh_tru(self,t,tstop):
sfh = numpy.ones_like(t)
sfh[t > tstop*numpy.max(t)] = 0.
sfh /= numpy.trapz(sfh,t)
return sfh
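# The _sfh_* helpers above return the star-formation rate as a function of age t for each
# functional form (exponential, power law with t in Gyr, delayed, truncated); apart from
# _sfh_tru they are normalised later, in build(), by integrating over the age grid with simps.
# Sketch (csp being a CSP instance): csp._sfh_exp(numpy.linspace(1., 1e9, 1000), 5e8)
# evaluates exp(-t/tau)/|tau| with tau = 5e8 yr.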
def dust_func(self,lam,ai,bi,ni,li):
"""
Functional form for SMC, LMC and MW extinction curves of
Pei et al. 1992
"""
lam = numpy.array(lam) / 1e4
ki = numpy.power((lam / li),ni) + numpy.power((li / lam),ni) + bi
eta_i = ai / ki
return eta_i
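# dust_func implements one term of the Pei (1992) parametrisation,
#   eta_i(lam) = a_i / [ (lam/l_i)^n_i + (l_i/lam)^n_i + b_i ],  with lam converted to microns,
# and the SMC/LMC/MW branches of build() below sum six such terms to obtain the extinction
# curve eta used to attenuate the SEDs.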
def build(self,age,sfh,dust,metal,fesc=1.,sfh_law='exp',dustmodel = 'calzetti',
neb_cont=True,neb_met=True):
"""
"""
self.tg = age*1.e9
if sfh_law == 'exp':
self.tau = sfh*1.e9
elif sfh_law == 'del':
self.tau = sfh*1.e9
else:
self.tau = sfh
self.tauv = dust
self.mi = int(abs(metal))
self.fesc = fesc
self.sfh_law = sfh_law
self.inc_cont= neb_cont
self.inc_met = neb_met
self.dust_model = dustmodel
mu = 0.3
epsilon = 0.
self.ta = self.ta_arr[self.mi]
self.wave = self.wave_arr[self.mi]
[T1,T2] = numpy.meshgrid(self.tg,self.ta)
tgi = numpy.argmin(numpy.abs(self.tg-self.ta))
self.tg = self.ta[tgi]
if len(self.neb_wave) != len(self.wave):
self.neb_cont = griddata(self.neb_wave,self.neb_cont,self.wave)
self.neb_hlines = griddata(self.neb_wave,self.neb_hlines,self.wave)
neb_metaln = numpy.zeros((len(self.wave),3))
for i in range(3):
neb_metaln[:,i] = griddata(self.neb_wave,self.neb_metal[:,i],self.wave)
self.neb_metal = neb_metaln
self.neb_wave = self.wave
#quietprint("Metallicity "+str(self.mi+1)+":")
#print ".ised file: "+files[abs(SSP)]
sed = self.sed_arr[self.mi]
strm = self.strm_arr[self.mi]
rmtm = self.rmtm_arr[self.mi]
self.iw = self.iw_arr[self.mi]
metal=str((self.metal_arr[self.mi]))[12:-3].strip()
#quietprint(metal[self.mi] + "\nInclude nebular emission: " + str(add_nebular))
SSP_Z = float(re.split("Z=?",metal)[1])
#print SSP_Z,
if SSP_Z <= 0.0004: neb_z = 0
elif SSP_Z > 0.0004 and SSP_Z <= 0.004: neb_z = 1
elif SSP_Z > 0.004: neb_z = 2
#print neb_z
if self.dust_model == "charlot":
ATT = numpy.empty([len(self.wave),len(self.ta)])
tv = ((self.tauv/1.0857)*numpy.ones(len(self.ta)))
tv[self.ta>1e7] = mu*self.tauv
lam = numpy.array((5500/self.wave)**0.7)
ATT[:,:] = (numpy.exp(-1*numpy.outer(lam,tv)))
elif self.dust_model == "calzetti":
ATT = numpy.ones([len(self.wave),len(self.ta)])
k = numpy.zeros_like(self.wave)
w0 = [self.wave <= 1200]
w1 = [self.wave < 6300]
w2 = [self.wave >= 6300]
w_u = self.wave/1e4
x1 = numpy.argmin(numpy.abs(self.wave-1200))
x2 = numpy.argmin(numpy.abs(self.wave-1250))
k[w2] = 2.659*(-1.857 + 1.040/w_u[w2])
k[w1] = 2.659*(-2.156 + (1.509/w_u[w1]) - (0.198/w_u[w1]**2) + (0.011/w_u[w1]**3))
k[w0] = k[x1] + ((self.wave[w0]-1200.) * (k[x1]-k[x2]) / (self.wave[x1]-self.wave[x2]))
k += 4.05
k[k < 0.] = 0.
tv = self.tauv*k/4.05
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*tv)
elif self.dust_model == "calzetti2":
ATT = numpy.ones([len(self.wave),len(self.ta)])
k = numpy.zeros_like(self.wave)
w0 = [self.wave <= 1000]
w1 = [(self.wave > 1000)*(self.wave < 6300)]
w2 = [self.wave >= 6300]
w_u = self.wave/1e4
k[w2] = 2.659*(-1.857 + 1.040/w_u[w2])
k[w1] = 2.659*(-2.156 + (1.509/w_u[w1]) - (0.198/w_u[w1]**2) + (0.011/w_u[w1]**3))
p1 = self.dust_func(self.wave,27,4,5.5,0.08) + self.dust_func(self.wave,185,90,2,0.042)
k[w0] = p1[w0] / (p1[w1][0]/k[w1][0])
k += 4.05
k[k < 0.] = 0.
tv = self.tauv*k/4.05
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*tv)
elif self.dust_model == "smc":
ai = [185., 27., 0.005, 0.01, 0.012, 0.03]
bi = [90., 5.5, -1.95, -1.95, -1.8, 0.]
ni = [2., 4., 2., 2., 2., 2.]
li = [0.042, 0.08, 0.22, 9.7, 18., 25.]
eta = numpy.zeros_like(self.wave)
for i in range(len(ai)):
eta += self.dust_func(self.wave, ai[i], bi[i], ni[i], li[i])
Rv = 2.93
Ab = self.tauv * (1 + (1/Rv))
print(numpy.exp(self.tauv*eta))
ATT = numpy.ones([len(self.wave),len(self.ta)])
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*(Ab*eta))
#Offset added to renormalise from B to V band
#ATT[:,ti] *= numpy.exp(-1*self.tauv*eta)
elif self.dust_model == "lmc":
ai = [175., 19., 0.023, 0.005, 0.006, 0.02]
bi = [90., 4.0, -1.95, -1.95, -1.8, 0.]
ni = [2., 4.5, 2., 2., 2., 2.]
li = [0.046, 0.08, 0.22, 9.7, 18., 25.]
eta = numpy.zeros_like(self.wave)
for i in range(len(ai)):
eta += self.dust_func(self.wave, ai[i], bi[i], ni[i], li[i])
Rv = 3.16
Ab = self.tauv * (1 + (1/Rv))
ATT = numpy.ones([len(self.wave),len(self.ta)])
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*(Ab*eta))
#Offset added to renormalise from B to V band
#ATT[:,ti] *= numpy.exp(-1*self.tauv*eta)
elif self.dust_model == "mw":
ai = [165., 14., 0.045, 0.002, 0.002, 0.012]
bi = [90., 4., -1.95, -1.95, -1.8, 0.]
ni = [2., 6.5, 2., 2., 2., 2.]
li = [0.047, 0.08, 0.22, 9.7, 18., 25.]
eta = numpy.zeros_like(self.wave)
for i in range(len(ai)):
eta += self.dust_func(self.wave, ai[i], bi[i], ni[i], li[i])
Rv = 3.08
Ab = self.tauv * (1 + (1/Rv))
ATT = numpy.ones([len(self.wave),len(self.ta)])
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*(Ab*eta))
#Offset added to renormalise from B to V band
#ATT[:,ti] *= numpy.exp(-1*self.tauv*eta)
"""
SECTION 1
First calculate and store those parameters that are functions of the age array
'ta' only - these are the same for every model to be made. The parameters are
the age array TP, the time interval array DT, the interpolation coefficient
'a' and the interpolation indices J. Each are stored in cell arrays of size ks,
with the data corresponding to the original age array first, and the
interpolated data second.
"""
self.TP = {}
self.A = {}
self.J = {}
self.DT = {}
for ai in range(tgi+1):
#Calculate taux2: the reverse age array; remove those values which
#are less than the first non-zero entry of taux1 - these values
#are treated differently in the original BC code
taux1 = self.ta[:ai+1]
taux2 = self.ta[ai]-self.ta[ai::-1]
if max(taux1) > 0.:
taux2 = numpy.delete(taux2,numpy.where(taux2<taux1[numpy.flatnonzero(taux1)[0]]))
#Remove values common to taux1 and taux2; calculate array TP
[T1,T2] = numpy.meshgrid(taux1,taux2)
[i,j] = numpy.where(T1-T2==0)
taux2 = numpy.delete(taux2, i)
self.TP[ai] = self.ta[ai]-numpy.concatenate((taux1,taux2),axis=0)
l = len(taux2)
#If taux2 has entries, calculate the interpolation parameters a and J.
#The indices correspond to those values of 'ta' which are just below
#the entries in taux2. They are calculated by taking the difference
#between the two arrays, then finding the last negative entry in the
#resulting array.
if l == 0:
self.J[ai] = numpy.array([])
self.A[ai] = numpy.array([])
if l>0:
[T1,T2] = numpy.meshgrid(self.ta,taux2)
T = T1-T2
T[numpy.where(T<=0)] = 0
T[numpy.where(T!=0)] = 1
T = numpy.diff(T,1,1)
(i,self.J[ai]) = T.nonzero()
self.A[ai] = (numpy.log10(taux2/self.ta[self.J[ai]]) /
numpy.log10(self.ta[self.J[ai]+1]/self.ta[self.J[ai]]))
#Calculate age difference array: the taux arrays are joined and
#sorted, the differences calculated, then rearranged back to the order
#of the original taux values.
taux = numpy.concatenate((taux1,taux2),axis=0)
taux.sort()
b = numpy.searchsorted(taux,taux1)
c = numpy.searchsorted(taux,taux2)
order = numpy.concatenate((b,c))
d = numpy.diff(taux)
dt = numpy.append(d,0) + numpy.append(0,d)
self.DT[ai] = numpy.copy(dt[order])
SED = numpy.empty([len(self.wave)])
Nlyman = numpy.empty([1])
Nlyman_final = numpy.empty([1])
beta = numpy.empty([1])
norm = numpy.empty([1])
STR = numpy.empty([tgi+1])
SFR = numpy.empty([tgi+1])
W = {}
# metal=[str((self.data[1]))[12:-3].strip()]*len(params.metallicities)
RMr = numpy.empty([tgi+1])
PRr = numpy.empty([tgi+1])
URr = numpy.empty([tgi+1])
Tr = numpy.empty([tgi+1])
"""
SECTION 2
Now calculate the integration coefficients w, and store them in the
cell array W. Also calculate the stellar mass fraction str. The so
array is expanded and used by each successive iteration of the inner
loop (ai). The outer loop repeats the operation for each tau value.
"""
prgas = numpy.zeros(tgi+1)
for ai in range(tgi+1):
j = self.J[ai] #Interpolation indices
tp = self.TP[ai] #Integration timescale
pgas = numpy.zeros_like(tp)
if ai ==0:
prgas = numpy.zeros_like(self.ta)
else:
i = numpy.where(tp<=self.ta[ai-1])
ii = numpy.where(tp>self.ta[ai-1])
pgas[i] = griddata(self.ta,prgas,tp[i])
pgas[ii] = prgas[ai-1]
#print prgas[ai]
tbins = numpy.logspace(0,numpy.log10(max(tp)),1000)
npgas = numpy.zeros_like(tbins)
if self.sfh_law == 'exp':
if self.tau > 0.:
sr = (1 + epsilon*pgas)*numpy.exp(-1*tp/self.tau)/abs(self.tau)
norma = 1
if len(sr) > 1:
i = numpy.where(tbins <= self.ta[ai-1])
ii = numpy.where(tbins > self.ta[ai-1])
npgas[i] = griddata(self.ta,prgas,tbins[i])
npgas[ii] = prgas[ai-1]
norma = simps((1+ epsilon*npgas)*numpy.exp(-1*tbins/self.tau)/abs(self.tau),tbins)
sr /= norma
elif self.tau < 0.:
sr = numpy.exp(-1*tp/self.tau)/abs(self.tau)
norma = 1
self.sr = sr
if len(sr) > 1:
norma = simps(numpy.exp(-1*tbins/self.tau)/abs(self.tau),tbins)
sr /= norma
#print sr[0]
self.norma = norma
w = sr*self.DT[ai]/2
w1 = numpy.array(w[:ai+1])
W[0,ai] = w1
strr = numpy.array(numpy.dot(w1,strm[:ai+1]))
rm = numpy.array(numpy.dot(w1,rmtm[:ai+1]))
l = len(self.A[ai])
if l>0:
w2 = w[ai+1:ai+l+1]
wa = w2*self.A[ai]
wb = w2-wa
W[1,ai] = wa
W[2,ai] = wb
strr += (numpy.dot(wb,strm[j]) + numpy.dot(wa,strm[j+1]))
rm += (numpy.dot(wb,rmtm[j]) + numpy.dot(wa,rmtm[j+1]))
if strr > 1: strr= 1
if self.tau > 0.:
ugas = numpy.exp(-1*self.ta[ai]/self.tau)
elif self.tau < 0.:
ugas = numpy.exp(-1*self.ta[ai]/self.tau)/numpy.exp(-1*max(self.ta)/self.tau)
#ugas = 1.
#Processed gas = gas formed into stars - mass in stars - remnants
prgas[ai] = 1 - ugas - strr -rm
if prgas[ai] < 0.: prgas[ai] = 0
#print prgas[ai]
URr[ai] = ugas
PRr[ai] = prgas[ai]
RMr[ai] = rm
Tr[ai] = simps(numpy.exp(-1*numpy.sort(tp)/self.tau)/self.tau,numpy.sort(tp))
STR[ai] = strr
if self.tau > 0:
SFR[ai] = (1 + epsilon*prgas[ai])*numpy.exp(-self.ta[ai]/self.tau)/abs(self.tau)/norma
elif self.tau < 0:
SFR[ai] = numpy.exp(-self.ta[ai]/self.tau)/abs(self.tau)/norma
#print SFR[ai,ti,mi]
#self.SN = float(snn)
SFR[ai] /= STR[ai]
else:
if self.sfh_law == 'pow':
sfr = self._sfh_pow
elif self.sfh_law == 'del':
sfr = self._sfh_del
elif self.sfh_law == 'tru':
sfr = self._sfh_tru
sr = sfr(tp,self.tau)
self.tp=tp
norma = 1
self.sr = sr
if len(sr) > 1:
norma = simps(sfr(tbins,self.tau),tbins)
sr /= norma
self.norma = norma
#print sr[0]
w = sr*self.DT[ai]/2
w1 = numpy.array(w[:ai+1])
W[0,ai] = w1
strr = numpy.array(numpy.dot(w1,strm[:ai+1]))
rm = numpy.array(numpy.dot(w1,rmtm[:ai+1]))
l = len(self.A[ai])
if l>0:
w2 = w[ai+1:ai+l+1]
wa = w2*self.A[ai]
wb = w2-wa
W[1,ai] = wa
W[2,ai] = wb
strr += (numpy.dot(wb,strm[j]) + numpy.dot(wa,strm[j+1]))
rm += (numpy.dot(wb,rmtm[j]) + numpy.dot(wa,rmtm[j+1]))
if strr > 1: strr= 1
if self.tau > 0.:
ugas = sfr(self.ta,self.tau)[ai]
elif self.tau < 0.:
ugas = sfr(self.ta,self.tau)[ai]/sfr(max(self.ta),self.tau)
#ugas = 1.
#Processed gas = gas formed into stars - mass in stars - remnants
prgas[ai] = 1 - ugas - strr -rm
if prgas[ai] < 0.: prgas[ai] = 0
#print prgas[ai]
URr[ai] = ugas
PRr[ai] = prgas[ai]
RMr[ai] = rm
Tr[ai] = simps(sfr(numpy.sort(tp)/1.e9,self.tau),numpy.sort(tp))
STR[ai] = strr
if self.tau > 0:
SFR[ai] = (1 + epsilon*prgas[ai])*sfr(self.ta,self.tau)[ai]/norma
elif self.tau < 0:
SFR[ai] = sfr(self.ta[ai],self.tau)/norma
#print SFR[ai,ti,mi]
#self.SN = float(snn)
SFR[ai] /= STR[ai]
"""
SECTION 3
Finally, for each tauv/tau/tg combination, perform a weighted
        sum of the simple stellar population (S.S.P.) spectral energy distributions 'sed1' to obtain the
model S.E.D. 'y'. Add each record to the SED array.
"""
sed1 = sed*ATT #dust-attenuated SED
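        # Note (descriptive only, not from the original source): the weighted
        # sum assembled below is
        #     y = sum_i w1[i]*sed1[:, i]
        #         + sum_k ( wb[k]*sed1[:, j[k]] + wa[k]*sed1[:, j[k]+1] ),
        # i.e. a sum over the stored SSP ages plus a linearly interpolated
        # contribution between ages j[k] and j[k]+1, with the same weights
        # applied to the un-attenuated 'sed' to build 'y_nodust'.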
ai = tgi
y = numpy.zeros([1,self.iw])
y_nodust = numpy.zeros([1,self.iw])
j = self.J[ai]
w1 = W[0,ai]
wa = W[1,ai]
wb = W[2,ai]
for i in range(ai):
y += (w1[i]*sed1[:,i])
y_nodust += (w1[i]*sed[:,i])
for i in range(len(wb)):
y += (wb[i]*sed1[:,j[i]] + wa[i]*sed1[:,j[i]+1])
y_nodust += (wb[i]*sed[:,j[i]] + wa[i]*sed[:,j[i]+1])
Nly = self.calc_lyman(self.wave,numpy.nan_to_num(y_nodust[0]))
#print Nly
if Nly > 0.:
Nlyman = numpy.log10(Nly)
else:
Nlyman = 0.
total = (self.neb_cont*self.inc_cont) + self.neb_hlines + (self.neb_metal[:,neb_z]*self.inc_met)
total *= 2.997925e18/(self.wave**2) #Convert to Flambda
total *= (Nly*(1-self.fesc))
y += total
Nly = self.calc_lyman(self.wave,numpy.nan_to_num(y[0] / STR[ai]))
#print Nly
self.fesc_tot = (self.fesc*Nly) / 10**Nlyman
if Nly > 0.:
Nlyman_final = numpy.log10(Nly) + 33. + numpy.log10(3.826)
if self.fesc > 0.:
Nlyman_final = numpy.log10(10**Nlyman_final * self.fesc)
elif self.fesc == 0:
Nlyman_final = 0.
else:
Nlyman_final = 0.
beta = self.calc_beta(self.wave,y[0])
#print ai,ai1
#print STR[ai1,ti,mi]
SED[:] = y/STR[ai] #normalised to 1 solar mass
norm = simps(numpy.exp(-1*numpy.logspace(0,numpy.log10(self.ta[tgi]),10000)/self.tau),numpy.logspace(0,numpy.log10(self.ta[tgi]),10000))
STR = STR[tgi]
SFR = SFR[tgi]
self.SED = SED
self.SFR = SFR / STR
self.STR = STR
self.beta = beta
self.Nly = Nlyman_final
self.Ms = 1.
def calc_beta(self,wave, SED):
"""
wave = wavelength array
SED = Rest-frame flux
Returns UV slope index and the error on that fit
"""
#new_wave = numpy.arange(1200,2650)
#new_SED = griddata(wave,SED,new_wave)
#wave = new_wave
#SED = new_SED
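        # Note (descriptive only): for a pure power law
        #     F_lambda = A * lambda**beta
        # we have log10(F_lambda) = log10(A) + beta*log10(lambda), so the slope
        # p[1] of the straight-line fit in log-log space performed below is an
        # estimate of the UV continuum slope beta over the chosen windows.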
#window_lower = numpy.array([1268.,1309.,1342.,1407.,1562.,1677.,1760.,1866.,1930.,2400.])
#window_upper = numpy.array([1284.,1316.,1371.,1515.,1583.,1740.,1833.,1890.,1950.,2580.])
#window_lower = numpy.array([1407,1562,1677.,1760.,1866.,1930.,2400.])
#window_upper = numpy.array([1515,1583,1740.,1833.,1890.,1950.,2580.])
window_lower = numpy.array([1600,2400])
window_upper = numpy.array([1950,2600])
ww = numpy.zeros_like(wave,dtype=bool)
for w in numpy.arange(len(window_lower)):
ww[(wave >= window_lower[w])*(wave < window_upper[w])] = True
#window_mean = (window_lower+window_upper)/2 #midpoint for power-law fitting
fluxes = numpy.zeros_like(window_lower)
#for w, window_lower in enumerate(window_lower):
# wf = numpy.where((wave > window_lower) & (wave <= window_upper[w]))
# fluxes[w] = numpy.mean(SED[wf])
#fluxes *= 2.997925e18/(window_mean**2)
fluxerr = numpy.sqrt(fluxes)
logx = numpy.log10(wave[ww])
logy = numpy.log10(SED[ww])
logyerr = 1.#fluxerr/fluxes
fitfunc = lambda p, x: p[0] + (x*p[1])
errfunc = lambda p, x, y, err: (y - fitfunc(p,x))/err
pinit = [numpy.max(SED[ww]), -2.0]
out = leastsq(errfunc, pinit, args=(logx,logy,logyerr))
#out = leastsq(errfunc, pinit, args=(log,fluxes,fluxerr))
pfinal = out[0]
covar = out[1]
#print pfinal
index = pfinal[1]
#indexerr = numpy.sqrt(covar[0])
return (index)#, indexerr)
def calc_lyman(self,x,s):
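        # Note (descriptive only): this routine appears to integrate
        # lambda*F_lambda over wavelengths blueward of the Lyman limit (912 A),
        # interpolating the last point onto 912 A, and to convert the result to
        # a photon rate: with F_lambda in erg/s/A and lambda in A, dividing by
        # the photon energy h*c/lambda and converting Angstroms to cm gives the
        # 1e-8/(h*c) prefactor defined below.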
wly = 912.
const_c = 2.997925e10
const_h = 6.6262e-27
const = 1e-8/const_h/const_c
n = int(sum([x < wly][0]))
f = numpy.zeros(n+1)
w = numpy.zeros(n+1)
for i in range(n+1):
if x[i] < wly:
w[i] = x[i]
f[i] = w[i]*s[i]
elif x[i] == wly:
w[i] = x[i]
f[i] = w[i]*s[i]
elif x[i] > wly:
w[i] = wly
f[i] = s[i-1] + ((w[i]-x[i-1])*(s[i]-s[i-1])/(x[i]-x[i-1]))
f[i] = w[i]*f[i]
nlyman = const*numpy.trapz(f,w)
#print numpy.log10(N_lyman)
return nlyman
def __str__(self):
params = ['Age', 'SFH Tau', 'Dust Tau', 'SFR', 'Stellar Mass','Beta']
values = [self.tg/1e9, self.tau/1e9, self.tauv, self.SFR, self.Ms, self.beta]
units = ['Gyr', 'Gyr', 'Av', 'Ms/yr','Msol','']
output = ['{:>14s}'.format(params[i]) +': '+ '{:<.3g}'.format(values[i]) + ' ' +units[i] for i in range(len(params))]
return '\n'.join(output)
def __add__(self,other):
if isinstance(other,CSP):
new = copy.deepcopy(self)
new.SED += other.SED
new.SFR += other.SFR
new.Ms += other.Ms
if new.Nly == 0:
new.Nly = other.Nly
if other.Nly == 0:
new.Nly = new.Nly
if (new.Nly > 0.) and (other.Nly > 0.):
new.Nly = numpy.log10(10**self.Nly + 10**other.Nly )
new.beta = new.calc_beta(new.wave,new.SED)
return new
def __iadd__(self,other):
if isinstance(other,CSP):
new = copy.deepcopy(self)
new.SED += other.SED
new.SFR += other.SFR
new.Ms += other.Ms
if new.Nly == 0:
new.Nly = other.Nly
if other.Nly == 0:
new.Nly = new.Nly
if (new.Nly > 0.) and (other.Nly > 0.):
new.Nly = numpy.log10(10**self.Nly + 10**other.Nly )
new.beta = new.calc_beta(new.wave,new.SED)
return new
def __mul__(self,other):
new = copy.deepcopy(self)
new.SED *= other
new.SFR *= other
new.Ms *= other
if other == 0.:
new.Nly = 0.
else:
new.Nly += numpy.log10(other)
new.Nly = numpy.maximum(new.Nly,0)
return new
def __imul__(self,other):
new = copy.deepcopy(self)
new.SED *= other
new.SFR *= other
new.Ms *= other
if other == 0.:
new.Nly = 0.
else:
new.Nly += numpy.log10(other)
new.Nly = numpy.maximum(new.Nly,0)
return new
def __div__(self,other):
new = copy.deepcopy(self)
new.SED /= other
new.SFR /= other
new.Ms /= other
if other == 0.:
new.Nly = 0.
else:
new.Nly -= numpy.log10(other)
new.Nly = numpy.maximum(new.Nly,0)
return new
def __idiv__(self,other):
new = copy.deepcopy(self)
new.SED /= other
new.SFR /= other
new.Ms /= other
if other == 0.:
new.Nly = 0.
else:
new.Nly -= numpy.log10(other)
new.Nly = numpy.maximum(new.Nly,0)
return new
def __rmul__(self,other):
new = copy.deepcopy(self)
new.SED *= other
new.SFR *= other
new.Ms *= other
if other == 0.:
new.Nly = 0.
else:
new.Nly += numpy.log10(other)
new.Nly = numpy.maximum(new.Nly,0)
return new
def addEmissionLine(self,wavelength,EqW):
wbin = numpy.argmin(numpy.abs(self.wave-wavelength))
#print wbin
binwidth = numpy.mean(numpy.diff(self.wave)[wbin-1:wbin+1])
#print binwidth
continuum = numpy.mean(self.SED[wbin:wbin+1])
#print continuum
lineluminosity = continuum * EqW
#print lineluminosity, lineluminosity/binwidth
self.Lalpha = lineluminosity
self.SED[wbin] += lineluminosity/binwidth
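    # Illustrative use of addEmissionLine (a sketch; `csp` is assumed to be a
    # built CSP instance with a populated SED):
    #     csp.addEmissionLine(1216., 100.)  # line at 1216 A with EW = 100 A
    # The line luminosity (continuum * EqW) is stored in self.Lalpha and the
    # corresponding flux density is added to the nearest wavelength bin.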
class Filter(object):
def __init__(self):
self.wave = []
self.freq = []
self.response = []
self.lambda_c = []
self.nu_c = []
class FileFilter(Filter):
def __init__(self,filepath,minbins=200):
self.path = filepath
# try:
data = numpy.loadtxt(self.path)
wf = data[:,0]
tp = data[:,1]
if len(data[:,0]) < minbins: #Re-sample large filters for performance
wfx = numpy.linspace(wf[0],wf[-1],minbins)
tpx = griddata(wf,tp,wfx)
wf = wfx
tp = tpx
self.wave = wf * U.angstrom
self.response = tp
self.freq = (C.c/self.wave).to(U.Hz)
nmax = numpy.argmax(self.response)
halfmax_low = self.wave[:nmax][numpy.argmin(numpy.abs(self.response[nmax] - 2*self.response[:nmax]))]
halfmax_hi = self.wave[nmax:][numpy.argmin(numpy.abs(self.response[nmax] - 2*self.response[nmax:]))]
print(self.wave[nmax],halfmax_low, halfmax_hi)
self.fwhm = halfmax_hi-halfmax_low
self.lambda_c = (simps(self.wave*self.response,self.wave) /
simps(self.response,self.wave))
self.nu_c = (simps(self.freq*self.response,self.freq) /
simps(self.response,self.freq))
# except:
# print 'Ohhhhh dear.'
class TophatFilter(Filter):
def __init__(self, centre, width, steps = 200):
self.centre = centre * U.angstrom
self.width = width * U.angstrom
self.steps = steps
upper, lower = self.centre+self.width, self.centre-self.width
resp_upper, resp_lower = self.centre+(self.width*0.5), self.centre-(self.width*0.5)
self.wave = numpy.linspace(lower,upper,steps)
self.response = numpy.zeros_like(self.wave.value)
tophat = (self.wave >= resp_lower)*(self.wave < resp_upper)
self.response[tophat] = 1
self.freq = (C.c/self.wave).to(U.Hz)
self.lambda_c = (simps(self.wave*self.response,self.wave) /
simps(self.response,self.wave))
self.nu_c = (simps(self.freq*self.response,self.freq) /
simps(self.response,self.freq))
class LoadEAZYFilters(object):
def __init__(self,path):
self.path = path
self.filters = []
self.filternames = []
self.central_wlengths = []
with open(self.path) as file:
for f in range(1000):
x = file.readline().split()
if len(x) < 1:
break
nwave, name, lambda_c = x[0],x[1],x[4]
nwave = int(nwave)
wavelength = []
response = []
#print nwave
for i in range(nwave):
N, w, r = numpy.array(file.readline().split()).astype('float')
wavelength.append(w)
response.append(r)
wavelength *= U.angstrom
freq = (C.c/wavelength).to(U.Hz)
lambda_c = (simps(wavelength*response,wavelength) /
simps(response,wavelength))
nu_c = (simps(freq*response,freq) /
simps(response,freq))
new_filter = Filter()
new_filter.wave = numpy.array(wavelength) * U.angstrom
new_filter.response = numpy.array(response)
new_filter.freq = numpy.array(freq) * U.Hz
new_filter.lambda_c = lambda_c
new_filter.nu_c = nu_c
nmax = numpy.argmax(new_filter.response)
halfmax_low = new_filter.wave[:nmax][numpy.argmin(numpy.abs(new_filter.response[nmax] - 2*new_filter.response[:nmax]))]
halfmax_hi = new_filter.wave[nmax:][numpy.argmin(numpy.abs(new_filter.response[nmax] - 2*new_filter.response[nmax:]))]
#print new_filter.wave[nmax],halfmax_low, halfmax_hi
new_filter.fwhm = halfmax_hi-halfmax_low
self.filters.append(new_filter)
self.filternames.append(name)
self.central_wlengths.append(lambda_c)
self.central_wlengths *= U.angstrom
class FilterSet:
def __init__(self,path=None):
self.directory = path
self.filters = []
if type(self.directory) == str:
try:
self.files = glob(self.directory)
self.files.sort()
for file in self.files:
self.filters.append(FileFilter(file))
            except Exception:
                pass  # skip paths whose filter files cannot be loaded
def addFileFilter(self,path):
self.filters.append(FileFilter(path))
def addTophatFilter(self,centre, width, steps = 200):
self.filters.append(TophatFilter(centre, width, steps))
def addEAZYFilter(self,EAZYset,N):
if type(N) == int:
self.filters.append(EAZYset.filters[N])
elif type(N) == list:
for x in N:
self.filters.append(EAZYset.filters[x])
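    # Illustrative assembly of a FilterSet (a sketch; the glob pattern is an
    # assumption, mirroring the commented example at the end of this module):
    #     filters = FilterSet('GOODS-S_18_FilterCurves/Filter*.txt')
    #     filters.addTophatFilter(centre=1500, width=100)
    # Each entry of filters.filters is then a Filter exposing wave, response
    # and lambda_c.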
class Observe:
def __init__(self,SED,Filters,redshift,force_age = True,madau=True):
self.SED = SED
self.F = Filters
self.redshifts = numpy.array(redshift,ndmin=1)
self.wave = self.SED.wave
self.fluxes = []
self.AB = []
self.wl = []
self.fwhm = []
for z in self.redshifts:
self.lyman_abs = numpy.ones(len(self.wave))
if madau:
ly_cont_w = numpy.array([(self.wave<=912.)][0])
ly_b_w = numpy.array([(self.wave > 912.) & (self.wave <= 1026.)][0])
ly_a_w = numpy.array([(self.wave > 1026.) & (self.wave <= 1216.)][0])
dec_a = (1/(120*(1+z)))*quad(self.dec_a_func,
1050*(1+z),1170*(1+z))[0]
dec_b= (1/(95*(1+z)))*quad(self.dec_b_func,
920*(1+z),1015*(1+z))[0]
self.lyman_abs[ly_cont_w] *= 0.1
self.lyman_abs[ly_b_w] = dec_b
self.lyman_abs[ly_a_w] = dec_a
if z > 0:
self.dm = cosmo.distmod(z).value
else:
self.dm = 0.
if (self.SED.tg/1e9 > cosmo.age(z).value) and force_age:
print('SSP age older than universe...stopping.')
else:
tfluxes = []
tAB = []
twl = []
tfwhm = []
for filt in self.F.filters:
#print filt.wave[0]
flux, mag = self.calcFlux(filt,z)
tfluxes.append(flux)
tAB.append(mag)
twl.append(filt.lambda_c)
tfwhm.append(filt.fwhm.value)
self.fluxes.append(tfluxes)
self.AB.append(tAB)
self.wl = twl
self.fwhm = tfwhm
self.wl *= U.angstrom
self.fwhm *= U.angstrom
        self.fluxes = (numpy.squeeze(self.fluxes) * 1e-6*U.Jy)
self.AB = (numpy.squeeze(self.AB) * U.mag)
def dec_a_func(self,wave_obs):
return numpy.exp(-1*0.0036*(numpy.power(wave_obs/1216.,3.46)))
def dec_b_func(self,wave_obs):
teff_beta=1.7e-3*(numpy.power(wave_obs/1026.,3.46))
teff_gamma=1.2e-3*(numpy.power(wave_obs/973.,3.46))
teff_delta=9.3e-4*(numpy.power(wave_obs/950.,3.46))
#The above absorption lines dominate the average absorption over the range
#but higher order lines should be added in future
teff_total=teff_beta+teff_gamma+teff_delta
return numpy.exp(-1*teff_total)
def calcFlux(self,filt,z):
wf = filt.wave.value
tp = filt.response
z1 = z+1
if len(wf) > 1000: #Re-sample large filters for performance
wfx = numpy.linspace(wf[0],wf[-1],1000)
tpx = griddata(wf,tp,wfx)
wf = wfx
tp = tpx
#Find SED wavelength entries within filter range
wff = numpy.array([wf[0] < self.wave[i] < wf[-1]
for i in range(len(self.wave))])
wft = self.wave[wff]
#Interpolate to find throughput values at new wavelength points
tpt = griddata(wf,tp,wft)
#Join arrays and sort w.r.t to wf
wf = numpy.concatenate((wf,wft))
tp = numpy.concatenate((tp,tpt))
order = numpy.argsort(wf)
wf = wf[order]
tp = tp[order]
dwf = numpy.diff(wf)
nwf = len(wf)
tpwf = tp/wf
f_mean2 = numpy.dot(dwf,(tpwf[:nwf-1]+tpwf[1:])/2)
tpwf = tp*wf #Reassign tpwf as product
wf1 = wf/z1
WR = 0.
for i in range(nwf):
#Interpolation indices
j = numpy.where(self.wave<wf1[i])[0][-1]
a = (wf1[i] - self.wave[j])/(self.wave[j+1]-self.wave[j])
tpa = (tpwf[i]*((1-a)*(self.SED.SED[j]*self.lyman_abs[j]) +
a*self.SED.SED[j+1]*self.lyman_abs[j+1]))
if i != 0:
WR += dwf[i-1]*(tpb+tpa)
tpb = tpa
F_mean = WR/2/z1/f_mean2/2.997925e18
#print 'F_mean shape '+ len(F_mean)
AB0 = 5*numpy.log10(1.7684e8*1e-5)
# dl = 10pc in Mpc
# this factor is sqrt(4*pi*(3.0856e24)^2 Lsun)
#Convert fluxes to AB magnitudes
Mag = AB0 - 2.5*numpy.log10(F_mean) - 48.6
Mag += self.dm
Flux = 10**((23.9 - Mag)/2.5) #uJy
return Flux , Mag
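    # Note (descriptive only): the flux conversion above uses the AB zero
    # point expressed in microjanskys,
    #     m_AB = 23.9 - 2.5*log10(F / uJy),
    # inverted to recover F in uJy from the distance-modulus-corrected
    # magnitude.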
#must define cosmo before calling an Observe
"""
a = SSP()
a.build(0.8,1000,0,4,fesc=0.) #(Age, Tau, Dust, Metallicity Index)
#print a
b = SSP()
b.build(1.5,0.5,1,2,fesc=1.)
#print b
a = a*1e9 #Multiply by a factor, equivalent to *=
b = b*5e9
c = a+b
#a+b #Add b to a (a += b)
print(a)
filt_dir = '../aMasSED-code/GOODS-S_18_FilterCurves/Filter*.txt'
Filts = FilterSet(filt_dir)
Filts.addTophatFilter(1500,100)
AA = Observe(a,Filts,2) # Observe a (built from two initial SEDs)
# through the filters in Filts at redshift of z = 2
#BB = Observe(b,Filts,2)
print(AA.AB)
"""
|
|
from collections import namedtuple
import numpy as np
from . import distributions
from . import futil
__all__ = ['find_repeats', 'linregress', 'theilslopes']
def linregress(x, y=None):
"""
Calculate a regression line
This computes a least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length.
If only x is given (and y=None), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension.
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
rvalue : float
correlation coefficient
pvalue : float
two-sided p-value for a hypothesis test whose null hypothesis is
that the slope is zero.
stderr : float
Standard error of the estimate
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = np.random.random(10)
>>> y = np.random.random(10)
>>> slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
    >>> # To get the coefficient of determination (r_squared):
    >>> print("r-squared:", r_value**2)
    r-squared: 0.080402268539028335
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
slope = r_num / ssxm
intercept = ymean - slope*xmean
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
return LinregressResult(slope, intercept, r, prob, sterrest)
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
y = np.asarray(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.asarray(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = find_repeats(x)
_, nyreps = find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
def find_repeats(arr):
"""
Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([ 2.]), counts=array([4], dtype=int32))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([ 4., 5.]), counts=array([2, 2], dtype=int32))
"""
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
if np.asarray(arr).size == 0:
return RepeatedResults([], [])
v1, v2, n = futil.dfreps(arr)
return RepeatedResults(v1[:n], v2[:n])
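# Illustrative pure-numpy near-equivalent (numpy >= 1.9), not part of this
# module, which the docstring above alludes to:
#     u, c = np.unique(np.asarray(arr).ravel(), return_counts=True)
#     values, counts = u[c > 1], c[c > 1]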
|
|
import json
import logging
import os
import sys
import six
from docker.utils.ports import split_port
from jsonschema import Draft4Validator
from jsonschema import FormatChecker
from jsonschema import RefResolver
from jsonschema import ValidationError
from .errors import ConfigurationError
log = logging.getLogger(__name__)
DOCKER_CONFIG_HINTS = {
'cpu_share': 'cpu_shares',
'add_host': 'extra_hosts',
'hosts': 'extra_hosts',
'extra_host': 'extra_hosts',
'device': 'devices',
'link': 'links',
'memory_swap': 'memswap_limit',
'port': 'ports',
'privilege': 'privileged',
'priviliged': 'privileged',
'privilige': 'privileged',
'volume': 'volumes',
'workdir': 'working_dir',
}
VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]'
@FormatChecker.cls_checks(
format="ports",
raises=ValidationError(
"Invalid port formatting, it should be "
"'[[remote_ip:]remote_port:]port[/protocol]'"))
def format_ports(instance):
try:
split_port(instance)
except ValueError:
return False
return True
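# Illustrative inputs for format_ports (a sketch based on the format string in
# the error message above; exact acceptance is delegated to split_port):
#     format_ports("8000")                 # True
#     format_ports("127.0.0.1:8001:8000")  # True
#     format_ports("1:2:3:4")              # False, split_port raises ValueError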
@FormatChecker.cls_checks(format="environment")
def format_boolean_in_environment(instance):
"""
Check if there is a boolean in the environment and display a warning.
Always return True here so the validation won't raise an error.
"""
if isinstance(instance, bool):
log.warn(
"Warning: There is a boolean value in the 'environment' key.\n"
"Environment variables can only be strings.\nPlease add quotes to any boolean values to make them string "
"(eg, 'True', 'yes', 'N').\nThis warning will become an error in a future release. \r\n"
)
return True
def validate_service_names(config):
for service_name in config.keys():
if not isinstance(service_name, six.string_types):
raise ConfigurationError(
"Service name: {} needs to be a string, eg '{}'".format(
service_name,
service_name))
def validate_top_level_object(config):
if not isinstance(config, dict):
raise ConfigurationError(
"Top level object needs to be a dictionary. Check your .yml file "
"that you have defined a service at the top level.")
validate_service_names(config)
def validate_extends_file_path(service_name, extends_options, filename):
"""
The service to be extended must either be defined in the config key 'file',
or within 'filename'.
"""
error_prefix = "Invalid 'extends' configuration for %s:" % service_name
if 'file' not in extends_options and filename is None:
raise ConfigurationError(
"%s you need to specify a 'file', e.g. 'file: something.yml'" % error_prefix
)
def validate_extended_service_exists(extended_service_name, full_extended_config, extended_config_path):
if extended_service_name not in full_extended_config:
msg = (
"Cannot extend service '%s' in %s: Service not found"
) % (extended_service_name, extended_config_path)
raise ConfigurationError(msg)
def get_unsupported_config_msg(service_name, error_key):
msg = "Unsupported config option for '{}' service: '{}'".format(service_name, error_key)
if error_key in DOCKER_CONFIG_HINTS:
msg += " (did you mean '{}'?)".format(DOCKER_CONFIG_HINTS[error_key])
return msg
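# Illustrative output (derived from the hint mapping above):
#     get_unsupported_config_msg('web', 'privilige')
#     -> "Unsupported config option for 'web' service: 'privilige' (did you mean 'privileged'?)"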
def anglicize_validator(validator):
if validator in ["array", "object"]:
return 'an ' + validator
return 'a ' + validator
def process_errors(errors, service_name=None):
"""
jsonschema gives us an error tree full of information to explain what has
    gone wrong. Process each error, pull out the relevant information and
    re-write it as a helpful, human-readable error message.
"""
def _parse_key_from_error_msg(error):
return error.message.split("'")[1]
def _clean_error_message(message):
return message.replace("u'", "'")
def _parse_valid_types_from_validator(validator):
"""
A validator value can be either an array of valid types or a string of
a valid type. Parse the valid types and prefix with the correct article.
"""
if isinstance(validator, list):
if len(validator) >= 2:
first_type = anglicize_validator(validator[0])
last_type = anglicize_validator(validator[-1])
types_from_validator = ", ".join([first_type] + validator[1:-1])
msg = "{} or {}".format(
types_from_validator,
last_type
)
else:
msg = "{}".format(anglicize_validator(validator[0]))
else:
msg = "{}".format(anglicize_validator(validator))
return msg
def _parse_oneof_validator(error):
"""
oneOf has multiple schemas, so we need to reason about which schema, sub
schema or constraint the validation is failing on.
Inspecting the context value of a ValidationError gives us information about
which sub schema failed and which kind of error it is.
"""
required = [context for context in error.context if context.validator == 'required']
if required:
return required[0].message
additionalProperties = [context for context in error.context if context.validator == 'additionalProperties']
if additionalProperties:
invalid_config_key = _parse_key_from_error_msg(additionalProperties[0])
return "contains unsupported option: '{}'".format(invalid_config_key)
constraint = [context for context in error.context if len(context.path) > 0]
if constraint:
valid_types = _parse_valid_types_from_validator(constraint[0].validator_value)
invalid_config_key = "".join(
"'{}' ".format(fragment) for fragment in constraint[0].path
if isinstance(fragment, six.string_types)
)
msg = "{}contains {}, which is an invalid type, it should be {}".format(
invalid_config_key,
constraint[0].instance,
valid_types
)
return msg
uniqueness = [context for context in error.context if context.validator == 'uniqueItems']
if uniqueness:
msg = "contains non unique items, please remove duplicates from {}".format(
uniqueness[0].instance
)
return msg
types = [context.validator_value for context in error.context if context.validator == 'type']
valid_types = _parse_valid_types_from_validator(types)
msg = "contains an invalid type, it should be {}".format(valid_types)
return msg
root_msgs = []
invalid_keys = []
required = []
type_errors = []
other_errors = []
for error in errors:
# handle root level errors
if len(error.path) == 0 and not error.instance.get('name'):
if error.validator == 'type':
msg = "Top level object needs to be a dictionary. Check your .yml file that you have defined a service at the top level."
root_msgs.append(msg)
elif error.validator == 'additionalProperties':
invalid_service_name = _parse_key_from_error_msg(error)
msg = "Invalid service name '{}' - only {} characters are allowed".format(invalid_service_name, VALID_NAME_CHARS)
root_msgs.append(msg)
else:
root_msgs.append(_clean_error_message(error.message))
else:
if not service_name:
# field_schema errors will have service name on the path
service_name = error.path[0]
error.path.popleft()
else:
# service_schema errors have the service name passed in, as that
# is not available on error.path or necessarily error.instance
service_name = service_name
if error.validator == 'additionalProperties':
invalid_config_key = _parse_key_from_error_msg(error)
invalid_keys.append(get_unsupported_config_msg(service_name, invalid_config_key))
elif error.validator == 'anyOf':
if 'image' in error.instance and 'build' in error.instance:
required.append(
"Service '{}' has both an image and build path specified. "
"A service can either be built to image or use an existing "
"image, not both.".format(service_name))
elif 'image' not in error.instance and 'build' not in error.instance:
required.append(
"Service '{}' has neither an image nor a build path "
"specified. Exactly one must be provided.".format(service_name))
elif 'image' in error.instance and 'dockerfile' in error.instance:
required.append(
"Service '{}' has both an image and alternate Dockerfile. "
"A service can either be built to image or use an existing "
"image, not both.".format(service_name))
else:
required.append(_clean_error_message(error.message))
elif error.validator == 'oneOf':
config_key = error.path[0]
msg = _parse_oneof_validator(error)
type_errors.append("Service '{}' configuration key '{}' {}".format(
service_name, config_key, msg)
)
elif error.validator == 'type':
msg = _parse_valid_types_from_validator(error.validator_value)
if len(error.path) > 0:
config_key = " ".join(["'%s'" % k for k in error.path])
type_errors.append(
"Service '{}' configuration key {} contains an invalid "
"type, it should be {}".format(
service_name,
config_key,
msg))
else:
root_msgs.append(
"Service '{}' doesn\'t have any configuration options. "
"All top level keys in your docker-compose.yml must map "
"to a dictionary of configuration options.'".format(service_name))
elif error.validator == 'required':
config_key = error.path[0]
required.append(
"Service '{}' option '{}' is invalid, {}".format(
service_name,
config_key,
_clean_error_message(error.message)))
elif error.validator == 'dependencies':
dependency_key = list(error.validator_value.keys())[0]
required_keys = ",".join(error.validator_value[dependency_key])
required.append("Invalid '{}' configuration for '{}' service: when defining '{}' you must set '{}' as well".format(
dependency_key, service_name, dependency_key, required_keys))
else:
config_key = " ".join(["'%s'" % k for k in error.path])
err_msg = "Service '{}' configuration key {} value {}".format(service_name, config_key, error.message)
other_errors.append(err_msg)
return "\n".join(root_msgs + invalid_keys + required + type_errors + other_errors)
def validate_against_fields_schema(config):
schema_filename = "fields_schema.json"
format_checkers = ["ports", "environment"]
return _validate_against_schema(config, schema_filename, format_checkers)
def validate_against_service_schema(config, service_name):
schema_filename = "service_schema.json"
format_checkers = ["ports"]
return _validate_against_schema(config, schema_filename, format_checkers, service_name)
def _validate_against_schema(config, schema_filename, format_checker=[], service_name=None):
config_source_dir = os.path.dirname(os.path.abspath(__file__))
if sys.platform == "win32":
file_pre_fix = "///"
config_source_dir = config_source_dir.replace('\\', '/')
else:
file_pre_fix = "//"
resolver_full_path = "file:{}{}/".format(file_pre_fix, config_source_dir)
schema_file = os.path.join(config_source_dir, schema_filename)
with open(schema_file, "r") as schema_fh:
schema = json.load(schema_fh)
resolver = RefResolver(resolver_full_path, schema)
validation_output = Draft4Validator(schema, resolver=resolver, format_checker=FormatChecker(format_checker))
errors = [error for error in sorted(validation_output.iter_errors(config), key=str)]
if errors:
error_msg = process_errors(errors, service_name)
raise ConfigurationError("Validation failed, reason(s):\n{}".format(error_msg))
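# Hedged usage sketch (assuming `config` is a dict parsed from a compose YAML
# file):
#     validate_top_level_object(config)
#     validate_against_fields_schema(config)
# The schema-based helpers raise ConfigurationError with the messages assembled
# by process_errors when validation fails.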
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.box_predictor."""
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.core import box_predictor
from object_detection.protos import hyperparams_pb2
class MaskRCNNBoxPredictorTest(tf.test.TestCase):
def _build_arg_scope_with_hyperparams(self,
op_type=hyperparams_pb2.Hyperparams.FC):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.build(hyperparams, is_training=True)
def test_get_boxes_with_five_classes(self):
image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
)
    box_predictions = mask_box_predictor.predict(
image_features, num_predictions_per_location=1, scope='BoxPredictor')
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
class_predictions_with_background = box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape,
class_predictions_with_background_shape) = sess.run(
[tf.shape(box_encodings),
tf.shape(class_predictions_with_background)])
self.assertAllEqual(box_encodings_shape, [2, 1, 5, 4])
self.assertAllEqual(class_predictions_with_background_shape, [2, 1, 6])
def test_value_error_on_predict_instance_masks_with_no_conv_hyperparms(self):
with self.assertRaises(ValueError):
box_predictor.MaskRCNNBoxPredictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
predict_instance_masks=True)
def test_get_instance_masks(self):
image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
conv_hyperparams=self._build_arg_scope_with_hyperparams(
op_type=hyperparams_pb2.Hyperparams.CONV),
predict_instance_masks=True)
    box_predictions = mask_box_predictor.predict(
image_features, num_predictions_per_location=1, scope='BoxPredictor')
mask_predictions = box_predictions[box_predictor.MASK_PREDICTIONS]
self.assertListEqual([2, 1, 5, 14, 14],
mask_predictions.get_shape().as_list())
def test_do_not_return_instance_masks_and_keypoints_without_request(self):
image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)
mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4)
    box_predictions = mask_box_predictor.predict(
image_features, num_predictions_per_location=1, scope='BoxPredictor')
self.assertEqual(len(box_predictions), 2)
self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions)
self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND
in box_predictions)
def test_value_error_on_predict_keypoints(self):
with self.assertRaises(ValueError):
box_predictor.MaskRCNNBoxPredictor(
is_training=False,
num_classes=5,
fc_hyperparams=self._build_arg_scope_with_hyperparams(),
use_dropout=False,
dropout_keep_prob=0.5,
box_code_size=4,
predict_keypoints=True)
class RfcnBoxPredictorTest(tf.test.TestCase):
def _build_arg_scope_with_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.build(conv_hyperparams, is_training=True)
def test_get_correct_box_encoding_and_class_prediction_shapes(self):
image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
proposal_boxes = tf.random_normal([4, 2, 4], dtype=tf.float32)
rfcn_box_predictor = box_predictor.RfcnBoxPredictor(
is_training=False,
num_classes=2,
conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
num_spatial_bins=[3, 3],
depth=4,
crop_size=[12, 12],
box_code_size=4
)
    box_predictions = rfcn_box_predictor.predict(
image_features, num_predictions_per_location=1, scope='BoxPredictor',
proposal_boxes=proposal_boxes)
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
class_predictions_with_background = box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape,
class_predictions_shape) = sess.run(
[tf.shape(box_encodings),
tf.shape(class_predictions_with_background)])
self.assertAllEqual(box_encodings_shape, [8, 1, 2, 4])
self.assertAllEqual(class_predictions_shape, [8, 1, 3])
class ConvolutionalBoxPredictorTest(tf.test.TestCase):
def _build_arg_scope_with_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.build(conv_hyperparams, is_training=True)
def test_get_boxes_for_five_aspect_ratios_per_location(self):
image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=1,
box_code_size=4
)
    box_predictions = conv_box_predictor.predict(
image_features, num_predictions_per_location=5, scope='BoxPredictor')
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
objectness_predictions = box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape,
objectness_predictions_shape) = sess.run(
[tf.shape(box_encodings), tf.shape(objectness_predictions)])
self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])
self.assertAllEqual(objectness_predictions_shape, [4, 320, 1])
def test_get_boxes_for_one_aspect_ratio_per_location(self):
image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=1,
box_code_size=4
)
    box_predictions = conv_box_predictor.predict(
image_features, num_predictions_per_location=1, scope='BoxPredictor')
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
objectness_predictions = box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape,
objectness_predictions_shape) = sess.run(
[tf.shape(box_encodings), tf.shape(objectness_predictions)])
self.assertAllEqual(box_encodings_shape, [4, 64, 1, 4])
self.assertAllEqual(objectness_predictions_shape, [4, 64, 1])
def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(
self):
num_classes_without_background = 6
image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)
conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
is_training=False,
num_classes=num_classes_without_background,
conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=1,
box_code_size=4
)
    box_predictions = conv_box_predictor.predict(
image_features,
num_predictions_per_location=5,
scope='BoxPredictor')
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
class_predictions_with_background = box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape, class_predictions_with_background_shape
) = sess.run([
tf.shape(box_encodings), tf.shape(class_predictions_with_background)])
self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])
self.assertAllEqual(class_predictions_with_background_shape,
[4, 320, num_classes_without_background+1])
def test_get_boxes_for_five_aspect_ratios_per_location_fully_convolutional(
self):
image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(
is_training=False,
num_classes=0,
conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),
min_depth=0,
max_depth=32,
num_layers_before_predictor=1,
use_dropout=True,
dropout_keep_prob=0.8,
kernel_size=1,
box_code_size=4
)
    box_predictions = conv_box_predictor.predict(
image_features, num_predictions_per_location=5, scope='BoxPredictor')
box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]
objectness_predictions = box_predictions[
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
init_op = tf.global_variables_initializer()
resolution = 32
expected_num_anchors = resolution*resolution*5
with self.test_session() as sess:
sess.run(init_op)
(box_encodings_shape,
objectness_predictions_shape) = sess.run(
[tf.shape(box_encodings), tf.shape(objectness_predictions)],
feed_dict={image_features:
np.random.rand(4, resolution, resolution, 64)})
self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
self.assertAllEqual(objectness_predictions_shape,
[4, expected_num_anchors, 1])
if __name__ == '__main__':
tf.test.main()
|
|
"""Unit tests for new super() implementation."""
import sys
import unittest
class A:
def f(self):
return 'A'
@classmethod
def cm(cls):
return (cls, 'A')
class B(A):
def f(self):
return super().f() + 'B'
@classmethod
def cm(cls):
return (cls, super().cm(), 'B')
class C(A):
def f(self):
return super().f() + 'C'
@classmethod
def cm(cls):
return (cls, super().cm(), 'C')
class D(C, B):
def f(self):
return super().f() + 'D'
def cm(cls):
return (cls, super().cm(), 'D')
class E(D):
pass
class F(E):
f = E.f
class G(A):
pass
class TestSuper(unittest.TestCase):
def tearDown(self):
# This fixes the damage that test_various___class___pathologies does.
nonlocal __class__
__class__ = TestSuper
def test_basics_working(self):
self.assertEqual(D().f(), 'ABCD')
def test_class_getattr_working(self):
self.assertEqual(D.f(D()), 'ABCD')
def test_subclass_no_override_working(self):
self.assertEqual(E().f(), 'ABCD')
self.assertEqual(E.f(E()), 'ABCD')
def test_unbound_method_transfer_working(self):
self.assertEqual(F().f(), 'ABCD')
self.assertEqual(F.f(F()), 'ABCD')
def test_class_methods_still_working(self):
self.assertEqual(A.cm(), (A, 'A'))
self.assertEqual(A().cm(), (A, 'A'))
self.assertEqual(G.cm(), (G, 'A'))
self.assertEqual(G().cm(), (G, 'A'))
def test_super_in_class_methods_working(self):
d = D()
self.assertEqual(d.cm(), (d, (D, (D, (D, 'A'), 'B'), 'C'), 'D'))
e = E()
self.assertEqual(e.cm(), (e, (E, (E, (E, 'A'), 'B'), 'C'), 'D'))
def test_super_with_closure(self):
# Issue4360: super() did not work in a function that
# contains a closure
class E(A):
def f(self):
def nested():
self
return super().f() + 'E'
self.assertEqual(E().f(), 'AE')
def test_various___class___pathologies(self):
# See issue #12370
class X(A):
def f(self):
return super().f()
__class__ = 413
x = X()
self.assertEqual(x.f(), 'A')
self.assertEqual(x.__class__, 413)
class X:
x = __class__
def f():
__class__
self.assertIs(X.x, type(self))
with self.assertRaises(NameError) as e:
exec("""class X:
__class__
def f():
__class__""", globals(), {})
self.assertIs(type(e.exception), NameError) # Not UnboundLocalError
class X:
global __class__
__class__ = 42
def f():
__class__
self.assertEqual(globals()["__class__"], 42)
del globals()["__class__"]
self.assertNotIn("__class__", X.__dict__)
class X:
nonlocal __class__
__class__ = 42
def f():
__class__
self.assertEqual(__class__, 42)
def test___class___instancemethod(self):
# See issue #14857
class X:
def f(self):
return __class__
self.assertIs(X().f(), X)
def test___class___classmethod(self):
# See issue #14857
class X:
@classmethod
def f(cls):
return __class__
self.assertIs(X.f(), X)
def test___class___staticmethod(self):
# See issue #14857
class X:
@staticmethod
def f():
return __class__
self.assertIs(X.f(), X)
def test___class___new(self):
test_class = None
class Meta(type):
def __new__(cls, name, bases, namespace):
nonlocal test_class
self = super().__new__(cls, name, bases, namespace)
test_class = self.f()
return self
class A(metaclass=Meta):
@staticmethod
def f():
return __class__
self.assertIs(test_class, A)
def test___class___delayed(self):
test_namespace = None
class Meta(type):
def __new__(cls, name, bases, namespace):
nonlocal test_namespace
test_namespace = namespace
return None
class A(metaclass=Meta):
@staticmethod
def f():
return __class__
self.assertIs(A, None)
B = type("B", (), test_namespace)
self.assertIs(B.f(), B)
def test___class___mro(self):
test_class = None
class Meta(type):
def mro(self):
# self.f() doesn't work yet...
self.__dict__["f"]()
return super().mro()
class A(metaclass=Meta):
def f():
nonlocal test_class
test_class = __class__
self.assertIs(test_class, A)
def test___classcell___deleted(self):
class Meta(type):
def __new__(cls, name, bases, namespace):
del namespace['__classcell__']
return super().__new__(cls, name, bases, namespace)
class A(metaclass=Meta):
@staticmethod
def f():
__class__
with self.assertRaises(NameError):
A.f()
def test___classcell___reset(self):
class Meta(type):
def __new__(cls, name, bases, namespace):
namespace['__classcell__'] = 0
return super().__new__(cls, name, bases, namespace)
class A(metaclass=Meta):
@staticmethod
def f():
__class__
with self.assertRaises(NameError):
A.f()
self.assertEqual(A.__classcell__, 0)
def test_obscure_super_errors(self):
def f():
super()
self.assertRaises(RuntimeError, f)
def f(x):
del x
super()
self.assertRaises(RuntimeError, f, None)
class X:
def f(x):
nonlocal __class__
del __class__
super()
self.assertRaises(RuntimeError, X().f)
def test_cell_as_self(self):
class X:
def meth(self):
super()
def f():
k = X()
def g():
return k
return g
c = f().__closure__[0]
self.assertRaises(TypeError, X.meth, c)
def test_super_init_leaks(self):
# Issue #26718: super.__init__ leaked memory if called multiple times.
        # This will be caught by regrtest.py -R if this leaks.
# NOTE: Despite the use in the test a direct call of super.__init__
# is not endorsed.
sp = super(float, 1.0)
for i in range(1000):
super.__init__(sp, int, i)
if __name__ == "__main__":
unittest.main()
|
|
import serverconf
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import THttpClient
from thrift.protocol import TBinaryProtocol
import thrift
import sdhashsrv
from sdhashsrv import *
from sdhashsrv.ttypes import *
from sdhashsrv.constants import *
from sdhashsrv.sdhashsrv import *
from serverconf import *
import json
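# Every function below repeats the same Thrift connection boilerplate. An
# illustrative helper (a sketch only, not used by the code below; `host` and
# `port` are expected to come via the serverconf import):
#     def _connect():
#         socket = TSocket.TSocket(host, port)
#         transport = TTransport.TBufferedTransport(socket)
#         protocol = TBinaryProtocol.TBinaryProtocol(transport)
#         return transport, sdhashsrv.Client(protocol)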
def listing():
socket = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = sdhashsrv.Client(protocol)
try:
transport.open()
result=client.setList(True)
transport.close()
except Exception:
        result = "Connection Failed"
return result
def hashsetdisplay(num):
socket = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = sdhashsrv.Client(protocol)
transport.open()
result=client.displaySet(num)
transport.close()
    i = 0
output=[]
middle=result.split("\n")
reslist=sorted(middle, key=str.lower)
for val in reslist:
if (i>0):
items=[]
items=val.rsplit(None,1)
output.append((i,items[0],items[1]))
i=i+1
return output
def hashsetcompare(num,thresh):
socket = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = sdhashsrv.Client(protocol)
transport.open()
resid=client.createResultID("web")
client.compareAll(num,thresh,resid)
transport.close()
return resid
def hashsetcompare2(num,num2,thresh,sample):
socket = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = sdhashsrv.Client(protocol)
transport.open()
resid=client.createResultID("web")
client.compareTwo(num,num2,thresh,sample,resid)
transport.close()
return resid
def getresultbyid(resid):
socket = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = sdhashsrv.Client(protocol)
transport.open()
result=client.displayResult(resid)
queryset='query'
targetset='target'
info=client.displayResultInfo(resid)
stuff=info.split()
if (info.count('--')==1):
queryset=stuff[0]
targetset=stuff[2]
else:
queryset=stuff[0]
targetset=queryset
output=[]
header=[]
header.append('queryset')
header.append('query')
header.append('targetset')
header.append('target')
header.append('score')
for line in result.split('\n'):
cols = line.split('|')
items = []
cforw = cols[0].count('/')
cback = cols[0].count('\\')
if (len(cols) == 3):
items.append(queryset)
if (len(cols[0]) >50):
if (cback > 0):
fileparts=cols[0].rsplit('\\',3)
items.append('...\\'+fileparts[1]+'\\'+fileparts[2]+'\\'+fileparts[3])
if (cforw > 0):
fileparts=cols[0].rsplit('/',3)
items.append('.../'+fileparts[1]+'/'+fileparts[2]+'/'+fileparts[3])
else:
items.append('...'+cols[0][-50:])
else:
items.append(cols[0])
items.append(targetset)
cforw = cols[1].count('/')
cback = cols[1].count('\\')
if (len(cols[1]) >50):
if (cback > 0):
fileparts=cols[1].rsplit('\\',3)
items.append('...\\'+fileparts[1]+'\\'+fileparts[2]+'\\'+fileparts[3])
if (cforw > 0):
fileparts=cols[1].rsplit('/',3)
items.append('.../'+fileparts[1]+'/'+fileparts[2]+'/'+fileparts[3])
else:
items.append(cols[1])
items.append(cols[2])
output.append(items)
transport.close()
return output
def getresultbyname(name):
socket = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = sdhashsrv.Client(protocol)
transport.open()
result=client.displayResultsList(name,1) # use json output
transport.close()
parsed=json.loads(result)
parsed.reverse()
output=[]
for val in parsed:
indexnum=val.items()[0][0]
name=unicode.decode(val.items()[0][1])
output.append((indexnum,name))
return output
def filelisting():
socket = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = sdhashsrv.Client(protocol)
transport.open()
result=client.displaySourceList()
transport.close()
output=[]
i=0
reslist=sorted(result, key=str.lower)
for val in reslist:
output.append((i,val))
i+=1
return output
def createhashset(name,rawlist):
socket = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = sdhashsrv.Client(protocol)
transport.open()
setID=client.createHashsetID()
fixed=str(rawlist)
filelist=fixed.split('|')
    print(rawlist)
    print(filelist)
    # index searching is off until the UI changes are in place
client.hashString(name,filelist,0,setID,0)
transport.close()
return setID
def getresultstatus(resID):
socket = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = sdhashsrv.Client(protocol)
transport.open()
result=client.displayResultStatus(resID)
time=client.displayResultDuration(resID)
transport.close()
return result+' '+time
def gethashsetstatus(hashsetID):
socket = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = sdhashsrv.Client(protocol)
transport.open()
result=client.getHashsetName(hashsetID)
if (result==''):
output='processing hashset #'+str(hashsetID)
else:
output='completed hashset '+result
transport.close()
return output
def removeresult(resID):
socket = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = sdhashsrv.Client(protocol)
transport.open()
client.removeResult(resID)
transport.close()
return True
|
|
# coding=utf-8
# Copyright 2022 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Tests for robotics.learning.estimator_models.meta_learning.preprocessors."""
import functools
from absl.testing import parameterized
import numpy as np
import six
from six.moves import range
from tensor2robot.meta_learning import preprocessors
from tensor2robot.preprocessors import abstract_preprocessor
from tensor2robot.utils import tensorspec_utils as utils
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
TSpec = utils.TensorSpecStruct
_RANDOM_SEED = 1234
_DEFAULT_IN_IMAGE_SHAPE = (640, 512, 3)
_DEFAULT_OUT_IMAGE_SHAPE = (256, 320, 3)
_DEFAULT_ACTION_SHAPE = (5,)
_DEFAULT_REWARD_SHAPE = (1,)
class MockBasePreprocessor(abstract_preprocessor.AbstractPreprocessor):
def get_in_feature_specification(self, mode):
del mode
feature_spec = TSpec()
feature_spec.image = utils.ExtendedTensorSpec(
shape=_DEFAULT_IN_IMAGE_SHAPE,
dtype=tf.uint8,
is_optional=False,
data_format='jpeg',
name='state/image')
feature_spec.action = utils.ExtendedTensorSpec(
shape=_DEFAULT_ACTION_SHAPE,
dtype=tf.float32,
is_optional=False,
name='state/action')
return feature_spec
def get_out_feature_specification(self, mode):
del mode
feature_spec = TSpec()
feature_spec.image = utils.ExtendedTensorSpec(
shape=_DEFAULT_OUT_IMAGE_SHAPE,
dtype=tf.float32,
is_optional=False,
name='state/image')
feature_spec.original_image = utils.ExtendedTensorSpec(
shape=_DEFAULT_IN_IMAGE_SHAPE, dtype=tf.float32, is_optional=True)
feature_spec.action = utils.ExtendedTensorSpec(
shape=_DEFAULT_ACTION_SHAPE,
dtype=tf.float32,
is_optional=False,
name='state/action')
return feature_spec
def get_in_label_specification(self, mode):
del mode
label_spec = TSpec()
label_spec.reward = utils.ExtendedTensorSpec(
shape=_DEFAULT_REWARD_SHAPE,
dtype=tf.float32,
is_optional=False,
name='reward')
return label_spec
def get_out_label_specification(self, mode):
del mode
label_spec = TSpec()
label_spec.reward = utils.ExtendedTensorSpec(
shape=_DEFAULT_REWARD_SHAPE,
dtype=tf.float32,
is_optional=False,
name='reward')
return label_spec
def _preprocess_fn(self, features, labels, mode):
features.original_image = tf.image.convert_image_dtype(
features.image, tf.float32)
features.image = tf.image.resize_bilinear(
features.original_image,
size=self.get_out_feature_specification(mode).image.shape[:2])
return features, labels
class PreprocessorsTest(tf.test.TestCase, parameterized.TestCase):
def _create_mock_tensors(self,
base_preprocessor,
batch_size,
mode=tf.estimator.ModeKeys.TRAIN):
np.random.seed(_RANDOM_SEED)
features = utils.make_random_numpy(
base_preprocessor.get_in_feature_specification(mode),
batch_size=batch_size)
labels = utils.make_random_numpy(
base_preprocessor.get_in_label_specification(mode),
batch_size=batch_size)
return (features, labels)
def _init_mock(self, batch_size, mode=tf.estimator.ModeKeys.TRAIN):
base_preprocessor = MockBasePreprocessor()
maml_preprocessor = preprocessors.MAMLPreprocessorV2(
base_preprocessor=MockBasePreprocessor())
mock_tensors = self._create_mock_tensors(base_preprocessor, batch_size,
mode)
return maml_preprocessor, mock_tensors
@parameterized.parameters((1, 1), (1, 2), (2, 1), (2, 2))
def test_maml_preprocessor_v2_meta_map_fn_raises(
self, num_condition_samples_per_task, num_inference_samples_per_task):
batch_size = (
num_condition_samples_per_task + num_inference_samples_per_task)
init_mock = self._init_mock(2 * batch_size)
maml_preprocessor, mock_tensors = init_mock
# Create a failure case for not enough data in the batch.
dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
# Note, if drop_remainder = False, the resulting dataset has no static
# shape which is required for the meta preprocessing.
dataset = dataset.batch(batch_size - 1, drop_remainder=True)
# Trigger raise conditions for create_meta_map_fn due to
# num_*_samples_per_task being None or not > 0.
with self.assertRaises(ValueError):
map_fn = maml_preprocessor.create_meta_map_fn(
None, num_inference_samples_per_task)
with self.assertRaises(ValueError):
map_fn = maml_preprocessor.create_meta_map_fn(
num_condition_samples_per_task, None)
with self.assertRaises(ValueError):
map_fn = maml_preprocessor.create_meta_map_fn(
-num_condition_samples_per_task, num_inference_samples_per_task)
with self.assertRaises(ValueError):
map_fn = maml_preprocessor.create_meta_map_fn(
num_condition_samples_per_task, -num_inference_samples_per_task)
map_fn = maml_preprocessor.create_meta_map_fn(
num_condition_samples_per_task, num_inference_samples_per_task)
with self.assertRaises(ValueError):
dataset.map(map_func=map_fn, num_parallel_calls=1)
# Create a failure case for too many examples in a batch.
dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
# Note, if drop_remainder = False, the resulting dataset has no static
# shape which is required for the meta preprocessing.
dataset = dataset.batch(batch_size + 1, drop_remainder=True)
map_fn = maml_preprocessor.create_meta_map_fn(
num_condition_samples_per_task, num_inference_samples_per_task)
with self.assertRaises(ValueError):
dataset.map(map_func=map_fn, num_parallel_calls=1)
# Create a failure case because the batch_size is not known at graph
# construction time.
dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
# Note, if drop_remainder = False, the resulting dataset has no static
# shape which is required for the meta preprocessing.
dataset = dataset.batch(batch_size + 1, drop_remainder=False)
map_fn = maml_preprocessor.create_meta_map_fn(
num_condition_samples_per_task, num_inference_samples_per_task)
with self.assertRaises(ValueError):
dataset.map(map_func=map_fn, num_parallel_calls=1)
@parameterized.parameters((1, 1), (1, 2), (2, 1), (2, 2))
def test_maml_preprocessor_v2_meta_map_fn(
self, num_condition_samples_per_task, num_inference_samples_per_task):
batch_size = (
num_condition_samples_per_task + num_inference_samples_per_task)
init_mock = self._init_mock(2 * batch_size)
maml_preprocessor, mock_tensors = init_mock
with self.session() as sess:
dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
# Note, if drop_remainder = False, the resulting dataset has no static
# shape which is required for the meta preprocessing.
dataset = dataset.batch(batch_size, drop_remainder=True)
map_fn = maml_preprocessor.create_meta_map_fn(
num_condition_samples_per_task, num_inference_samples_per_task)
dataset = dataset.map(map_func=map_fn, num_parallel_calls=1)
raw_meta_features, raw_meta_labels = dataset.make_one_shot_iterator(
).get_next()
np_raw_meta_features, np_raw_meta_labels = sess.run(
[raw_meta_features, raw_meta_labels])
ref_features, ref_labels = mock_tensors
self.assertEqual(
list(np_raw_meta_features.condition.features.keys()),
list(np_raw_meta_features.inference.features.keys()))
for feature_name in np_raw_meta_features.condition.features.keys():
np.testing.assert_array_almost_equal(
np_raw_meta_features.condition.features[feature_name],
ref_features[feature_name][:num_condition_samples_per_task])
np.testing.assert_array_almost_equal(
np_raw_meta_features.inference.features[feature_name],
ref_features[feature_name]
[num_condition_samples_per_task:batch_size])
# The labels and the condition labels have to have the same keys.
self.assertEqual(
list(np_raw_meta_features.condition.labels.keys()),
list(np_raw_meta_labels.keys()))
for label_name in np_raw_meta_features.condition.labels.keys():
np.testing.assert_array_almost_equal(
np_raw_meta_features.condition.labels[label_name],
ref_labels[label_name][:num_condition_samples_per_task])
np.testing.assert_array_almost_equal(
np_raw_meta_labels[label_name],
ref_labels[label_name][num_condition_samples_per_task:batch_size])
@parameterized.parameters((1, 1, 1), (1, 2, 2), (2, 1, 2), (1, 2, 3),
(2, 1, 3), (2, 2, 3))
def test_maml_preprocessor_v2_preprocess(self, num_condition_samples_per_task,
num_inference_samples_per_task,
outer_batch_size):
inner_batch_size = (
num_condition_samples_per_task + num_inference_samples_per_task)
init_mock = self._init_mock(outer_batch_size * inner_batch_size)
maml_preprocessor, mock_tensors = init_mock
with self.session() as sess:
dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
# Note, if drop_remainder = False, the resulting dataset has no static
# shape which is required for the meta preprocessing.
dataset = dataset.batch(inner_batch_size, drop_remainder=True)
map_fn = maml_preprocessor.create_meta_map_fn(
num_condition_samples_per_task, num_inference_samples_per_task)
dataset = dataset.map(map_func=map_fn, num_parallel_calls=1)
# Note, if drop_remainder = False, the resulting dataset has no static
# shape which is required for the meta preprocessing.
dataset = dataset.batch(outer_batch_size, drop_remainder=True)
preprocess_fn = functools.partial(
maml_preprocessor.preprocess, mode=tf.estimator.ModeKeys.TRAIN)
dataset = dataset.map(map_func=preprocess_fn, num_parallel_calls=1)
raw_meta_features, raw_meta_labels = dataset.make_one_shot_iterator(
).get_next()
np_raw_meta_features, np_raw_meta_labels = sess.run(
[raw_meta_features, raw_meta_labels])
ref_features, ref_labels = mock_tensors
self.assertEqual(
list(np_raw_meta_features.condition.features.keys()),
list(np_raw_meta_features.inference.features.keys()))
# The image has been resized. Therefore, we ensure that its shape is
# correct. Note, we have to strip the outer and inner batch dimensions.
self.assertEqual(np_raw_meta_features.condition.features.image.shape[2:],
_DEFAULT_OUT_IMAGE_SHAPE)
self.assertEqual(np_raw_meta_features.inference.features.image.shape[2:],
_DEFAULT_OUT_IMAGE_SHAPE)
# The following tests are important to ensure that our reshaping,
# flattening and unflattening actually preserves all information.
# We can only test those two since the image has been resized.
      # Since the feature name has been altered during preprocessing we have
      # to index the reference data differently.
# Further, we only test the first batch, since everything afterwards
# would require more index slicing :).
# For the image we have to convert the original data into float32 since
# that is the required conversion for our preprocessor.
np.testing.assert_array_almost_equal(
np_raw_meta_features.condition.features['original_image'][0],
ref_features['image'][:num_condition_samples_per_task].astype(
np.float32) / 255)
np.testing.assert_array_almost_equal(
np_raw_meta_features.inference.features['original_image'][0],
ref_features['image'][num_condition_samples_per_task:inner_batch_size]
.astype(np.float32) / 255)
np.testing.assert_array_almost_equal(
np_raw_meta_features.condition.features['action'][0],
ref_features['action'][:num_condition_samples_per_task])
np.testing.assert_array_almost_equal(
np_raw_meta_features.inference.features['action'][0],
ref_features['action']
[num_condition_samples_per_task:inner_batch_size])
# The labels and the condition labels have to have the same keys.
self.assertEqual(
list(np_raw_meta_features.condition.labels.keys()),
list(np_raw_meta_labels.keys()))
for label_name in np_raw_meta_features.condition.labels.keys():
np.testing.assert_array_almost_equal(
np_raw_meta_features.condition.labels[label_name][0],
ref_labels[label_name][:num_condition_samples_per_task])
np.testing.assert_array_almost_equal(
np_raw_meta_labels[label_name][0], ref_labels[label_name]
[num_condition_samples_per_task:inner_batch_size])
def test_create_metaexample_spec(self):
feature_spec = TSpec()
feature_spec.image = utils.ExtendedTensorSpec(
shape=_DEFAULT_IN_IMAGE_SHAPE,
dtype=tf.uint8,
is_optional=False,
data_format='jpeg',
name='state/image')
feature_spec.action = utils.ExtendedTensorSpec(
shape=_DEFAULT_ACTION_SHAPE,
dtype=tf.float32,
is_optional=False,
name='state/action')
num_samples_in_task = 3
metaexample_spec = preprocessors.create_metaexample_spec(
feature_spec, num_samples_in_task, 'condition')
flat_feature_spec = utils.flatten_spec_structure(feature_spec)
self.assertLen(
list(metaexample_spec.keys()),
num_samples_in_task * len(list(flat_feature_spec.keys())))
for key in flat_feature_spec:
for i in range(num_samples_in_task):
meta_example_key = six.ensure_str(key) + '/{:d}'.format(i)
self.assertIn(meta_example_key, list(metaexample_spec.keys()))
self.assertTrue(
six.ensure_str(metaexample_spec[meta_example_key].name).startswith(
'condition_ep'))
def test_stack_intratask_episodes(self):
feature_spec = TSpec()
feature_spec.image = utils.ExtendedTensorSpec(
shape=_DEFAULT_IN_IMAGE_SHAPE,
dtype=tf.uint8,
is_optional=False,
data_format='jpeg',
name='state/image')
feature_spec.action = utils.ExtendedTensorSpec(
shape=_DEFAULT_ACTION_SHAPE,
dtype=tf.float32,
is_optional=False,
name='state/action')
batch_size = 2
num_samples_in_task = 3
metaexample_spec = preprocessors.create_metaexample_spec(
feature_spec, num_samples_in_task, 'condition')
tensors = utils.make_random_numpy(metaexample_spec, batch_size)
out_tensors = preprocessors.stack_intra_task_episodes(
tensors, num_samples_in_task)
self.assertEqual(
out_tensors.image.shape,
(batch_size, num_samples_in_task) + _DEFAULT_IN_IMAGE_SHAPE)
self.assertEqual(
out_tensors.action.shape,
(batch_size, num_samples_in_task) + _DEFAULT_ACTION_SHAPE)
@parameterized.parameters((1, 1, 1), (1, 2, 2), (2, 1, 2), (1, 2, 3),
(2, 1, 3), (2, 3, 1))
def test_meta_example_preprocess(
self,
num_condition_samples_per_task,
num_inference_samples_per_task,
outer_batch_size):
base_preprocessor = MockBasePreprocessor()
meta_example_preprocessor = preprocessors.FixedLenMetaExamplePreprocessor(
base_preprocessor=base_preprocessor,
num_condition_samples_per_task=num_condition_samples_per_task,
num_inference_samples_per_task=num_inference_samples_per_task)
mock_tensors = self._create_mock_tensors(
meta_example_preprocessor, outer_batch_size)
with self.session() as sess:
dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
dataset = dataset.batch(outer_batch_size, drop_remainder=True)
preprocess_fn = functools.partial(
meta_example_preprocessor.preprocess,
mode=tf.estimator.ModeKeys.TRAIN)
dataset = dataset.map(map_func=preprocess_fn, num_parallel_calls=1)
raw_meta_features, raw_meta_labels = (
dataset.make_one_shot_iterator().get_next())
np_raw_meta_features, np_raw_meta_labels = sess.run(
[raw_meta_features, raw_meta_labels])
ref_features, ref_labels = mock_tensors
self.assertEqual(
list(np_raw_meta_features.condition.features.keys()),
list(np_raw_meta_features.inference.features.keys()))
# The labels and the condition labels have to have the same keys.
self.assertEqual(
list(np_raw_meta_features.condition.labels.keys()),
list(np_raw_meta_labels.keys()))
# The image has been resized. Therefore, we ensure that its shape is
# correct. Note, we have to strip the outer and inner batch dimensions.
self.assertEqual(
np_raw_meta_features.condition.features.image.shape[2:],
_DEFAULT_OUT_IMAGE_SHAPE)
self.assertEqual(
np_raw_meta_features.inference.features.image.shape[2:],
_DEFAULT_OUT_IMAGE_SHAPE)
for i in range(num_condition_samples_per_task):
np.testing.assert_array_almost_equal(
np_raw_meta_features.condition.features['action'][:, i, Ellipsis],
ref_features['condition/features/action/{:d}'.format(i)])
for label_name in np_raw_meta_features.condition.labels.keys():
np.testing.assert_array_almost_equal(
np_raw_meta_features.condition.labels[label_name][:, i, Ellipsis],
ref_features['condition/labels/{:s}/{:d}'.format(
label_name, i)])
for i in range(num_inference_samples_per_task):
np.testing.assert_array_almost_equal(
np_raw_meta_features.inference.features['action'][:, i, Ellipsis],
ref_features['inference/features/action/{:d}'.format(i)])
for label_name in np_raw_meta_features.condition.labels.keys():
np.testing.assert_array_almost_equal(
np_raw_meta_labels[label_name][:, i, Ellipsis],
ref_labels[six.ensure_str(label_name) + '/{:d}'.format(i)])
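# Illustrative sketch (not part of the test suite): the two-level batching
# pipeline the tests above exercise. The batch sizes are arbitrary examples;
# the only requirement is that the inner batch equals the number of condition
# plus inference samples per task.
def _example_maml_input_pipeline(mock_tensors,
                                 num_condition_samples_per_task=1,
                                 num_inference_samples_per_task=1,
                                 outer_batch_size=2):
  """Builds the dataset pipeline used throughout PreprocessorsTest."""
  maml_preprocessor = preprocessors.MAMLPreprocessorV2(
      base_preprocessor=MockBasePreprocessor())
  inner_batch_size = (
      num_condition_samples_per_task + num_inference_samples_per_task)
  dataset = tf.data.Dataset.from_tensor_slices(mock_tensors)
  # Inner batch: one task worth of samples. drop_remainder=True is required so
  # the meta map_fn sees a static batch dimension.
  dataset = dataset.batch(inner_batch_size, drop_remainder=True)
  dataset = dataset.map(
      maml_preprocessor.create_meta_map_fn(num_condition_samples_per_task,
                                           num_inference_samples_per_task))
  # Outer batch: several tasks per training step, then the actual preprocess.
  dataset = dataset.batch(outer_batch_size, drop_remainder=True)
  preprocess_fn = functools.partial(
      maml_preprocessor.preprocess, mode=tf.estimator.ModeKeys.TRAIN)
  return dataset.map(map_func=preprocess_fn)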
if __name__ == '__main__':
tf.test.main()
|
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import os.path
import eventlet
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
import psutil
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
from neutron.conf.agent import common as agent_cfg
LOG = logging.getLogger(__name__)
agent_cfg.register_external_process_opts()
agent_cfg.register_process_monitor_opts(cfg.CONF)
class MonitoredProcess(object, metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def active(self):
"""Boolean representing the running state of the process."""
@abc.abstractmethod
def enable(self):
"""Enable the service, or respawn the process."""
class ProcessManager(MonitoredProcess):
"""An external process manager for Neutron spawned processes.
Note: The manager expects uuid to be in cmdline.
"""
def __init__(self, conf, uuid, namespace=None, service=None,
pids_path=None, default_cmd_callback=None,
cmd_addl_env=None, pid_file=None, run_as_root=False,
custom_reload_callback=None):
self.conf = conf
self.uuid = uuid
self.namespace = namespace
self.default_cmd_callback = default_cmd_callback
self.cmd_addl_env = cmd_addl_env
self.pids_path = pids_path or self.conf.external_pids
self.pid_file = pid_file
self.run_as_root = run_as_root or self.namespace is not None
self.custom_reload_callback = custom_reload_callback
self.kill_scripts_path = cfg.CONF.AGENT.kill_scripts_path
if service:
self.service_pid_fname = 'pid.' + service
self.service = service
else:
self.service_pid_fname = 'pid'
self.service = 'default-service'
fileutils.ensure_tree(os.path.dirname(self.get_pid_file_name()),
mode=0o755)
def enable(self, cmd_callback=None, reload_cfg=False, ensure_active=False):
if not self.active:
if not cmd_callback:
cmd_callback = self.default_cmd_callback
cmd = cmd_callback(self.get_pid_file_name())
ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
run_as_root=self.run_as_root)
elif reload_cfg:
self.reload_cfg()
if ensure_active:
common_utils.wait_until_true(lambda: self.active)
def reload_cfg(self):
if self.custom_reload_callback:
self.disable(get_stop_command=self.custom_reload_callback)
else:
self.disable('HUP')
def disable(self, sig='9', get_stop_command=None):
pid = self.pid
if self.active:
if get_stop_command:
cmd = get_stop_command(self.get_pid_file_name())
ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
run_as_root=self.run_as_root,
privsep_exec=True)
else:
cmd = self.get_kill_cmd(sig, pid)
utils.execute(cmd, run_as_root=self.run_as_root,
privsep_exec=True)
# In the case of shutting down, remove the pid file
if sig == '9':
utils.delete_if_exists(self.get_pid_file_name(),
run_as_root=self.run_as_root)
elif pid:
LOG.debug('%(service)s process for %(uuid)s pid %(pid)d is stale, '
'ignoring signal %(signal)s',
{'service': self.service, 'uuid': self.uuid,
'pid': pid, 'signal': sig})
else:
LOG.debug('No %(service)s process started for %(uuid)s',
{'service': self.service, 'uuid': self.uuid})
def get_kill_cmd(self, sig, pid):
if self.kill_scripts_path:
kill_file = "%s-kill" % self.service
kill_file_path = os.path.join(self.kill_scripts_path, kill_file)
if os.path.isfile(kill_file_path):
return [kill_file_path, sig, pid]
return ['kill', '-%s' % (sig), pid]
def get_pid_file_name(self):
"""Returns the file name for a given kind of config file."""
if self.pid_file:
return self.pid_file
else:
return utils.get_conf_file_name(self.pids_path,
self.uuid,
self.service_pid_fname)
@property
def pid(self):
"""Last known pid for this external process spawned for this uuid."""
return utils.get_value_from_file(self.get_pid_file_name(), int)
@property
def active(self):
cmdline = self.cmdline
return self.uuid in cmdline if cmdline else False
@property
def cmdline(self):
pid = self.pid
if not pid:
return
try:
return ' '.join(psutil.Process(pid).cmdline())
except (psutil.NoSuchProcess, psutil.AccessDenied):
return
ServiceId = collections.namedtuple('ServiceId', ['uuid', 'service'])
class ProcessMonitor(object):
def __init__(self, config, resource_type):
"""Handle multiple process managers and watch over all of them.
:param config: oslo config object with the agent configuration.
:type config: oslo_config.ConfigOpts
:param resource_type: can be dhcp, router, etc.
:type resource_type: str
"""
self._config = config
self._resource_type = resource_type
self._monitored_processes = {}
if self._config.AGENT.check_child_processes_interval:
self._spawn_checking_thread()
def register(self, uuid, service_name, monitored_process):
"""Start monitoring a process.
        The given monitored_process will be tied to its uuid+service_name,
        replacing the old one if it already existed.
        The monitored_process should be enabled before registration;
        otherwise the ProcessMonitor could try to enable the process itself,
        which could lead to a double enable and, with bad luck, two running
        processes as well as errors in the logs (see the usage sketch at the
        end of this module).
:param uuid: An ID of the resource for which the process is running.
:param service_name: A logical service name for this process monitor,
so the same uuid provided via process manager
can reference several different services.
:param monitored_process: MonitoredProcess we want to monitor.
"""
service_id = ServiceId(uuid, service_name)
self._monitored_processes[service_id] = monitored_process
def unregister(self, uuid, service_name):
"""Stop monitoring a process.
The uuid+service_name will be removed from the monitored processes.
        The service must be disabled **after** unregistering; otherwise, if
        the process monitor checks between the moment you disable the process
        and the moment you unregister it, the process will be respawned and
        left orphaned in the system.
:param uuid: An ID of the resource for which the process is running.
:param service_name: A logical service name for this process monitor,
so the same uuid provided via process manager
can reference several different services.
"""
service_id = ServiceId(uuid, service_name)
self._monitored_processes.pop(service_id, None)
def stop(self):
"""Stop the process monitoring.
This method will stop the monitoring thread, but no monitored
process will be stopped.
"""
self._monitor_processes = False
def _spawn_checking_thread(self):
self._monitor_processes = True
eventlet.spawn(self._periodic_checking_thread)
@lockutils.synchronized("_check_child_processes")
def _check_child_processes(self):
        # Build the list of keys before iterating, to cover the case where
        # other threads add or remove items from the dictionary; iterating
        # over a dict that changes size would otherwise raise a RuntimeError.
for service_id in list(self._monitored_processes):
pm = self._monitored_processes.get(service_id)
if pm and not pm.active:
LOG.error("%(service)s for %(resource_type)s "
"with uuid %(uuid)s not found. "
"The process should not have died",
{'service': service_id.service,
'resource_type': self._resource_type,
'uuid': service_id.uuid})
self._execute_action(service_id)
eventlet.sleep(0)
def _periodic_checking_thread(self):
while self._monitor_processes:
eventlet.sleep(self._config.AGENT.check_child_processes_interval)
eventlet.spawn(self._check_child_processes)
def _execute_action(self, service_id):
action = self._config.AGENT.check_child_processes_action
action_function = getattr(self, "_%s_action" % action)
action_function(service_id)
def _respawn_action(self, service_id):
LOG.warning("Respawning %(service)s for uuid %(uuid)s",
{'service': service_id.service,
'uuid': service_id.uuid})
self._monitored_processes[service_id].enable()
def _exit_action(self, service_id):
LOG.error("Exiting agent as programmed in check_child_processes_"
"actions")
self._exit_handler(service_id.uuid, service_id.service)
def _exit_handler(self, uuid, service):
"""This is an exit handler for the ProcessMonitor.
It will be called if the administrator configured the exit action in
check_child_processes_actions, and one of our external processes die
unexpectedly.
"""
LOG.error("Exiting agent because of a malfunction with the "
"%(service)s process identified by uuid %(uuid)s",
{'service': service, 'uuid': uuid})
raise SystemExit(1)
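# Illustrative usage sketch (not used by Neutron itself): the enable/register
# and unregister/disable ordering described in the docstrings above. The
# command callback, service name and uuid below are placeholders.
def _example_monitoring_flow(conf, resource_uuid):
    def cmd_callback(pid_file):
        # Hypothetical daemon command line; note the uuid must appear in the
        # cmdline for ProcessManager.active to work.
        return ['my-daemon', '--pid-file=%s' % pid_file,
                '--uuid=%s' % resource_uuid]

    pm = ProcessManager(conf, resource_uuid, service='my-service',
                        default_cmd_callback=cmd_callback)
    monitor = ProcessMonitor(config=conf, resource_type='example')
    # Enable first, then register, so the monitor never tries to enable a
    # process that was not started yet.
    pm.enable(ensure_active=True)
    monitor.register(resource_uuid, 'my-service', pm)
    # ...
    # Unregister first, then disable, so the monitor does not respawn the
    # process while it is being shut down.
    monitor.unregister(resource_uuid, 'my-service')
    pm.disable()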
|
|
#
# Filename: IBMarketingCloud.py
# 6/12/2016 1:56 AM
#
#
__author__ = 'measley'
import ConfigParser
import json
import requests
from lxml import etree
from lxml import objectify
from lxml.etree import Element
from lxml.etree import SubElement
class IBMCloud(object):
# constants
AUTH_LEGACY = 1
AUTH_OAUTH = 2
# variables will be set
# - pod
# - access_token
# - auth_method
# - jsessionid
# - URL
def __init__(self):
self.pod = None
self.access_token = None
self.auth_method = None
self.jsessionid = None
self.URL = None
self.authenticated = False
def login(self, auth_method=AUTH_LEGACY, config_file=None):
self.auth_method = auth_method
config = ConfigParser.ConfigParser()
config.read(config_file)
        if self.auth_method == self.AUTH_LEGACY:
pod = config.get('MARKETING_CLOUD_LEGACY', 'Pod')
self.pod = pod
self.URL = r"https://api{}.silverpop.com/XMLAPI".format(pod)
envelope = Element("Envelope")
body = SubElement(envelope, "Body")
login = SubElement(body, "Login")
username = SubElement(login, "USERNAME")
username.text = config.get('MARKETING_CLOUD_LEGACY', 'Username')
password = SubElement(login, "PASSWORD")
password.text = config.get('MARKETING_CLOUD_LEGACY', 'Password')
loginXML = etree.tostring(envelope, encoding="UTF-8")
#construct paramstring
paramstring = {'xml': loginXML}
r = requests.post(self.URL, params=paramstring)
            if r.status_code == 200:
ar = ApiResult(r)
root = objectify.fromstring(ar.message)
print(etree.tostring(root, pretty_print=True))
if root.Body.RESULT.SUCCESS.text == "false":
return ar
elif root.Body.RESULT.SUCCESS.text == "true":
self.jsessionid = root.Body.RESULT.SESSIONID
self.authenticated = True
return ar
else:
return False
        elif self.auth_method == self.AUTH_OAUTH:
client_id = config.get('MARKETING_CLOUD_OAUTH2', 'ClientID')
client_secret = config.get('MARKETING_CLOUD_OAUTH2', 'ClientSecret')
refresh_token = config.get('MARKETING_CLOUD_OAUTH2', 'RefreshToken')
grant_type = "refresh_token"
pod = config.get('MARKETING_CLOUD_OAUTH2', 'Pod')
if None in (client_id, client_secret, refresh_token, pod):
print("Check config file.")
oauth_url = "https://api" + pod + ".silverpop.com/oauth/token"
data = {"grant_type": grant_type,
"client_id": client_id,
"client_secret": client_secret,
"refresh_token": refresh_token}
r = requests.post(oauth_url, data)
            if r.status_code == 200:
d = json.loads(r.text)
self.access_token = d['access_token']
self.pod = pod
self.URL = r"https://api{}.silverpop.com/XMLAPI".format(pod)
self.auth_method = self.AUTH_OAUTH
self.authenticated = True
return d['expires_in']
else:
return False
def _runapi(self, xml=None):
if self.authenticated:
            # Un-escape markup that was embedded via element .text; lxml
            # escapes it during serialization, so restore it before sending.
            xml = xml.replace('&lt;', '<').replace('&gt;', '>')
            if self.auth_method == self.AUTH_LEGACY:
#set legacy URL and jsession
paramstring = {"jsessionid": self.jsessionid, "xml": xml}
r = requests.post(self.URL, paramstring)
return ApiResult(r)
            elif self.auth_method == self.AUTH_OAUTH:
#set oauth URL
headers = {'Authorization': "Bearer " + self.access_token}
data = {'xml': xml}
r = requests.post(self.URL, headers=headers, data=data)
return ApiResult(r)
else:
return False
def addRecipient(self, listid=None, createdfrom=None, sendautoreply=False,
updateiffound=False, allowhtml=False, visitorkey=None,
contactlists=None, syncfields=None, columns=None):
if None in (listid, createdfrom, columns):
return False
envelopeNode = Element("Envelope")
bodyNode = SubElement(envelopeNode, "Body")
addRecipientNode = SubElement(bodyNode, "AddRecipient")
listIdNode = SubElement(addRecipientNode, "LIST_ID")
listIdNode.text = str(listid)
createdfromNode = SubElement(addRecipientNode, "CREATED_FROM")
createdfromNode.text = str(createdfrom)
if sendautoreply:
sendautoreplyNode = SubElement(addRecipientNode, "SEND_AUTOREPLY")
sendautoreplyNode.text = str(sendautoreply)
if updateiffound:
updateiffoundNode = SubElement(addRecipientNode, "UPDATE_IF_FOUND")
updateiffoundNode.text = str(updateiffound)
if allowhtml:
allowhtmlNode = SubElement(addRecipientNode, "ALLOW_HTML")
allowhtmlNode.text = str(allowhtml)
if visitorkey:
visitorkeyNode = SubElement(addRecipientNode, "VISITOR_KEY")
visitorkeyNode.text = str(visitorkey)
if contactlists:
            if isinstance(contactlists, (list, tuple)):
                contactlistsNode = SubElement(addRecipientNode, "CONTACT_LISTS")
                clists = ""
                # Iterate over the contact list ids themselves, not len().
                for contactlistid in contactlists:
                    clists += "<CONTACT_LIST_ID>{}</CONTACT_LIST_ID>".format(
                        contactlistid)
                contactlistsNode.text = clists
if syncfields:
if isinstance(syncfields, dict):
syncfieldsNode = SubElement(addRecipientNode, "SYNC_FIELDS")
sfields = ""
for name, value in syncfields.items():
sfields += """<SYNC_FIELD>
<NAME>{}</NAME>
<VALUE>{}</VALUE>
</SYNC_FIELD>""".format(name,value)
syncfieldsNode.text = sfields
if columns:
if isinstance(columns, dict):
                columnsNode = SubElement(addRecipientNode, "COLUMN")
scolumns = ""
for name, value in columns.items():
scolumns += """<NAME>{}</NAME>
<VALUE>{}</VALUE>""".format(name, value)
columnsNode.text = scolumns
addrecipientxml = etree.tostring(envelopeNode)
return self._runapi(addrecipientxml)
def logout(self):
envelope = Element("Envelope")
body = SubElement(envelope, "Body")
        logout = SubElement(body, "Logout")
logoutXML = etree.tostring(envelope, encoding="UTF-8")
return self._runapi(logoutXML)
def rawRecipientDataExport(self, mailingid=None, reportid=None, campaignid=None,
listid=None, includechildren=False, eventdatestart=None,
eventdateend=None, senddatestart=None, senddateend=None,
exportformat=None, fileencoding=None, exportfilename=None,
email=None, movetoftp=False, private=False, shared=False,
sentmailings=False, sending=False, optinconfirmation=False,
profileconfirmation=False, automated=False, campaignactive=False,
campaigncompleted=False, campaigncancelled=False,
campaignscrapetemplate=False, includetestmailings=False,
alleventtypes=False, sent=False, suppressed=False, opens=False,
clicks=False, optins=False, optouts=False, forwards=False,
attachments=False, conversions=False, clickstreams=False,
hardbounces=False, softbounces=False, replyabuse=False,
replycoa=False, replyother=False, mailblocks=False,
mailingrestrictions=False, includeseeds=False,
includeforwards=False, includeinboxmonitoring=False,
codedtypefields=False, excludedeleted=False, forwardsonly=False,
returnmailingname=False, returnsubject=False,
returncrmcampaignid=False, returnprogramid=False, columns=None):
"""Raw Recipient Data Export """
envelopeNode = Element("Envelope")
bodyNode = SubElement(envelopeNode, "Body")
addRecipientNode = SubElement(bodyNode, "RawRecipientDataExport")
if mailingid:
mailingidNode = SubElement(addRecipientNode, "MAILING_ID")
mailingidNode.text = str(mailingid)
if reportid:
reportidNode = SubElement(addRecipientNode, "REPORT_ID")
reportidNode.text = str(reportid)
if campaignid:
campaignidNode = SubElement(addRecipientNode, "CAMPAIGN_ID")
campaignidNode.text = str(campaignid)
if listid:
listidNode = SubElement(addRecipientNode, "LIST_ID")
listidNode.text = str(listid)
        if includechildren:
            includechildrenNode = SubElement(addRecipientNode, "INCLUDE_CHILDREN")
            includechildrenNode.text = str(includechildren)
        # NOTE: the remaining export options accepted by this method are not
        # yet mapped to XML nodes; only the identifiers above are serialized.
        exportxml = etree.tostring(envelopeNode)
        return self._runapi(exportxml)
class ApiResult(object):
def __init__(self, response):
self._status = response.status_code
self._message = response.text
def __str__(self):
        return str(self._status)
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def message(self):
return self._message
@message.setter
def message(self, value):
self._message = value
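# Illustrative usage sketch (not part of the library): a typical legacy-auth
# session. The config path, list id and column values are placeholders.
def _example_session():
    client = IBMCloud()
    client.login(auth_method=IBMCloud.AUTH_LEGACY,
                 config_file='marketing_cloud.cfg')
    if client.authenticated:
        # addRecipient requires at least listid, createdfrom and columns.
        response = client.addRecipient(listid=12345, createdfrom=1,
                                       updateiffound=True,
                                       columns={'EMAIL': 'user@example.com'})
        print(response.status)
        print(response.message)
        client.logout()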
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for deserializing `Function`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from absl import logging
from tensorflow.core.framework import function_pb2
from tensorflow.core.protobuf import saved_object_graph_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as function_lib
from tensorflow.python.framework import func_graph as func_graph_lib
from tensorflow.python.framework import function_def_to_graph as function_def_lib
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _is_tensor(t):
return isinstance(t, (ops.Tensor, resource_variable_ops.BaseResourceVariable))
# TODO(edloper): Update this to just use ConcreteFunction.__call__ with the
# structured signature.
def _call_concrete_function(function, inputs):
"""Calls a restored Function with structured inputs.
This differs from `function.__call__` in that inputs and outputs are
structured and that it casts inputs to tensors if needed.
  Note: this does not check that non-tensor inputs match. That should be
done before via `_concrete_function_callable_with`.
Args:
function: ConcreteFunction to call.
inputs: Structured inputs compatible with
`function.graph.structured_input_signature`.
Returns:
The structured function output.
"""
expected_structure = function.graph.structured_input_signature
flatten_inputs = nest.flatten_up_to(
expected_structure, inputs, expand_composites=True)
flatten_expected = nest.flatten(expected_structure, expand_composites=True)
tensor_inputs = []
for arg, expected in zip(flatten_inputs, flatten_expected):
if isinstance(expected, tensor_spec.TensorSpec):
tensor_inputs.append(
ops.convert_to_tensor(arg, dtype_hint=expected.dtype))
result = function._call_flat(tensor_inputs, function._captured_inputs) # pylint: disable=protected-access
if isinstance(result, ops.Operation):
return None
return result
def _try_convert_to_tensor_spec(arg, dtype_hint):
"""Returns None or TensorSpec obtained if `arg` is converted to tensor."""
try:
# Note: try conversion in a FuncGraph to avoid polluting current context.
with func_graph_lib.FuncGraph(name="guess_conversion").as_default():
result = ops.convert_to_tensor(arg, dtype_hint=dtype_hint)
return tensor_spec.TensorSpec(shape=result.shape, dtype=result.dtype)
except (TypeError, ValueError):
return None
def _concrete_function_callable_with(function, inputs, allow_conversion):
"""Returns whether concrete `function` can be called with `inputs`."""
expected_structure = function.graph.structured_input_signature
try:
flatten_inputs = nest.flatten_up_to(expected_structure, inputs)
except (TypeError, ValueError):
return False
for arg, expected in zip(flatten_inputs, nest.flatten(expected_structure)):
if isinstance(expected, tensor_spec.TensorSpec):
if allow_conversion:
arg = _try_convert_to_tensor_spec(arg, dtype_hint=expected.dtype)
if not _is_tensor(arg) and not isinstance(arg, tensor_spec.TensorSpec):
return False
if arg.dtype != expected.dtype:
return False
if not expected.shape.is_compatible_with(arg.shape):
return False
elif isinstance(expected, type_spec.TypeSpec):
if not expected.is_compatible_with(arg):
return False
elif _is_tensor(arg):
if id(arg) != id(expected):
return False
else:
if arg != expected:
return False
return True
def _deserialize_function_spec_as_nonmethod(function_spec_proto, coder):
"""Deserialize a FunctionSpec object from its proto representation."""
typeless_fullargspec = coder.decode_proto(function_spec_proto.fullargspec)
# Convert a method function into a non method.
if function_spec_proto.is_method:
if not typeless_fullargspec.args:
raise NotImplementedError(
"Missing support to deserialize a method function without a named "
"'self' argument.")
args = typeless_fullargspec.args[1:]
else:
args = typeless_fullargspec.args
fullargspec = tf_inspect.FullArgSpec(
args=args,
varargs=typeless_fullargspec.varargs,
varkw=typeless_fullargspec.varkw,
defaults=typeless_fullargspec.defaults,
kwonlyargs=typeless_fullargspec.kwonlyargs,
kwonlydefaults=typeless_fullargspec.kwonlydefaults,
annotations=typeless_fullargspec.annotations)
input_signature = coder.decode_proto(function_spec_proto.input_signature)
# See `tf.function` and the JitCompile proto for details.
jit_compile = {
saved_object_graph_pb2.FunctionSpec.JitCompile.DEFAULT: None,
saved_object_graph_pb2.FunctionSpec.JitCompile.ON: True,
saved_object_graph_pb2.FunctionSpec.JitCompile.OFF: False,
}.get(function_spec_proto.jit_compile)
return function_lib.FunctionSpec(fullargspec=fullargspec,
is_method=False,
input_signature=input_signature,
jit_compile=jit_compile)
# TODO(allenl): The fact that we can't derive ConcreteFunction calling
# conventions from the serialized input spec right now is unfortunate. Merging
# these would be good, maybe by adding TensorSpec names to cache keys so renamed
# keyword arguments would yield different ConcreteFunctions.
def setup_bare_concrete_function(saved_bare_concrete_function,
concrete_functions):
"""Makes a restored bare concrete function callable."""
concrete_function = concrete_functions[
saved_bare_concrete_function.concrete_function_name]
# pylint: disable=protected-access
concrete_function._arg_keywords = (
saved_bare_concrete_function.argument_keywords)
concrete_function._num_positional_args = (
saved_bare_concrete_function.allowed_positional_arguments)
if saved_bare_concrete_function.HasField("function_spec"):
coder = nested_structure_coder.StructureCoder()
function_spec = _deserialize_function_spec_as_nonmethod(
saved_bare_concrete_function.function_spec,
coder)
concrete_function._set_function_spec(function_spec)
# pylint: enable=protected-access
concrete_function.add_to_graph()
return concrete_function
class RestoredFunction(def_function.Function):
"""Wrapper class for a function that has been restored from saved state.
See `def_function.Function`.
"""
def __init__(self, python_function, name, function_spec, concrete_functions):
# TODO(mdan): We may enable autograph once exceptions are supported.
super(RestoredFunction, self).__init__(
python_function, name, autograph=False,
jit_compile=function_spec.jit_compile)
self.concrete_functions = concrete_functions
self._function_spec = function_spec
# Prevent RestoredFunction from spamming users with frequent tracing
# warnings.
self._omit_frequent_tracing_warning = True
@property
def _run_functions_eagerly(self):
# We do not have access to the original python function, and thus, we
# cannot meaningfully do anything but call our concrete function graphs
# under the hood.
#
# Attempting to call our bespoke python function (i.e.
# `restored_function_body`) will work so long as the user passes in all
# required and optional arguments. If an optional argument is missing,
# however, the call will break. For this reason, we instead skip the
# eager call path altogether if a user has enabled eager function execution
# via `tf.config.run_functions_eagerly`.
return False
def _list_all_concrete_functions_for_serialization(self):
return self.concrete_functions
def _defun_with_scope(self, scope):
func = super(RestoredFunction, self)._defun_with_scope(scope)
func._function_spec = self._function_spec # pylint: disable=protected-access
return func
def recreate_function(saved_function, concrete_functions):
"""Creates a `Function` from a `SavedFunction`.
Args:
saved_function: `SavedFunction` proto.
concrete_functions: map from function name to `ConcreteFunction`.
As a side effect of this function, the `FunctionSpec` from
`saved_function` is added to each `ConcreteFunction` in this map.
Returns:
A `Function`.
"""
# TODO(andresp): Construct a `Function` with the cache populated
# instead of creating a new `Function` backed by a Python layer to
# glue things together. Current approach is nesting functions deeper for each
# serialization cycle.
coder = nested_structure_coder.StructureCoder()
  # Note: handling method functions is tricky since make_decorator does not
  # allow control of "ismethod". Additionally, since restored functions do
  # not behave as methods, i.e. they always use the same captured tensors
  # independent of the object they are bound to, there is little value in
  # propagating that correctly.
#
# Ideally this conversion should happen at serialization time. But since
# there are SavedModels which have "ismethod" populated and have an extra
# argument that they expect to be ignored, we do it at deserialization.
function_spec = _deserialize_function_spec_as_nonmethod(
saved_function.function_spec,
coder)
def restored_function_body(*args, **kwargs):
"""Calls a restored function or raises an error if no matching function."""
if not saved_function.concrete_functions:
raise ValueError("Found zero restored functions for caller function.")
# This is the format of function.graph.structured_input_signature. At this
# point, the args and kwargs have already been canonicalized.
inputs = (args, kwargs)
# First try to find a concrete function that can be called without input
# conversions. This allows one to pick a more specific trace in case there
# was also a more expensive one that supported tensors.
for allow_conversion in [False, True]:
for function_name in saved_function.concrete_functions:
function = concrete_functions[function_name]
if _concrete_function_callable_with(function, inputs, allow_conversion):
return _call_concrete_function(function, inputs)
signature_descriptions = []
def _pretty_format_positional(positional):
return "Positional arguments ({} total):\n * {}".format(
len(positional), "\n * ".join(str(a) for a in positional))
for index, function_name in enumerate(saved_function.concrete_functions):
concrete_function = concrete_functions[function_name]
positional, keyword = concrete_function.structured_input_signature
signature_descriptions.append(
"Option {}:\n {}\n Keyword arguments: {}"
.format(index + 1, _pretty_format_positional(positional), keyword))
raise ValueError(
"Could not find matching function to call loaded from the SavedModel. "
"Got:\n {}\n Keyword arguments: {}\n\nExpected "
"these arguments to match one of the following {} option(s):\n\n{}"
.format(_pretty_format_positional(args), kwargs,
len(saved_function.concrete_functions),
"\n\n".join(signature_descriptions)))
concrete_function_objects = []
for concrete_function_name in saved_function.concrete_functions:
concrete_function_objects.append(concrete_functions[concrete_function_name])
for cf in concrete_function_objects:
cf._set_function_spec(function_spec) # pylint: disable=protected-access
restored_function = RestoredFunction(
restored_function_body,
restored_function_body.__name__,
function_spec,
concrete_function_objects)
return tf_decorator.make_decorator(
restored_function_body,
restored_function,
decorator_argspec=function_spec.fullargspec)
def load_function_def_library(library, load_shared_name_suffix=None):
"""Load a set of functions as concrete functions without captured inputs.
Functions names are manipulated during load such that they do not overlap
with previously created ones.
Args:
library: FunctionDefLibrary proto message.
load_shared_name_suffix: If specified, used to uniquify shared
names. Otherwise, a unique name is generated.
Returns:
Map of original function names in the library to instances of
`ConcreteFunction` without captured inputs.
Raises:
ValueError: if functions dependencies have a cycle.
"""
library_function_names = set(fdef.signature.name for fdef in library.function)
functions = {}
renamed_functions = {}
# Our graph building code currently requires functions to be registered with
# some tf.Graph in order to import functions using the
# op-name-is-function-name calling convention. To avoid leaking memory into
# the global default graph when executing eagerly, we create a temporary
# Graph.
#
# TODO(allenl): Make this Graph creation unnecessary when executing eagerly by
# fixing function_def_to_graph_def.
if ops.executing_eagerly_outside_functions():
graph = ops.Graph()
else:
graph = ops.get_default_graph()
if load_shared_name_suffix is None:
load_shared_name_suffix = "_load_{}".format(ops.uid())
for fdef in _sort_function_defs(library, library_function_names):
copy = _fix_fdef(fdef, functions, load_shared_name_suffix)
    # There is no need to copy all functions into the function def graph. It
    # leads to an O(n^2) increase in memory when importing functions, and the
    # extra function definitions are a no-op since they were already imported
    # as functions before and are passed in explicitly (due to the
    # topological-sort import).
with graph.as_default():
func_graph = function_def_lib.function_def_to_graph(copy)
_restore_gradient_functions(func_graph, renamed_functions)
for dep in _list_function_deps(fdef, library_function_names):
functions[dep].add_to_graph(func_graph)
# We do not initialize the new ConcreteFunction's function_spec and/or
# arg_keywords here (which are used to parse the structured and flat
    # signatures, respectively). ConcreteFunctions that are part of a saved
    # function are set up later by recreate_function(); bare ConcreteFunctions
    # are set up by setup_bare_concrete_function().
func = function_lib.ConcreteFunction(func_graph)
func.add_to_graph(graph)
functions[fdef.signature.name] = func
renamed_functions[func.name] = func
if any(op.type == "TRTEngineOp" for op in func_graph.get_operations()):
# TODO(b/150708051): Remove this hack once TensorRT SavedModel integration
# is fixed. Currently it's leaking memory to maintain bug compatibility
# with previous behavior.
func.add_to_graph(ops.get_default_graph())
return functions
def _restore_gradient_functions(func_graph, renamed_functions):
"""Populate function op's _gradient_function with default gradient."""
for op in func_graph.get_operations():
# TODO(andresp): This code assumes that the gradient registered for this
# function call is the default gradient for the function and not a custom
# one.
if op.type in ["StatefulPartitionedCall", "PartitionedCall"]:
function = renamed_functions[compat.as_bytes(
op.node_def.attr["f"].func.name)]
op._gradient_function = function._get_gradient_function() # pylint: disable=protected-access
def _sort_function_defs(library, library_function_names):
"""Return a topologic sort of FunctionDefs in a library."""
edges = collections.defaultdict(list)
in_count = collections.defaultdict(lambda: 0)
for fdef in library.function:
for dep in _list_function_deps(fdef, library_function_names):
edges[dep].append(fdef.signature.name)
in_count[fdef.signature.name] += 1
ready = [
fdef.signature.name
for fdef in library.function
if in_count[fdef.signature.name] == 0
]
output = []
while ready:
node = ready.pop()
output.append(node)
for dest in edges[node]:
in_count[dest] -= 1
if not in_count[dest]:
ready.append(dest)
if len(output) != len(library.function):
failed_to_resolve = sorted(set(in_count.keys()) - set(output))
raise ValueError("There is a cyclic-dependency between functions. ",
"Could not resolve %r." % (failed_to_resolve,))
reverse = {fdef.signature.name: fdef for fdef in library.function}
return [reverse[x] for x in output]
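# Minimal self-contained sketch of the Kahn-style topological sort that
# _sort_function_defs() above applies to FunctionDefs, shown on a plain dict
# mapping each function name to the names it depends on (toy data only).
def _toy_topological_sort(deps):
  """Returns names ordered so that dependencies come before dependents."""
  edges = collections.defaultdict(list)
  in_count = collections.defaultdict(int)
  for name, name_deps in deps.items():
    in_count[name] += 0  # Make sure every node has an entry.
    for dep in name_deps:
      in_count[dep] += 0
      edges[dep].append(name)
      in_count[name] += 1
  ready = [name for name, count in in_count.items() if count == 0]
  output = []
  while ready:
    node = ready.pop()
    output.append(node)
    for dest in edges[node]:
      in_count[dest] -= 1
      if not in_count[dest]:
        ready.append(dest)
  if len(output) != len(in_count):
    raise ValueError("Cyclic dependency in %r" % (deps,))
  return output
# For example, _toy_topological_sort({"f": ["g"], "g": []}) yields ["g", "f"].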
def _check_op_has_custom_gradients(node_def):
"""Returns True if op has custom gradients."""
return ("_gradient_op_type" in node_def.attr and
node_def.op not in ["StatefulPartitionedCall", "PartitionedCall"])
def fix_node_def(node_def, functions, shared_name_suffix):
"""Replace functions calls and shared names in `node_def`."""
if node_def.op in functions:
node_def.op = functions[node_def.op].name
for _, attr_value in node_def.attr.items():
if attr_value.WhichOneof("value") == "func":
attr_value.func.name = functions[attr_value.func.name].name
elif attr_value.WhichOneof("value") == "list":
for fn in attr_value.list.func:
fn.name = functions[fn.name].name
# Fix old table creation bug.
if node_def.op == "HashTableV2":
if ("use_node_name_sharing" not in node_def.attr or
not node_def.attr["use_node_name_sharing"].b):
node_def.attr["use_node_name_sharing"].b = True
      # We are turning on node name sharing, so have to make sure we don't
# accidentally share a table resource.
shared_name_suffix += "_{}".format(ops.uid())
# TODO(b/124205571): Avoid accidental sharing and destruction of restored
# resources. For now uniquify "shared_name" when loading functions to avoid
# sharing.
# TODO: Add regression test for b/150826922.
op_def = op_def_registry.get(node_def.op)
if op_def:
attr = next((a for a in op_def.attr if a.name == "shared_name"), None)
if attr:
shared_name = None
if "shared_name" in node_def.attr and node_def.attr["shared_name"].s:
shared_name = node_def.attr["shared_name"].s
elif attr.default_value.s:
shared_name = compat.as_bytes(attr.default_value.s)
if not shared_name:
shared_name = compat.as_bytes(node_def.name)
node_def.attr["shared_name"].s = (
shared_name + compat.as_bytes(shared_name_suffix))
def _fix_fdef(orig_fdef, functions, shared_name_suffix):
"""Fixes a FunctionDef proto to be loaded in current context.
In particular, when loading a function library into an eager context, one
  must rename the functions to avoid conflicts with existing functions.
Args:
orig_fdef: FunctionDef proto to fix. It is not modified.
functions: map from function name to a ConcreteFunction instance.
shared_name_suffix: A unique string for this load which helps to avoid
`shared_name` collisions across loads. Two functions from the same load
using the same `shared_name` still need to share, but functions from
different loads with the same `shared_name` should not.
Returns:
A fixed copy of the original FunctionDef.
"""
fdef = function_pb2.FunctionDef()
fdef.CopyFrom(orig_fdef)
contains_custom_gradients = False
for node_def in fdef.node_def:
fix_node_def(node_def, functions, shared_name_suffix)
if not contains_custom_gradients:
contains_custom_gradients = _check_op_has_custom_gradients(node_def)
if contains_custom_gradients:
logging.warning(
"Importing a function (%s) with ops with custom gradients. Will likely "
"fail if a gradient is requested.", fdef.signature.name)
fdef.signature.name = _clean_function_name(fdef.signature.name)
return fdef
def _list_function_deps(fdef, library_function_names):
"""Find functions referenced in `fdef`."""
# TODO(andresp): Recurse into list attributes and into NameAttrList attrs both
# when listing deps and when fixing them. `function_def_to_graph` also
# requires fixes.
deps = set()
for node_def in fdef.node_def:
if node_def.op in library_function_names:
deps.add(node_def.op)
else:
for _, attr_value in node_def.attr.items():
if attr_value.WhichOneof("value") == "func":
deps.add(attr_value.func.name)
elif attr_value.WhichOneof("value") == "list":
for fn in attr_value.list.func:
deps.add(fn.name)
return deps
_FUNCTION_WRAPPER_NAME_REGEX = r"^%s(.*)_\d+$" % (
    function_lib._INFERENCE_PREFIX)  # pylint:disable=protected-access
def _clean_function_name(name):
"""Vanity function to keep the function names comprehensible."""
# Note: each time a function is wrapped into `function_lib.ConcreteFunction`
# its name becomes "__inference_<orig>_xyz".
match = re.search(_FUNCTION_WRAPPER_NAME_REGEX, name)
if match:
return match.group(1)
else:
return name
|
|
"""
This module defines the different types of terms...
"""
__all__ = [
'Node',
'Identifier',
'URIRef',
'BNode',
'Literal',
'Variable',
'Statement',
]
import logging
_LOGGER = logging.getLogger(__name__)
import base64
import re
import threading
from urlparse import urlparse, urljoin, urldefrag
from string import ascii_letters, rsplit
from random import choice
from itertools import islice
from datetime import date, time, datetime, timedelta
from time import strptime
try:
from hashlib import md5
except ImportError:
from md5 import md5
# from sys import version_info
# if version_info[0:2] > (2, 2):
# from unicodedata import normalize
# else:
# normalize = None
#
#from rdflib.syntax.xml_names import is_ncname
#from rdflib.exceptions import Error
class Node(object):
"""
A Node in the Graph.
"""
__slots__ = ()
class Identifier(Node, unicode): # we allow Identifiers to be Nodes in our Graph
"""
See http://www.w3.org/2002/07/rdf-identifer-terminology/
regarding choice of terminology.
"""
__slots__ = ()
def __new__(cls, value):
return unicode.__new__(cls, value)
class URIRef(Identifier):
"""
RDF URI Reference: http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref
"""
__slots__ = ()
def __new__(cls, value, base=None):
if base is not None:
ends_in_hash = value.endswith("#")
value = urljoin(base, value, allow_fragments=1)
if ends_in_hash:
if not value.endswith("#"):
value += "#"
#if normalize and value and value != normalize("NFC", value):
# raise Error("value must be in NFC normalized form.")
try:
rt = unicode.__new__(cls, value)
except UnicodeDecodeError:
rt = unicode.__new__(cls, value, 'utf-8')
return rt
def n3(self):
return "<%s>" % self
def concrete(self):
if "#" in self:
return URIRef("/".join(rsplit(self, "#", 1)))
else:
return self
def abstract(self):
if "#" not in self:
scheme, netloc, path, params, query, fragment = urlparse(self)
if path:
return URIRef("#".join(rsplit(self, "/", 1)))
else:
if not self.endswith("#"):
return URIRef("%s#" % self)
else:
return self
else:
return self
def defrag(self):
if "#" in self:
url, frag = urldefrag(self)
return URIRef(url)
else:
return self
def __reduce__(self):
return (URIRef, (unicode(self),))
def __getnewargs__(self):
return (unicode(self), )
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
if isinstance(other, URIRef):
return unicode(self)==unicode(other)
else:
return False
def __str__(self):
return self.encode()
def __repr__(self):
if self.__class__ is URIRef:
clsName = "rdflib.term.URIRef"
else:
clsName = self.__class__.__name__
# quoting risk? drewp is not sure why this doesn't use %r
return """%s('%s')""" % (clsName, str(self))
def md5_term_hash(self):
"""a string of hex that will be the same for two URIRefs that
are the same. It is not a suitable unique id.
Supported for backwards compatibility; new code should
probably just use __hash__
"""
d = md5(str(self))
d.update("U")
return d.hexdigest()
def _letter():
while True:
yield choice(ascii_letters)
def _unique_id():
"""Create a (hopefully) unique prefix"""
uid = "".join(islice(_letter(), 0, 8))
return uid
def _serial_number_generator():
i = 0
while 1:
yield i
i = i + 1
bNodeLock = threading.RLock()
class BNode(Identifier):
"""
Blank Node: http://www.w3.org/TR/rdf-concepts/#section-blank-nodes
"""
__slots__ = ()
def __new__(cls, value=None,
_sn_gen=_serial_number_generator(), _prefix=_unique_id()):
"""
# only store implementations should pass in a value
"""
        if value is None:
# so that BNode values do not
# collide with ones created with a different instance of this module
# at some other time.
bNodeLock.acquire()
node_id = _sn_gen.next()
bNodeLock.release()
value = "%s%s" % (_prefix, node_id)
else:
# TODO: check that value falls within acceptable bnode value range
            # for RDF/XML needs to be something that can be serialized
# as a nodeID for N3 ?? Unless we require these
# constraints be enforced elsewhere?
pass #assert is_ncname(unicode(value)), "BNode identifiers
#must be valid NCNames"
return Identifier.__new__(cls, value)
def n3(self):
return "_:%s" % self
def __getnewargs__(self):
return (unicode(self), )
def __reduce__(self):
return (BNode, (unicode(self),))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
"""
>>> BNode("foo")==None
False
>>> BNode("foo")==URIRef("foo")
False
>>> URIRef("foo")==BNode("foo")
False
>>> BNode("foo")!=URIRef("foo")
True
>>> URIRef("foo")!=BNode("foo")
True
"""
if isinstance(other, BNode):
return unicode(self)==unicode(other)
else:
return False
def __str__(self):
return self.encode()
def __repr__(self):
if self.__class__ is BNode:
clsName = "rdflib.term.BNode"
else:
clsName = self.__class__.__name__
return """%s('%s')""" % (clsName, str(self))
def md5_term_hash(self):
"""a string of hex that will be the same for two BNodes that
are the same. It is not a suitable unique id.
Supported for backwards compatibility; new code should
probably just use __hash__
"""
d = md5(str(self))
d.update("B")
return d.hexdigest()
class Literal(Identifier):
"""
RDF Literal: http://www.w3.org/TR/rdf-concepts/#section-Graph-Literal
>>> Literal(1).toPython()
1L
>>> cmp(Literal("adsf"), 1)
1
>>> from rdflib.namespace import XSD
>>> lit2006 = Literal('2006-01-01',datatype=XSD.date)
>>> lit2006.toPython()
datetime.date(2006, 1, 1)
>>> lit2006 < Literal('2007-01-01',datatype=XSD.date)
True
>>> Literal(datetime.utcnow()).datatype
rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#dateTime')
>>> oneInt = Literal(1)
>>> twoInt = Literal(2)
>>> twoInt < oneInt
False
>>> Literal('1') < Literal(1)
False
>>> Literal('1') < Literal('1')
False
>>> Literal(1) < Literal('1')
True
>>> Literal(1) < Literal(2.0)
True
>>> Literal(1) < URIRef('foo')
True
>>> Literal(1) < 2.0
True
>>> Literal(1) < object
True
>>> lit2006 < "2007"
True
>>> "2005" < lit2006
True
"""
__slots__ = ("language", "datatype", "_cmp_value")
def __new__(cls, value, lang=None, datatype=None):
if lang is not None and datatype is not None:
raise TypeError("A Literal can only have one of lang or datatype, "
"per http://www.w3.org/TR/rdf-concepts/#section-Graph-Literal")
if datatype:
lang = None
else:
value, datatype = _castPythonToLiteral(value)
if datatype:
lang = None
if datatype:
datatype = URIRef(datatype)
try:
inst = unicode.__new__(cls, value)
except UnicodeDecodeError:
inst = unicode.__new__(cls, value, 'utf-8')
inst.language = lang
inst.datatype = datatype
inst._cmp_value = inst._toCompareValue()
return inst
def __reduce__(self):
return (Literal, (unicode(self), self.language, self.datatype),)
def __getstate__(self):
return (None, dict(language=self.language, datatype=self.datatype))
def __setstate__(self, arg):
_, d = arg
self.language = d["language"]
self.datatype = d["datatype"]
def __add__(self, val):
"""
>>> Literal(1) + 1
2L
>>> Literal("1") + "1"
rdflib.term.Literal(u'11')
"""
py = self.toPython()
if isinstance(py, Literal):
s = super(Literal, self).__add__(val)
return Literal(s, self.language, self.datatype)
else:
return py + val
def __lt__(self, other):
"""
>>> from rdflib.namespace import XSD
>>> Literal("YXNkZg==", datatype=XSD[u'base64Binary']) < "foo"
True
>>> u"\xfe" < Literal(u"foo")
False
>>> Literal(base64.encodestring(u"\xfe".encode("utf-8")), datatype=URIRef("http://www.w3.org/2001/XMLSchema#base64Binary")) < u"foo"
False
"""
if other is None:
return False # Nothing is less than None
try:
return self._cmp_value < other
except TypeError, te:
return unicode(self._cmp_value) < other
except UnicodeDecodeError, ue:
if isinstance(self._cmp_value, str):
return self._cmp_value < other.encode("utf-8")
else:
raise ue
def __le__(self, other):
"""
>>> from rdflib.namespace import XSD
>>> Literal('2007-01-01T10:00:00', datatype=XSD.dateTime) <= Literal('2007-01-01T10:00:00', datatype=XSD.dateTime)
True
"""
if other is None:
return False
if self==other:
return True
else:
return self < other
def __gt__(self, other):
if other is None:
return True # Everything is greater than None
try:
return self._cmp_value > other
except TypeError, te:
return unicode(self._cmp_value) > other
except UnicodeDecodeError, ue:
if isinstance(self._cmp_value, str):
return self._cmp_value > other.encode("utf-8")
else:
raise ue
def __ge__(self, other):
if other is None:
return False
if self==other:
return True
else:
return self > other
def __ne__(self, other):
"""
Overridden to ensure proper result for comparisons with None via !=.
Routes all other != and <> comparisons to __eq__
>>> Literal('') != None
True
>>> Literal('2') <> Literal('2')
False
"""
return not self.__eq__(other)
def __hash__(self):
"""
>>> from rdflib.namespace import XSD
>>> a = {Literal('1', datatype=XSD.integer):'one'}
>>> Literal('1', datatype=XSD.double) in a
False
"Called for the key object for dictionary operations,
and by the built-in function hash(). Should return
a 32-bit integer usable as a hash value for
dictionary operations. The only required property
is that objects which compare equal have the same
hash value; it is advised to somehow mix together
(e.g., using exclusive or) the hash values for the
components of the object that also play a part in
comparison of objects." -- 3.4.1 Basic customization (Python)
"Two literals are equal if and only if all of the following hold:
* The strings of the two lexical forms compare equal, character by character.
* Either both or neither have language tags.
* The language tags, if any, compare equal.
* Either both or neither have datatype URIs.
* The two datatype URIs, if any, compare equal, character by character."
-- 6.5.1 Literal Equality (RDF: Concepts and Abstract Syntax)
"""
return Identifier.__hash__(self) ^ hash(self.language) ^ hash(self.datatype)
def __eq__(self, other):
"""
>>> f = URIRef("foo")
>>> f is None or f == ''
False
>>> Literal("1", datatype=URIRef("foo")) == Literal("1", datatype=URIRef("foo"))
True
>>> Literal("1", datatype=URIRef("foo")) == Literal("2", datatype=URIRef("foo"))
False
>>> Literal("1", datatype=URIRef("foo")) == "asdf"
False
>>> from rdflib.namespace import XSD
>>> Literal('2007-01-01', datatype=XSD.date) == Literal('2007-01-01', datatype=XSD.date)
True
>>> Literal('2007-01-01', datatype=XSD.date) == date(2007, 1, 1)
True
>>> oneInt = Literal(1)
>>> oneNoDtype = Literal('1')
>>> oneInt == oneNoDtype
False
>>> Literal("1", XSD[u'string']) == Literal("1", XSD[u'string'])
True
>>> Literal("one", lang="en") == Literal("one", lang="en")
True
>>> Literal("hast", lang='en') == Literal("hast", lang='de')
False
>>> oneInt == Literal(1)
True
>>> oneFloat = Literal(1.0)
>>> oneInt == oneFloat
True
>>> oneInt == 1
True
"""
if other is None:
return False
if isinstance(other, Literal):
return self._cmp_value == other._cmp_value
elif isinstance(other, basestring):
return unicode(self) == other
else:
return self._cmp_value == other
def n3(self):
r'''
Returns a representation in the N3 format.
Examples::
>>> Literal("foo").n3()
u'"foo"'
Strings with newlines or triple-quotes::
>>> Literal("foo\nbar").n3()
u'"""foo\nbar"""'
>>> Literal("''\'").n3()
u'"\'\'\'"'
>>> Literal('"""').n3()
u'"\\"\\"\\""'
Language::
>>> Literal("hello", lang="en").n3()
u'"hello"@en'
Datatypes::
>>> Literal(1).n3()
u'"1"^^<http://www.w3.org/2001/XMLSchema#integer>'
>>> Literal(1.0).n3()
u'"1.0"^^<http://www.w3.org/2001/XMLSchema#float>'
>>> Literal(True).n3()
u'"true"^^<http://www.w3.org/2001/XMLSchema#boolean>'
Specifying both datatype and language isn't allowed (datatype takes precedence)::
>>> Literal(1, lang="en").n3()
u'"1"^^<http://www.w3.org/2001/XMLSchema#integer>'
Custom datatype::
>>> footype = URIRef("http://example.org/ns#foo")
>>> Literal("1", datatype=footype).n3()
u'"1"^^<http://example.org/ns#foo>'
'''
return self._literal_n3()
def _literal_n3(self, use_plain=False, qname_callback=None):
'''
Using plain literal (shorthand) output::
>>> Literal(1)._literal_n3(use_plain=True)
u'1'
>>> Literal(1.0)._literal_n3(use_plain=True)
u'1.0'
>>> from rdflib.namespace import XSD
>>> Literal("foo", datatype=XSD.string)._literal_n3(
... use_plain=True)
u'"foo"^^<http://www.w3.org/2001/XMLSchema#string>'
>>> Literal(True)._literal_n3(use_plain=True)
u'true'
>>> Literal(False)._literal_n3(use_plain=True)
u'false'
Using callback for datatype QNames::
>>> Literal(1)._literal_n3(
... qname_callback=lambda uri: u"xsd:integer")
u'"1"^^xsd:integer'
'''
if use_plain and self.datatype in _PLAIN_LITERAL_TYPES:
try:
self.toPython() # check validity
return '%s' % self
except ValueError:
pass # invalid lexical form for this datatype; fall back to quoted output below
encoded = self._quote_encode()
datatype = self.datatype
quoted_dt = None
if datatype:
if qname_callback:
quoted_dt = qname_callback(datatype)
if not quoted_dt:
quoted_dt = "<%s>" % datatype
language = self.language
if language:
if datatype:
# TODO: this isn't valid RDF (it's datatype XOR language)
return '%s@%s^^%s' % (encoded, language, quoted_dt)
return '%s@%s' % (encoded, language)
elif datatype:
return '%s^^%s' % (encoded, quoted_dt)
else:
return '%s' % encoded
def _quote_encode(self):
# This simpler encoding doesn't work; a newline gets encoded as "\\n",
# which is ok in source code, but we want "\n".
#encoded = self.encode('unicode-escape').replace(
# '\\', '\\\\').replace('"','\\"')
#encoded = self.replace.replace('\\', '\\\\').replace('"','\\"')
# NOTE: Could in theory choose quotes based on quotes appearing in the
# string, i.e. '"' and "'", but N3/turtle doesn't allow "'"(?).
# which is nicer?
# if self.find("\"")!=-1 or self.find("'")!=-1 or self.find("\n")!=-1:
if "\n" in self:
# Triple quote this string.
encoded = self.replace('\\', '\\\\')
if '"""' in self:
# is this ok?
encoded = encoded.replace('"""','\\"""')
if encoded.endswith('"'):
encoded = encoded[:-1] + "\\\""
return '"""%s"""' % encoded
else:
return '"%s"' % self.replace('\n','\\n').replace('\\', '\\\\'
).replace('"', '\\"')
def __str__(self):
return self.encode()
def __repr__(self):
args = [super(Literal, self).__repr__()]
if self.language is not None:
args.append("lang=%s" % repr(self.language))
if self.datatype is not None:
args.append("datatype=%s" % repr(self.datatype))
if self.__class__ == Literal:
clsName = "rdflib.term.Literal"
else:
clsName = self.__class__.__name__
return """%s(%s)""" % (clsName, ", ".join(args))
def toPython(self):
"""
Returns an appropriate python datatype derived from this RDF Literal
"""
convFunc = _toPythonMapping.get(self.datatype, None)
if convFunc:
rt = convFunc(self)
else:
rt = self
return rt
def _toCompareValue(self):
try:
rt = self.toPython()
except Exception, e:
_LOGGER.warning("could not convert %s to a Python datatype" %
repr(self))
rt = self
if rt is self:
if self.language is None and self.datatype is None:
return unicode(rt)
else:
return (unicode(rt), rt.datatype, rt.language)
return rt
def md5_term_hash(self):
"""a string of hex that will be the same for two Literals that
are the same. It is not a suitable unique id.
Supported for backwards compatibility; new code should
probably just use __hash__
"""
d = md5(str(self))
d.update("L")
return d.hexdigest()
_XSD_PFX = 'http://www.w3.org/2001/XMLSchema#'
_PLAIN_LITERAL_TYPES = (
URIRef(_XSD_PFX+'integer'),
URIRef(_XSD_PFX+'float'),
#XSD.decimal, XSD.double, # TODO: "subsumed" by float...
URIRef(_XSD_PFX+'boolean'),
)
def _castPythonToLiteral(obj):
"""
Casts a python datatype to a tuple of the lexical value and a
datatype URI (or None)
"""
for pType,(castFunc,dType) in _PythonToXSD:
if isinstance(obj, pType):
if castFunc:
return castFunc(obj), dType
elif dType:
return obj, dType
else:
return obj, None
return obj, None # TODO: is this right for the fall through case?
# Mappings from Python types to XSD datatypes and back (borrowed from sparta)
# datetime instances are also instances of date... so we need to order these.
_PythonToXSD = [
(basestring, (None, None)),
(float , (None, URIRef(_XSD_PFX+'float'))),
(bool , (lambda i:str(i).lower(), URIRef(_XSD_PFX+'boolean'))),
(int , (None, URIRef(_XSD_PFX+'integer'))),
(long , (None, URIRef(_XSD_PFX+'long'))),
(datetime , (lambda i:i.isoformat(), URIRef(_XSD_PFX+'dateTime'))),
(date , (lambda i:i.isoformat(), URIRef(_XSD_PFX+'date'))),
(time , (lambda i:i.isoformat(), URIRef(_XSD_PFX+'time'))),
]
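# Illustrative sketch (not part of the original module): how _castPythonToLiteral
# uses the ordering of _PythonToXSD. bool is listed before int, so True maps to
# xsd:boolean rather than xsd:integer, and plain strings fall through with no
# datatype. The helper name below is made up purely for demonstration.
def _demo_cast_python_to_literal():
    lexical, dtype = _castPythonToLiteral(True)
    assert lexical == "true" and dtype == URIRef(_XSD_PFX + 'boolean')
    lexical, dtype = _castPythonToLiteral(u"hello")
    assert lexical == u"hello" and dtype is None
    lexical, dtype = _castPythonToLiteral(42)
    assert lexical == 42 and dtype == URIRef(_XSD_PFX + 'integer')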
def _strToTime(v) :
return strptime(v, "%H:%M:%S")
def _strToDate(v) :
tstr = strptime(v, "%Y-%m-%d")
return date(tstr.tm_year, tstr.tm_mon, tstr.tm_mday)
def _strToDateTime(v) :
"""
Attempt to cast to datetime, or just return the string (otherwise)
"""
try:
tstr = strptime(v, "%Y-%m-%dT%H:%M:%S")
except:
try:
tstr = strptime(v, "%Y-%m-%dT%H:%M:%SZ")
except:
try:
tstr = strptime(v, "%Y-%m-%dT%H:%M:%S%Z")
except:
try:
# %f only works in python 2.6
# in 2.5 a ValueError will be raised, and we still return
# just the string
return datetime.strptime(v, "%Y-%m-%dT%H:%M:%S.%f")
except:
try:
# %f only works in python 2.6
return datetime.strptime(v, "%Y-%m-%dT%H:%M:%S.%fZ")
except:
try:
# %f only works in python 2.6
# HACK split off the timezone offset
# works for "2011-01-16T19:39:18.239743+01:00"
m = re.match(r'(.*)([-+])(\d{2}):(\d{2})$',
v).groups()
d = datetime.strptime(m[0], "%Y-%m-%dT%H:%M:%S.%f")
t = timedelta(hours=int(m[2]), seconds=int(m[3]))
if m[1] == '+':
d += t
else:
d -= t
return d
except:
return v
return datetime(tstr.tm_year, tstr.tm_mon, tstr.tm_mday,
tstr.tm_hour, tstr.tm_min, tstr.tm_sec)
XSDToPython = {
URIRef(_XSD_PFX+'time') : _strToTime,
URIRef(_XSD_PFX+'date') : _strToDate,
URIRef(_XSD_PFX+'dateTime') : _strToDateTime,
URIRef(_XSD_PFX+'string') : None,
URIRef(_XSD_PFX+'normalizedString') : None,
URIRef(_XSD_PFX+'token') : None,
URIRef(_XSD_PFX+'language') : None,
URIRef(_XSD_PFX+'boolean') : lambda i:i.lower() in ['1','true'],
URIRef(_XSD_PFX+'decimal') : float,
URIRef(_XSD_PFX+'integer') : long,
URIRef(_XSD_PFX+'nonPositiveInteger') : int,
URIRef(_XSD_PFX+'long') : long,
URIRef(_XSD_PFX+'nonNegativeInteger') : int,
URIRef(_XSD_PFX+'negativeInteger') : int,
URIRef(_XSD_PFX+'int') : long,
URIRef(_XSD_PFX+'unsignedLong') : long,
URIRef(_XSD_PFX+'positiveInteger') : int,
URIRef(_XSD_PFX+'short') : int,
URIRef(_XSD_PFX+'unsignedInt') : long,
URIRef(_XSD_PFX+'byte') : int,
URIRef(_XSD_PFX+'unsignedShort') : int,
URIRef(_XSD_PFX+'unsignedByte') : int,
URIRef(_XSD_PFX+'float') : float,
URIRef(_XSD_PFX+'double') : float,
URIRef(_XSD_PFX+'base64Binary') : base64.decodestring,
URIRef(_XSD_PFX+'anyURI') : None,
}
_toPythonMapping = {}
_toPythonMapping.update(XSDToPython)
def bind(datatype, conversion_function):
"""
bind a datatype to a function for converting it into a Python
instance.
"""
if datatype in _toPythonMapping:
_LOGGER.warning("datatype '%s' was already bound. Rebinding." %
datatype)
_toPythonMapping[datatype] = conversion_function
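# Illustrative sketch (not part of the original module): binding a custom datatype
# so that Literal.toPython() yields a native Python value. The datatype URI and
# helper name below are hypothetical and chosen only for demonstration.
def _demo_bind_custom_datatype():
    csv_dtype = URIRef("http://example.org/ns#csv")
    # After binding, any Literal carrying this datatype converts via the callable.
    bind(csv_dtype, lambda lexical: unicode(lexical).split(u","))
    return Literal(u"a,b,c", datatype=csv_dtype).toPython()  # -> [u'a', u'b', u'c']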
class Variable(Identifier):
"""
"""
__slots__ = ()
def __new__(cls, value):
if value[0]=='?':
value=value[1:]
return unicode.__new__(cls, value)
def __repr__(self):
return self.n3()
def n3(self):
return "?%s" % self
def __reduce__(self):
return (Variable, (unicode(self),))
def md5_term_hash(self):
"""a string of hex that will be the same for two Variables that
are the same. It is not a suitable unique id.
Supported for backwards compatibility; new code should
probably just use __hash__
"""
d = md5(str(self))
d.update("V")
return d.hexdigest()
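# Illustrative sketch (not part of the original module): Variable strips a leading
# '?' on construction and re-adds it when serialized with n3().
def _demo_variable_roundtrip():
    v = Variable("?x")
    return unicode(v), v.n3()  # -> (u'x', u'?x')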
class Statement(Node, tuple):
def __new__(cls, (subject, predicate, object), context):
return tuple.__new__(cls, ((subject, predicate, object), context))
def __reduce__(self):
return (Statement, (self[0], self[1]))
if __name__ == '__main__':
import doctest
doctest.testmod()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel builder implementation."""
import functools
import os
from google.protobuf.any_pb2 import Any
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.saved_model.pywrap_saved_model import metrics
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.tf_export import tf_export
# API label for SavedModel metrics.
_SAVE_BUILDER_LABEL = "save_v1_builder"
# Base class for the SavedModelBuilder that is only used by TensorFlow
# internally. Please use tf.compat.v1.saved_model.SavedModelBuilder instead.
@tf_export("__internal__.saved_model.SavedModelBuilder", v1=[])
class _SavedModelBuilder(object):
"""Builds the `SavedModel` protocol buffer and saves variables and assets.
The `SavedModelBuilder` class provides the functionality to build a
`SavedModel` protocol buffer. Specifically, this allows multiple meta
graphs to be saved as part of a single language-neutral `SavedModel`,
while sharing variables and assets.
To build a SavedModel, the first meta graph must be saved with variables.
Subsequent meta graphs will simply be saved with their graph definitions. If
assets need to be saved and written or copied to disk, they can be provided
when the meta graph def is added. If multiple meta graph defs are associated
with an asset of the same name, only the first version is retained.
Each meta graph added to the SavedModel must be annotated with tags. The tags
provide a means to identify the specific meta graph to load and restore, along
with the shared set of variables and assets.
Typical usage for the `SavedModelBuilder`:
```python
...
builder = tf.compat.v1.saved_model.Builder(export_dir)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph_and_variables(sess,
["foo-tag"],
signature_def_map=foo_signatures,
assets_list=foo_assets)
...
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph(["bar-tag", "baz-tag"])
...
builder.save()
```
Note: This class will only be available through the v1 compatibility
library as tf.compat.v1.saved_model.builder.SavedModelBuilder or
tf.compat.v1.saved_model.Builder. TensorFlow 2.0 will introduce a new
object-based method of creating SavedModels.
"""
def __init__(self, export_dir):
self._saved_model = saved_model_pb2.SavedModel()
self._saved_model.saved_model_schema_version = (
constants.SAVED_MODEL_SCHEMA_VERSION)
self._export_dir = export_dir
if file_io.file_exists(export_dir):
if file_io.list_directory(export_dir):
raise AssertionError(
f"Export directory {export_dir} already exists, and isn't empty. "
"Please choose a different export directory, or delete all the "
"contents of the specified directory.")
else:
file_io.recursive_create_dir(self._export_dir)
# Boolean to track whether variables and assets corresponding to the
# SavedModel have been saved. Specifically, the first meta graph to be added
# MUST use the add_meta_graph_and_variables() API. Subsequent add operations
# on the SavedModel MUST use the add_meta_graph() API which does not save
# weights.
self._has_saved_variables = False
def _save_and_write_assets(self, meta_graph_def, assets_list=None):
"""Saves asset to the meta graph and writes asset files to disk.
Args:
meta_graph_def: The meta graph def to which the assets will be added.
assets_list: The list where the asset paths are set up.
"""
# Creates a function that adds assets into the meta graph def.
write_fn = functools.partial(_add_asset_to_metagraph, meta_graph_def)
asset_filename_map = _maybe_save_assets(write_fn, assets_list)
# Return if there are no assets to write.
if not asset_filename_map:
tf_logging.info("No assets to write.")
return
# Copy assets from source path to destination path.
copy_assets_to_destination_dir(asset_filename_map, self._export_dir)
def _tag_and_add_meta_graph(self, meta_graph_def, tags, signature_def_map):
"""Tags the meta graph def and adds it to the SavedModel.
Tags the meta graph def with the supplied tags, adds signature defs to it if
provided and appends the meta graph def to the SavedModel proto.
Args:
meta_graph_def: The meta graph def to add to the SavedModel.
tags: The set of tags to annotate the meta graph def with.
signature_def_map: The map of signature defs to be added to the meta graph
def.
"""
for tag in tags:
meta_graph_def.meta_info_def.tags.append(tag)
if signature_def_map is not None:
for key in signature_def_map:
meta_graph_def.signature_def[key].CopyFrom(signature_def_map[key])
proto_meta_graph_def = self._saved_model.meta_graphs.add()
proto_meta_graph_def.CopyFrom(meta_graph_def)
def _validate_tensor_info(self, tensor_info):
"""Validates the `TensorInfo` proto.
Checks if the `encoding` (`name` or `coo_sparse` or `type_spec`) and
`dtype` fields exist and are non-empty.
Args:
tensor_info: `TensorInfo` protocol buffer to validate.
Raises:
AssertionError: If the `encoding` or `dtype` fields of the supplied
`TensorInfo` proto are not populated.
"""
if tensor_info is None:
raise AssertionError(
"All TensorInfo protos used in the SignatureDefs must have the name "
"and dtype fields set.")
if tensor_info.WhichOneof("encoding") is None:
# TODO(soergel) validate each of the fields of coo_sparse
raise AssertionError(
f"Invalid `tensor_info`: {tensor_info}. All TensorInfo protos used "
"in the SignatureDefs must have one of the 'encoding' fields (e.g., "
"name or coo_sparse) set.")
if tensor_info.WhichOneof("encoding") == "composite_tensor":
for component in tensor_info.composite_tensor.components:
self._validate_tensor_info(component)
elif tensor_info.dtype == types_pb2.DT_INVALID:
raise AssertionError(
f"Invalid `tensor_info`: {tensor_info}. All TensorInfo protos used in"
" the SignatureDefs must have the dtype field set.")
def _validate_signature_def_map(self, signature_def_map):
"""Validates the `SignatureDef` entries in the signature def map.
Validation of entries in the signature def map includes ensuring that the
`name` and `dtype` fields of the TensorInfo protos of the `inputs` and
`outputs` of each `SignatureDef` are populated. Also ensures that reserved
SignatureDef keys for the initialization and train ops are not used.
Args:
signature_def_map: The map of signature defs to be validated.
Raises:
AssertionError: If a TensorInfo is not valid.
KeyError: If a reserved signature key is used in the map.
"""
for signature_def_key in signature_def_map:
signature_def = signature_def_map[signature_def_key]
inputs = signature_def.inputs
outputs = signature_def.outputs
for inputs_key in inputs:
self._validate_tensor_info(inputs[inputs_key])
for outputs_key in outputs:
self._validate_tensor_info(outputs[outputs_key])
if constants.INIT_OP_SIGNATURE_KEY in signature_def_map:
raise KeyError(
f"SignatureDef map key \"{constants.INIT_OP_SIGNATURE_KEY}\" is "
"reserved for initialization. Please use a different key.")
if constants.TRAIN_OP_SIGNATURE_KEY in signature_def_map:
raise KeyError(
f"SignatureDef map key \"{constants.TRAIN_OP_SIGNATURE_KEY}\" is "
f"reserved for the train op. Please use a different key.")
def _maybe_create_saver(self, saver=None):
"""Creates a sharded saver if one does not already exist."""
if not saver:
# Initialize a saver to generate a sharded output for all saveables in the
# current scope.
saver = tf_saver.Saver(
variables._all_saveable_objects(), # pylint: disable=protected-access
sharded=True,
write_version=saver_pb2.SaverDef.V2,
allow_empty=True)
return saver
def add_meta_graph(self,
tags,
signature_def_map=None,
assets_list=None,
clear_devices=False,
init_op=None,
train_op=None,
saver=None):
"""Adds the current meta graph to the SavedModel.
Creates a Saver in the current scope and uses the Saver to export the meta
graph def. Invoking this API requires the `add_meta_graph_and_variables()`
API to have been invoked before.
Args:
tags: The set of tags to annotate the meta graph def with.
signature_def_map: The map of signature defs to be added to the meta graph
def.
assets_list: Assets to be saved with SavedModel. Note
that this list should be a subset of the assets saved as part of
the first meta graph in the SavedModel.
clear_devices: Set to true if the device info on the default graph should
be cleared.
init_op: Op or group of ops to execute when the graph is loaded. Note
that when the init_op is specified it is run after the restore op at
load-time.
train_op: Op or group of ops that trains the model when run. This will
not be run automatically when the graph is loaded, instead saved in
a SignatureDef accessible through the exported MetaGraph.
saver: An instance of tf.compat.v1.train.Saver that will be used to export
the metagraph. If None, a sharded Saver that restores all variables will
be used.
Raises:
AssertionError: If the variables for the SavedModel have not been saved
yet, or if the graph already contains one or more legacy init ops.
"""
if not self._has_saved_variables:
raise AssertionError(
"Graph state including variables and assets has not been saved yet. "
"Please invoke `add_meta_graph_and_variables()` first.")
# Validate the signature def map to ensure all included TensorInfos are
# properly populated.
signature_def_map = signature_def_map or {}
self._validate_signature_def_map(signature_def_map)
# Create a SignatureDef pointing to the graph initialization op, which will
# be added to the MetaGraphDef.
_add_op_to_signature_def_map(signature_def_map, init_op,
constants.INIT_OP_SIGNATURE_KEY)
_add_op_to_signature_def_map(signature_def_map, train_op,
constants.TRAIN_OP_SIGNATURE_KEY)
saver = self._maybe_create_saver(saver)
# The graph almost certainly previously contained at least one Saver, and
# possibly several (e.g. one for loading a pretrained embedding, and another
# for the model weights). Removing the preexisting ones was the
# motivation for the clear_extraneous_savers option, but it turns out that
# there are edge cases where that option breaks the graph. Until that is
# resolved, we just leave the option set to False for now.
# TODO(soergel): Reinstate clear_extraneous_savers=True when possible.
meta_graph_def = saver.export_meta_graph(
clear_devices=clear_devices, strip_default_attrs=True)
# Save asset files and write them to disk, if any.
self._save_and_write_assets(meta_graph_def, assets_list)
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
def add_meta_graph_and_variables(self,
sess,
tags,
signature_def_map=None,
assets_list=None,
clear_devices=False,
init_op=None,
train_op=None,
strip_default_attrs=False,
saver=None):
# pylint: disable=line-too-long
"""Adds the current meta graph to the SavedModel and saves variables.
Creates a Saver to save the variables from the provided session. Exports the
corresponding meta graph def. This function assumes that the variables to be
saved have been initialized. For a given `SavedModelBuilder`, this API must
be called exactly once and for the first meta graph to save. For subsequent
meta graph defs to be added, the `add_meta_graph()` API must be used.
Args:
sess: The TensorFlow session from which to save the meta graph and
variables.
tags: The set of tags with which to save the meta graph.
signature_def_map: The map of signature def map to add to the meta graph
def.
assets_list: Assets to be saved with SavedModel.
clear_devices: Set to true if the device info on the default graph should
be cleared.
init_op: Op or group of ops to execute when the graph is loaded. Note
that when the init_op is specified it is run after the restore op at
load-time.
train_op: Op or group of ops that trains the model when run. This will
not be run automatically when the graph is loaded, instead saved in
a SignatureDef accessible through the exported MetaGraph.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
saver: An instance of tf.compat.v1.train.Saver that will be used to export the
metagraph and save variables. If None, a sharded Saver that restores
all variables will be used.
"""
# pylint: enable=line-too-long
if self._has_saved_variables:
raise AssertionError("Graph state including variables and assets has "
"already been saved. Please invoke "
"`add_meta_graph()` instead.")
# Validate the signature def map to ensure all included TensorInfos are
# properly populated.
signature_def_map = signature_def_map or {}
self._validate_signature_def_map(signature_def_map)
# Create a SignatureDef pointing to the graph initialization op, which will
# be added to the MetaGraphDef.
_add_op_to_signature_def_map(signature_def_map, init_op,
constants.INIT_OP_SIGNATURE_KEY)
_add_op_to_signature_def_map(signature_def_map, train_op,
constants.TRAIN_OP_SIGNATURE_KEY)
saved_model_utils.get_or_create_variables_dir(self._export_dir)
variables_path = saved_model_utils.get_variables_path(self._export_dir)
saver = self._maybe_create_saver(saver)
# Save the variables. Also, disable writing the checkpoint state proto. The
# file is not used during SavedModel loading. In addition, since a
# SavedModel can be copied or moved, this avoids the checkpoint state
# becoming outdated.
saver.save(sess, variables_path, write_meta_graph=False, write_state=False)
# Export the meta graph def.
# The graph almost certainly previously contained at least one Saver, and
# possibly several (e.g. one for loading a pretrained embedding, and another
# for the model weights). Removing the preexisting ones was the
# motivation for the clear_extraneous_savers option, but it turns out that
# there are edge cases where that option breaks the graph. Until that is
# resolved, we just leave the option set to False for now.
# TODO(soergel): Reinstate clear_extraneous_savers=True when possible.
meta_graph_def = saver.export_meta_graph(
clear_devices=clear_devices, strip_default_attrs=strip_default_attrs)
# Save asset files and write them to disk, if any.
self._save_and_write_assets(meta_graph_def, assets_list)
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
# Mark this instance of SavedModel as having saved variables, such that
# subsequent attempts to save variables will fail.
self._has_saved_variables = True
def save(self, as_text=False):
"""Writes a `SavedModel` protocol buffer to disk.
The function writes the SavedModel protocol buffer to the export directory
in a serialized format.
Args:
as_text: Writes the SavedModel protocol buffer in text format to
disk. Protocol buffers in text format are useful for debugging, but
parsing fails when it encounters an unknown field and so is not forward
compatible. This means changes to TensorFlow may prevent deployment of
new text format SavedModels to existing serving binaries. Do not deploy
`as_text` SavedModels to production.
Returns:
The path to which the SavedModel protocol buffer was written.
"""
metrics.IncrementWriteApi(_SAVE_BUILDER_LABEL)
if not file_io.file_exists(self._export_dir):
file_io.recursive_create_dir(self._export_dir)
if as_text:
path = file_io.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
file_io.write_string_to_file(path, str(self._saved_model))
else:
path = file_io.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
file_io.write_string_to_file(
path, self._saved_model.SerializeToString(deterministic=True))
tf_logging.info("SavedModel written to: %s", compat.as_text(path))
metrics.IncrementWrite(write_version="1")
return path
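# Illustrative sketch (not part of the original module): the smallest TensorInfo
# that passes _SavedModelBuilder._validate_tensor_info -- it populates one of the
# `encoding` fields (`name`) and a concrete dtype. The tensor name is hypothetical.
def _demo_minimal_tensor_info():
  info = meta_graph_pb2.TensorInfo()
  info.name = "input_tensor:0"  # sets the `name` member of the `encoding` oneof
  info.dtype = types_pb2.DT_FLOAT  # anything other than DT_INVALID
  return info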
@tf_export(v1=["saved_model.Builder", "saved_model.builder.SavedModelBuilder"]) # pylint: disable=missing-docstring
class SavedModelBuilder(_SavedModelBuilder):
__doc__ = _SavedModelBuilder.__doc__.replace("assets_list",
"assets_collection")
def __init__(self, export_dir):
super(SavedModelBuilder, self).__init__(export_dir=export_dir)
def _add_collections(self, assets_collection, main_op, train_op):
"""Add asset and op collections to be saved."""
# Save asset files and write them to disk, if any.
self._save_and_write_assets(assets_collection)
self._maybe_add_main_op(main_op)
self._add_train_op(train_op)
def _save_and_write_assets(self, assets_collection_to_add=None):
"""Saves asset to the meta graph and writes asset files to disk.
Args:
assets_collection_to_add: The collection where the asset paths are set up.
"""
# Add assets to the collection with key `saved_model.ASSETS_KEY`, in the
# graph.
asset_filename_map = _maybe_save_assets(_add_asset_to_collection,
assets_collection_to_add)
# Return if there are no assets to write.
if not asset_filename_map:
tf_logging.info("No assets to write.")
return
# Copy assets from source path to destination path.
copy_assets_to_destination_dir(asset_filename_map, self._export_dir)
def _maybe_add_main_op(self, main_op):
"""Adds main op to the SavedModel.
Args:
main_op: Main op to run as part of graph initialization. If None, no main
op will be added to the graph.
Raises:
TypeError: If the main op is provided but is not of type `Operation`.
ValueError: If the Graph already contains an init op.
"""
if main_op is None:
return
if not isinstance(main_op, ops.Operation):
raise TypeError(f"Expected {main_op} to be an Operation but got type "
f"{type(main_op)} instead.")
# Validate that no other init ops have been added to this graph already.
# We check main_op and legacy_init_op for thoroughness and explicitness.
for init_op_key in (constants.MAIN_OP_KEY, constants.LEGACY_INIT_OP_KEY):
if ops.get_collection(init_op_key):
raise ValueError(
"Graph already contains one or more main ops under the "
f"collection {init_op_key}.")
ops.add_to_collection(constants.MAIN_OP_KEY, main_op)
def _add_train_op(self, train_op):
"""Add train op to the SavedModel.
Note that this functionality is in development, and liable to be
moved elsewhere.
Args:
train_op: Op or group of ops that are used for training. These are stored
as a collection with key TRAIN_OP_KEY, but not executed.
Raises:
TypeError: If `train_op` is neither a `Tensor` nor an `Operation`.
"""
if train_op is not None:
if (not isinstance(train_op, ops.Tensor) and
not isinstance(train_op, ops.Operation)):
raise TypeError(f"`train_op` {train_op} needs to be a Tensor or Op.")
ops.add_to_collection(constants.TRAIN_OP_KEY, train_op)
@deprecated_args(None,
"Pass your op to the equivalent parameter main_op instead.",
"legacy_init_op")
def add_meta_graph(self,
tags,
signature_def_map=None,
assets_collection=None,
legacy_init_op=None,
clear_devices=False,
main_op=None,
strip_default_attrs=False,
saver=None):
if not self._has_saved_variables:
raise AssertionError(
"Graph state including variables and assets has not been saved yet. "
"Please invoke `add_meta_graph_and_variables()` first.")
# Validate the signature def map to ensure all included TensorInfos are
# properly populated.
signature_def_map = signature_def_map or {}
self._validate_signature_def_map(signature_def_map)
# legacy_init_op is deprecated, and going away in TF 2.0.
# Re-mapping to main_op, as treatment is identical regardless.
main_op = main_op if main_op is not None else legacy_init_op
# Add assets and ops
self._add_collections(assets_collection, main_op, None)
saver = self._maybe_create_saver(saver)
# The graph almost certainly previously contained at least one Saver, and
# possibly several (e.g. one for loading a pretrained embedding, and another
# for the model weights). Removing the preexisting ones was the
# motivation for the clear_extraneous_savers option, but it turns out that
# there are edge cases where that option breaks the graph. Until that is
# resolved, we just leave the option set to False for now.
# TODO(soergel): Reinstate clear_extraneous_savers=True when possible.
meta_graph_def = saver.export_meta_graph(
clear_devices=clear_devices, strip_default_attrs=strip_default_attrs)
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
@deprecated_args(None,
"Pass your op to the equivalent parameter main_op instead.",
"legacy_init_op")
def add_meta_graph_and_variables(self,
sess,
tags,
signature_def_map=None,
assets_collection=None,
legacy_init_op=None,
clear_devices=False,
main_op=None,
strip_default_attrs=False,
saver=None):
if self._has_saved_variables:
raise AssertionError("Graph state including variables and assets has "
"already been saved. Please invoke "
"`add_meta_graph()` instead.")
# Validate the signature def map to ensure all included TensorInfos are
# properly populated.
signature_def_map = signature_def_map or {}
self._validate_signature_def_map(signature_def_map)
# legacy_init_op is deprecated, and going away in TF 2.0.
# Re-mapping to main_op, as treatment is identical regardless.
main_op = main_op or legacy_init_op
# Add assets and ops
self._add_collections(assets_collection, main_op, None)
saved_model_utils.get_or_create_variables_dir(self._export_dir)
variables_path = saved_model_utils.get_variables_path(self._export_dir)
saver = self._maybe_create_saver(saver)
# Save the variables. Also, disable writing the checkpoint state proto. The
# file is not used during SavedModel loading. In addition, since a
# SavedModel can be copied or moved, this avoids the checkpoint state
# becoming outdated.
saver.save(sess, variables_path, write_meta_graph=False, write_state=False)
# Export the meta graph def.
# The graph almost certainly previously contained at least one Saver, and
# possibly several (e.g. one for loading a pretrained embedding, and another
# for the model weights). Removing the preexisting ones was the
# motivation for the clear_extraneous_savers option, but it turns out that
# there are edge cases where that option breaks the graph. Until that is
# resolved, we just leave the option set to False for now.
# TODO(soergel): Reinstate clear_extraneous_savers=True when possible.
meta_graph_def = saver.export_meta_graph(
clear_devices=clear_devices, strip_default_attrs=strip_default_attrs)
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
# Mark this instance of SavedModel as having saved variables, such that
# subsequent attempts to save variables will fail.
self._has_saved_variables = True
add_meta_graph.__doc__ = _SavedModelBuilder.add_meta_graph.__doc__.replace(
"assets_list", "assets_collection")
add_meta_graph_and_variables.__doc__ = \
_SavedModelBuilder.add_meta_graph_and_variables.__doc__.replace(
"assets_list", "assets_collection")
def _maybe_save_assets(write_fn, assets_to_add=None):
"""Saves assets to the meta graph.
Args:
write_fn: A function callback that writes assets into meta graph.
assets_to_add: The list where the asset paths are set up.
Returns:
A dict mapping the asset basenames used for saving to the original full paths of the assets.
Raises:
ValueError: If an asset filepath tensor is invalid.
"""
# Map of target file names to original filenames
asset_filename_map = {}
if assets_to_add is None:
tf_logging.info("No assets to save.")
return asset_filename_map
# Iterate over the supplied assets, build the `AssetFile` proto and add them
# to the meta graph.
for asset_tensor in assets_to_add:
asset_source_filepath = _asset_path_from_tensor(asset_tensor)
if not asset_source_filepath:
raise ValueError(f"Asset filepath tensor {asset_tensor} in is invalid.")
asset_filename = get_asset_filename_to_add(
asset_source_filepath, asset_filename_map)
# Call the passed-in function that builds AssetFileDef proto and adds it
# to either the collection or asset_file_def field of the meta graph.
# Note that this should be done even when the file is a duplicate of an
# already-added file, as the tensor reference should still exist.
write_fn(asset_filename, asset_tensor)
# In the cases where we are adding a duplicate, this will result in the
# last of the filepaths being the one used for copying the file to the
# SavedModel. Since the files in question are the same, it doesn't matter
# either way.
asset_filename_map[asset_filename] = asset_source_filepath
tf_logging.info("Assets added to graph.")
return asset_filename_map
def get_asset_filename_to_add(asset_filepath, asset_filename_map):
"""Get a unique basename to add to the SavedModel if this file is unseen.
Assets come from users as full paths, and we save them out to the
SavedModel as basenames. In some cases, the basenames collide. Here,
we dedupe asset basenames by first checking if the file is the same,
and, if different, generate and return an index-suffixed basename
that can be used to add the asset to the SavedModel.
Args:
asset_filepath: the full path to the asset that is being saved
asset_filename_map: a dict mapping filenames used for saving the asset in
the SavedModel to the full paths from which the filenames were derived.
Returns:
Uniquified filename string if the file is not a duplicate, or the original
filename if the file has already been seen and saved.
"""
asset_filename = os.path.basename(asset_filepath)
if asset_filename not in asset_filename_map:
# This is an unseen asset. Safe to add.
return asset_filename
other_asset_filepath = asset_filename_map[asset_filename]
if other_asset_filepath == asset_filepath:
# This is the same file, stored twice in the list. No need
# to make unique.
return asset_filename
# Else, asset_filename is in the map, and the filepath is different. Dedupe.
if not file_io.filecmp(asset_filepath, other_asset_filepath):
# Files are different; dedupe filenames.
return _get_unique_asset_filename(asset_filename, asset_filename_map)
# Files are the same; don't make unique.
return asset_filename
def _get_unique_asset_filename(asset_filename, asset_filename_map):
i = 1
unique_filename = asset_filename
while unique_filename in asset_filename_map:
unique_filename = compat.as_bytes("_").join(
[compat.as_bytes(asset_filename), compat.as_bytes(str(i))])
i += 1
return unique_filename
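# Illustrative sketch (not part of the original module): when two distinct files
# share a basename, the second one receives an index-suffixed name. The paths
# below are hypothetical.
def _demo_unique_asset_filename():
  already_added = {"vocab.txt": "/data/model_a/vocab.txt"}
  # "vocab.txt" collides with an already-added asset, so a suffix is appended.
  return _get_unique_asset_filename("vocab.txt", already_added)  # -> b"vocab.txt_1"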
def _asset_path_from_tensor(path_tensor):
"""Returns the filepath value stored in constant `path_tensor`.
Args:
path_tensor: Tensor of a file-path.
Returns:
The string value i.e. path of the tensor, if valid.
Raises:
TypeError if tensor does not match expected op type, dtype or value.
"""
if not isinstance(path_tensor, ops.Tensor):
raise TypeError(f"Asset path tensor {path_tensor} must be a Tensor.")
if path_tensor.op.type != "Const":
raise TypeError(f"Asset path tensor {path_tensor} must be of type constant."
f"Has type {path_tensor.op.type} instead.")
if path_tensor.dtype != dtypes.string:
raise TypeError(f"Asset path tensor {path_tensor}` must be of dtype string."
f"Has type {path_tensor.dtype} instead.")
str_values = path_tensor.op.get_attr("value").string_val
if len(str_values) != 1:
raise TypeError(f"Asset path tensor {path_tensor} must be a scalar.")
return str_values[0]
def _add_asset_to_metagraph(meta_graph_def, asset_filename, asset_tensor):
"""Builds an asset proto and adds it to the meta graph def.
Args:
meta_graph_def: The meta graph def to which the asset will be added.
asset_filename: The filename of the asset to be added.
asset_tensor: The asset tensor used to populate the tensor info of the asset
proto.
"""
asset_proto = meta_graph_def.asset_file_def.add()
asset_proto.filename = asset_filename
asset_proto.tensor_info.name = asset_tensor.name
def copy_assets_to_destination_dir(asset_filename_map, destination_dir):
"""Copy all assets from source path to destination path."""
assets_destination_dir = saved_model_utils.get_or_create_assets_dir(
destination_dir)
# Copy each asset from source path to destination path.
for asset_basename, asset_source_filepath in asset_filename_map.items():
asset_destination_filepath = file_io.join(
compat.as_bytes(assets_destination_dir),
compat.as_bytes(asset_basename))
# Only copy the asset file to the destination if it does not already
# exist. This is to ensure that an asset with the same name defined as
# part of multiple graphs is only copied the first time.
if not file_io.file_exists(asset_destination_filepath):
file_io.copy(asset_source_filepath, asset_destination_filepath)
tf_logging.info("Assets written to: %s",
compat.as_text(assets_destination_dir))
def _add_asset_to_collection(asset_filename, asset_tensor):
"""Builds an asset proto and adds it to the asset collection of the graph.
Args:
asset_filename: The filename of the asset to be added.
asset_tensor: The asset tensor used to populate the tensor info of the
asset proto.
"""
asset_proto = meta_graph_pb2.AssetFileDef()
asset_proto.filename = asset_filename
asset_proto.tensor_info.name = asset_tensor.name
asset_any_proto = Any()
asset_any_proto.Pack(asset_proto)
ops.add_to_collection(constants.ASSETS_KEY, asset_any_proto)
def _add_op_to_signature_def_map(signature_def_map, op, key):
if op is not None:
signature_def_map[key] = signature_def_utils.op_signature_def(op, key)
import json
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from ..exceptions import JSONRPCInvalidRequestException
from ..jsonrpc1 import (
JSONRPC10Request,
JSONRPC10Response,
)
class TestJSONRPC10Request(unittest.TestCase):
""" Test JSONRPC10Request functionality."""
def setUp(self):
self.request_params = {
"method": "add",
"params": [1, 2],
"_id": 1,
}
def test_correct_init(self):
""" Test object is created."""
JSONRPC10Request(**self.request_params)
def test_validation_incorrect_no_parameters(self):
with self.assertRaises(ValueError):
JSONRPC10Request()
def test_method_validation_str(self):
self.request_params.update({"method": "add"})
JSONRPC10Request(**self.request_params)
def test_method_validation_not_str(self):
self.request_params.update({"method": []})
with self.assertRaises(ValueError):
JSONRPC10Request(**self.request_params)
self.request_params.update({"method": {}})
with self.assertRaises(ValueError):
JSONRPC10Request(**self.request_params)
self.request_params.update({"method": None})
with self.assertRaises(ValueError):
JSONRPC10Request(**self.request_params)
def test_params_validation_list(self):
self.request_params.update({"params": []})
JSONRPC10Request(**self.request_params)
self.request_params.update({"params": [0]})
JSONRPC10Request(**self.request_params)
def test_params_validation_tuple(self):
self.request_params.update({"params": ()})
JSONRPC10Request(**self.request_params)
self.request_params.update({"params": tuple([0])})
JSONRPC10Request(**self.request_params)
def test_params_validation_dict(self):
self.request_params.update({"params": {}})
with self.assertRaises(ValueError):
JSONRPC10Request(**self.request_params)
self.request_params.update({"params": {"a": 0}})
with self.assertRaises(ValueError):
JSONRPC10Request(**self.request_params)
def test_params_validation_none(self):
self.request_params.update({"params": None})
with self.assertRaises(ValueError):
JSONRPC10Request(**self.request_params)
def test_params_validation_incorrect(self):
self.request_params.update({"params": "str"})
with self.assertRaises(ValueError):
JSONRPC10Request(**self.request_params)
def test_request_args(self):
self.assertEqual(JSONRPC10Request("add", []).args, ())
self.assertEqual(JSONRPC10Request("add", [1, 2]).args, (1, 2))
def test_id_validation_string(self):
self.request_params.update({"_id": "id"})
JSONRPC10Request(**self.request_params)
def test_id_validation_int(self):
self.request_params.update({"_id": 0})
JSONRPC10Request(**self.request_params)
def test_id_validation_null(self):
self.request_params.update({"_id": "null"})
JSONRPC10Request(**self.request_params)
def test_id_validation_none(self):
self.request_params.update({"_id": None})
JSONRPC10Request(**self.request_params)
def test_id_validation_float(self):
self.request_params.update({"_id": 0.1})
JSONRPC10Request(**self.request_params)
def test_id_validation_list_tuple(self):
self.request_params.update({"_id": []})
JSONRPC10Request(**self.request_params)
self.request_params.update({"_id": ()})
JSONRPC10Request(**self.request_params)
def test_id_validation_default_id_none(self):
del self.request_params["_id"]
JSONRPC10Request(**self.request_params)
def test_data_method_1(self):
r = JSONRPC10Request("add", [])
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [],
"id": None,
})
def test_data_method_2(self):
r = JSONRPC10Request(method="add", params=[])
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [],
"id": None,
})
def test_data_params_1(self):
r = JSONRPC10Request("add", params=[], _id=None)
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [],
"id": None,
})
def test_data_params_2(self):
r = JSONRPC10Request("add", ())
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [],
"id": None,
})
def test_data_params_3(self):
r = JSONRPC10Request("add", (1, 2))
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [1, 2],
"id": None,
})
def test_data_id_1(self):
r = JSONRPC10Request("add", [], _id="null")
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [],
"id": "null",
})
def test_data_id_1_notification(self):
r = JSONRPC10Request("add", [], _id="null", is_notification=True)
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [],
"id": None,
})
def test_data_id_2(self):
r = JSONRPC10Request("add", [], _id=None)
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [],
"id": None,
})
def test_data_id_2_notification(self):
r = JSONRPC10Request("add", [], _id=None, is_notification=True)
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [],
"id": None,
})
def test_data_id_3(self):
r = JSONRPC10Request("add", [], _id="id")
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [],
"id": "id",
})
def test_data_id_3_notification(self):
r = JSONRPC10Request("add", [], _id="id", is_notification=True)
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [],
"id": None,
})
def test_data_id_4(self):
r = JSONRPC10Request("add", [], _id=0)
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [],
"id": 0,
})
def test_data_id_4_notification(self):
r = JSONRPC10Request("add", [], _id=0, is_notification=True)
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"method": "add",
"params": [],
"id": None,
})
def test_is_notification(self):
r = JSONRPC10Request("add", [])
self.assertTrue(r.is_notification)
r = JSONRPC10Request("add", [], _id=None)
self.assertTrue(r.is_notification)
r = JSONRPC10Request("add", [], _id="null")
self.assertFalse(r.is_notification)
r = JSONRPC10Request("add", [], _id=0)
self.assertFalse(r.is_notification)
r = JSONRPC10Request("add", [], is_notification=True)
self.assertTrue(r.is_notification)
r = JSONRPC10Request("add", [], is_notification=True, _id=None)
self.assertTrue(r.is_notification)
r = JSONRPC10Request("add", [], is_notification=True, _id=0)
self.assertTrue(r.is_notification)
def test_set_unset_notification_keep_id(self):
r = JSONRPC10Request("add", [], is_notification=True, _id=0)
self.assertTrue(r.is_notification)
self.assertEqual(r.data["id"], None)
r.is_notification = False
self.assertFalse(r.is_notification)
self.assertEqual(r.data["id"], 0)
def test_error_if_notification_true_but_id_none(self):
r = JSONRPC10Request("add", [], is_notification=True, _id=None)
with self.assertRaises(ValueError):
r.is_notification = False
def test_from_json_invalid_request_method(self):
str_json = json.dumps({
"params": [1, 2],
"id": 0,
})
with self.assertRaises(JSONRPCInvalidRequestException):
JSONRPC10Request.from_json(str_json)
def test_from_json_invalid_request_params(self):
str_json = json.dumps({
"method": "add",
"id": 0,
})
with self.assertRaises(JSONRPCInvalidRequestException):
JSONRPC10Request.from_json(str_json)
def test_from_json_invalid_request_id(self):
str_json = json.dumps({
"method": "add",
"params": [1, 2],
})
with self.assertRaises(JSONRPCInvalidRequestException):
JSONRPC10Request.from_json(str_json)
def test_from_json_invalid_request_extra_data(self):
str_json = json.dumps({
"method": "add",
"params": [1, 2],
"id": 0,
"is_notification": True,
})
with self.assertRaises(JSONRPCInvalidRequestException):
JSONRPC10Request.from_json(str_json)
def test_from_json_request(self):
str_json = json.dumps({
"method": "add",
"params": [1, 2],
"id": 0,
})
request = JSONRPC10Request.from_json(str_json)
self.assertTrue(isinstance(request, JSONRPC10Request))
self.assertEqual(request.method, "add")
self.assertEqual(request.params, [1, 2])
self.assertEqual(request._id, 0)
self.assertFalse(request.is_notification)
def test_from_json_request_notification(self):
str_json = json.dumps({
"method": "add",
"params": [1, 2],
"id": None,
})
request = JSONRPC10Request.from_json(str_json)
self.assertTrue(isinstance(request, JSONRPC10Request))
self.assertEqual(request.method, "add")
self.assertEqual(request.params, [1, 2])
self.assertEqual(request._id, None)
self.assertTrue(request.is_notification)
def test_from_json_string_not_dict(self):
with self.assertRaises(ValueError):
JSONRPC10Request.from_json("[]")
with self.assertRaises(ValueError):
JSONRPC10Request.from_json("0")
def test_data_setter(self):
request = JSONRPC10Request(**self.request_params)
with self.assertRaises(ValueError):
request.data = []
with self.assertRaises(ValueError):
request.data = ""
with self.assertRaises(ValueError):
request.data = None
class TestJSONRPC10Response(unittest.TestCase):
""" Test JSONRPC10Response functionality."""
def setUp(self):
self.response_success_params = {
"result": "",
"error": None,
"_id": 1,
}
self.response_error_params = {
"result": None,
"error": {
"code": 1,
"message": "error",
},
"_id": 1,
}
def test_correct_init(self):
""" Test object is created."""
JSONRPC10Response(**self.response_success_params)
JSONRPC10Response(**self.response_error_params)
def test_validation_incorrect_no_parameters(self):
with self.assertRaises(ValueError):
JSONRPC10Response()
def test_validation_success_incorrect(self):
wrong_params = self.response_success_params
del wrong_params["_id"]
with self.assertRaises(ValueError):
JSONRPC10Response(**wrong_params)
def test_validation_error_incorrect(self):
wrong_params = self.response_error_params
del wrong_params["_id"]
with self.assertRaises(ValueError):
JSONRPC10Response(**wrong_params)
def test_validation_incorrect_result_and_error(self):
with self.assertRaises(ValueError):
JSONRPC10Response(result="", error="", _id=0)
response = JSONRPC10Response(error="", _id=0)
with self.assertRaises(ValueError):
response.result = ""
def test_data(self):
r = JSONRPC10Response(result="", _id=0)
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"result": "",
"error": None,
"id": 0,
})
def test_data_setter(self):
response = JSONRPC10Response(**self.response_success_params)
with self.assertRaises(ValueError):
response.data = []
with self.assertRaises(ValueError):
response.data = ""
with self.assertRaises(ValueError):
response.data = None
def test_validation_id(self):
response = JSONRPC10Response(**self.response_success_params)
self.assertEqual(response._id, self.response_success_params["_id"])
"""Coverage controllers for use by pytest-cov and nose-cov."""
import os
import random
import socket
import sys
import coverage
class CovController(object):
"""Base class for different plugin implementations."""
def __init__(self, cov_source, cov_report, cov_config, config=None, nodeid=None):
"""Get some common config used by multiple derived classes."""
self.cov_source = cov_source
self.cov_report = cov_report
self.cov_config = cov_config
self.config = config
self.nodeid = nodeid
self.cov = None
self.node_descs = set()
self.failed_slaves = []
self.topdir = os.getcwd()
def set_env(self):
"""Put info about coverage into the env so that subprocesses can activate coverage."""
if self.cov_source is None:
os.environ['COV_CORE_SOURCE'] = ''
else:
os.environ['COV_CORE_SOURCE'] = os.pathsep.join(self.cov_source)
os.environ['COV_CORE_CONFIG'] = self.cov_config
@staticmethod
def unset_env():
"""Remove coverage info from env."""
os.environ.pop('COV_CORE_SOURCE', None)
os.environ.pop('COV_CORE_CONFIG', None)
@staticmethod
def get_node_desc(platform, version_info):
"""Return a description of this node."""
return 'platform %s, python %s' % (platform, '%s.%s.%s-%s-%s' % version_info[:5])
@staticmethod
def sep(stream, s, txt):
if hasattr(stream, 'sep'):
stream.sep(s, txt)
else:
sep_total = max((70 - 2 - len(txt)), 2)
sep_len = sep_total // 2
sep_extra = sep_total % 2
out = '%s %s %s\n' % (s * sep_len, txt, s * (sep_len + sep_extra))
stream.write(out)
def summary(self, stream):
"""Produce coverage reports."""
total = 0
if not self.cov_report:
with open(os.devnull, 'w') as null:
total = self.cov.report(show_missing=True, ignore_errors=True, file=null)
return total
# Output coverage section header.
if len(self.node_descs) == 1:
self.sep(stream, '-', 'coverage: %s' % ''.join(self.node_descs))
else:
self.sep(stream, '-', 'coverage')
for node_desc in sorted(self.node_descs):
self.sep(stream, ' ', '%s' % node_desc)
# Produce terminal report if wanted.
if 'term' in self.cov_report or 'term-missing' in self.cov_report:
show_missing = ('term-missing' in self.cov_report) or None
total = self.cov.report(show_missing=show_missing, ignore_errors=True, file=stream)
# Produce annotated source code report if wanted.
if 'annotate' in self.cov_report:
total = self.cov.annotate(ignore_errors=True)
stream.write('Coverage annotated source written next to source\n')
# Produce html report if wanted.
if 'html' in self.cov_report:
total = self.cov.html_report(ignore_errors=True)
stream.write('Coverage HTML written to dir %s\n' % self.cov.config.html_dir)
# Produce xml report if wanted.
if 'xml' in self.cov_report:
total = self.cov.xml_report(ignore_errors=True)
stream.write('Coverage XML written to file %s\n' % self.cov.config.xml_output)
# Report on any failed slaves.
if self.failed_slaves:
self.sep(stream, '-', 'coverage: failed slaves')
stream.write('The following slaves failed to return coverage data, '
'ensure that pytest-cov is installed on these slaves.\n')
for node in self.failed_slaves:
stream.write('%s\n' % node.gateway.id)
return total
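# Illustrative sketch (not part of the original module): when the stream has no
# `sep` method of its own, CovController.sep pads the title to roughly 70 columns.
def _demo_sep_fallback(stream=sys.stdout):
    # Writes 30 dashes, ' coverage ', 30 dashes and a newline (70 characters wide).
    CovController.sep(stream, '-', 'coverage')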
class Central(CovController):
"""Implementation for centralised operation."""
def start(self):
"""Erase any previous coverage data and start coverage."""
self.cov = coverage.coverage(source=self.cov_source,
config_file=self.cov_config)
self.cov.erase()
self.cov.start()
self.set_env()
def finish(self):
"""Stop coverage, save data to file and set the list of coverage objects to report on."""
self.unset_env()
self.cov.stop()
self.cov.combine()
self.cov.save()
node_desc = self.get_node_desc(sys.platform, sys.version_info)
self.node_descs.add(node_desc)
class DistMaster(CovController):
"""Implementation for distributed master."""
def start(self):
"""Ensure coverage rc file rsynced if appropriate."""
if self.cov_config and os.path.exists(self.cov_config):
self.config.option.rsyncdir.append(self.cov_config)
self.cov = coverage.coverage(source=self.cov_source,
config_file=self.cov_config)
self.cov.erase()
self.cov.start()
self.cov.config.paths['source'] = [self.topdir]
def configure_node(self, node):
"""Slaves need to know if they are collocated and what files have moved."""
node.slaveinput['cov_master_host'] = socket.gethostname()
node.slaveinput['cov_master_topdir'] = self.topdir
node.slaveinput['cov_master_rsync_roots'] = [str(root) for root in node.nodemanager.roots]
def testnodedown(self, node, error):
"""Collect data file name from slave."""
# If slave doesn't return any data then it is likely that this
# plugin didn't get activated on the slave side.
if not (hasattr(node, 'slaveoutput') and 'cov_slave_node_id' in node.slaveoutput):
self.failed_slaves.append(node)
return
# If the slave is not collocated then we must save the data file
# that it returns to us.
if 'cov_slave_lines' in node.slaveoutput:
data_suffix = '%s.%s.%06d.%s' % (
socket.gethostname(), os.getpid(),
random.randint(0, 999999),
node.slaveoutput['cov_slave_node_id']
)
cov = coverage.coverage(source=self.cov_source,
data_suffix=data_suffix,
config_file=self.cov_config)
cov.start()
cov.data.lines = node.slaveoutput['cov_slave_lines']
cov.data.arcs = node.slaveoutput['cov_slave_arcs']
cov.stop()
cov.save()
path = node.slaveoutput['cov_slave_path']
self.cov.config.paths['source'].append(path)
# Record the slave types that contribute to the data file.
rinfo = node.gateway._rinfo()
node_desc = self.get_node_desc(rinfo.platform, rinfo.version_info)
self.node_descs.add(node_desc)
def finish(self):
"""Combines coverage data and sets the list of coverage objects to report on."""
# Combine all the suffix files into the data file.
self.cov.stop()
self.cov.combine()
self.cov.save()
class DistSlave(CovController):
"""Implementation for distributed slaves."""
def start(self):
"""Determine what data file and suffix to contribute to and start coverage."""
# Determine whether we are collocated with master.
self.is_collocated = (socket.gethostname() == self.config.slaveinput['cov_master_host'] and
self.topdir == self.config.slaveinput['cov_master_topdir'])
# If we are not collocated then rewrite master paths to slave paths.
if not self.is_collocated:
master_topdir = self.config.slaveinput['cov_master_topdir']
slave_topdir = self.topdir
self.cov_source = [source.replace(master_topdir, slave_topdir)
for source in self.cov_source]
self.cov_config = self.cov_config.replace(master_topdir, slave_topdir)
# Erase any previous data and start coverage.
self.cov = coverage.coverage(source=self.cov_source,
data_suffix=True,
config_file=self.cov_config)
self.cov.erase()
self.cov.start()
self.set_env()
def finish(self):
"""Stop coverage and send relevant info back to the master."""
self.unset_env()
self.cov.stop()
if self.is_collocated:
# We don't combine data if we're collocated - the .combine() call
# is not atomic and we could hit race conditions.
# The data is going to be combined on the master instead.
self.cov.save()
# If we are collocated then just inform the master of our
# data file to indicate that we have finished.
self.config.slaveoutput['cov_slave_node_id'] = self.nodeid
else:
self.cov.combine()
self.cov.save()
# If we are not collocated then add the current path
# and coverage data to the output so we can combine
# it on the master node.
# Send all the data to the master over the channel.
self.config.slaveoutput['cov_slave_path'] = self.topdir
self.config.slaveoutput['cov_slave_node_id'] = self.nodeid
self.config.slaveoutput['cov_slave_lines'] = self.cov.data.lines
self.config.slaveoutput['cov_slave_arcs'] = self.cov.data.arcs
def summary(self, stream):
"""Only the master reports so do nothing."""
pass
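# Hedged sketch (illustrative only): the payload a non-collocated slave sends
# back, matching the keys read in DistMaster.testnodedown above. The
# ``cov``, ``topdir`` and ``nodeid`` arguments are placeholders.
def _example_slaveoutput_payload(cov, topdir, nodeid):
    return {
        'cov_slave_path': topdir,
        'cov_slave_node_id': nodeid,
        'cov_slave_lines': cov.data.lines,
        'cov_slave_arcs': cov.data.arcs,
    }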
|
|
import difflib
from test.support import run_unittest, findfile
import unittest
import doctest
import sys
class TestWithAscii(unittest.TestCase):
def test_one_insert(self):
sm = difflib.SequenceMatcher(None, 'b' * 100, 'a' + 'b' * 100)
self.assertAlmostEqual(sm.ratio(), 0.995, places=3)
self.assertEqual(list(sm.get_opcodes()),
[ ('insert', 0, 0, 0, 1),
('equal', 0, 100, 1, 101)])
self.assertEqual(sm.bpopular, set())
sm = difflib.SequenceMatcher(None, 'b' * 100, 'b' * 50 + 'a' + 'b' * 50)
self.assertAlmostEqual(sm.ratio(), 0.995, places=3)
self.assertEqual(list(sm.get_opcodes()),
[ ('equal', 0, 50, 0, 50),
('insert', 50, 50, 50, 51),
('equal', 50, 100, 51, 101)])
self.assertEqual(sm.bpopular, set())
def test_one_delete(self):
sm = difflib.SequenceMatcher(None, 'a' * 40 + 'c' + 'b' * 40, 'a' * 40 + 'b' * 40)
self.assertAlmostEqual(sm.ratio(), 0.994, places=3)
self.assertEqual(list(sm.get_opcodes()),
[ ('equal', 0, 40, 0, 40),
('delete', 40, 41, 40, 40),
('equal', 41, 81, 40, 80)])
def test_bjunk(self):
sm = difflib.SequenceMatcher(isjunk=lambda x: x == ' ',
a='a' * 40 + 'b' * 40, b='a' * 44 + 'b' * 40)
self.assertEqual(sm.bjunk, set())
sm = difflib.SequenceMatcher(isjunk=lambda x: x == ' ',
a='a' * 40 + 'b' * 40, b='a' * 44 + 'b' * 40 + ' ' * 20)
self.assertEqual(sm.bjunk, {' '})
sm = difflib.SequenceMatcher(isjunk=lambda x: x in [' ', 'b'],
a='a' * 40 + 'b' * 40, b='a' * 44 + 'b' * 40 + ' ' * 20)
self.assertEqual(sm.bjunk, {' ', 'b'})
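# A small worked example (ours, not part of the test suite) of where the 0.995
# above comes from: ratio() returns 2.0 * M / T, where M is the number of
# matched elements and T is the combined length of both sequences.
def _example_ratio_arithmetic():
    matches, total = 100, 100 + 101   # 'b' * 100 vs 'a' + 'b' * 100
    return 2.0 * matches / total      # ~0.99502, which rounds to 0.995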
class TestAutojunk(unittest.TestCase):
"""Tests for the autojunk parameter added in 2.7"""
def test_one_insert_homogenous_sequence(self):
# By default autojunk=True and the heuristic kicks in for a sequence
# of length 200+
seq1 = 'b' * 200
seq2 = 'a' + 'b' * 200
sm = difflib.SequenceMatcher(None, seq1, seq2)
self.assertAlmostEqual(sm.ratio(), 0, places=3)
self.assertEqual(sm.bpopular, {'b'})
# Now turn the heuristic off
sm = difflib.SequenceMatcher(None, seq1, seq2, autojunk=False)
self.assertAlmostEqual(sm.ratio(), 0.9975, places=3)
self.assertEqual(sm.bpopular, set())
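# For reference, the heuristic being toggled above: when autojunk is True and
# the second sequence has 200 or more elements, any element accounting for
# more than 1% of it is treated as "popular" and ignored for matching, which
# is why 'b' ends up in sm.bpopular and the reported ratio collapses to 0.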
class TestSFbugs(unittest.TestCase):
def test_ratio_for_null_seqn(self):
# Check clearing of SF bug 763023
s = difflib.SequenceMatcher(None, [], [])
self.assertEqual(s.ratio(), 1)
self.assertEqual(s.quick_ratio(), 1)
self.assertEqual(s.real_quick_ratio(), 1)
def test_comparing_empty_lists(self):
# Check fix for bug #979794
group_gen = difflib.SequenceMatcher(None, [], []).get_grouped_opcodes()
self.assertRaises(StopIteration, next, group_gen)
diff_gen = difflib.unified_diff([], [])
self.assertRaises(StopIteration, next, diff_gen)
def test_added_tab_hint(self):
# Check fix for bug #1488943
diff = list(difflib.Differ().compare(["\tI am a buggy"],["\t\tI am a bug"]))
self.assertEqual("- \tI am a buggy", diff[0])
self.assertEqual("? --\n", diff[1])
self.assertEqual("+ \t\tI am a bug", diff[2])
self.assertEqual("? +\n", diff[3])
patch914575_from1 = """
1. Beautiful is beTTer than ugly.
2. Explicit is better than implicit.
3. Simple is better than complex.
4. Complex is better than complicated.
"""
patch914575_to1 = """
1. Beautiful is better than ugly.
3. Simple is better than complex.
4. Complicated is better than complex.
5. Flat is better than nested.
"""
patch914575_from2 = """
\t\tLine 1: preceeded by from:[tt] to:[ssss]
\t\tLine 2: preceeded by from:[sstt] to:[sssst]
\t \tLine 3: preceeded by from:[sstst] to:[ssssss]
Line 4: \thas from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end\t
"""
patch914575_to2 = """
Line 1: preceeded by from:[tt] to:[ssss]
\tLine 2: preceeded by from:[sstt] to:[sssst]
Line 3: preceeded by from:[sstst] to:[ssssss]
Line 4: has from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end
"""
patch914575_from3 = """line 0
1234567890123456789012345689012345
line 1
line 2
line 3
line 4 changed
line 5 changed
line 6 changed
line 7
line 8 subtracted
line 9
1234567890123456789012345689012345
short line
just fits in!!
just fits in two lines yup!!
the end"""
patch914575_to3 = """line 0
1234567890123456789012345689012345
line 1
line 2 added
line 3
line 4 chanGEd
line 5a chanGed
line 6a changEd
line 7
line 8
line 9
1234567890
another long line that needs to be wrapped
just fitS in!!
just fits in two lineS yup!!
the end"""
class TestSFpatches(unittest.TestCase):
def test_html_diff(self):
# Check SF patch 914575 for generating HTML differences
f1a = ((patch914575_from1 + '123\n'*10)*3)
t1a = (patch914575_to1 + '123\n'*10)*3
f1b = '456\n'*10 + f1a
t1b = '456\n'*10 + t1a
f1a = f1a.splitlines()
t1a = t1a.splitlines()
f1b = f1b.splitlines()
t1b = t1b.splitlines()
f2 = patch914575_from2.splitlines()
t2 = patch914575_to2.splitlines()
f3 = patch914575_from3
t3 = patch914575_to3
i = difflib.HtmlDiff()
j = difflib.HtmlDiff(tabsize=2)
k = difflib.HtmlDiff(wrapcolumn=14)
full = i.make_file(f1a,t1a,'from','to',context=False,numlines=5)
tables = '\n'.join(
[
'<h2>Context (first diff within numlines=5(default))</h2>',
i.make_table(f1a,t1a,'from','to',context=True),
'<h2>Context (first diff after numlines=5(default))</h2>',
i.make_table(f1b,t1b,'from','to',context=True),
'<h2>Context (numlines=6)</h2>',
i.make_table(f1a,t1a,'from','to',context=True,numlines=6),
'<h2>Context (numlines=0)</h2>',
i.make_table(f1a,t1a,'from','to',context=True,numlines=0),
'<h2>Same Context</h2>',
i.make_table(f1a,f1a,'from','to',context=True),
'<h2>Same Full</h2>',
i.make_table(f1a,f1a,'from','to',context=False),
'<h2>Empty Context</h2>',
i.make_table([],[],'from','to',context=True),
'<h2>Empty Full</h2>',
i.make_table([],[],'from','to',context=False),
'<h2>tabsize=2</h2>',
j.make_table(f2,t2),
'<h2>tabsize=default</h2>',
i.make_table(f2,t2),
'<h2>Context (wrapcolumn=14,numlines=0)</h2>',
k.make_table(f3.splitlines(),t3.splitlines(),context=True,numlines=0),
'<h2>wrapcolumn=14,splitlines()</h2>',
k.make_table(f3.splitlines(),t3.splitlines()),
'<h2>wrapcolumn=14,splitlines(True)</h2>',
k.make_table(f3.splitlines(True),t3.splitlines(True)),
])
actual = full.replace('</body>','\n%s\n</body>' % tables)
# temporarily uncomment next two lines to baseline this test
#with open('test_difflib_expect.html','w') as fp:
# fp.write(actual)
with open(findfile('test_difflib_expect.html')) as fp:
self.assertEqual(actual, fp.read())
def test_recursion_limit(self):
# Check if the problem described in patch #1413711 exists.
limit = sys.getrecursionlimit()
old = [(i%2 and "K:%d" or "V:A:%d") % i for i in range(limit*2)]
new = [(i%2 and "K:%d" or "V:B:%d") % i for i in range(limit*2)]
difflib.SequenceMatcher(None, old, new).get_opcodes()
class TestOutputFormat(unittest.TestCase):
def test_tab_delimiter(self):
args = ['one', 'two', 'Original', 'Current',
'2005-01-26 23:30:50', '2010-04-02 10:20:52']
ud = difflib.unified_diff(*args, lineterm='')
self.assertEqual(list(ud)[0:2], [
"--- Original\t2005-01-26 23:30:50",
"+++ Current\t2010-04-02 10:20:52"])
cd = difflib.context_diff(*args, lineterm='')
self.assertEqual(list(cd)[0:2], [
"*** Original\t2005-01-26 23:30:50",
"--- Current\t2010-04-02 10:20:52"])
def test_no_trailing_tab_on_empty_filedate(self):
args = ['one', 'two', 'Original', 'Current']
ud = difflib.unified_diff(*args, lineterm='')
self.assertEqual(list(ud)[0:2], ["--- Original", "+++ Current"])
cd = difflib.context_diff(*args, lineterm='')
self.assertEqual(list(cd)[0:2], ["*** Original", "--- Current"])
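# Hedged usage sketch (ours, not part of the test suite): a minimal
# unified_diff call with body lines, complementing the header-only checks in
# TestOutputFormat above. The input lists are illustrative.
def _example_unified_diff():
    a = ['one\n', 'two\n', 'three\n']
    b = ['one\n', 'three\n', 'four\n']
    return list(difflib.unified_diff(a, b, fromfile='Original', tofile='Current'))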
def test_main():
difflib.HtmlDiff._default_prefix = 0
Doctests = doctest.DocTestSuite(difflib)
run_unittest(
TestWithAscii, TestAutojunk, TestSFpatches, TestSFbugs,
TestOutputFormat, Doctests)
if __name__ == '__main__':
test_main()
|
|
# -*- coding: utf-8 -*-
r"""
werkzeug.contrib.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains some helper classes that help one to add session
support to a python WSGI application. For full client-side session
storage see :mod:`~werkzeug.contrib.securecookie` which implements a
secure, client-side session storage.
Application Integration
=======================
::
from werkzeug.contrib.sessions import SessionMiddleware, \
FilesystemSessionStore
app = SessionMiddleware(app, FilesystemSessionStore())
The current session will then appear in the WSGI environment as
`werkzeug.session`. However, it's recommended not to use the middleware
but the stores directly in the application; for very simple
scripts a middleware for sessions may be sufficient.
This module does not implement methods or ways to check if a session is
expired. That should be done by a cron job and is storage specific. For
example, to prune unused filesystem sessions one could check the modified
time of the files. If sessions are stored in the database, the new()
method should add an expiration timestamp for the session.
For better flexibility it's recommended to not use the middleware but the
store and session object directly in the application dispatching::
session_store = FilesystemSessionStore()
def application(environ, start_response):
request = Request(environ)
sid = request.cookie.get('cookie_name')
if sid is None:
request.session = session_store.new()
else:
request.session = session_store.get(sid)
response = get_the_response_object(request)
if request.session.should_save:
session_store.save(request.session)
response.set_cookie('cookie_name', request.session.sid)
return response(environ, start_response)
:copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
from os import path
from time import time
from random import random
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
from cPickle import dump, load, HIGHEST_PROTOCOL
from werkzeug import ClosingIterator, dump_cookie, parse_cookie, CallbackDict
_sha1_re = re.compile(r'^[a-fA-F0-9]{40}$')
def _urandom():
if hasattr(os, 'urandom'):
return os.urandom(30)
return random()
def generate_key(salt=None):
return sha1('%s%s%s' % (salt, time(), _urandom())).hexdigest()
class ModificationTrackingDict(CallbackDict):
__slots__ = ('modified',)
def __init__(self, *args, **kwargs):
def on_update(self):
self.modified = True
self.modified = False
CallbackDict.__init__(self, on_update=on_update)
dict.update(self, *args, **kwargs)
def copy(self):
"""Create a flat copy of the dict."""
missing = object()
result = object.__new__(self.__class__)
for name in self.__slots__:
val = getattr(self, name, missing)
if val is not missing:
setattr(result, name, val)
return result
def __copy__(self):
return self.copy()
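# Hedged illustration (ours, not part of werkzeug): direct assignment flips
# ``modified``; mutating a nested structure does not, which is what the
# Session docstring below warns about.
def _example_modification_tracking():
    d = ModificationTrackingDict(nested=[])
    assert d.modified is False    # construction uses dict.update, not tracked
    d['nested'].append('x')       # mutating a contained object is not tracked
    assert d.modified is False
    d['answer'] = 42              # direct assignment goes through the callback
    assert d.modified is True
    return d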
class Session(ModificationTrackingDict):
"""Subclass of a dict that keeps track of direct object changes. Changes
in mutable structures are not tracked, for those you have to set
`modified` to `True` by hand.
"""
__slots__ = ModificationTrackingDict.__slots__ + ('sid', 'new')
def __init__(self, data, sid, new=False):
ModificationTrackingDict.__init__(self, data)
self.sid = sid
self.new = new
def __repr__(self):
return '<%s %s%s>' % (
self.__class__.__name__,
dict.__repr__(self),
self.should_save and '*' or ''
)
@property
def should_save(self):
"""True if the session should be saved."""
return self.modified or self.new
class SessionStore(object):
"""Baseclass for all session stores. The Werkzeug contrib module does not
implement any useful stores besides the filesystem store, application
developers are encouraged to create their own stores.
:param session_class: The session class to use. Defaults to
:class:`Session`.
"""
def __init__(self, session_class=None):
if session_class is None:
session_class = Session
self.session_class = session_class
def is_valid_key(self, key):
"""Check if a key has the correct format."""
return _sha1_re.match(key) is not None
def generate_key(self, salt=None):
"""Simple function that generates a new session key."""
return generate_key(salt)
def new(self):
"""Generate a new session."""
return self.session_class({}, self.generate_key(), True)
def save(self, session):
"""Save a session."""
def save_if_modified(self, session):
"""Save if a session class wants an update."""
if session.should_save:
self.save(session)
def delete(self, session):
"""Delete a session."""
def get(self, sid):
"""Get a session for this sid or a new session object. This method
has to check if the session key is valid and create a new session if
that wasn't the case.
"""
return self.session_class({}, sid, True)
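# Hedged sketch of a custom store, as the SessionStore docstring suggests:
# a purely in-memory store. The class name is ours, not part of werkzeug.
class _ExampleMemorySessionStore(SessionStore):
    def __init__(self, session_class=None):
        SessionStore.__init__(self, session_class)
        self._data = {}
    def save(self, session):
        self._data[session.sid] = dict(session)
    def delete(self, session):
        self._data.pop(session.sid, None)
    def get(self, sid):
        if not self.is_valid_key(sid) or sid not in self._data:
            return self.new()
        return self.session_class(self._data[sid], sid, False)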
class FilesystemSessionStore(SessionStore):
"""Simple example session store that saves sessions in the filesystem like
PHP does.
:param path: the path to the folder used for storing the sessions.
If not provided the default temporary directory is used.
:param filename_template: a string template used to give the session
a filename. ``%s`` is replaced with the
session id.
:param session_class: The session class to use. Defaults to
:class:`Session`.
"""
def __init__(self, path=None, filename_template='werkzeug_%s.sess',
session_class=None):
SessionStore.__init__(self, session_class)
if path is None:
from tempfile import gettempdir
path = gettempdir()
self.path = path
self.filename_template = filename_template
def get_session_filename(self, sid):
return path.join(self.path, self.filename_template % sid)
def save(self, session):
f = file(self.get_session_filename(session.sid), 'wb')
try:
dump(dict(session), f, HIGHEST_PROTOCOL)
finally:
f.close()
def delete(self, session):
fn = self.get_session_filename(session.sid)
try:
# Late import because Google Appengine won't allow os.unlink
from os import unlink
unlink(fn)
except OSError:
pass
def get(self, sid):
fn = self.get_session_filename(sid)
if not self.is_valid_key(sid) or not path.exists(fn):
return self.new()
else:
f = file(fn, 'rb')
try:
data = load(f)
finally:
f.close()
return self.session_class(data, sid, False)
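# Hedged usage sketch (ours): create, mutate, save and reload a session with
# the filesystem store. Files go to the default temporary directory.
def _example_filesystem_store_roundtrip():
    store = FilesystemSessionStore()
    session = store.new()
    session['visits'] = 1
    store.save_if_modified(session)    # should_save is True for new sessions
    return store.get(session.sid)      # a Session carrying the saved data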
class SessionMiddleware(object):
"""A simple middleware that puts the session object of a store provided
into the WSGI environ. It automatically sets cookies and restores
sessions.
However, a middleware is not the preferred solution because it won't be as
fast as sessions managed by the application itself, and it puts a key into
the WSGI environment that is only relevant to the application, which is
against the concept of WSGI.
The cookie parameters are the same as for the :func:`~werkzeug.dump_cookie`
function just prefixed with ``cookie_``. Additionally `max_age` is
called `cookie_age` and not `cookie_max_age` because of backwards
compatibility.
"""
def __init__(self, app, store, cookie_name='session_id',
cookie_age=None, cookie_expires=None, cookie_path='/',
cookie_domain=None, cookie_secure=None,
cookie_httponly=False, environ_key='werkzeug.session'):
self.app = app
self.store = store
self.cookie_name = cookie_name
self.cookie_age = cookie_age
self.cookie_expires = cookie_expires
self.cookie_path = cookie_path
self.cookie_domain = cookie_domain
self.cookie_secure = cookie_secure
self.cookie_httponly = cookie_httponly
self.environ_key = environ_key
def __call__(self, environ, start_response):
cookie = parse_cookie(environ.get('HTTP_COOKIE', ''))
sid = cookie.get(self.cookie_name, None)
if sid is None:
session = self.store.new()
else:
session = self.store.get(sid)
environ[self.environ_key] = session
def injecting_start_response(status, headers, exc_info=None):
if session.should_save:
self.store.save(session)
headers.append(('Set-Cookie', dump_cookie(self.cookie_name,
session.sid, self.cookie_age,
self.cookie_expires, self.cookie_path,
self.cookie_domain, self.cookie_secure,
self.cookie_httponly)))
return start_response(status, headers, exc_info)
return ClosingIterator(self.app(environ, injecting_start_response),
lambda: self.store.save_if_modified(session))
|
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import itertools
import copy
import functools
import inspect
import re
import discord.utils
from .core import Group, Command
from .errors import CommandError
__all__ = (
'Paginator',
'HelpCommand',
'DefaultHelpCommand',
'MinimalHelpCommand',
)
# help -> shows info of bot on top/bottom and lists subcommands
# help command -> shows detailed info of command
# help command <subcommand chain> -> same as above
# <description>
# <command signature with aliases>
# <long doc>
# Cog:
# <command> <shortdoc>
# <command> <shortdoc>
# Other Cog:
# <command> <shortdoc>
# No Category:
# <command> <shortdoc>
# Type <prefix>help command for more info on a command.
# You can also type <prefix>help category for more info on a category.
class Paginator:
"""A class that aids in paginating code blocks for Discord messages.
.. container:: operations
.. describe:: len(x)
Returns the total number of characters in the paginator.
Attributes
-----------
prefix: :class:`str`
The prefix inserted to every page. e.g. three backticks.
suffix: :class:`str`
The suffix appended at the end of every page. e.g. three backticks.
max_size: :class:`int`
The maximum number of codepoints allowed in a page.
linesep: :class:`str`
The character string inserted between lines. e.g. a newline character.
.. versionadded:: 1.7
"""
def __init__(self, prefix='```', suffix='```', max_size=2000, linesep='\n'):
self.prefix = prefix
self.suffix = suffix
self.max_size = max_size
self.linesep = linesep
self.clear()
def clear(self):
"""Clears the paginator to have no pages."""
if self.prefix is not None:
self._current_page = [self.prefix]
self._count = len(self.prefix) + self._linesep_len # prefix + newline
else:
self._current_page = []
self._count = 0
self._pages = []
@property
def _prefix_len(self):
return len(self.prefix) if self.prefix else 0
@property
def _suffix_len(self):
return len(self.suffix) if self.suffix else 0
@property
def _linesep_len(self):
return len(self.linesep)
def add_line(self, line='', *, empty=False):
"""Adds a line to the current page.
If the line exceeds the :attr:`max_size` then an exception
is raised.
Parameters
-----------
line: :class:`str`
The line to add.
empty: :class:`bool`
Indicates if another empty line should be added.
Raises
------
RuntimeError
The line was too big for the current :attr:`max_size`.
"""
max_page_size = self.max_size - self._prefix_len - self._suffix_len - 2 * self._linesep_len
if len(line) > max_page_size:
raise RuntimeError(f'Line exceeds maximum page size {max_page_size}')
if self._count + len(line) + self._linesep_len > self.max_size - self._suffix_len:
self.close_page()
self._count += len(line) + self._linesep_len
self._current_page.append(line)
if empty:
self._current_page.append('')
self._count += self._linesep_len
def close_page(self):
"""Prematurely terminate a page."""
if self.suffix is not None:
self._current_page.append(self.suffix)
self._pages.append(self.linesep.join(self._current_page))
if self.prefix is not None:
self._current_page = [self.prefix]
self._count = len(self.prefix) + self._linesep_len # prefix + linesep
else:
self._current_page = []
self._count = 0
def __len__(self):
total = sum(len(p) for p in self._pages)
return total + self._count
@property
def pages(self):
"""List[:class:`str`]: Returns the rendered list of pages."""
# we have more than just the prefix in our current page
if len(self._current_page) > (0 if self.prefix is None else 1):
self.close_page()
return self._pages
def __repr__(self):
fmt = '<Paginator prefix: {0.prefix!r} suffix: {0.suffix!r} linesep: {0.linesep!r} max_size: {0.max_size} count: {0._count}>'
return fmt.format(self)
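# Hedged usage sketch (the values below are illustrative): filling a Paginator
# and reading back the rendered pages, each wrapped in the configured
# prefix/suffix and kept under max_size.
def _example_paginator_usage():
    paginator = Paginator(prefix='```', suffix='```', max_size=500)
    for i in range(20):
        paginator.add_line(f'line {i}')
    return paginator.pages    # list of code-block strings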
def _not_overriden(f):
f.__help_command_not_overriden__ = True
return f
class _HelpCommandImpl(Command):
def __init__(self, inject, *args, **kwargs):
super().__init__(inject.command_callback, *args, **kwargs)
self._original = inject
self._injected = inject
async def prepare(self, ctx):
self._injected = injected = self._original.copy()
injected.context = ctx
self.callback = injected.command_callback
on_error = injected.on_help_command_error
if not hasattr(on_error, '__help_command_not_overriden__'):
if self.cog is not None:
self.on_error = self._on_error_cog_implementation
else:
self.on_error = on_error
await super().prepare(ctx)
async def _parse_arguments(self, ctx):
# Make the parser think we don't have a cog so it doesn't
# inject the parameter into `ctx.args`.
original_cog = self.cog
self.cog = None
try:
await super()._parse_arguments(ctx)
finally:
self.cog = original_cog
async def _on_error_cog_implementation(self, dummy, ctx, error):
await self._injected.on_help_command_error(ctx, error)
@property
def clean_params(self):
result = self.params.copy()
try:
del result[next(iter(result))]
except StopIteration:
raise ValueError('Missing context parameter') from None
else:
return result
def _inject_into_cog(self, cog):
# Warning: hacky
# Make the cog think that get_commands returns this command
# as well if we inject it without modifying __cog_commands__
# since that's used for the injection and ejection of cogs.
def wrapped_get_commands(*, _original=cog.get_commands):
ret = _original()
ret.append(self)
return ret
# Ditto here
def wrapped_walk_commands(*, _original=cog.walk_commands):
yield from _original()
yield self
functools.update_wrapper(wrapped_get_commands, cog.get_commands)
functools.update_wrapper(wrapped_walk_commands, cog.walk_commands)
cog.get_commands = wrapped_get_commands
cog.walk_commands = wrapped_walk_commands
self.cog = cog
def _eject_cog(self):
if self.cog is None:
return
# revert back into their original methods
cog = self.cog
cog.get_commands = cog.get_commands.__wrapped__
cog.walk_commands = cog.walk_commands.__wrapped__
self.cog = None
class HelpCommand:
r"""The base implementation for help command formatting.
.. note::
Internally instances of this class are deep copied every time
the command itself is invoked to prevent a race condition
mentioned in :issue:`2123`.
This means that relying on the state of this class to be
the same between command invocations would not work as expected.
Attributes
------------
context: Optional[:class:`Context`]
The context that invoked this help formatter. This is generally set after
the help command assigned, :func:`command_callback`\, has been called.
show_hidden: :class:`bool`
Specifies if hidden commands should be shown in the output.
Defaults to ``False``.
verify_checks: Optional[:class:`bool`]
Specifies if commands should have their :attr:`.Command.checks` called
and verified. If ``True``, always calls :attr:`.Command.checks`.
If ``None``, only calls :attr:`.Command.checks` in a guild setting.
If ``False``, never calls :attr:`.Command.checks`. Defaults to ``True``.
.. versionchanged:: 1.7
command_attrs: :class:`dict`
A dictionary of options to pass in for the construction of the help command.
This allows you to change the command behaviour without actually changing
the implementation of the command. The attributes will be the same as the
ones passed in the :class:`.Command` constructor.
"""
MENTION_TRANSFORMS = {
'@everyone': '@\u200beveryone',
'@here': '@\u200bhere',
r'<@!?[0-9]{17,22}>': '@deleted-user',
r'<@&[0-9]{17,22}>': '@deleted-role',
}
MENTION_PATTERN = re.compile('|'.join(MENTION_TRANSFORMS.keys()))
def __new__(cls, *args, **kwargs):
# To prevent race conditions on a single instance while still allowing
# settings to be passed, the original arguments must be stored so that
# copies can be made easily when the help command is actually called.
# See issue 2123.
self = super().__new__(cls)
# Shallow copies cannot be used in this case since it is not unusual to pass
# instances that need state, e.g. a Paginator, into the function.
# The keys can be safely copied as-is since they're 99.99% certain of
# being string keys.
deepcopy = copy.deepcopy
self.__original_kwargs__ = {k: deepcopy(v) for k, v in kwargs.items()}
self.__original_args__ = deepcopy(args)
return self
def __init__(self, **options):
self.show_hidden = options.pop('show_hidden', False)
self.verify_checks = options.pop('verify_checks', True)
self.command_attrs = attrs = options.pop('command_attrs', {})
attrs.setdefault('name', 'help')
attrs.setdefault('help', 'Shows this message')
self.context = None
self._command_impl = _HelpCommandImpl(self, **self.command_attrs)
def copy(self):
obj = self.__class__(*self.__original_args__, **self.__original_kwargs__)
obj._command_impl = self._command_impl
return obj
def _add_to_bot(self, bot):
command = _HelpCommandImpl(self, **self.command_attrs)
bot.add_command(command)
self._command_impl = command
def _remove_from_bot(self, bot):
bot.remove_command(self._command_impl.name)
self._command_impl._eject_cog()
def add_check(self, func):
"""
Adds a check to the help command.
.. versionadded:: 1.4
Parameters
----------
func
The function that will be used as a check.
"""
self._command_impl.add_check(func)
def remove_check(self, func):
"""
Removes a check from the help command.
This function is idempotent and will not raise an exception if
the function is not in the command's checks.
.. versionadded:: 1.4
Parameters
----------
func
The function to remove from the checks.
"""
self._command_impl.remove_check(func)
def get_bot_mapping(self):
"""Retrieves the bot mapping passed to :meth:`send_bot_help`."""
bot = self.context.bot
mapping = {cog: cog.get_commands() for cog in bot.cogs.values()}
mapping[None] = [c for c in bot.commands if c.cog is None]
return mapping
@property
def invoked_with(self):
"""Similar to :attr:`Context.invoked_with` except properly handles
the case where :meth:`Context.send_help` is used.
If the help command was used regularly then this returns
the :attr:`Context.invoked_with` attribute. Otherwise, if
the help command was called using :meth:`Context.send_help`
then it returns the internal command name of the help command.
Returns
---------
:class:`str`
The command name that triggered this invocation.
"""
command_name = self._command_impl.name
ctx = self.context
if ctx is None or ctx.command is None or ctx.command.qualified_name != command_name:
return command_name
return ctx.invoked_with
def get_command_signature(self, command):
"""Retrieves the signature portion of the help page.
Parameters
------------
command: :class:`Command`
The command to get the signature of.
Returns
--------
:class:`str`
The signature for the command.
"""
parent = command.parent
entries = []
while parent is not None:
if not parent.signature or parent.invoke_without_command:
entries.append(parent.name)
else:
entries.append(parent.name + ' ' + parent.signature)
parent = parent.parent
parent_sig = ' '.join(reversed(entries))
if len(command.aliases) > 0:
aliases = '|'.join(command.aliases)
fmt = f'[{command.name}|{aliases}]'
if parent_sig:
fmt = parent_sig + ' ' + fmt
alias = fmt
else:
alias = command.name if not parent_sig else parent_sig + ' ' + command.name
return f'{self.context.clean_prefix}{alias} {command.signature}'
def remove_mentions(self, string):
"""Removes mentions from the string to prevent abuse.
This includes ``@everyone``, ``@here``, member mentions and role mentions.
Returns
-------
:class:`str`
The string with mentions removed.
"""
def replace(obj, *, transforms=self.MENTION_TRANSFORMS):
return transforms.get(obj.group(0), '@invalid')
return self.MENTION_PATTERN.sub(replace, string)
@property
def cog(self):
"""A property for retrieving or setting the cog for the help command.
When a cog is set for the help command, it is as-if the help command
belongs to that cog. All cog special methods will apply to the help
command and it will be automatically unset on unload.
To unbind the cog from the help command, you can set it to ``None``.
Returns
--------
Optional[:class:`Cog`]
The cog that is currently set for the help command.
"""
return self._command_impl.cog
@cog.setter
def cog(self, cog):
# Remove whatever cog is currently valid, if any
self._command_impl._eject_cog()
# If a new cog is set then inject it.
if cog is not None:
self._command_impl._inject_into_cog(cog)
def command_not_found(self, string):
"""|maybecoro|
A method called when a command is not found in the help command.
This is useful to override for i18n.
Defaults to ``No command called {0} found.``
Parameters
------------
string: :class:`str`
The string that contains the invalid command. Note that this has
had mentions removed to prevent abuse.
Returns
---------
:class:`str`
The string to use when a command has not been found.
"""
return f'No command called "{string}" found.'
def subcommand_not_found(self, command, string):
"""|maybecoro|
A method called when a command did not have a subcommand requested in the help command.
This is useful to override for i18n.
Defaults to either:
- ``'Command "{command.qualified_name}" has no subcommands.'``
- If there is no subcommand in the ``command`` parameter.
- ``'Command "{command.qualified_name}" has no subcommand named {string}'``
- If the ``command`` parameter has subcommands but not one named ``string``.
Parameters
------------
command: :class:`Command`
The command that did not have the subcommand requested.
string: :class:`str`
The string that contains the invalid subcommand. Note that this has
had mentions removed to prevent abuse.
Returns
---------
:class:`str`
The string to use when the command did not have the subcommand requested.
"""
if isinstance(command, Group) and len(command.all_commands) > 0:
return f'Command "{command.qualified_name}" has no subcommand named {string}'
return f'Command "{command.qualified_name}" has no subcommands.'
async def filter_commands(self, commands, *, sort=False, key=None):
"""|coro|
Returns a filtered list of commands and optionally sorts them.
This takes into account the :attr:`verify_checks` and :attr:`show_hidden`
attributes.
Parameters
------------
commands: Iterable[:class:`Command`]
An iterable of commands that are getting filtered.
sort: :class:`bool`
Whether to sort the result.
key: Optional[Callable[:class:`Command`, Any]]
An optional key function to pass to :func:`py:sorted` that
takes a :class:`Command` as its sole parameter. If ``sort`` is
passed as ``True`` then this will default to the command name.
Returns
---------
List[:class:`Command`]
A list of commands that passed the filter.
"""
if sort and key is None:
key = lambda c: c.name
iterator = commands if self.show_hidden else filter(lambda c: not c.hidden, commands)
if self.verify_checks is False:
# if we do not need to verify the checks then we can just
# run it straight through normally without using await.
return sorted(iterator, key=key) if sort else list(iterator)
if self.verify_checks is None and not self.context.guild:
# if verify_checks is None and we're in a DM, don't verify
return sorted(iterator, key=key) if sort else list(iterator)
# if we're here then we need to check every command if it can run
async def predicate(cmd):
try:
return await cmd.can_run(self.context)
except CommandError:
return False
ret = []
for cmd in iterator:
valid = await predicate(cmd)
if valid:
ret.append(cmd)
if sort:
ret.sort(key=key)
return ret
def get_max_size(self, commands):
"""Returns the largest name length of the specified command list.
Parameters
------------
commands: Sequence[:class:`Command`]
A sequence of commands to check for the largest size.
Returns
--------
:class:`int`
The maximum width of the commands.
"""
as_lengths = (discord.utils._string_width(c.name) for c in commands)
return max(as_lengths, default=0)
def get_destination(self):
"""Returns the :class:`~discord.abc.Messageable` where the help command will be output.
You can override this method to customise the behaviour.
By default this returns the context's channel.
Returns
-------
:class:`.abc.Messageable`
The destination where the help command will be output.
"""
return self.context.channel
async def send_error_message(self, error):
"""|coro|
Handles the implementation when an error happens in the help command.
For example, the result of :meth:`command_not_found` will be passed here.
You can override this method to customise the behaviour.
By default, this sends the error message to the destination
specified by :meth:`get_destination`.
.. note::
You can access the invocation context with :attr:`HelpCommand.context`.
Parameters
------------
error: :class:`str`
The error message to display to the user. Note that this has
had mentions removed to prevent abuse.
"""
destination = self.get_destination()
await destination.send(error)
@_not_overriden
async def on_help_command_error(self, ctx, error):
"""|coro|
The help command's error handler, as specified by :ref:`ext_commands_error_handler`.
Useful to override if you need some specific behaviour when the error handler
is called.
By default this method does nothing and just propagates to the default
error handlers.
Parameters
------------
ctx: :class:`Context`
The invocation context.
error: :class:`CommandError`
The error that was raised.
"""
pass
async def send_bot_help(self, mapping):
"""|coro|
Handles the implementation of the bot command page in the help command.
This function is called when the help command is called with no arguments.
It should be noted that this method does not return anything -- rather the
actual message sending should be done inside this method. Well behaved subclasses
should use :meth:`get_destination` to know where to send, as this is a customisation
point for other users.
You can override this method to customise the behaviour.
.. note::
You can access the invocation context with :attr:`HelpCommand.context`.
Also, the commands in the mapping are not filtered. To do the filtering
you will have to call :meth:`filter_commands` yourself.
Parameters
------------
mapping: Mapping[Optional[:class:`Cog`], List[:class:`Command`]]
A mapping of cogs to commands that have been requested by the user for help.
The key of the mapping is the :class:`~.commands.Cog` that the command belongs to, or
``None`` if there isn't one, and the value is a list of commands that belongs to that cog.
"""
return None
async def send_cog_help(self, cog):
"""|coro|
Handles the implementation of the cog page in the help command.
This function is called when the help command is called with a cog as the argument.
It should be noted that this method does not return anything -- rather the
actual message sending should be done inside this method. Well behaved subclasses
should use :meth:`get_destination` to know where to send, as this is a customisation
point for other users.
You can override this method to customise the behaviour.
.. note::
You can access the invocation context with :attr:`HelpCommand.context`.
To get the commands that belong to this cog see :meth:`Cog.get_commands`.
The commands returned are not filtered. To do the filtering you will have to call
:meth:`filter_commands` yourself.
Parameters
-----------
cog: :class:`Cog`
The cog that was requested for help.
"""
return None
async def send_group_help(self, group):
"""|coro|
Handles the implementation of the group page in the help command.
This function is called when the help command is called with a group as the argument.
It should be noted that this method does not return anything -- rather the
actual message sending should be done inside this method. Well behaved subclasses
should use :meth:`get_destination` to know where to send, as this is a customisation
point for other users.
You can override this method to customise the behaviour.
.. note::
You can access the invocation context with :attr:`HelpCommand.context`.
To get the commands that belong to this group without aliases see
:attr:`Group.commands`. The commands returned are not filtered. To do the
filtering you will have to call :meth:`filter_commands` yourself.
Parameters
-----------
group: :class:`Group`
The group that was requested for help.
"""
return None
async def send_command_help(self, command):
"""|coro|
Handles the implementation of the single command page in the help command.
It should be noted that this method does not return anything -- rather the
actual message sending should be done inside this method. Well behaved subclasses
should use :meth:`get_destination` to know where to send, as this is a customisation
point for other users.
You can override this method to customise the behaviour.
.. note::
You can access the invocation context with :attr:`HelpCommand.context`.
.. admonition:: Showing Help
:class: helpful
There are certain attributes and methods that are helpful for a help command
to show such as the following:
- :attr:`Command.help`
- :attr:`Command.brief`
- :attr:`Command.short_doc`
- :attr:`Command.description`
- :meth:`get_command_signature`
There are more than just these attributes, but feel free to play around with
these to get the output that you want.
Parameters
-----------
command: :class:`Command`
The command that was requested for help.
"""
return None
async def prepare_help_command(self, ctx, command=None):
"""|coro|
A low level method that can be used to prepare the help command
before it does anything. For example, if you need to prepare
some state in your subclass before the command does its processing
then this would be the place to do it.
The default implementation does nothing.
.. note::
This is called *inside* the help command callback body. So all
the usual rules that happen inside apply here as well.
Parameters
-----------
ctx: :class:`Context`
The invocation context.
command: Optional[:class:`str`]
The argument passed to the help command.
"""
pass
async def command_callback(self, ctx, *, command=None):
"""|coro|
The actual implementation of the help command.
It is not recommended to override this method and instead change
the behaviour through the methods that actually get dispatched.
- :meth:`send_bot_help`
- :meth:`send_cog_help`
- :meth:`send_group_help`
- :meth:`send_command_help`
- :meth:`get_destination`
- :meth:`command_not_found`
- :meth:`subcommand_not_found`
- :meth:`send_error_message`
- :meth:`on_help_command_error`
- :meth:`prepare_help_command`
"""
await self.prepare_help_command(ctx, command)
bot = ctx.bot
if command is None:
mapping = self.get_bot_mapping()
return await self.send_bot_help(mapping)
# Check if it's a cog
cog = bot.get_cog(command)
if cog is not None:
return await self.send_cog_help(cog)
maybe_coro = discord.utils.maybe_coroutine
# If it's not a cog then it's a command.
# Since we want to have detailed errors when someone
# passes an invalid subcommand, we need to walk through
# the command group chain ourselves.
keys = command.split(' ')
cmd = bot.all_commands.get(keys[0])
if cmd is None:
string = await maybe_coro(self.command_not_found, self.remove_mentions(keys[0]))
return await self.send_error_message(string)
for key in keys[1:]:
try:
found = cmd.all_commands.get(key)
except AttributeError:
string = await maybe_coro(self.subcommand_not_found, cmd, self.remove_mentions(key))
return await self.send_error_message(string)
else:
if found is None:
string = await maybe_coro(self.subcommand_not_found, cmd, self.remove_mentions(key))
return await self.send_error_message(string)
cmd = found
if isinstance(cmd, Group):
return await self.send_group_help(cmd)
else:
return await self.send_command_help(cmd)
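# Hedged sketch of the customisation points described above: a minimal
# HelpCommand subclass that only overrides send_bot_help. The class name and
# message format are ours, not part of the library.
class _ExampleHelpCommand(HelpCommand):
    async def send_bot_help(self, mapping):
        destination = self.get_destination()
        filtered = await self.filter_commands(
            itertools.chain.from_iterable(mapping.values()), sort=True)
        names = ', '.join(command.qualified_name for command in filtered)
        await destination.send(f'Available commands: {names}')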
class DefaultHelpCommand(HelpCommand):
"""The implementation of the default help command.
This inherits from :class:`HelpCommand`.
It extends it with the following attributes.
Attributes
------------
width: :class:`int`
The maximum number of characters that fit in a line.
Defaults to 80.
sort_commands: :class:`bool`
Whether to sort the commands in the output alphabetically. Defaults to ``True``.
dm_help: Optional[:class:`bool`]
A tribool that indicates if the help command should DM the user instead of
sending it to the channel it received it from. If the boolean is set to
``True``, then all help output is DM'd. If ``False``, none of the help
output is DM'd. If ``None``, then the bot will only DM when the help
message becomes too long (dictated by more than :attr:`dm_help_threshold` characters).
Defaults to ``False``.
dm_help_threshold: Optional[:class:`int`]
The number of characters the paginator must accumulate before getting DM'd to the
user if :attr:`dm_help` is set to ``None``. Defaults to 1000.
indent: :class:`int`
How much to indent the commands from a heading. Defaults to ``2``.
commands_heading: :class:`str`
The command list's heading string used when the help command is invoked with a category name.
Useful for i18n. Defaults to ``"Commands:"``
no_category: :class:`str`
The string used when there is a command which does not belong to any category (cog).
Useful for i18n. Defaults to ``"No Category"``
paginator: :class:`Paginator`
The paginator used to paginate the help command output.
"""
def __init__(self, **options):
self.width = options.pop('width', 80)
self.indent = options.pop('indent', 2)
self.sort_commands = options.pop('sort_commands', True)
self.dm_help = options.pop('dm_help', False)
self.dm_help_threshold = options.pop('dm_help_threshold', 1000)
self.commands_heading = options.pop('commands_heading', "Commands:")
self.no_category = options.pop('no_category', 'No Category')
self.paginator = options.pop('paginator', None)
if self.paginator is None:
self.paginator = Paginator()
super().__init__(**options)
def shorten_text(self, text):
""":class:`str`: Shortens text to fit into the :attr:`width`."""
if len(text) > self.width:
return text[:self.width - 3].rstrip() + '...'
return text
def get_ending_note(self):
""":class:`str`: Returns help command's ending note. This is mainly useful to override for i18n purposes."""
command_name = self.invoked_with
return (
f"Type {self.context.clean_prefix}{command_name} command for more info on a command.\n"
f"You can also type {self.context.clean_prefix}{command_name} category for more info on a category."
)
def add_indented_commands(self, commands, *, heading, max_size=None):
"""Indents a list of commands after the specified heading.
The formatting is added to the :attr:`paginator`.
The default implementation is the command name indented by
:attr:`indent` spaces, padded to ``max_size`` followed by
the command's :attr:`Command.short_doc` and then shortened
to fit into the :attr:`width`.
Parameters
-----------
commands: Sequence[:class:`Command`]
A list of commands to indent for output.
heading: :class:`str`
The heading to add to the output. This is only added
if the list of commands is greater than 0.
max_size: Optional[:class:`int`]
The max size to use for the gap between indents.
If unspecified, calls :meth:`~HelpCommand.get_max_size` on the
commands parameter.
"""
if not commands:
return
self.paginator.add_line(heading)
max_size = max_size or self.get_max_size(commands)
get_width = discord.utils._string_width
for command in commands:
name = command.name
width = max_size - (get_width(name) - len(name))
entry = f'{self.indent * " "}{name:<{width}} {command.short_doc}'
self.paginator.add_line(self.shorten_text(entry))
async def send_pages(self):
"""A helper utility to send the page output from :attr:`paginator` to the destination."""
destination = self.get_destination()
for page in self.paginator.pages:
await destination.send(page)
def add_command_formatting(self, command):
"""A utility function to format the non-indented block of commands and groups.
Parameters
------------
command: :class:`Command`
The command to format.
"""
if command.description:
self.paginator.add_line(command.description, empty=True)
signature = self.get_command_signature(command)
self.paginator.add_line(signature, empty=True)
if command.help:
try:
self.paginator.add_line(command.help, empty=True)
except RuntimeError:
for line in command.help.splitlines():
self.paginator.add_line(line)
self.paginator.add_line()
def get_destination(self):
ctx = self.context
if self.dm_help is True:
return ctx.author
elif self.dm_help is None and len(self.paginator) > self.dm_help_threshold:
return ctx.author
else:
return ctx.channel
async def prepare_help_command(self, ctx, command):
self.paginator.clear()
await super().prepare_help_command(ctx, command)
async def send_bot_help(self, mapping):
ctx = self.context
bot = ctx.bot
if bot.description:
# <description> portion
self.paginator.add_line(bot.description, empty=True)
no_category = f'\u200b{self.no_category}:'
def get_category(command, *, no_category=no_category):
cog = command.cog
return cog.qualified_name + ':' if cog is not None else no_category
filtered = await self.filter_commands(bot.commands, sort=True, key=get_category)
max_size = self.get_max_size(filtered)
to_iterate = itertools.groupby(filtered, key=get_category)
# Now we can add the commands to the page.
for category, commands in to_iterate:
commands = sorted(commands, key=lambda c: c.name) if self.sort_commands else list(commands)
self.add_indented_commands(commands, heading=category, max_size=max_size)
note = self.get_ending_note()
if note:
self.paginator.add_line()
self.paginator.add_line(note)
await self.send_pages()
async def send_command_help(self, command):
self.add_command_formatting(command)
self.paginator.close_page()
await self.send_pages()
async def send_group_help(self, group):
self.add_command_formatting(group)
filtered = await self.filter_commands(group.commands, sort=self.sort_commands)
self.add_indented_commands(filtered, heading=self.commands_heading)
if filtered:
note = self.get_ending_note()
if note:
self.paginator.add_line()
self.paginator.add_line(note)
await self.send_pages()
async def send_cog_help(self, cog):
if cog.description:
self.paginator.add_line(cog.description, empty=True)
filtered = await self.filter_commands(cog.get_commands(), sort=self.sort_commands)
self.add_indented_commands(filtered, heading=self.commands_heading)
note = self.get_ending_note()
if note:
self.paginator.add_line()
self.paginator.add_line(note)
await self.send_pages()
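# Hedged usage sketch (the thresholds below are illustrative): a bot opts into
# this formatter by assigning it to ``bot.help_command``, e.g.
#
#     bot.help_command = DefaultHelpCommand(dm_help=None, dm_help_threshold=500)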
class MinimalHelpCommand(HelpCommand):
"""An implementation of a help command with minimal output.
This inherits from :class:`HelpCommand`.
Attributes
------------
sort_commands: :class:`bool`
Whether to sort the commands in the output alphabetically. Defaults to ``True``.
commands_heading: :class:`str`
The command list's heading string used when the help command is invoked with a category name.
Useful for i18n. Defaults to ``"Commands"``
aliases_heading: :class:`str`
The alias list's heading string used to list the aliases of the command. Useful for i18n.
Defaults to ``"Aliases:"``.
dm_help: Optional[:class:`bool`]
A tribool that indicates if the help command should DM the user instead of
sending it to the channel it received it from. If the boolean is set to
``True``, then all help output is DM'd. If ``False``, none of the help
output is DM'd. If ``None``, then the bot will only DM when the help
message becomes too long (dictated by more than :attr:`dm_help_threshold` characters).
Defaults to ``False``.
dm_help_threshold: Optional[:class:`int`]
The number of characters the paginator must accumulate before getting DM'd to the
user if :attr:`dm_help` is set to ``None``. Defaults to 1000.
no_category: :class:`str`
The string used when there is a command which does not belong to any category (cog).
Useful for i18n. Defaults to ``"No Category"``
paginator: :class:`Paginator`
The paginator used to paginate the help command output.
"""
def __init__(self, **options):
self.sort_commands = options.pop('sort_commands', True)
self.commands_heading = options.pop('commands_heading', "Commands")
self.dm_help = options.pop('dm_help', False)
self.dm_help_threshold = options.pop('dm_help_threshold', 1000)
self.aliases_heading = options.pop('aliases_heading', "Aliases:")
self.no_category = options.pop('no_category', 'No Category')
self.paginator = options.pop('paginator', None)
if self.paginator is None:
self.paginator = Paginator(suffix=None, prefix=None)
super().__init__(**options)
async def send_pages(self):
"""A helper utility to send the page output from :attr:`paginator` to the destination."""
destination = self.get_destination()
for page in self.paginator.pages:
await destination.send(page)
def get_opening_note(self):
"""Returns help command's opening note. This is mainly useful to override for i18n purposes.
The default implementation returns ::
Use `{prefix}{command_name} [command]` for more info on a command.
You can also use `{prefix}{command_name} [category]` for more info on a category.
Returns
-------
:class:`str`
The help command opening note.
"""
command_name = self.invoked_with
return (
f"Use `{self.context.clean_prefix}{command_name} [command]` for more info on a command.\n"
f"You can also use `{self.context.clean_prefix}{command_name} [category]` for more info on a category."
)
def get_command_signature(self, command):
return f'{self.context.clean_prefix}{command.qualified_name} {command.signature}'
def get_ending_note(self):
"""Return the help command's ending note. This is mainly useful to override for i18n purposes.
The default implementation does nothing.
Returns
-------
:class:`str`
The help command ending note.
"""
return None
def add_bot_commands_formatting(self, commands, heading):
"""Adds the minified bot heading with commands to the output.
The formatting should be added to the :attr:`paginator`.
The default implementation is a bold underline heading followed
by commands separated by an EN SPACE (U+2002) in the next line.
Parameters
-----------
commands: Sequence[:class:`Command`]
A list of commands that belong to the heading.
heading: :class:`str`
The heading to add to the line.
"""
if commands:
# U+2002 EN SPACE
joined = '\u2002'.join(c.name for c in commands)
self.paginator.add_line(f'__**{heading}**__')
self.paginator.add_line(joined)
def add_subcommand_formatting(self, command):
"""Adds formatting information on a subcommand.
The formatting should be added to the :attr:`paginator`.
The default implementation is the prefix and the :attr:`Command.qualified_name`
optionally followed by an En dash and the command's :attr:`Command.short_doc`.
Parameters
-----------
command: :class:`Command`
The command to show information of.
"""
fmt = '{0}{1} \N{EN DASH} {2}' if command.short_doc else '{0}{1}'
self.paginator.add_line(fmt.format(self.context.clean_prefix, command.qualified_name, command.short_doc))
def add_aliases_formatting(self, aliases):
"""Adds the formatting information on a command's aliases.
The formatting should be added to the :attr:`paginator`.
The default implementation is the :attr:`aliases_heading` bolded
followed by a comma separated list of aliases.
This is not called if there are no aliases to format.
Parameters
-----------
aliases: Sequence[:class:`str`]
A list of aliases to format.
"""
self.paginator.add_line(f'**{self.aliases_heading}** {", ".join(aliases)}', empty=True)
def add_command_formatting(self, command):
"""A utility function to format commands and groups.
Parameters
------------
command: :class:`Command`
The command to format.
"""
if command.description:
self.paginator.add_line(command.description, empty=True)
signature = self.get_command_signature(command)
if command.aliases:
self.paginator.add_line(signature)
self.add_aliases_formatting(command.aliases)
else:
self.paginator.add_line(signature, empty=True)
if command.help:
try:
self.paginator.add_line(command.help, empty=True)
except RuntimeError:
for line in command.help.splitlines():
self.paginator.add_line(line)
self.paginator.add_line()
def get_destination(self):
ctx = self.context
if self.dm_help is True:
return ctx.author
elif self.dm_help is None and len(self.paginator) > self.dm_help_threshold:
return ctx.author
else:
return ctx.channel
async def prepare_help_command(self, ctx, command):
self.paginator.clear()
await super().prepare_help_command(ctx, command)
async def send_bot_help(self, mapping):
ctx = self.context
bot = ctx.bot
if bot.description:
self.paginator.add_line(bot.description, empty=True)
note = self.get_opening_note()
if note:
self.paginator.add_line(note, empty=True)
no_category = f'\u200b{self.no_category}'
def get_category(command, *, no_category=no_category):
cog = command.cog
return cog.qualified_name if cog is not None else no_category
filtered = await self.filter_commands(bot.commands, sort=True, key=get_category)
to_iterate = itertools.groupby(filtered, key=get_category)
for category, commands in to_iterate:
commands = sorted(commands, key=lambda c: c.name) if self.sort_commands else list(commands)
self.add_bot_commands_formatting(commands, category)
note = self.get_ending_note()
if note:
self.paginator.add_line()
self.paginator.add_line(note)
await self.send_pages()
async def send_cog_help(self, cog):
bot = self.context.bot
if bot.description:
self.paginator.add_line(bot.description, empty=True)
note = self.get_opening_note()
if note:
self.paginator.add_line(note, empty=True)
if cog.description:
self.paginator.add_line(cog.description, empty=True)
filtered = await self.filter_commands(cog.get_commands(), sort=self.sort_commands)
if filtered:
self.paginator.add_line(f'**{cog.qualified_name} {self.commands_heading}**')
for command in filtered:
self.add_subcommand_formatting(command)
note = self.get_ending_note()
if note:
self.paginator.add_line()
self.paginator.add_line(note)
await self.send_pages()
async def send_group_help(self, group):
self.add_command_formatting(group)
filtered = await self.filter_commands(group.commands, sort=self.sort_commands)
if filtered:
note = self.get_opening_note()
if note:
self.paginator.add_line(note, empty=True)
self.paginator.add_line(f'**{self.commands_heading}**')
for command in filtered:
self.add_subcommand_formatting(command)
note = self.get_ending_note()
if note:
self.paginator.add_line()
self.paginator.add_line(note)
await self.send_pages()
async def send_command_help(self, command):
self.add_command_formatting(command)
self.paginator.close_page()
await self.send_pages()
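# --- Hedged usage sketch (added for illustration; not part of the original
# module). The methods above belong to a paginating help command in the style
# of discord.py's DefaultHelpCommand; a bot could install such a help command
# roughly as below. The prefix, intents and threshold values are assumptions.
def _example_install_help_command():
    import discord
    from discord.ext import commands
    intents = discord.Intents.default()
    bot = commands.Bot(command_prefix='!', intents=intents)
    # Mirror the dm_help/dm_help_threshold logic used by get_destination():
    # DM the help output once the paginator grows past ~1000 characters.
    bot.help_command = commands.DefaultHelpCommand(dm_help=None, dm_help_threshold=1000)
    return bot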
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import subprocess
from abc import abstractmethod, abstractproperty
from contextlib import contextmanager
from six import string_types
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import environment_as
from pants.util.dirutil import relativize_paths
from pants.util.meta import AbstractClass
logger = logging.getLogger(__name__)
class Executor(AbstractClass):
"""Executes java programs."""
@staticmethod
def _scrub_args(classpath, main, jvm_options, args, cwd):
classpath = maybe_list(classpath)
if not isinstance(main, string_types) or not main:
raise ValueError('A non-empty main classname is required, given: {}'.format(main))
jvm_options = maybe_list(jvm_options or ())
args = maybe_list(args or ())
return classpath, main, jvm_options, args, cwd
class Error(Exception):
"""Indicates an error launching a java program."""
class InvalidDistribution(ValueError):
"""Indicates an invalid Distribution was used to construct this runner."""
class Runner(object):
"""A re-usable executor that can run a configured java command line."""
@abstractproperty
def executor(self):
"""Returns the executor this runner uses to run itself."""
@property
def cmd(self):
"""Returns a string representation of the command that will be run."""
return ' '.join(self.command)
@abstractproperty
def command(self):
"""Returns a copy of the command line that will be run as a list of command line tokens."""
@abstractmethod
def run(self, stdout=None, stderr=None, cwd=None):
"""Runs the configured java command.
If there is a problem executing the java program, subclasses should raise Executor.Error.
It's guaranteed that all arguments are valid as documented in `execute`.
:param stdout: An optional stream to pump stdout to; defaults to `sys.stdout`.
:param stderr: An optional stream to pump stderr to; defaults to `sys.stderr`.
:param string cwd: optionally set the working directory
"""
def __init__(self, distribution):
"""Constructs an Executor that can be used to launch java programs.
:param distribution: a validated java distribution to use when launching java programs.
"""
if not hasattr(distribution, 'java') or not hasattr(distribution, 'validate'):
raise self.InvalidDistribution('A valid distribution is required, given: {}'
.format(distribution))
distribution.validate()
self._distribution = distribution
@property
def distribution(self):
"""Returns the `Distribution` this executor runs via."""
return self._distribution
def runner(self, classpath, main, jvm_options=None, args=None, cwd=None):
"""Returns an `Executor.Runner` for the given java command."""
return self._runner(*self._scrub_args(classpath, main, jvm_options, args, cwd=cwd))
def execute(self, classpath, main, jvm_options=None, args=None, stdout=None, stderr=None,
cwd=None):
"""Launches the java program defined by the classpath and main.
:param list classpath: the classpath for the java program
:param string main: the fully qualified class name of the java program's entry point
:param list jvm_options: an optional sequence of options for the underlying jvm
:param list args: an optional sequence of args to pass to the java program
:param string cwd: optionally set the working directory
Returns the exit code of the java program.
Raises Executor.Error if there was a problem launching java itself.
"""
executor = self.runner(classpath=classpath, main=main, jvm_options=jvm_options, args=args,
cwd=cwd)
return executor.run(stdout=stdout, stderr=stderr, cwd=cwd)
@abstractmethod
def _runner(self, classpath, main, jvm_options, args, cwd=None):
"""Subclasses should return a `Runner` that can execute the given java main."""
def _create_command(self, classpath, main, jvm_options, args, cwd=None):
cmd = [self._distribution.java]
cmd.extend(jvm_options)
if cwd:
classpath = relativize_paths(classpath, cwd)
cmd.extend(['-cp', os.pathsep.join(classpath), main])
cmd.extend(args)
return cmd
class CommandLineGrabber(Executor):
"""Doesn't actually execute anything, just captures the cmd line."""
def __init__(self, distribution):
super(CommandLineGrabber, self).__init__(distribution=distribution)
self._command = None # Initialized when we run something.
def _runner(self, classpath, main, jvm_options, args, cwd=None):
self._command = self._create_command(classpath, main, jvm_options, args, cwd=cwd)
class Runner(self.Runner):
@property
def executor(_):
return self
@property
def command(_):
return list(self._command)
def run(_, stdout=None, stderr=None, cwd=None):
return 0
return Runner()
@property
def cmd(self):
return self._command
class SubprocessExecutor(Executor):
"""Executes java programs by launching a jvm in a subprocess."""
_SCRUBBED_ENV = {
# We attempt to control the classpath for correctness, caching and invalidation reasons;
# allowing CLASSPATH to influence it would be a hermeticity leak.
'CLASSPATH': None,
# We attempt to control jvm options and give users explicit control in some cases as well.
# In all cases we want predictable behavior - pants defaults, repo defaults, or user tweaks
# specified on the command line. In addition, CLI options can affect outputs; e.g. class debug
# info, target classfile version, etc. - all breaking hermeticity.
'_JAVA_OPTIONS': None,
'JAVA_TOOL_OPTIONS': None
}
@classmethod
@contextmanager
def _maybe_scrubbed_env(cls):
for env_var in cls._SCRUBBED_ENV:
value = os.getenv(env_var)
if value:
logger.warn('Scrubbing {env_var}={value}'.format(env_var=env_var, value=value))
with environment_as(**cls._SCRUBBED_ENV):
yield
def __init__(self, distribution):
super(SubprocessExecutor, self).__init__(distribution=distribution)
self._buildroot = get_buildroot()
def _create_command(self, classpath, main, jvm_options, args, cwd=None):
cwd = cwd or self._buildroot
return super(SubprocessExecutor, self)._create_command(classpath, main, jvm_options,
args, cwd=cwd)
def _runner(self, classpath, main, jvm_options, args, cwd=None):
command = self._create_command(classpath, main, jvm_options, args, cwd=cwd)
class Runner(self.Runner):
@property
def executor(_):
return self
@property
def command(_):
return list(command)
def run(_, stdout=None, stderr=None, cwd=None):
return self._spawn(command, stdout=stdout, stderr=stderr, cwd=cwd).wait()
return Runner()
def spawn(self, classpath, main, jvm_options=None, args=None, cwd=None, **subprocess_args):
"""Spawns the java program passing any extra subprocess kwargs on to subprocess.Popen.
Returns the Popen process object handle to the spawned java program subprocess.
"""
cmd = self._create_command(*self._scrub_args(classpath, main, jvm_options, args, cwd=cwd))
return self._spawn(cmd, cwd, **subprocess_args)
def _spawn(self, cmd, cwd=None, **subprocess_args):
with self._maybe_scrubbed_env():
cwd = cwd or self._buildroot
logger.debug('Executing: {cmd} args={args} at cwd={cwd}'
.format(cmd=' '.join(cmd), args=subprocess_args, cwd=cwd))
try:
return subprocess.Popen(cmd, cwd=cwd, **subprocess_args)
except OSError as e:
raise self.Error('Problem executing {0}: {1}'.format(self._distribution.java, e))
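# --- Hedged usage sketch (added for illustration; not part of the original
# module). Shows how the Executor API above might be driven. `distribution`
# stands in for an already-validated Distribution (it must expose `java` and
# `validate`); the classpath entries and main class are illustrative only.
def _example_executor_usage(distribution):
    """Illustrative only: run a java main, then capture a command line."""
    executor = SubprocessExecutor(distribution)
    exit_code = executor.execute(classpath=['a.jar', 'b.jar'],
                                 main='com.example.Main',
                                 jvm_options=['-Xmx1g'],
                                 args=['--verbose'])
    grabber = CommandLineGrabber(distribution)
    grabber.runner(['a.jar'], 'com.example.Main').run()  # captures, never spawns
    return exit_code, grabber.cmd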
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import pytest
def testShouldFireKeyPressEvents(driver, pages):
pages.load("javascriptPage.html")
keyReporter = driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("a")
result = driver.find_element(by=By.ID, value="result")
assert "press:" in result.text
def testShouldFireKeyDownEvents(driver, pages):
pages.load("javascriptPage.html")
keyReporter = driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("I")
result = driver.find_element(by=By.ID, value="result")
assert "down" in result.text
def testShouldFireKeyUpEvents(driver, pages):
pages.load("javascriptPage.html")
keyReporter = driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("a")
result = driver.find_element(by=By.ID, value="result")
assert "up:" in result.text
def testShouldTypeLowerCaseLetters(driver, pages):
pages.load("javascriptPage.html")
keyReporter = driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("abc def")
assert keyReporter.get_attribute("value") == "abc def"
def testShouldBeAbleToTypeCapitalLetters(driver, pages):
pages.load("javascriptPage.html")
keyReporter = driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("ABC DEF")
assert keyReporter.get_attribute("value") == "ABC DEF"
def testShouldBeAbleToTypeQuoteMarks(driver, pages):
pages.load("javascriptPage.html")
keyReporter = driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("\"")
assert keyReporter.get_attribute("value") == "\""
def testShouldBeAbleToTypeTheAtCharacter(driver, pages):
pages.load("javascriptPage.html")
keyReporter = driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("@")
assert keyReporter.get_attribute("value") == "@"
def testShouldBeAbleToMixUpperAndLowerCaseLetters(driver, pages):
pages.load("javascriptPage.html")
keyReporter = driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("[email protected]")
assert keyReporter.get_attribute("value") == "[email protected]"
def testArrowKeysShouldNotBePrintable(driver, pages):
pages.load("javascriptPage.html")
keyReporter = driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys(Keys.ARROW_LEFT)
assert keyReporter.get_attribute("value") == ""
def testListOfArrowKeysShouldNotBePrintable(driver, pages):
pages.load("javascriptPage.html")
keyReporter = driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys([Keys.ARROW_LEFT])
assert keyReporter.get_attribute("value") == ""
def testShouldBeAbleToUseArrowKeys(driver, pages):
pages.load("javascriptPage.html")
keyReporter = driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("Tet", Keys.ARROW_LEFT, "s")
assert keyReporter.get_attribute("value") == "Test"
def testWillSimulateAKeyUpWhenEnteringTextIntoInputElements(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyUp")
element.send_keys("I like cheese")
result = driver.find_element(by=By.ID, value="result")
assert result.text == "I like cheese"
def testWillSimulateAKeyDownWhenEnteringTextIntoInputElements(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyDown")
element.send_keys("I like cheese")
result = driver.find_element(by=By.ID, value="result")
# Because the key down gets the result before the input element is
# filled, we're a letter short here
assert result.text == "I like chees"
def testWillSimulateAKeyPressWhenEnteringTextIntoInputElements(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyPress")
element.send_keys("I like cheese")
result = driver.find_element(by=By.ID, value="result")
# Because the key down gets the result before the input element is
# filled, we're a letter short here
assert result.text == "I like chees"
def testWillSimulateAKeyUpWhenEnteringTextIntoTextAreas(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyUpArea")
element.send_keys("I like cheese")
result = driver.find_element(by=By.ID, value="result")
assert result.text == "I like cheese"
def testWillSimulateAKeyDownWhenEnteringTextIntoTextAreas(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyDownArea")
element.send_keys("I like cheese")
result = driver.find_element(by=By.ID, value="result")
# Because the key down gets the result before the input element is
# filled, we're a letter short here
assert result.text == "I like chees"
def testWillSimulateAKeyPressWhenEnteringTextIntoTextAreas(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyPressArea")
element.send_keys("I like cheese")
result = driver.find_element(by=By.ID, value="result")
# Because the key down gets the result before the input element is
# filled, we're a letter short here
assert result.text == "I like chees"
def testShouldReportKeyCodeOfArrowKeysUpDownEvents(driver, pages):
pages.load("javascriptPage.html")
result = driver.find_element(by=By.ID, value="result")
element = driver.find_element(by=By.ID, value="keyReporter")
element.send_keys(Keys.ARROW_DOWN)
assert "down: 40" in result.text.strip()
assert "up: 40" in result.text.strip()
element.send_keys(Keys.ARROW_UP)
assert "down: 38" in result.text.strip()
assert "up: 38" in result.text.strip()
element.send_keys(Keys.ARROW_LEFT)
assert "down: 37" in result.text.strip()
assert "up: 37" in result.text.strip()
element.send_keys(Keys.ARROW_RIGHT)
assert "down: 39" in result.text.strip()
assert "up: 39" in result.text.strip()
# And leave no rubbish/printable keys in the "keyReporter"
assert element.get_attribute("value") == ""
def testNumericNonShiftKeys(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyReporter")
numericLineCharsNonShifted = "`1234567890-=[]\\,.'/42"
element.send_keys(numericLineCharsNonShifted)
assert element.get_attribute("value") == numericLineCharsNonShifted
@pytest.mark.xfail_marionette(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1255258')
@pytest.mark.xfail_remote(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1255258')
def testNumericShiftKeys(driver, pages):
pages.load("javascriptPage.html")
result = driver.find_element(by=By.ID, value="result")
element = driver.find_element(by=By.ID, value="keyReporter")
numericShiftsEtc = "~!@#$%^&*()_+{}:i\"<>?|END~"
element.send_keys(numericShiftsEtc)
assert element.get_attribute("value") == numericShiftsEtc
assert "up: 16" in result.text.strip()
def testLowerCaseAlphaKeys(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyReporter")
lowerAlphas = "abcdefghijklmnopqrstuvwxyz"
element.send_keys(lowerAlphas)
assert element.get_attribute("value") == lowerAlphas
@pytest.mark.xfail_marionette(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1255258')
@pytest.mark.xfail_remote(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1255258')
def testUppercaseAlphaKeys(driver, pages):
pages.load("javascriptPage.html")
result = driver.find_element(by=By.ID, value="result")
element = driver.find_element(by=By.ID, value="keyReporter")
upperAlphas = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
element.send_keys(upperAlphas)
assert element.get_attribute("value") == upperAlphas
assert "up: 16" in result.text.strip()
@pytest.mark.xfail_marionette(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1255258')
@pytest.mark.xfail_remote(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1255258')
def testAllPrintableKeys(driver, pages):
pages.load("javascriptPage.html")
result = driver.find_element(by=By.ID, value="result")
element = driver.find_element(by=By.ID, value="keyReporter")
allPrintable = "!\"#$%&'()*+,-./0123456789:<=>?@ ABCDEFGHIJKLMNOPQRSTUVWXYZ [\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
element.send_keys(allPrintable)
assert element.get_attribute("value") == allPrintable
assert "up: 16" in result.text.strip()
def testArrowKeysAndPageUpAndDown(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyReporter")
element.send_keys(
"a" + Keys.LEFT + "b" + Keys.RIGHT +
Keys.UP + Keys.DOWN + Keys.PAGE_UP + Keys.PAGE_DOWN + "1")
assert element.get_attribute("value") == "ba1"
# def testHomeAndEndAndPageUpAndPageDownKeys(driver, pages):
# // FIXME: macs don't have HOME keys, would PGUP work?
# if (Platform.getCurrent().is(Platform.MAC)) {
# return
# }
# pages.load("javascriptPage.html")
# element = driver.find_element(by=By.ID, value="keyReporter")
# element.send_keys("abc" + Keys.HOME + "0" + Keys.LEFT + Keys.RIGHT +
# Keys.PAGE_UP + Keys.PAGE_DOWN + Keys.END + "1" + Keys.HOME +
# "0" + Keys.PAGE_UP + Keys.END + "111" + Keys.HOME + "00")
# assert element.get_attribute("value") == "0000abc1111"
def testDeleteAndBackspaceKeys(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("abcdefghi")
assert element.get_attribute("value") == "abcdefghi"
element.send_keys(Keys.LEFT, Keys.LEFT, Keys.DELETE)
assert element.get_attribute("value") == "abcdefgi"
element.send_keys(Keys.LEFT, Keys.LEFT, Keys.BACK_SPACE)
assert element.get_attribute("value") == "abcdfgi"
@pytest.mark.xfail_marionette(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1255258')
@pytest.mark.xfail_remote(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1255258')
def testSpecialSpaceKeys(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("abcd" + Keys.SPACE + "fgh" + Keys.SPACE + "ij")
assert element.get_attribute("value") == "abcd fgh ij"
@pytest.mark.xfail_marionette(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1255258')
@pytest.mark.xfail_remote(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1255258')
def testNumberpadAndFunctionKeys(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyReporter")
element.send_keys(
"abcd" + Keys.MULTIPLY + Keys.SUBTRACT + Keys.ADD +
Keys.DECIMAL + Keys.SEPARATOR + Keys.NUMPAD0 + Keys.NUMPAD9 +
Keys.ADD + Keys.SEMICOLON + Keys.EQUALS + Keys.DIVIDE +
Keys.NUMPAD3 + "abcd")
assert element.get_attribute("value") == "abcd*-+.,09+;=/3abcd"
element.clear()
element.send_keys("FUNCTION" + Keys.F2 + "-KEYS" + Keys.F2)
element.send_keys("" + Keys.F2 + "-TOO" + Keys.F2)
assert element.get_attribute("value") == "FUNCTION-KEYS-TOO"
def testShiftSelectionDeletes(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("abcd efgh")
assert element.get_attribute("value") == "abcd efgh"
element.send_keys(Keys.SHIFT, Keys.LEFT, Keys.LEFT, Keys.LEFT)
element.send_keys(Keys.DELETE)
assert element.get_attribute("value") == "abcd e"
def testShouldTypeIntoInputElementsThatHaveNoTypeAttribute(driver, pages):
pages.load("formPage.html")
element = driver.find_element(by=By.ID, value="no-type")
element.send_keys("Should Say Cheese")
assert element.get_attribute("value") == "Should Say Cheese"
def testShouldTypeAnInteger(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(by=By.ID, value="keyReporter")
element.send_keys(1234)
assert element.get_attribute("value") == "1234"
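# --- Hedged sketch (added for illustration; not part of the original tests).
# The `driver` and `pages` arguments used above are pytest fixtures assumed to
# come from the suite's conftest.py. A minimal, hypothetical local-Firefox
# fixture could look like this (named differently so it does not shadow the
# real one):
@pytest.fixture
def example_local_driver():
    from selenium import webdriver
    d = webdriver.Firefox()  # assumes geckodriver is available on PATH
    yield d
    d.quit()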
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Group.users_seen'
db.add_column('sentry_groupedmessage', 'users_seen',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_index=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Group.users_seen'
db.delete_column('sentry_groupedmessage', 'users_seen')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
|
# coding: utf-8
import httplib
import json
from urllib import urlencode
from urllib2 import urlopen, Request
# python-iugu package modules
import base
import config
class IuguMerchant(base.IuguApi):
def __init__(self, **kwargs):
super(IuguMerchant, self).__init__(**kwargs)
self.__conn = base.IuguRequests()
def create_payment_token(self, card_number, first_name, last_name,
month, year, verification_value, method="credit_card"):
"""Sends credit_card data of a customer and returns a token
for payment process without needing to persist personal data
of customers.
:param method: string 'credit_card' or options given by API.
:param card_number: str of card number
:param first_name: string with consumer/buyer first name
:param last_name: consumer/buyer last name
:param month: two digits to Month expiry date of card
:param year: four digits to Year expiry date of card
:param verification_value: CVV
:returns: token_id as id, response, extra_info and method
=> http://iugu.com/referencias/api#tokens-e-cobranca-direta
"""
urn = "/v1/payment_token"
data = [('data[last_name]', last_name), ('data[first_name]', first_name),
('data[verification_value]', verification_value),
('data[month]', month), ('data[year]', year),
('data[number]', card_number)]
data.append(("account_id", self.account_id)) # work less this
data.append(("test", self.is_mode_test()))
data.append(("method", method))
token_data = self.__conn.post(urn, data)
return Token(token_data)
def create_charge(self, consumer_email, items, token=None, payer=None):
"""
Creates an invoice and performs a direct charge, returning the result.
:param items: an Item instance (merchant.Item) or a list of them
:param token: an instance of Token, used for credit card payments.
If token is None, the bank_slip method is used.
"""
data = [] # data fields of charge. It'll encode
urn = "/v1/charge"
if isinstance(items, list):
for item in items:
assert type(item) is Item
data.extend(item.to_data())
else:
assert type(items) is Item
data.extend(items.to_data())
if token and isinstance(token, Token):
token_id = token.id
data.append(("token", token_id))
else:
data.append(("method", "bank_slip"))
if payer is not None:
assert type(payer) is Payer
data.extend(payer.to_data())
data.append(("email", consumer_email))
results = self.__conn.post(urn, data)
return Charge(results)
class Charge(object):
"""
This class wraps the response of a create_charge request. Useful only for
viewing the status and the invoice_id.
:attribute invoice_id: ID of the created Invoice
"""
def __init__(self, invoice):
self.invoice = invoice
if 'message' in invoice:
self.message = invoice['message']
if 'errors' in invoice:
self.errors = invoice['errors']
if 'success' in invoice:
self.success = invoice['success']
if 'invoice_id' in invoice:
self.invoice_id = invoice['invoice_id']
def is_success(self):
try:
if self.success == True:
return True
except AttributeError:  # success attribute not set on error responses
pass
return False
class Token(object):
"""
This class is a representation of the payment token used by the API.
"""
def __init__(self, token_data):
self.token_data = token_data
if 'id' in token_data:
self.id = token_data['id']
if 'extra_info' in token_data:
self.extra_info = token_data['extra_info']
if 'method' in token_data:
self.method = token_data['method']
@property
def is_test(self):
if 'test' in self.token_data.keys() and self.token_data['test'] == True:
return True
else:
return False
@property
def status(self):
try:
if 'errors' in self.token_data.keys():
return self.token_data['errors']
except AttributeError:
pass
return 200
class Payer(object):
def __init__(self, name, email, address=None, cpf_cnpj=None, phone_prefix=None, phone=None):
self.cpf_cnpj = cpf_cnpj
self.name = name
self.email = email
self.phone_prefix = phone_prefix
self.phone = phone
if isinstance(address, Address):
self.address = address
def to_data(self):
"""
Returns tuples to encode with urllib.urlencode
"""
as_tuple = []
key = "payer"
as_tuple.append(("{payer}[cpf_cnpj]".format(
payer=key), self.cpf_cnpj))
as_tuple.append(("{payer}[name]".format(payer=key), self.name))
as_tuple.append(("{payer}[email]".format(payer=key), self.email))
if self.address:
as_tuple.append(("{payer}[address.zip_code]".format(
payer=key), self.address.zip_code))
as_tuple.append(("{payer}[address.number]".format(
payer=key), self.address.number))
return as_tuple
class Address(object):
def __init__(self, street, number, city, state, country, zip_code):
self.street = street
self.number = number
self.city = city
self.state = state
self.country = country
self.zip_code = zip_code
class Item(object):
"""
This class represents a checkout item. It is used mainly to create a charge
within the IuguMerchant class.
"""
def __init__(self, description, quantity, price_cents, **kwargs):
self.description = description
self.quantity = quantity
self.price_cents = price_cents # must be integer 10.90 => 1090
self.id = kwargs.get("id")
self.created_at = kwargs.get("created_at")
self.updated_at = kwargs.get("updated_at")
self.price = kwargs.get("price")
# useful for subscriptions subitems
self.recurrent = kwargs.get("recurrent") # boolean
self.total = kwargs.get("total")
# command for eliminate an item
self.destroy = None
def __str__(self):
return "%s" % self.description
def to_data(self, is_subscription=False):
"""
Returns tuples to encode with urllib.urlencode
"""
as_tuple = []
key = "items"
if is_subscription is True:
key = "subitems" # run to adapt the API subscription
if self.id:
as_tuple.append(("{items}[][id]".format(items=key), self.id))
as_tuple.append(("{items}[][description]".format(items=key),
self.description))
as_tuple.append(("{items}[][quantity]".format(items=key),
self.quantity))
as_tuple.append(("{items}[][price_cents]".format(items=key),
self.price_cents))
if self.recurrent:
value_recurrent = str(self.recurrent)
value_recurrent = value_recurrent.lower()
as_tuple.append(("{items}[][recurrent]".format(items=key),
value_recurrent))
if self.destroy is not None:
value_destroy = str(self.destroy)
value_destroy = value_destroy.lower()
as_tuple.append(("{items}[][_destroy]".format(items=key),
value_destroy))
return as_tuple
def remove(self):
"""
Marks the item to be removed when the invoice is saved.
"""
self.destroy = True
class Transfers(object):
__conn = base.IuguRequests()
__urn = "/v1/transfers"
def __init__(self, **kwargs):
self.id = kwargs.get("id")
self.created_at = kwargs.get("created_at")
self.amount_cents = kwargs.get("amount_cents")
self.amount_localized = kwargs.get("amount_localized")
self.receiver = kwargs.get("receiver")
self.sender = kwargs.get("sender")
def send(self, receiver_id, amount_cents):
"""
Sends amount_cents to receiver_id.
"""
data = []
data.append(("receiver_id", receiver_id))
data.append(("amount_cents", amount_cents))
response = self.__conn.post(self.__urn, data)
return Transfers(**response)
@classmethod
def getitems(cls):
"""
Gets the sent and received transfers for the API_KEY in use.
"""
response = cls.__conn.get(cls.__urn, [])
sent = response["sent"]
received = response["received"]
transfers = []
for t in sent:
transfer_obj = Transfers(**t)
transfers.append(transfer_obj)
for r in received:
transfer_obj = Transfers(**r)
transfers.append(transfer_obj)
return transfers
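# --- Hedged usage sketch (added for illustration; not part of the original
# module). A possible checkout flow with the classes above; the account id,
# card data and email are placeholders, and passing account_id through
# IuguMerchant's constructor assumes base.IuguApi accepts it as a kwarg.
def _example_checkout_flow():
    merchant = IuguMerchant(account_id="YOUR_ACCOUNT_ID")
    token = merchant.create_payment_token("4111111111111111", "Ada", "Lovelace",
                                          "12", "2030", "123")
    item = Item("Monthly plan", 1, 1090)  # price in cents: R$ 10.90 => 1090
    charge = merchant.create_charge("[email protected]", item, token=token)
    return charge.is_success()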
|
|
from collections import OrderedDict
import os.path
import shutil
import pytest
from edalize import get_edatool
tests_dir = os.path.dirname(__file__)
class TestFixture:
"""A fixture that makes an edalize backend with work_root directory
Create this object using the make_edalize_test factory fixture. This passes
through its `tool_name` and sets up a temporary directory for `work_root`,
then passes its keyword arguments through to the TestFixture initializer.
Args:
tool_name: The name of the tool
work_root: The directory to treat as a work root
test_name: The name to call the backend. Defaults to
`'test_<tool_name>_0'`
param_types: A list of parameter types. Defaults to `['plusarg',
'vlogdefine', 'vlogparam']` (the parameter types supported
by most simulators).
files: A list of files to use. Defaults to `None`, which means to use
:py:data:`FILES`.
tool_options: Dictionary passed to _setup_backend. Defaults to `{}`.
ref_dir: A reference directory relative to `test_<tool_name>`. Defaults
to `'.'`
use_vpi: If true, set up backend with definitions from :attr:`VPI`.
Defaults to `False`.
"""
def __init__(
self,
tool_name,
work_root,
test_name=None,
param_types=["plusarg", "vlogdefine", "vlogparam"],
files=None,
tool_options={},
ref_dir=".",
use_vpi=False,
toplevel="top_module",
):
raw_ref_dir = os.path.join(tests_dir, "test_" + tool_name, ref_dir)
self.test_name = (
"test_{}_0".format(tool_name) if test_name is None else test_name
)
self.ref_dir = os.path.normpath(raw_ref_dir)
self.work_root = work_root
self.backend = _setup_backend(
self.test_name,
tool_name,
param_types,
files,
tool_options,
work_root,
use_vpi,
toplevel,
)
def compare_files(self, files, ref_subdir="."):
"""Check some files in the work root match those in the ref directory
The files argument gives the list of files to check. These are
interpreted as paths relative to the work directory and relative to
self.ref_dir / ref_subdir.
This is a wrapper around edalize_common.compare_files: see its
documentation for how to use the :envvar:`GOLDEN_RUN` environment
variable to copy across a golden reference.
"""
ref_dir = os.path.normpath(os.path.join(self.ref_dir, ref_subdir))
return compare_files(ref_dir, self.work_root, files)
def copy_to_work_root(self, path):
shutil.copy(
os.path.join(self.ref_dir, path), os.path.join(self.work_root, path)
)
@pytest.fixture
def make_edalize_test(monkeypatch, tmpdir):
"""A factory fixture to make an edalize backend with work_root directory
The returned factory method takes a `tool_name` (the name of the tool) and
the keyword arguments supported by :class:`TestFixture`. It returns a
:class:`TestFixture` object, whose `work_root` is a temporary directory.
"""
# Prepend directory `mock_commands` to PATH environment variable
monkeypatch.setenv("PATH", os.path.join(tests_dir, "mock_commands"), ":")
created = []
def _fun(tool_name, **kwargs):
work_root = tmpdir / str(len(created))
work_root.mkdir()
fixture = TestFixture(tool_name, str(work_root), **kwargs)
created.append(fixture)
return fixture
return _fun
def compare_files(ref_dir, work_root, files):
"""Check that all *files* in *work_root* match those in *ref_dir*.
If the environment variable :envvar:`GOLDEN_RUN` is set, the *files* in
*work_root* are copied to *ref_dir* to become the new reference.
"""
for f in files:
reference_file = os.path.join(ref_dir, f)
generated_file = os.path.join(work_root, f)
assert os.path.exists(generated_file)
if "GOLDEN_RUN" in os.environ:
shutil.copy(generated_file, reference_file)
with open(reference_file) as fref, open(generated_file) as fgen:
assert fref.read() == fgen.read(), f
def param_gen(paramtypes):
"""Generate dictionary of definitions in *paramtypes* list."""
defs = OrderedDict()
for paramtype in paramtypes:
for datatype in ["bool", "int", "str"]:
if datatype == "int":
default = 42
elif datatype == "str":
default = "hello"
else:
default = True
defs[paramtype + "_" + datatype] = {
"datatype": datatype,
"default": default,
"description": "",
"paramtype": paramtype,
}
return defs
def _setup_backend(
name, tool, paramtypes, files, tool_options, work_root, use_vpi, toplevel
):
"""Set up a backend.
The backend is called *name*, is set up for *tool* with *tool_options*,
*paramtypes*, and, if *use_vpi* is ``True``, definitions from :attr:`VPI`.
If *files* is None, files are taken from :attr:`FILES`.
"""
parameters = param_gen(paramtypes)
_vpi = []
if use_vpi:
_vpi = VPI
for v in VPI:
for f in v["src_files"]:
_f = os.path.join(work_root, f)
if not os.path.exists(os.path.dirname(_f)):
os.makedirs(os.path.dirname(_f))
with open(_f, "a"):
os.utime(_f, None)
edam = {
"name": name,
"files": FILES if files is None else files,
"parameters": parameters,
"tool_options": {tool: tool_options},
"toplevel": toplevel,
"vpi": _vpi,
}
return get_edatool(tool)(edam=edam, work_root=work_root)
FILES = [
{"name": "qip_file.qip", "file_type": "QIP"},
{"name": "qsys_file", "file_type": "QSYS"},
{"name": "sdc_file", "file_type": "SDC"},
{"name": "bmm_file", "file_type": "BMM"},
{"name": "sv_file.sv", "file_type": "systemVerilogSource"},
{"name": "pcf_file.pcf", "file_type": "PCF"},
{"name": "ucf_file.ucf", "file_type": "UCF"},
{"name": "user_file", "file_type": "user"},
{"name": "tcl_file.tcl", "file_type": "tclSource"},
{"name": "waiver_file.waiver", "file_type": "waiver"},
{"name": "vlog_file.v", "file_type": "verilogSource"},
{"name": "vlog05_file.v", "file_type": "verilogSource-2005"},
{"name": "vlog_incfile", "file_type": "verilogSource", "is_include_file": True},
{"name": "vhdl_file.vhd", "file_type": "vhdlSource"},
{"name": "vhdl_lfile", "file_type": "vhdlSource", "logical_name": "libx"},
{"name": "vhdl2008_file", "file_type": "vhdlSource-2008"},
{"name": "xci_file.xci", "file_type": "xci"},
{"name": "xdc_file.xdc", "file_type": "xdc"},
{"name": "bootrom.mem", "file_type": "mem"},
{"name": "c_file.c", "file_type": "cSource"},
{"name": "cpp_file.cpp", "file_type": "cppSource"},
{"name": "c_header.h", "file_type": "cSource", "is_include_file": True},
{"name": "c_header.h", "file_type": "cppSource", "is_include_file": True},
{"name": "config.vbl", "file_type": "veribleLintRules"},
{"name": "verible_waiver.vbw", "file_type": "veribleLintWaiver"},
{"name": "verible_waiver2.vbw", "file_type": "veribleLintWaiver"},
{"name": "config.sby.j2", "file_type": "sbyConfigTemplate"},
{"name": "another_sv_file.sv", "file_type": "systemVerilogSource"},
{"name": "pdc_constraint_file.pdc", "file_type": "PDC"},
{"name": "pdc_floorplan_constraint_file.pdc", "file_type": "FPPDC"},
{"name": "lpf_file.lpf", "file_type": "LPF"},
]
"""Files of all supported file types."""
VPI = [
{
"src_files": ["src/vpi_1/f1", "src/vpi_1/f3"],
"include_dirs": ["src/vpi_1/"],
"libs": ["some_lib"],
"name": "vpi1",
},
{"src_files": ["src/vpi_2/f4"], "include_dirs": [], "libs": [], "name": "vpi2"},
]
"""Predefined VPI modules to build."""
|
|
import threading
import sys
from django.contrib.sessions.models import Session
from django.template import RequestContext, TemplateDoesNotExist
from django.test import Client, TestCase
from mock import MagicMock, Mock, patch
from django_mobile import get_flavour, set_flavour
from django_mobile.conf import settings
from django_mobile.compat import get_engine
from django_mobile.middleware import MobileDetectionMiddleware, \
SetFlavourMiddleware
IS_PYTHON_3 = sys.version > '3'
def _reset():
'''
Reset the thread local.
'''
import django_mobile
del django_mobile._local
django_mobile._local = threading.local()
def str_p3_response( string ) :
"""
Since response.content is a binary string in python 3,
we decode it to make it comparable to str objects
( python 2 compatibility )
"""
if IS_PYTHON_3 :
return string.decode( 'ASCII' )
return string
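# --- Hedged sketch (added for illustration; not part of the original test
# module). Application code exercising the flavour API tested below might look
# like this hypothetical view; set_flavour/get_flavour come from django_mobile
# as imported above.
def example_switch_flavour_view(request):
    from django.http import HttpResponse
    set_flavour('mobile', request=request, permanent=True)  # persist via the configured backend
    return HttpResponse('now serving %s templates' % get_flavour(request))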
class BaseTestCase(TestCase):
def setUp(self):
_reset()
def tearDown(self):
_reset()
class BasicFunctionTests(BaseTestCase):
def test_set_flavour(self):
set_flavour('full')
self.assertEqual(get_flavour(), 'full')
set_flavour('mobile')
self.assertEqual(get_flavour(), 'mobile')
self.assertRaises(ValueError, set_flavour, 'spam')
def test_set_flavour_with_cookie_backend(self):
original_FLAVOURS_STORAGE_BACKEND = settings.FLAVOURS_STORAGE_BACKEND
try:
settings.FLAVOURS_STORAGE_BACKEND = 'cookie'
response = self.client.get('/')
self.assertFalse(settings.FLAVOURS_COOKIE_KEY in response.cookies)
response = self.client.get('/', {
settings.FLAVOURS_GET_PARAMETER: 'mobile',
})
self.assertTrue(settings.FLAVOURS_COOKIE_KEY in response.cookies)
self.assertEqual(response.cookies[settings.FLAVOURS_COOKIE_KEY].value, u'mobile')
self.assertContains(response, 'Mobile!')
finally:
settings.FLAVOURS_STORAGE_BACKEND = original_FLAVOURS_STORAGE_BACKEND
def test_set_flavour_with_session_backend(self):
original_FLAVOURS_STORAGE_BACKEND = settings.FLAVOURS_STORAGE_BACKEND
try:
settings.FLAVOURS_STORAGE_BACKEND = 'session'
request = Mock()
request.session = {}
set_flavour('mobile', request=request)
self.assertEqual(request.session, {})
set_flavour('mobile', request=request, permanent=True)
self.assertEqual(request.session, {
settings.FLAVOURS_SESSION_KEY: u'mobile'
})
self.assertEqual(get_flavour(request), 'mobile')
response = self.client.get('/')
self.assertFalse('sessionid' in response.cookies)
response = self.client.get('/', {
settings.FLAVOURS_GET_PARAMETER: 'mobile',
})
self.assertTrue('sessionid' in response.cookies)
sessionid = response.cookies['sessionid'].value
session = Session.objects.get(session_key=sessionid)
session_data = session.get_decoded()
self.assertTrue(settings.FLAVOURS_SESSION_KEY in session_data)
self.assertEqual(session_data[settings.FLAVOURS_SESSION_KEY], 'mobile')
finally:
settings.FLAVOURS_STORAGE_BACKEND = original_FLAVOURS_STORAGE_BACKEND
class TemplateLoaderTests(BaseTestCase):
def test_load_template_on_filesystem(self):
from django.template.loaders import app_directories, filesystem
@patch.object(app_directories.Loader, 'load_template')
@patch.object(filesystem.Loader, 'load_template')
def testing(filesystem_loader, app_directories_loader):
filesystem_loader.side_effect = TemplateDoesNotExist('error')
app_directories_loader.side_effect = TemplateDoesNotExist('error')
from django_mobile.loader import Loader
loader = Loader(get_engine())
set_flavour('mobile')
try:
loader.load_template('base.html', template_dirs=None)
except TemplateDoesNotExist:
pass
self.assertEqual(filesystem_loader.call_args[0][0], 'mobile/base.html')
self.assertEqual(app_directories_loader.call_args[0][0], 'mobile/base.html')
set_flavour('full')
try:
loader.load_template('base.html', template_dirs=None)
except TemplateDoesNotExist:
pass
self.assertEqual(filesystem_loader.call_args[0][0], 'full/base.html')
self.assertEqual(app_directories_loader.call_args[0][0], 'full/base.html')
testing()
def test_load_template_source_on_filesystem(self):
from django.template.loaders import app_directories, filesystem
@patch.object(app_directories.Loader, 'load_template_source')
@patch.object(filesystem.Loader, 'load_template_source')
def testing(filesystem_loader, app_directories_loader):
filesystem_loader.side_effect = TemplateDoesNotExist('error')
app_directories_loader.side_effect = TemplateDoesNotExist('error')
from django_mobile.loader import Loader
loader = Loader(get_engine())
set_flavour('mobile')
try:
loader.load_template_source('base.html', template_dirs=None)
except TemplateDoesNotExist:
pass
self.assertEqual(filesystem_loader.call_args[0][0], 'mobile/base.html')
self.assertEqual(app_directories_loader.call_args[0][0], 'mobile/base.html')
set_flavour('full')
try:
loader.load_template_source('base.html', template_dirs=None)
except TemplateDoesNotExist:
pass
self.assertEqual(filesystem_loader.call_args[0][0], 'full/base.html')
self.assertEqual(app_directories_loader.call_args[0][0], 'full/base.html')
testing()
def test_functional(self):
from django.template.loader import render_to_string
set_flavour('full')
result = render_to_string('index.html')
result = result.strip()
self.assertEqual(result, 'Hello .')
# simulate RequestContext
result = render_to_string('index.html', context_instance=RequestContext(Mock()))
result = result.strip()
self.assertEqual(result, 'Hello full.')
set_flavour('mobile')
result = render_to_string('index.html')
result = result.strip()
self.assertEqual(result, 'Mobile!')
def test_loading_unexisting_template(self):
from django.template.loader import render_to_string
try:
render_to_string('not_existent.html')
except TemplateDoesNotExist as e:
self.assertEqual(e.args, ('not_existent.html',))
else:
self.fail('TemplateDoesNotExist was not raised.')
class MobileDetectionMiddlewareTests(BaseTestCase):
@patch('django_mobile.middleware.set_flavour')
def test_mobile_browser_agent(self, set_flavour):
request = Mock()
request.META = {
'HTTP_USER_AGENT': 'My Mobile Browser',
}
middleware = MobileDetectionMiddleware()
middleware.process_request(request)
self.assertEqual(set_flavour.call_args, (('mobile', request), {}))
@patch('django_mobile.middleware.set_flavour')
def test_desktop_browser_agent(self, set_flavour):
request = Mock()
request.META = {
'HTTP_USER_AGENT': 'My Desktop Browser',
}
middleware = MobileDetectionMiddleware()
middleware.process_request(request)
self.assertEqual(set_flavour.call_args, (('full', request), {}))
class SetFlavourMiddlewareTests(BaseTestCase):
def test_set_default_flavour(self):
request = Mock()
request.META = MagicMock()
request.GET = {}
middleware = SetFlavourMiddleware()
middleware.process_request(request)
# default flavour is set
self.assertEqual(get_flavour(), 'full')
@patch('django_mobile.middleware.set_flavour')
def test_set_flavour_through_get_parameter(self, set_flavour):
request = Mock()
request.META = MagicMock()
request.GET = {'flavour': 'mobile'}
middleware = SetFlavourMiddleware()
middleware.process_request(request)
self.assertEqual(set_flavour.call_args,
(('mobile', request), {'permanent': True}))
class RealAgentNameTests(BaseTestCase):
def assertFullFlavour(self, agent):
client = Client(HTTP_USER_AGENT=agent)
response = client.get('/')
if str_p3_response( response.content.strip() ) != 'Hello full.':
self.fail(u'Agent is matched as mobile: %s' % agent)
def assertMobileFlavour(self, agent):
client = Client(HTTP_USER_AGENT=agent)
response = client.get('/')
if str_p3_response( response.content.strip() ) != 'Mobile!':
self.fail(u'Agent is not matched as mobile: %s' % agent)
def test_ipad(self):
self.assertFullFlavour(u'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10')
def test_iphone(self):
self.assertMobileFlavour(u'Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3')
def test_motorola_xoom(self):
self.assertFullFlavour(u'Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13')
def test_opera_mobile_on_android(self):
'''
Regression test of issue #9
'''
self.assertMobileFlavour(u'Opera/9.80 (Android 2.3.3; Linux; Opera Mobi/ADR-1111101157; U; en) Presto/2.9.201 Version/11.50')
class RegressionTests(BaseTestCase):
def setUp(self):
self.desktop = Client()
# wap triggers mobile behaviour
self.mobile = Client(HTTP_USER_AGENT='wap')
def test_multiple_browser_access(self):
'''
Regression test of issue #2
'''
response = self.desktop.get('/')
self.assertEqual( str_p3_response( response.content.strip() ), 'Hello full.')
response = self.mobile.get('/')
self.assertEqual( str_p3_response( response.content.strip() ), 'Mobile!')
response = self.desktop.get('/')
self.assertEqual( str_p3_response( response.content.strip() ), 'Hello full.')
response = self.mobile.get('/')
self.assertEqual( str_p3_response( response.content.strip() ), 'Mobile!')
def test_cache_page_decorator(self):
response = self.mobile.get('/cached/')
self.assertEqual( str_p3_response( response.content.strip() ), 'Mobile!')
response = self.desktop.get('/cached/')
self.assertEqual( str_p3_response( response.content.strip() ), 'Hello full.')
response = self.mobile.get('/cached/')
self.assertEqual( str_p3_response( response.content.strip() ), 'Mobile!')
response = self.desktop.get('/cached/')
self.assertEqual( str_p3_response( response.content.strip() ), 'Hello full.')
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import json
import os
import re
import subprocess
import sys
from distutils.dist import Distribution
from functools import wraps
from itertools import chain
from pkgutil import walk_packages
import click
from babel.messages import frontend
from babel.messages.pofile import read_po
from flask.helpers import get_root_path
from indico.util.console import cformat
@click.group()
def cli():
os.chdir(os.path.join(get_root_path('indico'), '..'))
TRANSLATIONS_DIR = 'indico/translations'
MESSAGES_POT = os.path.join(TRANSLATIONS_DIR, 'messages.pot')
MESSAGES_JS_POT = os.path.join(TRANSLATIONS_DIR, 'messages-js.pot')
MESSAGES_REACT_POT = os.path.join(TRANSLATIONS_DIR, 'messages-react.pot')
DEFAULT_OPTIONS = {
'init_catalog': {
'output_dir': TRANSLATIONS_DIR
},
'extract_messages': {
'keywords': 'N_:1,2',
'width': '120',
'output_file': MESSAGES_POT,
'mapping_file': 'babel.cfg'
},
'compile_catalog': {
'domain': 'messages',
'directory': TRANSLATIONS_DIR
},
'update_catalog': {
'input_file': MESSAGES_POT,
'output_dir': TRANSLATIONS_DIR,
'domain': 'messages'
},
# JavaScript
'init_catalog_js': {
'output_dir': TRANSLATIONS_DIR,
'domain': 'messages-js'
},
'extract_messages_js': {
'keywords': '$T gettext ngettext:1,2 pgettext:1c,2 npgettext:1c,2,3',
'width': '120',
'output_file': MESSAGES_JS_POT,
'mapping_file': 'babel-js.cfg',
'no_default_keywords': '1'
},
'update_catalog_js': {
'input_file': MESSAGES_JS_POT,
'output_dir': TRANSLATIONS_DIR,
'domain': 'messages-js'
},
# JavaScript / React
'init_catalog_react': {
'output_dir': TRANSLATIONS_DIR,
'domain': 'messages-react'
},
'update_catalog_react': {
'input_file': MESSAGES_REACT_POT,
'output_dir': TRANSLATIONS_DIR,
'domain': 'messages-react'
},
}
def find_packages(path, prefix=''):
yield prefix
prefix = prefix + '.'
for _, name, ispkg in walk_packages(path, prefix):
if ispkg:
yield name
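# Illustrative note (package names hypothetical): find_packages(indico.__path__, 'indico')
# yields 'indico' itself first, followed by every sub-package discovered by
# walk_packages, e.g. 'indico.core', 'indico.modules', ...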
def wrap_distutils_command(command_class):
@wraps(command_class)
def _wrapper(**kwargs):
import indico
command = command_class(Distribution({
'name': 'indico',
'version': indico.__version__,
'packages': list(find_packages(indico.__path__, indico.__name__))
}))
for key, val in kwargs.items():
setattr(command, key, val)
command.finalize_options()
command.run()
return _wrapper
def _make_command(cmd_name):
cmd_class = getattr(frontend, re.sub(r'_(js|react)$', '', cmd_name))
cmd = click.command(cmd_name.replace('_', '-'))(wrap_distutils_command(cmd_class))
for opt, short_opt, description in cmd_class.user_options:
long_opt_name = opt.rstrip('=')
var_name = long_opt_name.replace('-', '_')
opts = ['--' + long_opt_name]
if short_opt:
opts.append('-' + short_opt)
default = DEFAULT_OPTIONS.get(cmd_name, {}).get(var_name)
is_flag = not opt.endswith('=')
cmd = click.option(*(opts + [var_name]), is_flag=is_flag, default=default, help=description)(cmd)
return cmd
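# For example, _make_command('extract_messages_js') strips the '_js' suffix to
# look up babel's extract_messages frontend command, exposes it as the
# 'extract-messages-js' click command, and pre-fills its options from
# DEFAULT_OPTIONS['extract_messages_js'] above.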
cmd_list = ['init_catalog', 'extract_messages', 'update_catalog', 'compile_catalog',
'init_catalog_js', 'extract_messages_js', 'update_catalog_js',
'init_catalog_react', 'update_catalog_react']
for cmd_name in cmd_list:
cli.add_command(_make_command(cmd_name))
@cli.command()
def extract_messages_react():
output = subprocess.check_output(['npx', 'react-jsx-i18n', 'extract', 'indico/web/client/', 'indico/modules/'],
env=dict(os.environ, FORCE_COLOR='1'))
with open(MESSAGES_REACT_POT, 'wb') as f:
f.write(output)
@cli.command()
def compile_catalog_react():
for locale in os.listdir(TRANSLATIONS_DIR):
po_file = os.path.join(TRANSLATIONS_DIR, locale, 'LC_MESSAGES', 'messages-react.po')
json_file = os.path.join(TRANSLATIONS_DIR, locale, 'LC_MESSAGES', 'messages-react.json')
if not os.path.exists(po_file):
continue
output = subprocess.check_output(['npx', 'react-jsx-i18n', 'compile', po_file])
json.loads(output) # just to be sure the JSON is valid
with open(json_file, 'wb') as f:
f.write(output)
@cli.command()
def check_format_strings():
"""Check whether format strings match.
This helps find cases where e.g. the original string uses
``{error}`` but the translation uses ``{erro}``, resulting
in errors when using the translated string.
"""
root_path = 'indico/translations'
paths = set()
for root, dirs, files in os.walk(root_path):
for file in files:
if file.endswith('.po'):
paths.add(os.path.join(root, file))
all_valid = True
for path in paths:
invalid = _get_invalid_po_format_strings(path)
if invalid:
all_valid = False
click.echo(f'Found invalid format strings in {os.path.relpath(path, root_path)}')
for item in invalid:
click.echo(cformat('%{yellow}{}%{reset} | %{yellow!}{}%{reset}\n%{red}{}%{reset} != %{red!}{}%{reset}')
.format(item['orig'], item['trans'],
list(item['orig_placeholders']), list(item['trans_placeholders'])))
click.echo()
sys.exit(0 if all_valid else 1)
def _extract_placeholders(string):
return set(re.findall(r'(\{[^}]+\})', string))
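# A quick illustration of the helper above (examples only, not part of the tool):
#   _extract_placeholders('Error: {error} ({code})') -> {'{error}', '{code}'}
#   _extract_placeholders('100% done')               -> set()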
def _get_invalid_po_format_strings(path):
with open(path, 'rb') as f:
po_data = read_po(f)
invalid = []
for msg in po_data:
all_orig = msg.id if isinstance(msg.id, tuple) else (msg.id,)
all_trans = msg.string if isinstance(msg.string, tuple) else (msg.string,)
if not any(all_trans): # not translated
continue
for orig, trans in zip(all_orig, all_trans):
# brace format only; python-format (%s etc) is too vague
# since there are many strings containing e.g. just `%`
# which are never used for formatting, and babel's
# `_validate_format` checker fails on those too
orig_placeholders = _extract_placeholders(orig)
# in some cases the English singular doesn't use the placeholder but rather e.g. "One".
# but depending on the language (usually with nplurals=1) the singular version MUST include
# the placeholder, so we need to consider those as well
orig_plural_placeholders = set(chain.from_iterable(map(_extract_placeholders, all_orig[1:])))
trans_placeholders = _extract_placeholders(trans)
if trans_placeholders not in (orig_placeholders, orig_plural_placeholders):
invalid.append({
'orig': orig,
'trans': trans,
'orig_placeholders': orig_placeholders,
'trans_placeholders': trans_placeholders
})
return invalid
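# Each entry in the returned list has roughly this shape (values illustrative):
#   {'orig': 'Error: {error}', 'trans': 'Fehler: {erro}',
#    'orig_placeholders': {'{error}'}, 'trans_placeholders': {'{erro}'}}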
|
|
from .utils import UsingURLPatterns
from django.conf.urls import include, url
from rest_framework import serializers
from rest_framework import status, versioning
from rest_framework.decorators import APIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.test import APIRequestFactory, APITestCase
from rest_framework.versioning import NamespaceVersioning
from rest_framework.relations import PKOnlyObject
import pytest
class RequestVersionView(APIView):
def get(self, request, *args, **kwargs):
return Response({'version': request.version})
class ReverseView(APIView):
def get(self, request, *args, **kwargs):
return Response({'url': reverse('another', request=request)})
class RequestInvalidVersionView(APIView):
def determine_version(self, request, *args, **kwargs):
scheme = self.versioning_class()
scheme.allowed_versions = ('v1', 'v2')
return (scheme.determine_version(request, *args, **kwargs), scheme)
def get(self, request, *args, **kwargs):
return Response({'version': request.version})
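# RequestInvalidVersionView pins allowed_versions to ('v1', 'v2'); the
# TestInvalidVersion cases below rely on this to exercise the 404/406 paths
# for out-of-range versions such as 'v3'.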
factory = APIRequestFactory()
def dummy_view(request):
pass
def dummy_pk_view(request, pk):
pass
class TestRequestVersion:
def test_unversioned(self):
view = RequestVersionView.as_view()
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'version': None}
def test_query_param_versioning(self):
scheme = versioning.QueryParameterVersioning
view = RequestVersionView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/?version=1.2.3')
response = view(request)
assert response.data == {'version': '1.2.3'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'version': None}
def test_host_name_versioning(self):
scheme = versioning.HostNameVersioning
view = RequestVersionView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/', HTTP_HOST='v1.example.org')
response = view(request)
assert response.data == {'version': 'v1'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'version': None}
def test_accept_header_versioning(self):
scheme = versioning.AcceptHeaderVersioning
view = RequestVersionView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/', HTTP_ACCEPT='application/json; version=1.2.3')
response = view(request)
assert response.data == {'version': '1.2.3'}
request = factory.get('/endpoint/', HTTP_ACCEPT='*/*; version=1.2.3')
response = view(request)
assert response.data == {'version': '1.2.3'}
request = factory.get('/endpoint/', HTTP_ACCEPT='application/json')
response = view(request)
assert response.data == {'version': None}
def test_url_path_versioning(self):
scheme = versioning.URLPathVersioning
view = RequestVersionView.as_view(versioning_class=scheme)
request = factory.get('/1.2.3/endpoint/')
response = view(request, version='1.2.3')
assert response.data == {'version': '1.2.3'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'version': None}
def test_namespace_versioning(self):
class FakeResolverMatch:
namespace = 'v1'
scheme = versioning.NamespaceVersioning
view = RequestVersionView.as_view(versioning_class=scheme)
request = factory.get('/v1/endpoint/')
request.resolver_match = FakeResolverMatch
response = view(request, version='v1')
assert response.data == {'version': 'v1'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'version': None}
class TestURLReversing(UsingURLPatterns, APITestCase):
included = [
url(r'^namespaced/$', dummy_view, name='another'),
url(r'^example/(?P<pk>\d+)/$', dummy_pk_view, name='example-detail')
]
urlpatterns = [
url(r'^v1/', include(included, namespace='v1')),
url(r'^another/$', dummy_view, name='another'),
url(r'^(?P<version>[^/]+)/another/$', dummy_view, name='another'),
]
def test_reverse_unversioned(self):
view = ReverseView.as_view()
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'url': 'http://testserver/another/'}
def test_reverse_query_param_versioning(self):
scheme = versioning.QueryParameterVersioning
view = ReverseView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/?version=v1')
response = view(request)
assert response.data == {'url': 'http://testserver/another/?version=v1'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'url': 'http://testserver/another/'}
def test_reverse_host_name_versioning(self):
scheme = versioning.HostNameVersioning
view = ReverseView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/', HTTP_HOST='v1.example.org')
response = view(request)
assert response.data == {'url': 'http://v1.example.org/another/'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'url': 'http://testserver/another/'}
def test_reverse_url_path_versioning(self):
scheme = versioning.URLPathVersioning
view = ReverseView.as_view(versioning_class=scheme)
request = factory.get('/v1/endpoint/')
response = view(request, version='v1')
assert response.data == {'url': 'http://testserver/v1/another/'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'url': 'http://testserver/another/'}
def test_reverse_namespace_versioning(self):
class FakeResolverMatch:
namespace = 'v1'
scheme = versioning.NamespaceVersioning
view = ReverseView.as_view(versioning_class=scheme)
request = factory.get('/v1/endpoint/')
request.resolver_match = FakeResolverMatch
response = view(request, version='v1')
assert response.data == {'url': 'http://testserver/v1/namespaced/'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'url': 'http://testserver/another/'}
class TestInvalidVersion:
def test_invalid_query_param_versioning(self):
scheme = versioning.QueryParameterVersioning
view = RequestInvalidVersionView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/?version=v3')
response = view(request)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_invalid_host_name_versioning(self):
scheme = versioning.HostNameVersioning
view = RequestInvalidVersionView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/', HTTP_HOST='v3.example.org')
response = view(request)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_invalid_accept_header_versioning(self):
scheme = versioning.AcceptHeaderVersioning
view = RequestInvalidVersionView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/', HTTP_ACCEPT='application/json; version=v3')
response = view(request)
assert response.status_code == status.HTTP_406_NOT_ACCEPTABLE
def test_invalid_url_path_versioning(self):
scheme = versioning.URLPathVersioning
view = RequestInvalidVersionView.as_view(versioning_class=scheme)
request = factory.get('/v3/endpoint/')
response = view(request, version='v3')
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_invalid_namespace_versioning(self):
class FakeResolverMatch:
namespace = 'v3'
scheme = versioning.NamespaceVersioning
view = RequestInvalidVersionView.as_view(versioning_class=scheme)
request = factory.get('/v3/endpoint/')
request.resolver_match = FakeResolverMatch
response = view(request, version='v3')
assert response.status_code == status.HTTP_404_NOT_FOUND
class TestHyperlinkedRelatedField(UsingURLPatterns, APITestCase):
included = [
url(r'^namespaced/(?P<pk>\d+)/$', dummy_pk_view, name='namespaced'),
]
urlpatterns = [
url(r'^v1/', include(included, namespace='v1')),
url(r'^v2/', include(included, namespace='v2'))
]
def setUp(self):
super(TestHyperlinkedRelatedField, self).setUp()
class MockQueryset(object):
def get(self, pk):
return 'object %s' % pk
self.field = serializers.HyperlinkedRelatedField(
view_name='namespaced',
queryset=MockQueryset()
)
request = factory.get('/')
request.versioning_scheme = NamespaceVersioning()
request.version = 'v1'
self.field._context = {'request': request}
def test_bug_2489(self):
assert self.field.to_internal_value('/v1/namespaced/3/') == 'object 3'
with pytest.raises(serializers.ValidationError):
self.field.to_internal_value('/v2/namespaced/3/')
class TestNamespaceVersioningHyperlinkedRelatedFieldScheme(UsingURLPatterns, APITestCase):
included = [
url(r'^namespaced/(?P<pk>\d+)/$', dummy_pk_view, name='namespaced'),
]
urlpatterns = [
url(r'^v1/', include(included, namespace='v1')),
url(r'^v2/', include(included, namespace='v2')),
url(r'^non-api/(?P<pk>\d+)/$', dummy_pk_view, name='non-api-view')
]
def _create_field(self, view_name, version):
request = factory.get("/")
request.versioning_scheme = NamespaceVersioning()
request.version = version
field = serializers.HyperlinkedRelatedField(
view_name=view_name,
read_only=True)
field._context = {'request': request}
return field
def test_api_url_is_properly_reversed_with_v1(self):
field = self._create_field('namespaced', 'v1')
assert field.to_representation(PKOnlyObject(3)) == 'http://testserver/v1/namespaced/3/'
def test_api_url_is_properly_reversed_with_v2(self):
field = self._create_field('namespaced', 'v2')
assert field.to_representation(PKOnlyObject(5)) == 'http://testserver/v2/namespaced/5/'
def test_non_api_url_is_properly_reversed_regardless_of_the_version(self):
"""
Regression test for #2711
"""
field = self._create_field('non-api-view', 'v1')
assert field.to_representation(PKOnlyObject(10)) == 'http://testserver/non-api/10/'
field = self._create_field('non-api-view', 'v2')
assert field.to_representation(PKOnlyObject(10)) == 'http://testserver/non-api/10/'
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Entry point for both build and try bots.
This script is invoked from XXX, usually without arguments
to package an SDK. It automatically determines whether
this SDK is for mac, win, linux.
The script inspects the following environment variables:
BUILDBOT_BUILDERNAME to determine whether the script is run locally
and whether it should upload an SDK to file storage (GSTORE)
"""
# pylint: disable=W0621
# std python includes
import argparse
import datetime
import glob
import os
import re
import sys
if sys.version_info < (2, 7, 0):
sys.stderr.write("python 2.7 or later is required run this script\n")
sys.exit(1)
# local includes
import buildbot_common
import build_projects
import build_updater
import build_version
import generate_notice
import manifest_util
import parse_dsc
import verify_filelist
from build_paths import SCRIPT_DIR, SDK_SRC_DIR, SRC_DIR, NACL_DIR, OUT_DIR
from build_paths import GSTORE, GONACL_APPENGINE_SRC_DIR
# Add SDK make tools scripts to the python path.
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
sys.path.append(os.path.join(NACL_DIR, 'build'))
import getos
import oshelpers
BUILD_DIR = os.path.join(NACL_DIR, 'build')
NACL_TOOLCHAIN_DIR = os.path.join(NACL_DIR, 'toolchain')
NACL_TOOLCHAINTARS_DIR = os.path.join(NACL_TOOLCHAIN_DIR, '.tars')
CYGTAR = os.path.join(BUILD_DIR, 'cygtar.py')
PKGVER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')
GNBUILD_DIR = 'gnbuild'
options = None
# Map of: ToolchainName: (PackageName, SDKDir, arch).
TOOLCHAIN_PACKAGE_MAP = {
'arm_glibc': ('nacl_arm_glibc', '%(platform)s_arm_glibc', 'arm'),
'x86_glibc': ('nacl_x86_glibc', '%(platform)s_x86_glibc', 'x86'),
'pnacl': ('pnacl_newlib', '%(platform)s_pnacl', 'pnacl')
}
def GetToolchainDirName(tcname):
"""Return the directory name for a given toolchain"""
return TOOLCHAIN_PACKAGE_MAP[tcname][1] % {'platform': getos.GetPlatform()}
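# For example, on linux GetToolchainDirName('arm_glibc') returns
# 'linux_arm_glibc' and GetToolchainDirName('pnacl') returns 'linux_pnacl'.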
def GetToolchainDir(pepperdir, tcname):
"""Return the full path to a given toolchain within a given sdk root"""
return os.path.join(pepperdir, 'toolchain', GetToolchainDirName(tcname))
def GetToolchainLibc(tcname):
if tcname == 'pnacl':
return 'newlib'
for libc in ('glibc', 'newlib', 'host'):
if libc in tcname:
return libc
def GetToolchainNaClInclude(pepperdir, tcname, arch=None):
tcpath = GetToolchainDir(pepperdir, tcname)
if arch is None:
arch = TOOLCHAIN_PACKAGE_MAP[tcname][2]
if arch == 'x86':
return os.path.join(tcpath, 'x86_64-nacl', 'include')
elif arch == 'pnacl':
return os.path.join(tcpath, 'le32-nacl', 'include')
elif arch == 'arm':
return os.path.join(tcpath, 'arm-nacl', 'include')
else:
buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
def GetNinjaOutDir(arch):
return os.path.join(OUT_DIR, GNBUILD_DIR + '-' + arch)
def GetGnBuiltLib(tc, arch):
if 'glibc' in tc:
out_dir = 'glibc_%s' % arch
elif arch == 'pnacl':
out_dir = 'newlib_pnacl'
else:
out_dir = 'clang_newlib_%s' % arch
return os.path.join(GetNinjaOutDir('x64'), out_dir)
def GetToolchainNaClLib(tcname, tcpath, arch):
if arch == 'x86':
return os.path.join(tcpath, 'x86_64-nacl', 'lib32')
elif arch == 'x64':
return os.path.join(tcpath, 'x86_64-nacl', 'lib')
elif arch == 'arm':
return os.path.join(tcpath, 'arm-nacl', 'lib')
elif tcname == 'pnacl':
return os.path.join(tcpath, 'le32-nacl', 'lib')
def GetOutputToolchainLib(pepperdir, tcname, arch):
tcpath = os.path.join(pepperdir, 'toolchain', GetToolchainDirName(tcname))
return GetToolchainNaClLib(tcname, tcpath, arch)
def GetPNaClTranslatorLib(tcpath, arch):
if arch not in ['arm', 'x86', 'x64']:
buildbot_common.ErrorExit('Unknown architecture %s.' % arch)
if arch == 'x86':
arch = 'x86-32'
elif arch == 'x64':
arch = 'x86-64'
return os.path.join(tcpath, 'translator', arch, 'lib')
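# Illustrative path (hypothetical SDK root):
#   GetPNaClTranslatorLib('/sdk/toolchain/linux_pnacl', 'x64')
#   -> '/sdk/toolchain/linux_pnacl/translator/x86-64/lib'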
def BuildStepDownloadToolchains(toolchains):
buildbot_common.BuildStep('Running package_version.py')
args = [sys.executable, PKGVER, '--mode', 'nacl_core_sdk']
args.extend(['sync', '--extract'])
buildbot_common.Run(args, cwd=NACL_DIR)
def BuildStepCleanPepperDirs(pepperdir, pepperdir_old):
buildbot_common.BuildStep('Clean Pepper Dirs')
dirs_to_remove = (
pepperdir,
pepperdir_old,
os.path.join(OUT_DIR, 'arm_trusted')
)
for dirname in dirs_to_remove:
if os.path.exists(dirname):
buildbot_common.RemoveDir(dirname)
buildbot_common.MakeDir(pepperdir)
def BuildStepMakePepperDirs(pepperdir, subdirs):
for subdir in subdirs:
buildbot_common.MakeDir(os.path.join(pepperdir, subdir))
TEXT_FILES = [
'AUTHORS',
'COPYING',
'LICENSE',
'README.Makefiles',
'getting_started/README',
]
def BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision,
nacl_revision):
buildbot_common.BuildStep('Add Text Files')
InstallFiles(SDK_SRC_DIR, pepperdir, TEXT_FILES)
# Replace a few placeholders in README
readme_text = open(os.path.join(SDK_SRC_DIR, 'README')).read()
readme_text = readme_text.replace('${VERSION}', pepper_ver)
readme_text = readme_text.replace('${CHROME_REVISION}', chrome_revision)
readme_text = readme_text.replace('${CHROME_COMMIT_POSITION}',
build_version.ChromeCommitPosition())
readme_text = readme_text.replace('${NACL_REVISION}', nacl_revision)
# Year/Month/Day Hour:Minute:Second
time_format = '%Y/%m/%d %H:%M:%S'
readme_text = readme_text.replace('${DATE}',
datetime.datetime.now().strftime(time_format))
open(os.path.join(pepperdir, 'README'), 'w').write(readme_text)
def BuildStepUntarToolchains(pepperdir, toolchains):
buildbot_common.BuildStep('Untar Toolchains')
platform = getos.GetPlatform()
build_platform = '%s_x86' % platform
tmpdir = os.path.join(OUT_DIR, 'tc_temp')
buildbot_common.RemoveDir(tmpdir)
buildbot_common.MakeDir(tmpdir)
# Create a list of packages to extract, as tuples. The first part should be
# "$PACKAGE_TARGET/$PACKAGE". The second part should be the destination
# directory relative to pepperdir/toolchain.
extract_packages = []
for toolchain in toolchains:
toolchain_map = TOOLCHAIN_PACKAGE_MAP.get(toolchain, None)
if toolchain_map:
package_name, tcdir, _ = toolchain_map
package_tuple = (os.path.join(build_platform, package_name),
tcdir % {'platform': platform})
extract_packages.append(package_tuple)
# On linux we also want to extract the arm_trusted package which contains
# the ARM libraries we ship in support of sel_ldr_arm.
if platform == 'linux':
extract_packages.append((os.path.join(build_platform, 'arm_trusted'),
'arm_trusted'))
if extract_packages:
# Extract all of the packages into the temp directory.
package_names = [package_tuple[0] for package_tuple in extract_packages]
buildbot_common.Run([sys.executable, PKGVER,
'--packages', ','.join(package_names),
'--tar-dir', NACL_TOOLCHAINTARS_DIR,
'--dest-dir', tmpdir,
'extract'])
# Move all the packages we extracted to the correct destination.
for package_name, dest_dir in extract_packages:
full_src_dir = os.path.join(tmpdir, package_name)
full_dst_dir = os.path.join(pepperdir, 'toolchain', dest_dir)
buildbot_common.Move(full_src_dir, full_dst_dir)
# Cleanup the temporary directory we are no longer using.
buildbot_common.RemoveDir(tmpdir)
# List of toolchain headers to install.
# Source is relative to top of Chromium tree, destination is relative
# to the toolchain header directory.
NACL_HEADER_MAP = {
'newlib': [
('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
('native_client/src/untrusted/irt/irt.h', ''),
('native_client/src/untrusted/irt/irt_dev.h', ''),
('native_client/src/untrusted/irt/irt_extension.h', ''),
('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
('native_client/src/untrusted/pthread/pthread.h', ''),
('native_client/src/untrusted/pthread/semaphore.h', ''),
('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
('ppapi/nacl_irt/public/irt_ppapi.h', ''),
],
'glibc': [
('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
('native_client/src/untrusted/irt/irt.h', ''),
('native_client/src/untrusted/irt/irt_dev.h', ''),
('native_client/src/untrusted/irt/irt_extension.h', ''),
('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
('ppapi/nacl_irt/public/irt_ppapi.h', ''),
],
}
def InstallFiles(src_root, dest_root, file_list):
"""Copy a set of files from src_root to dest_root according
to the given mapping. This allows files to be copied to a location
in the destination tree that is different from their location in the
source tree.
If the destination mapping ends with a '/' then the destination
basename is inherited from the source file.
Wildcards can be used in the source list but it is not recommended
as this can end up adding things to the SDK unintentionally.
"""
for file_spec in file_list:
# The list of files to install can be a simple list of
# strings or a list of pairs, where each pair corresponds
# to a mapping from source to destination names.
if type(file_spec) == str:
src_file = dest_file = file_spec
else:
src_file, dest_file = file_spec
src_file = os.path.join(src_root, src_file)
# Expand sources files using glob.
sources = glob.glob(src_file)
if not sources:
sources = [src_file]
if len(sources) > 1 and not dest_file.endswith('/'):
buildbot_common.ErrorExit("Target file must end in '/' when "
"using globbing to install multiple files")
for source in sources:
if dest_file.endswith('/'):
dest = os.path.join(dest_file, os.path.basename(source))
else:
dest = dest_file
dest = os.path.join(dest_root, dest)
if not os.path.isdir(os.path.dirname(dest)):
buildbot_common.MakeDir(os.path.dirname(dest))
buildbot_common.CopyFile(source, dest)
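# Illustrative usage (file names hypothetical):
#   InstallFiles('/src', '/dst', ['README',                   # -> /dst/README
#                                 ('docs/*.txt', 'manual/')])  # globbed into /dst/manual/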
def InstallNaClHeaders(tc_dst_inc, tcname):
"""Copies NaCl headers to expected locations in the toolchain."""
InstallFiles(SRC_DIR, tc_dst_inc, NACL_HEADER_MAP[GetToolchainLibc(tcname)])
def GnNinjaInstall(pepperdir, toolchains):
tools_files_x86 = [
['sel_ldr', 'sel_ldr_x86_32'],
]
tools_files_x64 = [
['sel_ldr', 'sel_ldr_x86_64'],
['ncval_new', 'ncval'],
['clang_newlib_arm/elf_loader.nexe', 'elf_loader_arm.nexe'],
['irt_x86/irt_core.nexe', 'irt_core_x86_32.nexe'],
['irt_x64/irt_core.nexe', 'irt_core_x86_64.nexe'],
]
tools_files_arm = []
platform = getos.GetPlatform()
# TODO(binji): dump_syms doesn't currently build on Windows. See
# http://crbug.com/245456
if platform != 'win':
tools_files_x64 += [
['dump_syms', 'dump_syms'],
['minidump_dump', 'minidump_dump'],
['minidump_stackwalk', 'minidump_stackwalk']
]
if platform == 'linux':
tools_files_x86 += [['nonsfi_loader', 'nonsfi_loader_x86_32'],
['nacl_helper_bootstrap',
'nacl_helper_bootstrap_x86_32']]
tools_files_x64 += [['nacl_helper_bootstrap',
'nacl_helper_bootstrap_x86_64']]
# Add ARM trusted binaries (linux only)
if not options.no_arm_trusted:
tools_files_x64 += [
['irt_arm/irt_core.nexe', 'irt_core_arm.nexe'],
]
tools_files_arm += [
['nacl_helper_bootstrap', 'nacl_helper_bootstrap_arm'],
['nonsfi_loader', 'nonsfi_loader_arm'],
['sel_ldr', 'sel_ldr_arm']
]
tools_dir = os.path.join(pepperdir, 'tools')
buildbot_common.MakeDir(tools_dir)
# Add .exe extensions to all windows tools
for pair in tools_files_x86 + tools_files_x64:
if platform == 'win' and not os.path.splitext(pair[0])[1]:
pair[0] += '.exe'
pair[1] += '.exe'
InstallFiles(GetNinjaOutDir('x64'), tools_dir, tools_files_x64)
if platform != 'mac':
InstallFiles(GetNinjaOutDir('x86'), tools_dir, tools_files_x86)
if platform == 'linux':
InstallFiles(GetNinjaOutDir('arm'), tools_dir, tools_files_arm)
stub_dir = os.path.join(SRC_DIR, 'ppapi/native_client/src/untrusted/irt_stub')
for tc in toolchains:
if tc in ('host', 'clang-newlib'):
continue
elif tc == 'pnacl':
xarches = ('pnacl', 'x86', 'x64', 'arm')
elif tc == 'x86_glibc':
xarches = ('x86', 'x64')
elif tc == 'arm_glibc':
xarches = ('arm',)
else:
raise AssertionError('unexpected toolchain value: %s' % tc)
for xarch in xarches:
src_dir = GetGnBuiltLib(tc, xarch)
src_dir = os.path.join(src_dir, 'obj', 'ppapi', 'native_client', 'src',
'untrusted', 'irt_stub')
dst_dir = GetOutputToolchainLib(pepperdir, tc, xarch)
InstallFiles(src_dir, dst_dir, ['libppapi_stub.a'])
InstallFiles(stub_dir, dst_dir, ['libppapi.a'])
if 'glibc' in tc:
InstallFiles(stub_dir, dst_dir, ['libppapi.so'])
def GnNinjaBuildAll(rel_out_dir):
def MakeNinjaRelPath(suffix):
return os.path.join(os.path.relpath(OUT_DIR, SRC_DIR), rel_out_dir + suffix)
platform = getos.GetPlatform()
GnNinjaBuild('x64', MakeNinjaRelPath('-x64'),
['nacl_sdk_untrusted=true'])
if platform != 'mac':
GnNinjaBuild('x86', MakeNinjaRelPath('-x86'))
if platform == 'linux':
GnNinjaBuild('arm', MakeNinjaRelPath('-arm'))
def GetGNExecutable(platform):
# TODO(sbc): Remove this code, which is duplicated from mb.py and simply
# rely on the depot_tools gn wrapper which should be in the PATH.
# http://crbug.com/588794
if platform == 'linux':
subdir, exe = 'linux64', 'gn'
elif platform == 'mac':
subdir, exe = 'mac', 'gn'
else:
subdir, exe = 'win', 'gn.exe'
return os.path.join(SRC_DIR, 'buildtools', subdir, exe)
def GnNinjaBuild(arch, out_dir, extra_gn_args=None):
gn_args = ['is_debug=false']
if extra_gn_args is not None:
gn_args += extra_gn_args
platform = getos.GetPlatform()
if platform == 'mac':
if options.mac_sdk:
gn_args.append('mac_sdk_min="%s"' % options.mac_sdk)
# Without this the target_cpu='arm' build complains about missing code
# signing identity
gn_args.append('use_ios_simulator=true')
gn_exe = GetGNExecutable(platform)
if arch is not None:
gn_args.append('target_cpu="%s"' % arch)
if arch == 'arm':
if options.no_arm_trusted:
gn_args.append('enable_cross_trusted=false')
gn_args = ' '.join(gn_args)
buildbot_common.Run([gn_exe, 'gen', '--args=%s' % gn_args, out_dir],
cwd=SRC_DIR)
buildbot_common.Run(['ninja', '-C', out_dir, 'nacl_core_sdk'], cwd=SRC_DIR)
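# For example, GnNinjaBuild('arm', 'out/gnbuild-arm') ends up running roughly
# (exact args depend on platform and options):
#   gn gen --args='is_debug=false target_cpu="arm"' out/gnbuild-arm
#   ninja -C out/gnbuild-arm nacl_core_sdk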
def BuildStepBuildToolchains(pepperdir, toolchains, build, clean):
buildbot_common.BuildStep('SDK Items')
if clean:
for dirname in glob.glob(os.path.join(OUT_DIR, GNBUILD_DIR + '*')):
buildbot_common.RemoveDir(dirname)
build = True
if build:
GnNinjaBuildAll(GNBUILD_DIR)
GnNinjaInstall(pepperdir, toolchains)
for toolchain in toolchains:
if toolchain not in ('host', 'clang-newlib'):
InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, toolchain),
toolchain)
if 'pnacl' in toolchains:
# NOTE: gn builds all untrusted code in the x64 build
build_dir = GetNinjaOutDir('x64')
nacl_arches = ['x86', 'x64', 'arm']
for nacl_arch in nacl_arches:
shim_file = os.path.join(build_dir, 'clang_newlib_' + nacl_arch, 'obj',
'ppapi', 'native_client', 'src', 'untrusted',
'pnacl_irt_shim', 'libpnacl_irt_shim.a')
pnacldir = GetToolchainDir(pepperdir, 'pnacl')
pnacl_translator_lib_dir = GetPNaClTranslatorLib(pnacldir, nacl_arch)
if not os.path.isdir(pnacl_translator_lib_dir):
buildbot_common.ErrorExit('Expected %s directory to exist.' %
pnacl_translator_lib_dir)
buildbot_common.CopyFile(shim_file, pnacl_translator_lib_dir)
InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, 'pnacl', 'x86'),
'pnacl')
InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, 'pnacl', 'arm'),
'pnacl')
def MakeDirectoryOrClobber(pepperdir, dirname, clobber):
dirpath = os.path.join(pepperdir, dirname)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
return dirpath
def BuildStepUpdateHelpers(pepperdir, clobber):
buildbot_common.BuildStep('Update project helpers')
build_projects.UpdateHelpers(pepperdir, clobber=clobber)
def BuildStepUpdateUserProjects(pepperdir, toolchains,
build_experimental, clobber):
buildbot_common.BuildStep('Update examples and libraries')
filters = {}
if not build_experimental:
filters['EXPERIMENTAL'] = False
dsc_toolchains = []
for t in toolchains:
if t.startswith('x86_') or t.startswith('arm_'):
if t[4:] not in dsc_toolchains:
dsc_toolchains.append(t[4:])
elif t == 'host':
dsc_toolchains.append(getos.GetPlatform())
else:
dsc_toolchains.append(t)
filters['TOOLS'] = dsc_toolchains
# Update examples and libraries
filters['DEST'] = [
'getting_started',
'examples/api',
'examples/demo',
'examples/tutorial',
'src'
]
tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
build_projects.UpdateProjects(pepperdir, tree, clobber=clobber,
toolchains=dsc_toolchains)
def BuildStepMakeAll(pepperdir, directory, step_name,
deps=True, clean=False, config='Debug', args=None):
buildbot_common.BuildStep(step_name)
build_projects.BuildProjectsBranch(pepperdir, directory, clean,
deps, config, args)
def BuildStepBuildLibraries(pepperdir, args=None):
BuildStepMakeAll(pepperdir, 'src', 'Build Libraries Debug',
clean=True, config='Debug', args=args)
BuildStepMakeAll(pepperdir, 'src', 'Build Libraries Release',
clean=True, config='Release', args=args)
# Cleanup .pyc file generated while building libraries. Without
# this we would end up shipping the pyc in the SDK tarball.
buildbot_common.RemoveFile(os.path.join(pepperdir, 'tools', '*.pyc'))
def GenerateNotice(fileroot, output_filename='NOTICE', extra_files=None):
# Look for LICENSE files
license_filenames_re = re.compile('LICENSE|COPYING|COPYRIGHT')
license_files = []
for root, _, files in os.walk(fileroot):
for filename in files:
if license_filenames_re.match(filename):
path = os.path.join(root, filename)
license_files.append(path)
if extra_files:
license_files += [os.path.join(fileroot, f) for f in extra_files]
print '\n'.join(license_files)
if not os.path.isabs(output_filename):
output_filename = os.path.join(fileroot, output_filename)
generate_notice.Generate(output_filename, fileroot, license_files)
def BuildStepVerifyFilelist(pepperdir):
buildbot_common.BuildStep('Verify SDK Files')
file_list_path = os.path.join(SCRIPT_DIR, 'sdk_files.list')
try:
print 'SDK directory: %s' % pepperdir
verify_filelist.Verify(file_list_path, pepperdir)
print 'OK'
except verify_filelist.ParseException, e:
buildbot_common.ErrorExit('Parsing sdk_files.list failed:\n\n%s' % e)
except verify_filelist.VerifyException, e:
file_list_rel = os.path.relpath(file_list_path)
verify_filelist_py = os.path.splitext(verify_filelist.__file__)[0] + '.py'
verify_filelist_py = os.path.relpath(verify_filelist_py)
pepperdir_rel = os.path.relpath(pepperdir)
msg = """\
SDK verification failed:
%s
Add/remove files from %s to fix.
Run:
./%s %s %s
to test.""" % (e, file_list_rel, verify_filelist_py, file_list_rel,
pepperdir_rel)
buildbot_common.ErrorExit(msg)
def BuildStepTarBundle(pepper_ver, tarfile):
buildbot_common.BuildStep('Tar Pepper Bundle')
buildbot_common.MakeDir(os.path.dirname(tarfile))
buildbot_common.Run([sys.executable, CYGTAR, '-C', OUT_DIR, '-cjf', tarfile,
'pepper_' + pepper_ver], cwd=NACL_DIR)
def GetManifestBundle(pepper_ver, chrome_revision, nacl_revision, tarfile,
archive_url):
with open(tarfile, 'rb') as tarfile_stream:
archive_sha1, archive_size = manifest_util.DownloadAndComputeHash(
tarfile_stream)
archive = manifest_util.Archive(manifest_util.GetHostOS())
archive.url = archive_url
archive.size = archive_size
archive.checksum = archive_sha1
bundle = manifest_util.Bundle('pepper_' + pepper_ver)
bundle.revision = int(chrome_revision)
bundle.repath = 'pepper_' + pepper_ver
bundle.version = int(pepper_ver)
bundle.description = (
'Chrome %s bundle. Chrome revision: %s. NaCl revision: %s' % (
pepper_ver, chrome_revision, nacl_revision))
bundle.stability = 'dev'
bundle.recommended = 'no'
bundle.archives = [archive]
return bundle
def Archive(filename, from_directory, step_link=True):
if buildbot_common.IsSDKBuilder():
bucket_path = 'nativeclient-mirror/nacl/nacl_sdk/'
else:
bucket_path = 'nativeclient-mirror/nacl/nacl_sdk_test/'
bucket_path += build_version.ChromeVersion()
buildbot_common.Archive(filename, bucket_path, from_directory, step_link)
def BuildStepArchiveBundle(name, pepper_ver, chrome_revision, nacl_revision,
tarfile):
buildbot_common.BuildStep('Archive %s' % name)
tarname = os.path.basename(tarfile)
tarfile_dir = os.path.dirname(tarfile)
Archive(tarname, tarfile_dir)
# generate "manifest snippet" for this archive.
archive_url = GSTORE + 'nacl_sdk/%s/%s' % (
build_version.ChromeVersion(), tarname)
bundle = GetManifestBundle(pepper_ver, chrome_revision, nacl_revision,
tarfile, archive_url)
manifest_snippet_file = os.path.join(OUT_DIR, tarname + '.json')
with open(manifest_snippet_file, 'wb') as manifest_snippet_stream:
manifest_snippet_stream.write(bundle.GetDataAsString())
Archive(tarname + '.json', OUT_DIR, step_link=False)
def BuildStepBuildPNaClComponent(version, revision):
# Sadly revision can go backwards for a given version, since when a version
# is built from master, revision will be a huge number (in the hundreds of
# thousands). Once the branch happens the revision will reset to zero.
# TODO(sbc): figure out how to compensate for this in some way such that
# revisions always go forward for a given version.
buildbot_common.BuildStep('PNaCl Component')
# Version numbers must follow the format specified in:
# https://developer.chrome.com/extensions/manifest/version
# So ensure that rev_major/rev_minor don't overflow and ensure there
# are no leading zeros.
if len(revision) > 4:
rev_minor = int(revision[-4:])
rev_major = int(revision[:-4])
version = "0.%s.%s.%s" % (version, rev_major, rev_minor)
else:
version = "0.%s.0.%s" % (version, revision)
buildbot_common.Run(['./make_pnacl_component.sh',
'pnacl_multicrx_%s.zip' % revision,
version], cwd=SCRIPT_DIR)
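# For example (numbers illustrative), version='49' with revision='123456'
# yields component version '0.49.12.3456', while revision='987' yields
# '0.49.0.987'.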
def BuildStepArchivePNaClComponent(revision):
buildbot_common.BuildStep('Archive PNaCl Component')
Archive('pnacl_multicrx_%s.zip' % revision, OUT_DIR)
def BuildStepArchiveSDKTools():
buildbot_common.BuildStep('Build SDK Tools')
build_updater.BuildUpdater(OUT_DIR)
buildbot_common.BuildStep('Archive SDK Tools')
Archive('sdk_tools.tgz', OUT_DIR, step_link=False)
Archive('nacl_sdk.zip', OUT_DIR, step_link=False)
def BuildStepBuildAppEngine(pepperdir, chrome_revision):
"""Build the projects found in src/gonacl_appengine/src"""
buildbot_common.BuildStep('Build GoNaCl AppEngine Projects')
cmd = ['make', 'upload', 'REVISION=%s' % chrome_revision]
env = dict(os.environ)
env['NACL_SDK_ROOT'] = pepperdir
env['NACLPORTS_NO_ANNOTATE'] = "1"
buildbot_common.Run(cmd, env=env, cwd=GONACL_APPENGINE_SRC_DIR)
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--qemu', help='Add qemu for ARM.',
action='store_true')
parser.add_argument('--tar', help='Force the tar step.',
action='store_true')
parser.add_argument('--archive', help='Force the archive step.',
action='store_true')
parser.add_argument('--release', help='PPAPI release version.',
dest='release', default=None)
parser.add_argument('--build-app-engine',
help='Build AppEngine demos.', action='store_true')
parser.add_argument('--experimental',
help='build experimental examples and libraries', action='store_true',
dest='build_experimental')
parser.add_argument('--skip-toolchain', help='Skip toolchain untar',
action='store_true')
parser.add_argument('--no-clean', dest='clean', action='store_false',
help="Don't clean gn build directories")
parser.add_argument('--mac-sdk',
help='Set the mac-sdk (e.g. 10.6) to use when building with ninja.')
parser.add_argument('--no-arm-trusted', action='store_true',
help='Disable building of ARM trusted components (sel_ldr, etc).')
parser.add_argument('--no-use-sysroot', action='store_true',
help='Disable building against sysroot.')
# To setup bash completion for this command first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete build_sdk.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
global options
options = parser.parse_args(args)
buildbot_common.BuildStep('build_sdk')
if buildbot_common.IsSDKBuilder():
options.archive = True
# TODO(binji): re-enable app_engine build when the linux builder stops
# breaking when trying to git clone from github.
# See http://crbug.com/412969.
options.build_app_engine = False
options.tar = True
# NOTE: order matters here. This will be the order that is specified in the
# Makefiles; the first toolchain will be the default.
toolchains = ['pnacl', 'x86_glibc', 'arm_glibc', 'clang-newlib', 'host']
print 'Building: ' + ' '.join(toolchains)
platform = getos.GetPlatform()
if options.archive and not options.tar:
parser.error('Incompatible arguments with archive.')
chrome_version = int(build_version.ChromeMajorVersion())
chrome_revision = build_version.ChromeRevision()
nacl_revision = build_version.NaClRevision()
pepper_ver = str(chrome_version)
pepper_old = str(chrome_version - 1)
pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
pepperdir_old = os.path.join(OUT_DIR, 'pepper_' + pepper_old)
tarname = 'naclsdk_%s.tar.bz2' % platform
tarfile = os.path.join(OUT_DIR, tarname)
if options.release:
pepper_ver = options.release
print 'Building PEPPER %s at %s' % (pepper_ver, chrome_revision)
if 'NACL_SDK_ROOT' in os.environ:
# We don't want the currently configured NACL_SDK_ROOT to have any effect
# on the build.
del os.environ['NACL_SDK_ROOT']
if platform == 'linux':
# Linux-only: make sure the debian/stable sysroot image is installed
install_script = os.path.join(SRC_DIR, 'build', 'linux', 'sysroot_scripts',
'install-sysroot.py')
buildbot_common.Run([sys.executable, install_script, '--arch=arm'])
buildbot_common.Run([sys.executable, install_script, '--arch=i386'])
buildbot_common.Run([sys.executable, install_script, '--arch=amd64'])
if not options.skip_toolchain:
BuildStepCleanPepperDirs(pepperdir, pepperdir_old)
BuildStepMakePepperDirs(pepperdir, ['include', 'toolchain', 'tools'])
BuildStepDownloadToolchains(toolchains)
BuildStepUntarToolchains(pepperdir, toolchains)
if platform == 'linux':
buildbot_common.Move(os.path.join(pepperdir, 'toolchain', 'arm_trusted'),
os.path.join(OUT_DIR, 'arm_trusted'))
if platform == 'linux':
# Linux-only: Copy arm libraries from the arm_trusted package. These are
# needed to be able to run sel_ldr_arm under qemu.
arm_libs = [
'lib/arm-linux-gnueabihf/librt.so.1',
'lib/arm-linux-gnueabihf/libpthread.so.0',
'lib/arm-linux-gnueabihf/libgcc_s.so.1',
'lib/arm-linux-gnueabihf/libc.so.6',
'lib/arm-linux-gnueabihf/ld-linux-armhf.so.3',
'lib/arm-linux-gnueabihf/libm.so.6',
'usr/lib/arm-linux-gnueabihf/libstdc++.so.6'
]
arm_lib_dir = os.path.join(pepperdir, 'tools', 'lib', 'arm_trusted', 'lib')
buildbot_common.MakeDir(arm_lib_dir)
for arm_lib in arm_libs:
arm_lib = os.path.join(OUT_DIR, 'arm_trusted', arm_lib)
buildbot_common.CopyFile(arm_lib, arm_lib_dir)
buildbot_common.CopyFile(os.path.join(OUT_DIR, 'arm_trusted', 'qemu-arm'),
os.path.join(pepperdir, 'tools'))
BuildStepBuildToolchains(pepperdir, toolchains,
not options.skip_toolchain,
options.clean)
BuildStepUpdateHelpers(pepperdir, True)
BuildStepUpdateUserProjects(pepperdir, toolchains,
options.build_experimental, True)
BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision, nacl_revision)
# Ship with libraries prebuilt, so run that first.
BuildStepBuildLibraries(pepperdir)
GenerateNotice(pepperdir)
# Verify the SDK contains what we expect.
BuildStepVerifyFilelist(pepperdir)
if options.tar:
BuildStepTarBundle(pepper_ver, tarfile)
if platform == 'linux':
BuildStepBuildPNaClComponent(pepper_ver, chrome_revision)
if options.build_app_engine and platform == 'linux':
BuildStepBuildAppEngine(pepperdir, chrome_revision)
if options.qemu:
qemudir = os.path.join(NACL_DIR, 'toolchain', 'linux_arm-trusted')
oshelpers.Copy(['-r', qemudir, pepperdir])
# Archive the results on Google Cloud Storage.
if options.archive:
BuildStepArchiveBundle('build', pepper_ver, chrome_revision, nacl_revision,
tarfile)
# Only archive sdk_tools/naclport/pnacl_component on linux.
if platform == 'linux':
BuildStepArchiveSDKTools()
BuildStepArchivePNaClComponent(chrome_revision)
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
buildbot_common.ErrorExit('build_sdk: interrupted')
|
|
import os
import jinja2
import webapp2
import re
import hashlib
import string
import random
# import user
# import post
# import comment
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
def valid_user(username):
regex = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
return username and regex.match(username)
def valid_pass(password):
regex = re.compile(r"^.{3,20}$")
return password and regex.match(password)
def valid_email(email):
regex = re.compile(r"^[\S]+@[\S]+.[\S]+$")
return not email or regex.match(email)
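# A few illustrative results for the validators above:
#   valid_user('bob_42')   -> match object (accepted)
#   valid_user('ab')       -> None (too short)
#   valid_email('')        -> True (email is optional)
#   valid_email('a@b.com') -> match object (accepted)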
def make_salt():
return ''.join(random.choice(string.lowercase) for i in range(5))
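# make_salt() feeds the password scheme used below: NewUser.post stores
# passwords as 'sha256(username + password + salt)|salt', and LoginPage.post
# recomputes the hash with the stored salt to verify a login attempt.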
def user_logged_in(self):
"""Method for checking if a user is logged in via a cookie
Takes the request handler, reads the user cookie, and verifies its hash to
check that the user is logged in. If the user is logged in, the user data is
returned.
"""
u = self.request.cookies.get('name')
try:
u = u.split('|')
if u[1] == hashlib.sha256(u[0] + 'blog').hexdigest():
user = User.get_by_id(int(u[0]))
# self.write(user)
user.logged_in = True
return user
except:
return None
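# The 'name' cookie checked above has the form (user id illustrative):
#   '42|' + hashlib.sha256('42' + 'blog').hexdigest()
# i.e. the user id followed by a sha256 of the id plus the 'blog' suffix.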
class Handler(webapp2.RequestHandler):
"""
Helper class to simplify common calls to the webapp2.RequestHandler.
write() - Simplifies self.response.out.write() to self.write()
render_str() - Simplifies calling a jinja template
render() - Calls write() on render_str() with a template and optional
parameters to render the webpage.
"""
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
class Post(db.Model):
title = db.StringProperty(required=True)
content = db.TextProperty(required=True)
submitter_id = db.IntegerProperty(required=True)
submitter = db.StringProperty(required=True)
likes = db.StringListProperty(required=True)
created = db.DateTimeProperty(auto_now_add=True)
class User(db.Model):
name = db.StringProperty(required=True)
password = db.StringProperty(required=True)
email = db.StringProperty(required=False)
created = db.DateTimeProperty(auto_now_add=True)
class Comment(db.Model):
post_id = db.IntegerProperty(required=True)
body = db.TextProperty(required=True)
submitter_id = db.IntegerProperty(required=True)
submitter = db.StringProperty(required=False)
created = db.DateTimeProperty(auto_now_add=True)
class MainPage(Handler):
"""
Class to handle rendering the blog's main page.
"""
def get(self):
posts = db.GqlQuery("SELECT * FROM Post "
"ORDER BY created DESC LIMIT 10")
user = user_logged_in(self)
self.render("posts.html",
posts=posts,
user=user)
class NewEntry(Handler):
"""
Class to handle the page for displaying a newly created post from the user.
"""
def get(self, post_id):
try:
post = Post.get_by_id(int(post_id))
user = user_logged_in(self)
user_id = None
if user:
user_id = user.key().id()
comments = {}
comments = db.GqlQuery("SELECT * FROM Comment "
"WHERE post_id = :id ORDER BY created DESC",
id=int(post_id))
self.render("postpage.html",
user=user,
user_id=user_id,
post=post,
comments=comments)
except:
self.redirect("/")
def post(self, post_id):
post = Post.get_by_id(int(post_id))
body = self.request.get("body")
try:
user = user_logged_in(self)
user_id = user.key().id()
comment = Comment(post_id=int(post_id),
body=body,
submitter_id=user_id,
submitter=user.name)
comment.put()
self.redirect("/" + str(post_id))
except:
self.redirect("/login")
class NewUser(Handler):
"""Handles the User signup page functions.
Contains a GET request function that renders a signup form.
Contains a POST request to submit and validate the user signup information.
Validates a valid username, password, and email. Stores the user into the
database along with encrypted login information.
Attributes:
user: User information structure
signup_error: Dictionary of errors that can occur during signup.
"""
def get(self):
user = user_logged_in(self)
if not user:
self.render("signup.html", user={})
else:
self.render("/")
def post(self):
# Initialize and fetch data from signup form
signup_error = False
params = {}
email_valid = True
username = self.request.get("username")
password = self.request.get("password")
verify = self.request.get("verify")
email = self.request.get("email")
# Verify a valid username, email, password, and matching verification.
if not valid_email(email):
signup_error = True
params['email_error'] = "Invalid email"
else:
params['email'] = email
if not valid_user(username):
signup_error = True
params['user_error'] = "Invalid username"
else:
# Handles checking if a username already exists.
# TODO: Should probably make this a function to clean up the code.
u = db.GqlQuery("Select * FROM User WHERE name = :n", n=username)
out = []
for x in u:
out.append(x.name)
if len(out) > 0:
signup_error = True
params['user_error'] = "User already Exists"
else:
params['username'] = username
if not valid_pass(password):
signup_error = True
params['pass_error'] = "Invalid password"
if password != verify:
signup_error = True
params['match_error'] = "Passwords do no match"
# If the signup is valid we create the user in the database.
if signup_error:
self.render("signup.html", user={}, **params)
else:
salt = make_salt()
h = hashlib.sha256(username + password + salt).hexdigest()
h = '%s|%s' % (h, salt)
u = User(name=username, password=h, email=email)
u.put()
user_id = u.key().id()
cookie = (str(user_id) +
'|' +
hashlib.sha256(str(user_id) + 'blog').hexdigest())
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers.add_header('Set-Cookie',
'name=%s; Path=/' % cookie)
self.redirect("/welcome")
class LoginPage(Handler):
"""
Class that handles creating and submitting the login page and information.
The login information is then added to a cookie with encrypted info.
"""
def get(self):
try:
user = user_logged_in(self)
if user.logged_in:
self.redirect("/")
except:
self.render('login.html', user=user)
def post(self):
username = self.request.get("username")
password = self.request.get("password")
u = db.GqlQuery("SELECT * FROM User WHERE name=:n", n=username).get()
if u:
uid = u.key().id()
salt = u.password.split('|')[1]
h = hashlib.sha256(username + password + salt).hexdigest()
if username == u.name and h == u.password.split('|')[0]:
cookie = (str(uid) +
'|' +
hashlib.sha256(str(uid) + 'blog').hexdigest())
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers.add_header('Set-Cookie',
'name=%s; Path=/' % cookie)
return self.redirect('/welcome')
error = "Could not login with Username and password"
self.render('login.html', user={}, user_error=error)
class LogoutPage(Handler):
"""
Class that handles user logout.
"""
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers.add_header('Set-Cookie',
'name=''; Path=/')
self.redirect('/signup')
class UserPage(Handler):
"""
Class that handles rendering a page welcoming a user at login.
"""
def get(self):
user = user_logged_in(self)
if user:
self.render("userpage.html", user=user)
else:
self.redirect('/login')
class PostPage(Handler):
"""
Class that handles rendering the form to create a post and submits a post
to the database.
"""
def get(self):
user = user_logged_in(self)
if user:
self.render("form.html", user=user)
else:
self.redirect('/login')
def post(self):
title = self.request.get("subject")
content = self.request.get("content")
user = user_logged_in(self)
if not user:
return self.redirect('/login')
if title and content:
p = Post(title=title,
content=content,
submitter_id=user.key().id(),
submitter=user.name,
likes=[])
p.put()
post_id = p.key().id()
self.redirect('/' + str(post_id))
else:
error = "Enter a title and content!"
self.render("form.html", error=error)
class EditPost(Handler):
"""
Class that handles rendering a page to edit a user's post and submits the
updated page to the database.
"""
def get(self, post_id):
user = user_logged_in(self)
if user:
p = Post.get_by_id(int(post_id))
self.render("editpost.html", user=user, post=p)
else:
self.redirect("/login")
def post(self, post_id):
user = user_logged_in(self)
p = Post.get_by_id(int(post_id))
if user and user.key().id() == p.submitter_id:
p.title = self.request.get("subject")
p.content = self.request.get("content")
p.put()
self.redirect("/")
else:
self.redirect("/login")
class DeletePost(Handler):
"""
Class that handles the request to delete a post and remove it from the
database.
"""
def get(self, post_id):
try:
user = user_logged_in(self)
p = Post.get_by_id(int(post_id))
if user.key().id() == p.submitter_id:
p.delete()
self.redirect("/")
except:
self.redirect("/")
class LikePage(Handler):
"""
Class that handles the request to like a different user's post.
"""
def get(self, post_id):
user = user_logged_in(self)
p = Post.get_by_id(int(post_id))
if user and user.key().id() != p.submitter_id:
if user.name not in p.likes:
p.likes.append(user.name)
p.put()
self.redirect("/")
else:
self.redirect("/login")
class EditComment(Handler):
"""
    Class that handles the request to edit a user's comment on a post.
"""
def get(self, comment_id):
try:
user = user_logged_in(self)
user_id = user.key().id()
comment = Comment.get_by_id(int(comment_id))
post = Post.get_by_id(comment.post_id)
self.render("editcomment.html",
user=user,
user_id=user_id,
post=post,
comment=comment)
except:
self.redirect("/login")
def post(self, comment_id):
try:
user = user_logged_in(self)
comment = Comment.get_by_id(int(comment_id))
if user and user.key().id() == comment.submitter_id:
comment.body = self.request.get("body")
comment.put()
self.redirect("/" + str(comment.post_id))
except:
self.redirect("/login")
class DeleteComment(Handler):
"""
Class that handles the request to delete a comment on a post.
"""
def get(self, comment_id):
try:
user = user_logged_in(self)
comment = Comment.get_by_id(int(comment_id))
if user.logged_in and user.key().id() == comment.submitter_id:
comment.delete()
self.redirect("/" + str(comment.post_id))
except:
self.redirect("/login")
app = webapp2.WSGIApplication([('/', MainPage),
('/newpost', PostPage),
('/editpost/(\d+)', EditPost),
('/deletepost/(\d+)', DeletePost),
('/signup', NewUser),
('/login', LoginPage),
('/logout', LogoutPage),
('/welcome', UserPage),
('/like/(\d+)', LikePage),
('/editcomment/(\d+)', EditComment),
('/deletecomment/(\d+)', DeleteComment),
('/(\d+)', NewEntry)],
debug=True)
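# --- Illustrative sketch (not wired into the handlers above) -----------------
# The signup and login handlers store passwords as
# "sha256(name + password + salt)|salt" and build the session cookie as
# "user_id|sha256(user_id + 'blog')". A minimal stand-alone verifier for that
# stored-password format could look like the function below; `valid_pw` is a
# hypothetical helper name, not one defined elsewhere in this app.
def valid_pw(name, password, stored_value):
    """Return True if password matches a stored "hash|salt" value."""
    import hashlib
    stored_hash, salt = stored_value.split('|')
    return hashlib.sha256(name + password + salt).hexdigest() == stored_hash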
|
|
import logging
import os
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from seaserv import ccnet_api, seafile_api
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.permissions import IsProVersion
from seahub.api2.utils import api_error
from seahub.base.templatetags.seahub_tags import email2nickname, email2contact_email
from seahub.utils import get_file_audit_events, generate_file_audit_event_type, \
get_file_update_events, get_perm_audit_events, is_valid_email
from seahub.utils.timeutils import datetime_to_isoformat_timestr, utc_datetime_to_isoformat_timestr
from seahub.utils.repo import is_valid_repo_id_format
logger = logging.getLogger(__name__)
class AdminLogsLoginLogs(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAdminUser, IsProVersion)
throttle_classes = (UserRateThrottle,)
def get(self, request):
""" Get all login logs.
Permission checking:
1. only admin can perform this action.
"""
if not request.user.admin_permissions.can_view_user_log():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '100'))
except ValueError:
current_page = 1
per_page = 100
start = (current_page - 1) * per_page
end = start + per_page
from seahub_extra.sysadmin_extra.models import UserLoginLog
logs = UserLoginLog.objects.all().order_by('-login_date')[start:end]
count = UserLoginLog.objects.all().count()
# Use dict to reduce memcache fetch cost in large for-loop.
nickname_dict = {}
contact_email_dict = {}
user_email_set = set([log.username for log in logs])
for e in user_email_set:
if e not in nickname_dict:
nickname_dict[e] = email2nickname(e)
if e not in contact_email_dict:
contact_email_dict[e] = email2contact_email(e)
logs_info = []
for log in logs:
data = {}
data['login_time'] = datetime_to_isoformat_timestr(log.login_date)
data['login_ip'] = log.login_ip
data['log_success'] = log.login_success
user_email = log.username
data['name'] = nickname_dict.get(user_email, '')
data['email'] = user_email
data['contact_email'] = contact_email_dict.get(user_email, '')
logs_info.append(data)
resp = {
'login_log_list': logs_info,
'total_count': count,
}
return Response(resp)
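# Illustrative sketch (the view above does not use it): the page/per_page
# handling in AdminLogsLoginLogs reduces to a simple offset slice.
# `page_slice` is a hypothetical helper, shown only to make the arithmetic
# explicit, e.g. page_slice(2, 100) == (100, 200).
def page_slice(current_page, per_page):
    """Return (start, end) offsets for a 1-based page number."""
    start = (current_page - 1) * per_page
    return start, start + per_page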
class AdminLogsFileAccessLogs(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAdminUser, IsProVersion)
throttle_classes = (UserRateThrottle,)
def get(self, request):
""" Get all file access logs.
Permission checking:
1. only admin can perform this action.
"""
if not request.user.admin_permissions.can_view_user_log():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '100'))
except ValueError:
current_page = 1
per_page = 100
user_selected = request.GET.get('email', None)
if user_selected and not is_valid_email(user_selected):
error_msg = 'email %s invalid.' % user_selected
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
repo_id_selected = request.GET.get('repo_id', None)
if repo_id_selected and not is_valid_repo_id_format(repo_id_selected):
error_msg = 'repo_id %s invalid.' % repo_id_selected
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
start = per_page * (current_page - 1)
limit = per_page + 1
if user_selected:
org_id = -1
orgs = ccnet_api.get_orgs_by_user(user_selected)
if orgs:
org_id = orgs[0].org_id
elif repo_id_selected:
org_id = seafile_api.get_org_id_by_repo_id(repo_id_selected)
else:
org_id = 0
# org_id = 0, show all file audit
events = get_file_audit_events(user_selected, org_id, repo_id_selected, start, limit) or []
if len(events) > per_page:
events = events[:per_page]
has_next_page = True
else:
has_next_page = False
# Use dict to reduce memcache fetch cost in large for-loop.
nickname_dict = {}
contact_email_dict = {}
repo_dict = {}
user_email_set = set()
repo_id_set = set()
for event in events:
user_email_set.add(event.user)
repo_id_set.add(event.repo_id)
for e in user_email_set:
if e not in nickname_dict:
nickname_dict[e] = email2nickname(e)
if e not in contact_email_dict:
contact_email_dict[e] = email2contact_email(e)
for e in repo_id_set:
if e not in repo_dict:
repo_dict[e] = seafile_api.get_repo(e)
events_info = []
for ev in events:
data = {}
user_email = ev.user
data['email'] = user_email
data['name'] = nickname_dict.get(user_email, '')
data['contact_email'] = contact_email_dict.get(user_email, '')
data['ip'] = ev.ip
data['event_type'], data['device'] = generate_file_audit_event_type(ev)
data['time'] = utc_datetime_to_isoformat_timestr(ev.timestamp)
repo_id = ev.repo_id
data['repo_id'] = repo_id
repo = repo_dict.get(repo_id, None)
data['repo_name'] = repo.name if repo else ''
if ev.file_path.endswith('/'):
data['file_or_dir_name'] = '/' if ev.file_path == '/' else os.path.basename(ev.file_path.rstrip('/'))
else:
data['file_or_dir_name'] = os.path.basename(ev.file_path)
events_info.append(data)
resp = {
'file_access_log_list': events_info,
'has_next_page': has_next_page,
}
return Response(resp)
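# Illustrative sketch: AdminLogsFileAccessLogs asks the backend for
# per_page + 1 rows and trims the extra one, which is a cheap way to detect
# whether another page exists without running a separate count query.
# `trim_page` is a hypothetical helper that captures just that pattern.
def trim_page(rows, per_page):
    """Return (rows limited to per_page, has_next_page)."""
    if len(rows) > per_page:
        return rows[:per_page], True
    return rows, False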
class AdminLogsFileUpdateLogs(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAdminUser, IsProVersion)
throttle_classes = (UserRateThrottle,)
def get(self, request):
""" Get all file update logs.
Permission checking:
1. only admin can perform this action.
"""
if not request.user.admin_permissions.can_view_user_log():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '100'))
except ValueError:
current_page = 1
per_page = 100
user_selected = request.GET.get('email', None)
if user_selected and not is_valid_email(user_selected):
error_msg = 'email %s invalid.' % user_selected
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
repo_id_selected = request.GET.get('repo_id', None)
if repo_id_selected and not is_valid_repo_id_format(repo_id_selected):
error_msg = 'repo_id %s invalid.' % repo_id_selected
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
start = per_page * (current_page - 1)
limit = per_page
# org_id = 0, show all file audit
events = get_file_update_events(user_selected, 0, repo_id_selected, start, limit) or []
        has_next_page = len(events) == per_page
# Use dict to reduce memcache fetch cost in large for-loop.
nickname_dict = {}
contact_email_dict = {}
repo_dict = {}
user_email_set = set()
repo_id_set = set()
for event in events:
user_email_set.add(event.user)
repo_id_set.add(event.repo_id)
for e in user_email_set:
if e not in nickname_dict:
nickname_dict[e] = email2nickname(e)
if e not in contact_email_dict:
contact_email_dict[e] = email2contact_email(e)
for e in repo_id_set:
if e not in repo_dict:
repo_dict[e] = seafile_api.get_repo(e)
events_info = []
for ev in events:
data = {}
user_email = ev.user
data['email'] = user_email
data['name'] = nickname_dict.get(user_email, '')
data['contact_email'] = contact_email_dict.get(user_email, '')
data['time'] = utc_datetime_to_isoformat_timestr(ev.timestamp)
repo_id = ev.repo_id
data['repo_id'] = repo_id
repo = repo_dict.get(repo_id, None)
data['repo_name'] = repo.name if repo else ''
data['repo_encrypted'] = repo.encrypted if repo else None
data['file_operation'] = ev.file_oper
data['commit_id'] = ev.commit_id
events_info.append(data)
resp = {
'file_update_log_list': events_info,
'has_next_page': has_next_page,
}
return Response(resp)
class AdminLogsSharePermissionLogs(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAdminUser, IsProVersion)
throttle_classes = (UserRateThrottle,)
def get(self, request):
""" Get all share permissions logs.
Permission checking:
1. only admin can perform this action.
"""
if not request.user.admin_permissions.can_view_user_log():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '100'))
except ValueError:
current_page = 1
per_page = 100
user_selected = request.GET.get('email', None)
if user_selected and not is_valid_email(user_selected):
error_msg = 'email %s invalid.' % user_selected
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
repo_id_selected = request.GET.get('repo_id', None)
if repo_id_selected and not is_valid_repo_id_format(repo_id_selected):
error_msg = 'repo_id %s invalid.' % repo_id_selected
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
start = per_page * (current_page - 1)
limit = per_page
# org_id = 0, show all file audit
events = get_perm_audit_events(user_selected, 0, repo_id_selected, start, limit) or []
        has_next_page = len(events) == per_page
# Use dict to reduce memcache fetch cost in large for-loop.
from_nickname_dict = {}
from_contact_email_dict = {}
to_nickname_dict = {}
to_contact_email_dict = {}
repo_dict = {}
to_group_name_dict = {}
from_user_email_set = set()
to_user_email_set = set()
repo_id_set = set()
to_group_id_set = set()
department_set = set()
for event in events:
from_user_email_set.add(event.from_user)
repo_id_set.add(event.repo_id)
if is_valid_email(event.to):
to_user_email_set.add(event.to)
if event.to.isdigit():
to_group_id_set.add(event.to)
for e in from_user_email_set:
if e not in from_nickname_dict:
from_nickname_dict[e] = email2nickname(e)
if e not in from_contact_email_dict:
from_contact_email_dict[e] = email2contact_email(e)
for e in to_user_email_set:
if e not in to_nickname_dict:
to_nickname_dict[e] = email2nickname(e)
if e not in to_contact_email_dict:
to_contact_email_dict[e] = email2contact_email(e)
for e in repo_id_set:
if e not in repo_dict:
repo_dict[e] = seafile_api.get_repo(e)
for group_id in to_group_id_set:
if group_id not in to_group_name_dict:
group = ccnet_api.get_group(int(group_id))
to_group_name_dict[group_id] = group.group_name
if group.parent_group_id != 0:
department_set.add(group_id)
events_info = []
for ev in events:
data = {}
from_user_email = ev.from_user
data['from_user_email'] = from_user_email
data['from_user_name'] = from_nickname_dict.get(from_user_email, '')
data['from_user_contact_email'] = from_contact_email_dict.get(from_user_email, '')
data['etype'] = ev.etype
data['permission'] = ev.permission
repo_id = ev.repo_id
data['repo_id'] = repo_id
repo = repo_dict.get(repo_id, None)
data['repo_name'] = repo.name if repo else ''
data['folder'] = '/' if ev.file_path == '/' else os.path.basename(ev.file_path.rstrip('/'))
data['date'] = utc_datetime_to_isoformat_timestr(ev.timestamp)
data['share_type'] = 'all'
data['to_user_email'] = ''
data['to_user_name'] = ''
data['to_user_contact_email'] = ''
data['to_group_id'] = ''
data['to_group_name'] = ''
if is_valid_email(ev.to):
to_user_email = ev.to
data['to_user_email'] = to_user_email
data['to_user_name'] = to_nickname_dict.get(to_user_email, '')
data['to_user_contact_email'] = to_contact_email_dict.get(to_user_email, '')
data['share_type'] = 'user'
if ev.to.isdigit():
to_group_id = ev.to
data['to_group_id'] = to_group_id
data['to_group_name'] = to_group_name_dict.get(to_group_id, '')
if to_group_id in department_set:
data['share_type'] = 'department'
else:
data['share_type'] = 'group'
events_info.append(data)
resp = {
'share_permission_log_list': events_info,
'has_next_page': has_next_page,
}
return Response(resp)
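# Note on AdminLogsSharePermissionLogs above: the `to` field of a permission
# audit event is overloaded, and share_type is derived from its shape:
#   - a valid email address  -> 'user'
#   - an all-digit group id  -> 'group', or 'department' when the ccnet group
#                               has a non-zero parent_group_id
#   - anything else          -> 'all' (the default set before the checks)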
|
|
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The vyos_lag_interfaces class
It is in this file where the current configuration (as a dict)
is compared to the provided configuration (as a dict) and the command set
necessary to bring the current configuration to its desired end-state is
created.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.vyos.facts.facts import Facts
from ansible.module_utils.network.common.utils import to_list, dict_diff
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.vyos.utils.utils import (
    search_obj_in_list,
    get_lst_diff_for_dicts,
    list_diff_want_only,
    list_diff_have_only,
)
class Lag_interfaces(ConfigBase):
"""
The vyos_lag_interfaces class
"""
gather_subset = [
'!all',
'!min',
]
gather_network_resources = [
'lag_interfaces',
]
params = ['arp_monitor', 'hash_policy', 'members', 'mode', 'name', 'primary']
def __init__(self, module):
super(Lag_interfaces, self).__init__(module)
def get_lag_interfaces_facts(self):
""" Get the 'facts' (the current configuration)
:rtype: A dictionary
:returns: The current configuration as a dictionary
"""
facts, _warnings = Facts(self._module).get_facts(self.gather_subset,
self.gather_network_resources)
lag_interfaces_facts = facts['ansible_network_resources'].get('lag_interfaces')
if not lag_interfaces_facts:
return []
return lag_interfaces_facts
def execute_module(self):
""" Execute the module
:rtype: A dictionary
:returns: The result from module execution
"""
result = {'changed': False}
commands = list()
warnings = list()
existing_lag_interfaces_facts = self.get_lag_interfaces_facts()
commands.extend(self.set_config(existing_lag_interfaces_facts))
if commands:
if self._module.check_mode:
resp = self._connection.edit_config(commands, commit=False)
else:
resp = self._connection.edit_config(commands)
result['changed'] = True
result['commands'] = commands
if self._module._diff:
result['diff'] = resp['diff'] if result['changed'] else None
changed_lag_interfaces_facts = self.get_lag_interfaces_facts()
result['before'] = existing_lag_interfaces_facts
if result['changed']:
result['after'] = changed_lag_interfaces_facts
result['warnings'] = warnings
return result
def set_config(self, existing_lag_interfaces_facts):
""" Collect the configuration from the args passed to the module,
collect the current configuration (as a dict from facts)
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
want = self._module.params['config']
have = existing_lag_interfaces_facts
resp = self.set_state(want, have)
return to_list(resp)
def set_state(self, want, have):
""" Select the appropriate function based on the state provided
:param want: the desired configuration as a dictionary
:param have: the current configuration as a dictionary
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
state = self._module.params['state']
if state in ('merged', 'replaced', 'overridden') and not want:
self._module.fail_json(msg='value of config parameter must not be empty for state {0}'.format(state))
if state == 'overridden':
commands.extend(self._state_overridden(want, have))
elif state == 'deleted':
if want:
for want_item in want:
name = want_item['name']
obj_in_have = search_obj_in_list(name, have)
commands.extend(self._state_deleted(obj_in_have))
else:
for have_item in have:
commands.extend(self._state_deleted(have_item))
else:
for want_item in want:
name = want_item['name']
obj_in_have = search_obj_in_list(name, have)
if state == 'merged':
commands.extend(self._state_merged(want_item, obj_in_have))
elif state == 'replaced':
commands.extend(self._state_replaced(want_item, obj_in_have))
return commands
def _state_replaced(self, want, have):
""" The command generator when state is replaced
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
if have:
commands.extend(self._render_del_commands(want, have))
commands.extend(self._state_merged(want, have))
return commands
def _state_overridden(self, want, have):
""" The command generator when state is overridden
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
for have_item in have:
lag_name = have_item['name']
obj_in_want = search_obj_in_list(lag_name, want)
if not obj_in_want:
commands.extend(self._purge_attribs(have_item))
for want_item in want:
name = want_item['name']
obj_in_have = search_obj_in_list(name, have)
commands.extend(self._state_replaced(want_item, obj_in_have))
return commands
def _state_merged(self, want, have):
""" The command generator when state is merged
:rtype: A list
:returns: the commands necessary to merge the provided into
the current configuration
"""
commands = []
if have:
commands.extend(self._render_updates(want, have))
else:
commands.extend(self._render_set_commands(want))
return commands
def _state_deleted(self, have):
""" The command generator when state is deleted
:rtype: A list
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
commands = []
if have:
commands.extend(self._purge_attribs(have))
return commands
def _render_updates(self, want, have):
commands = []
temp_have_members = have.pop('members', None)
temp_want_members = want.pop('members', None)
updates = dict_diff(have, want)
if temp_have_members:
have['members'] = temp_have_members
if temp_want_members:
want['members'] = temp_want_members
commands.extend(self._add_bond_members(want, have))
if updates:
for key, value in iteritems(updates):
if value:
if key == 'arp_monitor':
commands.extend(
self._add_arp_monitor(updates, key, want, have)
)
else:
commands.append(self._compute_command(have['name'], key, str(value)))
return commands
def _render_set_commands(self, want):
commands = []
have = []
params = Lag_interfaces.params
for attrib in params:
value = want[attrib]
if value:
if attrib == 'arp_monitor':
commands.extend(
self._add_arp_monitor(want, attrib, want, have)
)
elif attrib == 'members':
commands.extend(
self._add_bond_members(want, have)
)
elif attrib != 'name':
commands.append(
self._compute_command(want['name'], attrib, value=str(value))
)
return commands
def _purge_attribs(self, have):
commands = []
for item in Lag_interfaces.params:
if have.get(item):
if item == 'members':
commands.extend(
self._delete_bond_members(have)
)
elif item != 'name':
commands.append(
self._compute_command(have['name'], attrib=item, remove=True)
)
return commands
def _render_del_commands(self, want, have):
commands = []
params = Lag_interfaces.params
for attrib in params:
if attrib == 'members':
commands.extend(
self._update_bond_members(attrib, want, have)
)
elif attrib == 'arp_monitor':
commands.extend(
self._update_arp_monitor(attrib, want, have)
)
elif have.get(attrib) and not want.get(attrib):
commands.append(
self._compute_command(have['name'], attrib, remove=True)
)
return commands
def _add_bond_members(self, want, have):
commands = []
diff_members = get_lst_diff_for_dicts(want, have, 'members')
if diff_members:
for key in diff_members:
commands.append(
self._compute_command(key['member'], 'bond-group', want['name'], type='ethernet')
)
return commands
def _add_arp_monitor(self, updates, key, want, have):
commands = []
arp_monitor = updates.get(key) or {}
diff_targets = self._get_arp_monitor_target_diff(want, have, key, 'target')
if 'interval' in arp_monitor:
commands.append(
self._compute_command(
key=want['name'] + ' arp-monitor', attrib='interval', value=str(arp_monitor['interval'])
)
)
if diff_targets:
for target in diff_targets:
commands.append(
self._compute_command(key=want['name'] + ' arp-monitor', attrib='target', value=target)
)
return commands
def _delete_bond_members(self, have):
commands = []
for member in have['members']:
commands.append(
self._compute_command(
member['member'], 'bond-group', have['name'], remove=True, type='ethernet'
)
)
return commands
def _update_arp_monitor(self, key, want, have):
commands = []
want_arp_target = []
have_arp_target = []
want_arp_monitor = want.get(key) or {}
have_arp_monitor = have.get(key) or {}
if want_arp_monitor and 'target' in want_arp_monitor:
want_arp_target = want_arp_monitor['target']
if have_arp_monitor and 'target' in have_arp_monitor:
have_arp_target = have_arp_monitor['target']
if 'interval' in have_arp_monitor and not want_arp_monitor:
commands.append(
self._compute_command(
key=have['name'] + ' arp-monitor', attrib='interval', remove=True
)
)
if 'target' in have_arp_monitor:
target_diff = list_diff_have_only(want_arp_target, have_arp_target)
if target_diff:
for target in target_diff:
commands.append(
self._compute_command(
key=have['name'] + ' arp-monitor', attrib='target', value=target, remove=True
)
)
return commands
def _update_bond_members(self, key, want, have):
commands = []
want_members = want.get(key) or []
have_members = have.get(key) or []
members_diff = list_diff_have_only(want_members, have_members)
if members_diff:
for member in members_diff:
commands.append(
self._compute_command(
                        member['member'], 'bond-group', have['name'],
                        remove=True, type='ethernet'
)
)
return commands
def _get_arp_monitor_target_diff(self, want_list, have_list, dict_name, lst):
want_arp_target = []
have_arp_target = []
want_arp_monitor = want_list.get(dict_name) or {}
if want_arp_monitor and lst in want_arp_monitor:
want_arp_target = want_arp_monitor[lst]
if not have_list:
diff = want_arp_target
else:
have_arp_monitor = have_list.get(dict_name) or {}
if have_arp_monitor and lst in have_arp_monitor:
have_arp_target = have_arp_monitor[lst]
diff = list_diff_want_only(want_arp_target, have_arp_target)
return diff
def _compute_command(self, key, attrib, value=None, remove=False, type='bonding'):
if remove:
cmd = 'delete interfaces ' + type
else:
cmd = 'set interfaces ' + type
cmd += (' ' + key)
if attrib == 'arp_monitor':
attrib = 'arp-monitor'
elif attrib == 'hash_policy':
attrib = 'hash-policy'
cmd += (' ' + attrib)
if value:
cmd += (" '" + value + "'")
return cmd
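# Illustrative examples (interface and value names are made up, not taken from
# a real device): the command builder above produces vyos CLI strings such as
#
#   self._compute_command('bond0', 'hash_policy', 'layer2')
#       -> "set interfaces bonding bond0 hash-policy 'layer2'"
#   self._compute_command('eth1', 'bond-group', 'bond0', remove=True, type='ethernet')
#       -> "delete interfaces ethernet eth1 bond-group 'bond0'"
#
# which execute_module() then pushes through self._connection.edit_config().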
|
|
"""Based on a Python Cookbook entry.
Title: Decorator for BindingConstants at compile time
Submitter: Raymond Hettinger
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/277940
Example uses:
Importing
=========
from schevo.lib import optimize
Module optimization
===================
import sys
optimize.bind_all(sys.modules[__name__]) # Last line of module.
Class optimization
==================
class Foo(object): pass
optimize.bind_all(Foo)
Function decorator
==================
@optimize.make_constants()
def foo():
pass
Recursive functions
===================
Because of the way decorators work, recursive functions should use the
following method of optimization.
def foo():
...
foo()
foo = optimize._make_constants(foo)
"""
# Determine whether or not to optimize based on environment variable.
import os
if os.environ.get('SCHEVO_OPTIMIZE', '1') == '1':
OPTIMIZE = True
else:
OPTIMIZE = False
from types import FunctionType, ClassType
from opcode import opmap, HAVE_ARGUMENT, EXTENDED_ARG
if OPTIMIZE:
globals().update(opmap)
def _make_constants(f, builtin_only=False, stoplist=[], verbose=False):
try:
co = f.func_code
except AttributeError:
return f # Jython doesn't have a func_code attribute.
newcode = map(ord, co.co_code)
newconsts = list(co.co_consts)
names = co.co_names
codelen = len(newcode)
import __builtin__
env = vars(__builtin__).copy()
if builtin_only:
stoplist = dict.fromkeys(stoplist)
stoplist.update(f.func_globals)
else:
env.update(f.func_globals)
# First pass converts global lookups into constants
i = 0
while i < codelen:
opcode = newcode[i]
if opcode in (EXTENDED_ARG, STORE_GLOBAL):
return f # for simplicity, only optimize common cases
if opcode == LOAD_GLOBAL:
oparg = newcode[i+1] + (newcode[i+2] << 8)
name = co.co_names[oparg]
if name in env and name not in stoplist:
value = env[name]
for pos, v in enumerate(newconsts):
if v is value:
break
else:
pos = len(newconsts)
newconsts.append(value)
newcode[i] = LOAD_CONST
newcode[i+1] = pos & 0xFF
newcode[i+2] = pos >> 8
if verbose:
print name, '-->', value
i += 1
if opcode >= HAVE_ARGUMENT:
i += 2
# Second pass folds tuples of constants and constant attribute lookups
i = 0
while i < codelen:
newtuple = []
while newcode[i] == LOAD_CONST:
oparg = newcode[i+1] + (newcode[i+2] << 8)
newtuple.append(newconsts[oparg])
i += 3
opcode = newcode[i]
if not newtuple:
i += 1
if opcode >= HAVE_ARGUMENT:
i += 2
continue
if opcode == LOAD_ATTR:
obj = newtuple[-1]
oparg = newcode[i+1] + (newcode[i+2] << 8)
name = names[oparg]
try:
value = getattr(obj, name)
except AttributeError:
continue
deletions = 1
elif opcode == BUILD_TUPLE:
oparg = newcode[i+1] + (newcode[i+2] << 8)
if oparg != len(newtuple):
continue
deletions = len(newtuple)
value = tuple(newtuple)
else:
continue
reljump = deletions * 3
newcode[i-reljump] = JUMP_FORWARD
newcode[i-reljump+1] = (reljump-3) & 0xFF
newcode[i-reljump+2] = (reljump-3) >> 8
n = len(newconsts)
newconsts.append(value)
newcode[i] = LOAD_CONST
newcode[i+1] = n & 0xFF
newcode[i+2] = n >> 8
i += 3
if verbose:
print "new folded constant:", value
codestr = ''.join(map(chr, newcode))
codeobj = type(co)(co.co_argcount, co.co_nlocals, co.co_stacksize,
co.co_flags, codestr, tuple(newconsts), co.co_names,
co.co_varnames, co.co_filename, co.co_name,
co.co_firstlineno, co.co_lnotab, co.co_freevars,
co.co_cellvars)
return type(f)(codeobj, f.func_globals, f.func_name, f.func_defaults,
f.func_closure)
if OPTIMIZE:
_make_constants = _make_constants(_make_constants) # optimize thyself!
else:
def _make_constants(f, builtin_only=False, stoplist=[], verbose=False):
return f
def bind_all(mc, builtin_only=False, stoplist=[], verbose=False):
"""Recursively apply constant binding to functions in a module or class.
Use as the last line of the module (after everything is defined, but
before test code). In modules that need modifiable globals, set
builtin_only to True.
"""
try:
d = vars(mc)
except TypeError:
return
for k, v in d.items():
if k in stoplist:
pass
elif hasattr(v, '__do_not_optimize__') and v.__do_not_optimize__:
pass
elif type(v) is FunctionType:
newv = _make_constants(v, builtin_only, stoplist, verbose)
setattr(mc, k, newv)
elif type(v) in (type, ClassType):
bind_all(v, builtin_only, stoplist, verbose)
if not OPTIMIZE:
def bind_all(mc, builtin_only=False, stoplist=[], verbose=False):
pass
@_make_constants
def make_constants(builtin_only=False, stoplist=[], verbose=False):
""" Return a decorator for optimizing global references.
Replaces global references with their currently defined values.
If not defined, the dynamic (runtime) global lookup is left undisturbed.
If builtin_only is True, then only builtins are optimized.
Variable names in the stoplist are also left undisturbed.
Also, folds constant attr lookups and tuples of constants.
    If verbose is True, prints each substitution as it occurs.
"""
if type(builtin_only) == type(make_constants):
        raise ValueError("The make_constants decorator must have arguments.")
return lambda f: _make_constants(f, builtin_only, stoplist, verbose)
class OptimizingMetaclass(type):
def __init__(cls, name, bases, dict):
type.__init__(cls, name, bases, dict)
bind_all(cls)
def build_optimizing_metaclass(builtin_only=False, stoplist=[], verbose=False):
class _OptimizingMetaclass(type):
def __init__(cls, name, bases, dict):
type.__init__(cls, name, bases, dict)
bind_all(cls, builtin_only, stoplist, verbose)
return _OptimizingMetaclass
if not OPTIMIZE:
class OptimizingMetaclass(type):
pass
def build_optimizing_metaclass(builtin_only=False, stoplist=[],
verbose=False):
return OptimizingMetaclass
def do_not_optimize(fn):
"""Decorates a function as __do_not_optimize__."""
fn.__do_not_optimize__ = True
return fn
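# Example (sketch): combining do_not_optimize with bind_all. `lookup` and the
# surrounding module are placeholders, not names defined in this package.
#
#   @do_not_optimize
#   def lookup(name):
#       return globals()[name]       # must keep real runtime global lookups
#
#   bind_all(sys.modules[__name__])  # last line of the module; skips lookup()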
|
|
# coding: utf-8
"""
THIS SOFTWARE IS LICENSED UNDER THE BSD LICENSE CONDITIONS.
FOR LICENCE DETAILS SEE share/LICENSE.TXT
(c) 2005-2009, Marco Hoehle <[email protected]>
(c) 2010, Hanspeter Spalinger <[email protected]>
Ldap Plugin for ud2 Client
This class provides the admin functionality.
This file should NOT be distributed with the default ud2client, as it contains structural information about the backend.
"""
import UniDomain.plugins.ldapdb as ldapdb
import UniDomain.Classes as Classes
import ldap
import ldap.sasl
import os
import base64
import logging
class Author(Classes.Author):
"""Ldap Authorization Class"""
def authorize(self, user):
"""Check this users authorization."""
db = ldap_dbb(self.config, user)
if not db.get_ldapconn():
logging.error('Authorization to ldap-server failed.')
return False
self.db = db
return db
class ldap_dbb(ldapdb.ldap_dbb):
"""Ldap database backend class for admins. some special methods in here we dont want usual domads see."""
def update_dnsSOA(self, host):
"""housekeeping job shall update DNS SOA (time) record regulary """
res = self.conn.search(self.config.ldapbase, ldap.SCOPE_SUBTREE, "(&(objectClass=dnsZone)(relativeDomainName=@))", ["SOARecord"])
res = self.conn.result(res)[1]
if len(res) > 1:
logging.warning('Warning: Multiple SOA records found! Using %s', res[0][0])
SOAdn, SOArecord = res[0]
SOArecord = SOArecord["SOARecord"][0]
nowstr = self.nowstr()
if SOArecord and nowstr:
SOArecord = SOArecord.split()
SOArecord[2] = nowstr
newSOA = " "
newSOA = newSOA.join(SOArecord)
logging.info("DNS replace \'%s\' SOARecord with : %s ", SOAdn, newSOA )
mod_attr = [( ldap.MOD_REPLACE, 'SOARecord', newSOA )]
return self.conn.result(self.conn.modify(SOAdn, mod_attr))
else:
return False
def init_domain(self, domain):
"""initialise a new domain.
domain shall be the domains name
implementation detail. Domads actualy can create domains....inside their own domain. They should not work since we do not support nested domains (?)"""
if self.get_itemID(domain):
logging.warning('Domain already exists! Nothing changed.')
return False
        logging.debug('initialising new domain %s with default values: ', domain)
master_dn = 'ou=%s,%s' % (domain, self.config.ldapbase)
master_domain = [
('objectClass', ['top', 'udDomain']),
('description', ['this is your base domain container']),
('udGroup', ['defaults']) ]
server_dn = "ou=%s,%s" % ('server', master_dn)
server = [
('objectClass', ['top', 'udHostContainer']),
('description', 'all servers go here'),
('udGroup', 'defaults') ]
DMZ_dn = "ou=%s,%s" % ('DMZ', server_dn)
DMZ = [
('objectClass', ['top', 'udHostContainer']),
('description', 'DMZ hosts may have special security and compliance guidelines'),
('policyClass', ['DMZ']),
('udGroup', 'defaults') ]
internal_dn = "ou=%s,%s" % ('intern', server_dn)
internal = [
('objectClass', ['top', 'udHostContainer']),
('description', 'internal hosts.'),
('policyClass', ['DMZ']),
('udGroup', 'defaults') ]
workstation_dn = "ou=%s,%s" % ('workstation', master_dn)
workstation = [
('objectClass', ['top', 'udHostContainer']),
('description', 'all workstations and desktops below this ou'),
('udGroup', 'domainDefault') ]
settings_dn = "cn=%s,%s" % ('settings', master_dn)
settings = [('objectClass', ['top'])]
classes_dn = "cn=%s,%s" % ('classes', settings_dn)
classes = [('objectClass', ['top'])]
defaults_dn = "cn=%s,%s" % ('defaults', classes_dn)
defaults = [
('objectClass', ['top', 'udGroup']),
('description', 'Domain defaults per URZ.'),
('uid', ['hoehle','gschwina','sindling']),
('unixGroup', ['urzwheel', 'urz']),
('policyClass', ['managed', 'intern', 'kerberos', 'UD.UNIBAS.CH']) ]
res = [
self.conn.add(master_dn, master_domain),
self.conn.add(server_dn, server),
self.conn.add(DMZ_dn, DMZ),
self.conn.add(internal_dn, internal),
self.conn.add(workstation_dn, workstation),
self.conn.add(settings_dn, settings),
self.conn.add(classes_dn, classes),
self.conn.add(defaults_dn, defaults)]
# wait for all writes to finish.
for x in res:
self.conn.result(x)
logging.debug('done\n')
return True
def list_domains(self):
"""list all domains in the db backend"""
res = self.conn.result(self.conn.search(self.config.ldapbase, ldap.SCOPE_SUBTREE, '(objectClass=udDomain)', ['ou','description']))[1]
return [(att['ou'][0], att['description'][0], self.norm_dn(dn)) for (dn, att) in res]
#--- DOMAD admin functions for Enterprise admins.
def add_domad(self, domain, domad, password, fullname=False):
"""add a new domain admin to a domain.
domain shall be a domainName or ID
domad shall be of format abc/domad
if fullname is specified, this will be the persons sn, else we add some fake value."""
if not domain.endswith(self.config.ldapbase):
domain = self.get_itemID(domain)
if not domain:
logging.warning('can not add %s to domain %s. No such domain.\n', domad, domain)
return False
if not domad.endswith('/domad'):
domad = domad + '/domad'
#search for domad in ALL domains. Else we risk name conflicts.
if self.get_itemID(domad, self.config.ldapbase):
logging.warning('domad %s already exists. Not changing', domad)
return False
domad_dn = "uid=%s,%s" % (domad, domain)
try:
import sha
salt = os.urandom(4)
h = sha.new(password)
h.update(salt)
pw = "{SSHA}" + base64.b64encode(h.digest() + salt)
except Exception, err:
logging.error('Error: add_domad(): Trouble generating password hash\n\t%s\n', str(err))
return False
try:
if not fullname:
fullname = domad + ' Domain Administrator'
domad = [
('cn', domad),
('objectClass', ['top', 'person', 'organizationalPerson', 'inetorgperson']),
('description', 'domain administrator account to manage all systems'),
('userPassword', pw),
('sn', fullname) ]
#wait for add to finish.
self.conn.result(self.conn.add(domad_dn, domad))
except Exception, err:
logging.warning('add_domad(): Trouble adding to ldap\n\t%s\n', str(err) )
return False
logging.info('added %s to domain %s', domad, domain)
return True
def delete_domad(self, domad, domain=False):
"""delete domad from domain in the db-backend
domain shall be a domainName or ID
domad shall be of format abc/domad"""
if not domain:
domain = self.config.ldapbase
elif not domain.endswith(self.config.ldapbase):
domain = self.get_itemID(domain)
if not domain:
            logging.warning('can not delete %s from %s. No such domain.', domad, domain)
return False
if not domad.endswith('/domad'):
domad = domad + '/domad'
domad_dn = self.get_itemID(domad, domain)
if not domad_dn:
logging.warning('No domad named %s in %s', domad, domain)
return False
try:
#wait for add to finish.
self.conn.result(self.conn.delete(domad_dn))
except Exception, err:
logging.error('delete_domad(): Trouble deleting\n\t%s', str(err))
return False
logging.info('deleted %s from domain %s', domad, domain)
return True
def list_domad(self, domad=False, domain=False):
"""list all domads (in domain, or all)
        domad shall be of format abc/domad (lists all domads if not specified).
domain shall be a domainName or ID (list admins from all domains if not specified)"""
if not domain:
domain = self.config.ldapbase
elif not domain.endswith(self.config.ldapbase):
domain = self.get_itemID(domain)
if not domain:
logging.warning('Warning: No domain named %s.', domain)
return []
if not domad:
domad = '*/domad'
elif not domad.endswith('/domad'):
domad = domad + '/domad'
#wait for search to finish
res = self.conn.result(self.conn.search(domain, ldap.SCOPE_SUBTREE, '(&(objectClass=Person)(uid=%s))' % domad, ['cn', 'description','sn']))[1]
return [(att['cn'][0], att['sn'][0], att['description'][0], self.norm_dn(dn)) for (dn, att) in res]
#--- HOST admin functions for Enterprise admins.
def delete_host(self, host, domain=False):
"""delete a host to a domain
host shall be the hosts fqdn
Domain shall be a domainName or ID"""
if not domain:
domain = self.domainID
elif not domain.endswith(self.config.ldapbase):
domain = self.get_itemID(domain)
if not domain:
logging.warning('can not delete %s from %s. No such domain.', host, domain)
return False
        # FIXME: I don't like this krb5 dependency...
if not host.startswith('host/'):
host = 'host/%s' % (host)
hostID = self.get_itemID(host, domain)
if not hostID:
logging.warning('No Host named %s in %s', host, domain)
return False
try:
            # wait for delete to finish.
self.conn.result(self.conn.delete(hostID))
except Exception, err:
logging.error('delete_host(): Trouble deleting\n\t' + str(err))
return False
logging.info('deleted host %s from %s', host, domain)
return True
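# --- Illustrative sketch (not used by the plugin) -----------------------------
# add_domad() above stores passwords as "{SSHA}" + base64(sha1(password+salt)+salt).
# A matching stand-alone check could look like this; `check_ssha` is a
# hypothetical helper and uses hashlib.sha1, which is equivalent to the
# deprecated `sha` module used above.
def check_ssha(password, ssha_value):
    """Return True if password matches an {SSHA}base64(digest+salt) value."""
    import base64, hashlib
    raw = base64.b64decode(ssha_value[len("{SSHA}"):])
    digest, salt = raw[:20], raw[20:]  # SHA-1 digests are 20 bytes long
    return hashlib.sha1(password + salt).digest() == digest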
|
|
#!/usr/bin/env python
"""
Minimal backend for an mcash powered store with unlimited supplies.
Our main persona is a friendly pizza shop at the corner.
"""
import functools
import json
import logging
import md5
import os
import random
import time
import urlparse
import uuid
import requests
import tornado.ioloop
import tornado.options
import tornado.web
JSON_CONTENT = 'application/vnd.api+json'
ORDER_EXPIRES_SEC = 600
shops = {}
transactions = {}
def memoize_singleton(func):
cache = []
@functools.wraps(func)
def memoizer(*args, **kwargs):
if cache:
return cache[0]
rtv = func(*args, **kwargs)
if rtv is not None:
cache.append(rtv)
return rtv
return memoizer
def memoize(func):
cache = {}
@functools.wraps(func)
def memoizer(*args, **kwargs):
key = '|'.join(map(str, args) + map(str, kwargs))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return memoizer
@memoize_singleton
def mcash_headers():
O = tornado.options.options
headers = {}
headers['X-Mcash-Merchant'] = O.mcash_merchant
headers['X-Mcash-User'] = O.mcash_user
headers['Authorization'] = O.mcash_secret
headers['X-Testbed-Token'] = O.mcash_token
return headers
@memoize_singleton
def base_url(request):
base = urlparse.urlparse(tornado.options.options.mcash_callback_uri or request.full_url())
return '%s://%s' % (base.scheme, base.netloc)
@memoize_singleton
def register_shortlink(request):
O = tornado.options.options
data = {'callback_uri': '%s/api/callback/shortlink/' % base_url(request)}
if O.mcash_serial_number:
data['serial_number'] = O.mcash_serial_number
r = requests.post(O.mcash_endpoint + 'shortlink/', headers=mcash_headers(), data=data)
if r.ok:
shortlink_id = r.json()['id']
logging.info('Shortlink generated: %s %s' % (data['callback_uri'], shortlink_id))
return shortlink_id
else:
logging.error('Error creating a shortlink %s %s %s %s' % (r.status_code, r.url, r.headers, data), exc_info=True)
def generate_inventory(shopid):
if shopid not in shops:
selection = ['Roma', 'Milan', 'Bologna', 'Parma', 'Venice', 'Pomodoro',\
'Quattro Stagioni', 'Vegan', 'of %s' % shopid.capitalize()]
shops[shopid] = {'pizzas': {}, 'toppings': {}, 'sizes': {}}
for (pid, ingred) in enumerate(['garlic', 'extra cheese', 'pepperoni'], 1):
shops[shopid]['toppings'][pid] = {'id': pid, 'name': ingred, 'price': random.randrange(2, 12)}
for (pid, size) in enumerate([28, 32, 36], 0):
shops[shopid]['sizes'][size] = {'id': size, 'name': '%s cm' % size, 'price': pid * 5}
for (pid, pizza) in enumerate(random.sample(selection, random.randrange(4, len(selection))), 1):
image = 'images/%s/%s.jpg' % (shopid, pizza.lower().replace(' ', '_'))
shops[shopid]['pizzas'][pid] = {'id': pid, 'name': 'Pizza %s' % pizza,\
'image': image, 'price': random.randrange(35, 55)}
@memoize
def get_shop_selection(shopid, category, pid=None):
if shopid not in shops:
generate_inventory(shopid)
content = shops[shopid][category]
# ember-data requires {'pizza': {'id': ...}} or {'pizza': [{'id': 1, ..}, ...]}
return json.dumps({category[:-1]: content[pid] if pid is not None and pid in content else content.values()})
class MessageBuffer(object):
def __init__(self):
self.waiters = {}
self.cache = []
self.cache_size = 200
def register_callback(self, unique_order, callback):
if unique_order not in self.waiters:
self.waiters[unique_order] = set([callback])
else:
self.waiters[unique_order].add(callback)
def cancel_wait(self, unique_order, callback):
if unique_order in self.waiters:
self.waiters[unique_order].remove(callback)
if not self.waiters[unique_order]:
del self.waiters[unique_order]
def payment_arrived(self, unique_order):
if unique_order in self.waiters:
for cb in self.waiters[unique_order]:
try:
cb()
except Exception:
logging.error('Error in waiter callback', exc_info=True)
del self.waiters[unique_order]
global_message_buffer = MessageBuffer()
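# How the pieces below fit together (summary only, no behaviour added here):
# PollHandler.post() parks the client's callback in global_message_buffer until
# PaymentHandler.post() receives the mCASH callback, captures the payment and
# calls payment_arrived(), which flushes the waiting callbacks so each open
# long-poll request finishes with {'result': status == 4}.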
class PollHandler(tornado.web.RequestHandler):
def __init__(self, *args, **kwargs):
self.unique_order = None
super(PollHandler, self).__init__(*args, **kwargs)
def get(self, unique_order):
raise tornado.web.HTTPError(405)
@tornado.web.asynchronous
def post(self, unique_order):
if unique_order not in transactions:
logging.info('Unknown unique_order polled')
raise tornado.web.HTTPError(404)
self.unique_order = unique_order
if transactions[unique_order]['status'] > 2:
self.callback()
else:
global_message_buffer.register_callback(unique_order, self.callback)
def callback(self):
# client connection is still open
logging.info('Poll callback for %s' % self.unique_order)
if not self.request.connection.stream.closed():
result = {'result': transactions[self.unique_order]['status'] == 4}
self.finish(json.dumps(result))
def on_connection_close(self):
if hasattr(self, 'unique_order'):
global_message_buffer.cancel_wait(self.unique_order, self.callback)
class PaymentHandler(tornado.web.RequestHandler):
def post(self, unique_order):
logging.info('Payment callback arrived: %s' % self.request.body)
try:
body = json.loads(self.request.body)
except ValueError as error:
logging.error('Unexpected JSON in callback %s %s' % (error, self.request.body))
raise tornado.web.HTTPError(400)
if 'object' in body:
transaction_id = body['object']['tid']
status = body['object']['status']
if unique_order in transactions:
if status != 'fail':
uri = '%spayment_request/%s/' % (tornado.options.options.mcash_endpoint, transaction_id)
response = requests.put(uri, data={'action': 'capture'}, headers=mcash_headers())
if not response.ok:
# TODO check if the error is recoverable
logging.error('payment capture failed: %s %s %s %s' % (response.status_code, response.content, unique_order, transaction_id))
raise tornado.web.HTTPError(500)
transactions[unique_order]['status'] = 4
                    logging.info('payment capture succeeded: %s %s' % (unique_order, transaction_id))
else:
transactions[unique_order]['status'] = 3
logging.info('payment rejected %s %s' % (unique_order, transaction_id))
global_message_buffer.payment_arrived(unique_order)
self.clear_cookie('uuid')
else:
logging.info('Event %s %s' % (body['event'], body['id']))
self.write('OK')
class ShortlinkHandler(tornado.web.RequestHandler):
def post(self):
logging.info('Shortlink callback arrived: %s' % self.request.body)
try:
customer = json.loads(self.request.body)['object']['id']
unique_order = json.loads(self.request.body)['object']['argstring']
except ValueError:
logging.error('Unexpected JSON in callback %s' % self.request.body)
raise tornado.web.HTTPError(400)
if unique_order in transactions:
amount = transactions[unique_order]['amount']
O = tornado.options.options
data = {}
data['amount'] = amount
data['currency'] = O.mcash_currency
data['callback_uri'] = '%s/api/callback/payment/%s/' % (base_url(self.request), unique_order)
data['allow_credit'] = O.allow_credit
data['customer'] = customer
data['pos_id'] = transactions[unique_order]['shopid']
data['pos_tid'] = unique_order
data['action'] = 'auth'
data['text'] = transactions[unique_order]['shopid']
uri = '%spayment_request/' % O.mcash_endpoint
response = requests.post(uri, headers=mcash_headers(), data=data)
if not response.ok:
logging.error('payment authorization request failed: %s %s %s %s %s' % (response.status_code, response.content, response.url, mcash_headers(), data))
raise tornado.web.HTTPError(500)
transaction_id = response.json()['id']
transactions[unique_order]['transaction_id'] = transaction_id
transactions[unique_order]['status'] = 1
            logging.info('payment authorization request succeeded: %s %s %s' % (unique_order, transaction_id, data['callback_uri']))
self.write('OK')
class ProductHandler(tornado.web.RequestHandler):
def get(self, shopid, category, pid=None):
self.set_header('Content-Type', JSON_CONTENT)
self.write(get_shop_selection(shopid, category, pid))
def post(self, shopid, category):
if shopid not in shops:
raise tornado.web.HTTPError(404)
self.set_header('Content-Type', JSON_CONTENT)
order = None
try:
amount = self._validate_content(shopid)
if amount > 0:
shortlink_id = register_shortlink(self.request)
order = self._generate_order(shopid, shortlink_id, amount)
except ValueError:
logging.error('Error in shortlink generation', exc_info=True)
if not order:
raise tornado.web.HTTPError(400)
self.write(order)
def _validate_content(self, shopid):
content = json.loads(self.request.body)
try:
inventory = shops[shopid]
if not isinstance(content, list):
return -1
amount = 0
for piece in content:
if not isinstance(piece, dict):
return -1
if piece['id'] not in inventory['pizzas']:
logging.info('Invalid pizza id: %s %s' % (piece['id'], shopid))
return -1
if 'size' in piece:
if piece['size'] in inventory['sizes']:
amount += inventory['sizes'][piece['size']]['price']
else:
logging.info('Invalid size: %s %s' % (piece['size'], shopid))
                if 'toppings' in piece:
                    # toppings are submitted as a list of topping ids
                    for t in piece['toppings']:
                        if t in inventory['toppings']:
                            amount += inventory['toppings'][t]['price']
                        else:
                            logging.info('Invalid topping: %s %s' % (t, shopid))
return amount
except Exception:
logging.error('Error in content validation', exc_info=True)
return -1
def _generate_order(self, shopid, shortlink_id, amount):
now = int(time.time())
user = self.get_cookie('uuid', None)
if not user: # set token only when needed
user = str(uuid.uuid1())
self.set_cookie('uuid', user, expires=now + 30)
h = md5.new(user)
h.update(shopid)
h.update(self.request.body)
unique_order = h.hexdigest()
logging.info('User uuid for order: %s %s' % (unique_order, user))
payment_cookie = self.get_cookie(unique_order, '')
if not payment_cookie:
transactions[unique_order] = {'shopid': shopid, 'amount': amount, 'issued': now, 'user': user, 'status': 1}
self.set_cookie(unique_order, str(now), expires=now + ORDER_EXPIRES_SEC)
order = {'id': unique_order, \
'amount': amount, \
'poll_uri': '%s/api/poll/%s/' % (base_url(self.request), unique_order), \
'qrcode_url': tornado.options.options.mcash_qrcode % (shortlink_id, unique_order)}
return json.dumps(order)
def _check_header(self, key, value=None):
return key in self.request.headers and self.request.headers.get(key).lower() == (value or JSON_CONTENT).lower()
class NCStaticFileHandler(tornado.web.StaticFileHandler):
def set_extra_headers(self, path):
# Disable cache
self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
def describe_config():
tornado.options.define('cookie_secret', default='sssecccc', help='Change this to a real secret')
tornado.options.define('favicon', default='static/favicon.ico', help='Path to favicon.ico')
tornado.options.define('static_path', default='static/', help='Path static items')
tornado.options.define('port', default=8888, help='Port to run webservice')
tornado.options.define('config', default='server.conf', help='Config file location')
tornado.options.define('mcash_callback_uri', default=None, help='Callback URI for mcash')
tornado.options.define('mcash_endpoint', default='https://mcashtestbed.appspot.com/merchant/v1/', help='API to call')
# probably better to set in at once like mcash headers as a string
tornado.options.define('mcash_merchant', help='X-Mcash-Merchant')
tornado.options.define('mcash_user', help='X-Mcash-User')
tornado.options.define('mcash_secret', help='Authorization header')
tornado.options.define('mcash_token', help='X-Testbed-Token')
tornado.options.define('mcash_serial_number', help='Optional serial number for shortlink generation')
tornado.options.define('mcash_qrcode', default='https://api.mca.sh/shortlink/v1/qr_image/%s/%s', help='Should have %s marks for shortlink id and argument')
tornado.options.define('mcash_currency', default='NOK', help='Currency for transactions')
tornado.options.define('allow_credit', default=False, help='Credit allowed for payment request')
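# Example server.conf (sketch; every value below is a placeholder -- see the
# option definitions in describe_config() above; the file is parsed by
# tornado.options.parse_config_file):
#
#   port = 8888
#   mcash_merchant = 'your-merchant-id'
#   mcash_user = 'your-api-user'
#   mcash_secret = 'your-authorization-header-value'
#   mcash_token = 'your-testbed-token'
#   mcash_callback_uri = 'https://pizza.example.com'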
def main():
describe_config()
tornado.options.parse_command_line()
options = tornado.options.options
if os.path.exists(options.config):
tornado.options.parse_config_file(options.config)
settings = {
'static_path': os.path.join(os.path.dirname(__file__), '..', options.static_path),
'cookie_secret': options.cookie_secret,
'login_url': '/login',
'xsrf_cookies': False,
'autoreload': True
}
handlers = [
(r'/api/products/([^/]+)/(pizzas|sizes|toppings)/?', ProductHandler),
(r'/api/products/([^/]+)/(pizzas|sizes|toppings)/(\w+)/?', ProductHandler),
(r'/api/poll/([^/]{16,32})/', PollHandler),
(r'/api/callback/shortlink/', ShortlinkHandler),
(r'/api/callback/payment/([^/]{16,32})/', PaymentHandler),
(r'/(.*)', NCStaticFileHandler, {'path': settings['static_path']})
]
application = tornado.web.Application(handlers, **settings)
application.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import uuid
import datetime
import time
import json
import logging
import optparse
import shutil
import sys
import subprocess
import os.path
import socket
logger = logging.getLogger('Precheck')
has_warnings=False
has_errors=False
#request types
HTTP_REQUEST_GET='GET'
HTTP_REQUEST_POST='POST'
HTTP_REQUEST_DELETE='DELETE'
#HTTP CODE
HTTP_OK=200
HTTP_CREATED=201
HTTP_BAD_REQUEST=400
HTTP_FORBIDDEN=403
HTTP_CONFLICT=409
#defaults
EXIT_MESSAGE = "Make sure to provide correct cluster information including port, admin user name and password. Default values will be used if you omit the command parameters.";
DEFAULT_HTTP_PORT=8080
DEFAULT_ADMIN_USER='admin'
DEFAULT_LOG_DIR='/tmp/preinstall_checks'
DEFAULT_LOG_FILE='preinstall_checks.log'
DEFAULT_HTTP_REQUEST_TYPE=HTTP_REQUEST_GET
DEFAULT_AMBARI_SERVER_PROPERTIES='/etc/ambari-server/conf/ambari.properties'
DEFAULT_MAX_COUNTER=4
DEFAULT_TIMER_LONG=5
DEFAULT_POLLING_TIMER_REQUEST=10
DEFAULT_MINDISKSPACE=2.0 #in GB
DEFAULT_MINDISKSPACEUSRLIB=1.0 #in GB
#ops
OPERATION_HOST_CHECK='host_check'
OPERATION_VALIDATE_BLUEPRINT='validate_blueprint'
OPERATIONS=[OPERATION_HOST_CHECK, OPERATION_VALIDATE_BLUEPRINT]
#codes
CODE_SUCCESS=0
CODE_ERROR=1
CODE_WARNING=2
CODE_CONNECTION_REFUSED=7
#labels
LABEL_OK='[ OK ]'
LABEL_WARNING='[WARNING]'
LABEL_ERROR='[ ERROR ]'
#status
STATUS_ACCEPTED='Accepted'
STATUS_COMPLETED='COMPLETED'
STATUS_PASSED='PASSED'
STATUS_WARNING='WARNING'
STATUS_FAILED='FAILED'
STATUS_ABORTED='ABORTED'
STATUS_IN_PROGRESS='IN_PROGRESS'
STATUS_PENDING='PENDING'
#list of statuses indicating the operation has finished
LIST_FINISHED_REQUEST_STATUS=[STATUS_FAILED, STATUS_COMPLETED, STATUS_ABORTED]
def init_parser_options(parser):
parser.add_option('-p', '--port',
dest="port", default=DEFAULT_HTTP_PORT,
help="Ambari Server port corrsponding to the network protocol. Default port is {0} for an HTTP connection".format(DEFAULT_HTTP_PORT))
parser.add_option('-u', '--user',
dest="user", default=DEFAULT_ADMIN_USER,
help="Ambari admin user. Default user name is {0}".format(DEFAULT_ADMIN_USER))
parser.add_option('-a', '--password',
dest="password",
help="Ambari admin user password.")
parser.add_option('-l', '--log',
dest="log",
default=DEFAULT_LOG_DIR,
help="The log file home location. Default log file home is {0}.".format(DEFAULT_LOG_DIR),
metavar="DIR")
parser.add_option('--blueprint',
dest="blueprint",
default=None,
help="Blueprint to validate",
metavar="FILE")
parser.add_option('--operation',
dest='operation', default=OPERATION_HOST_CHECK,
                      help='Operation can be one of the following: {0}'.format(', '.join(OPERATIONS)))
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Output verbosity.")
"""
Validate parameters passed in from the command line.
Exit if there are validation errors.
"""
def validate_options(options):
errors = []
"""General parameters that must be passed in via command line or set with a default value"""
if not options.port:
errors.append("No Ambari server port provided.")
if not options.user:
errors.append("No Ambari admin user name provided.")
if not options.password:
errors.append("No Ambari admin user passsword provided.")
if not options.log:
errors.append("No log home path provided.")
"""General check for operations"""
if not options.operation:
errors.append('No operation provided')
elif not options.operation in OPERATIONS:
        errors.append('Unknown operation {0}. Specify one of the following operations: {1}'.format(options.operation, ', '.join(OPERATIONS)))
elif options.operation == OPERATION_VALIDATE_BLUEPRINT:
if not options.blueprint:
errors.append('No blueprint file provided')
if not errors:
return 'Parameters validation finished successfully', CODE_SUCCESS
else:
return 'Parameters validation finished with error(s). {0}'.format('. '.join(errors)), CODE_ERROR
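# Example invocations (sketch; the script file name, password and paths are
# placeholders -- see init_parser_options/validate_options above):
#
#   python preinstall_checks.py -u admin -a <admin-password> -p 8080 \
#       --operation host_check -l /tmp/preinstall_checks
#
#   python preinstall_checks.py -u admin -a <admin-password> \
#       --operation validate_blueprint --blueprint /tmp/my_blueprint.json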
def get_log_file(log_home):
return '{0}/{1}'.format(log_home, DEFAULT_LOG_FILE)
def init_logger(options):
log_dir = options.log
if not os.path.exists(log_dir):
os.makedirs(log_dir)
logging_level = logging.DEBUG if options.verbose else logging.INFO
logger.setLevel(logging_level)
logger.handlers = []
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
file_handler = logging.FileHandler(get_log_file(log_dir), mode='w')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
"""
Back up log directory if it already exists.
"""
def backup_log(filePath):
if filePath is not None and os.path.exists(filePath):
timestamp = datetime.datetime.now()
simpleformat = '%Y%m%d%H%M%S'
backup_file = filePath + "." + timestamp.strftime(simpleformat)
try:
shutil.move(filePath, backup_file)
except Exception, err:
print('Failed to backup "{0}": {1}'.format(str(filePath), str(err)))
return '', CODE_WARNING
return backup_file, CODE_SUCCESS
else:
return '', CODE_SUCCESS
def get_current_time():
total_seconds = time.time()
current_time = datetime.datetime.fromtimestamp(total_seconds).strftime('%Y-%m-%d %H:%M:%S')
return current_time
def step(msg):
logger.info('')
if len(msg) >= 43:
logger.info('******** Check: {0} ********'.format(msg))
else:
spaces = ' '.ljust((50 - len(msg))/2)
logger.info('{0}{2}Check: {1}{2}{0}'.format('********',msg,spaces))
def print_check_result(check, msgs, code):
if len(check)>=43:
spaces = ' '.ljust(20)
else:
spaces = ' '.ljust(63 - len(check))
if code == CODE_SUCCESS:
logger.info('{0}{1}{2}'.format(check, spaces, LABEL_OK))
elif code == CODE_WARNING:
logger.info('{0}{1}{2}'.format(check, spaces, LABEL_WARNING))
if msgs:
for msg in msgs:
if msg.strip():
logger.warning('\t{0}'.format(msg.strip()))
else:
logger.info('{0}{1}{2}'.format(check, spaces, LABEL_ERROR))
if msgs:
for msg in msgs:
logger.error('\t{0}'.format(msg.strip()))
def print_check_results(results):
global has_warnings
global has_errors
for result in results:
status = result['status']
if STATUS_PASSED == status:
code = CODE_SUCCESS
print_check_result(result['key'], None, code)
elif STATUS_WARNING == status:
if not has_warnings:
has_warnings = True
code = CODE_WARNING
print_check_result(result['key'], result['warning'], code)
else:
if not has_errors:
has_errors = True
code = CODE_ERROR
print_check_result(result['key'], result['error'] if result['error'] else None, code)
def dump_parameters_to_log(options):
server_url = get_server_url(options.port)
logger.info('/******************************************************************************/')
logger.info(' Parameters used for script run ')
logger.info('Cluster parameters')
logger.info("Server URL: {0}".format(server_url))
logger.info("Port: {0}".format(options.port))
logger.info("User: {0}".format(options.user))
logger.info('')
logger.info('Operation info')
logger.info("Operation: {0}".format(options.operation))
logger.info("Log Home Dir: {0}".format(options.log))
logger.info("Log File: {0}".format(get_log_file(options.log)))
logger.info('/******************************************************************************/')
"""
Retrieve property value from Ambari Server properties file.
"""
def get_ambari_server_property(key):
try:
with open(DEFAULT_AMBARI_SERVER_PROPERTIES, 'r') as property_file:
file_content = property_file.read()
lines = file_content.splitlines()
lines.reverse()
for line in lines:
tokens = line.split('=')
if len(tokens) == 2:
if tokens[0] == key:
return tokens[1]
except Exception, err:
logger.error(str(err))
return None
return None
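# Illustrative sketch: given an ambari.properties file containing the line
#   api.ssl=true
# get_ambari_server_property('api.ssl') returns 'true'. Because the lines are
# scanned in reverse, the last assignment of a key in the file wins; lines that
# do not split into exactly two '='-separated tokens are skipped.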
def get_server_protocol():
sslActive = get_ambari_server_property('api.ssl')
if sslActive == "true":
return "https"
else:
return "http"
def get_admin_server_fqdn():
return socket.getfqdn()
def get_server_url(port):
protocol = get_server_protocol()
url = "{0}://{1}:{2}".format(protocol, get_admin_server_fqdn(), str(port))
return url
"""
Submit REST API to Ambari Server
"""
def execute_curl_command(url, headers=[], request_type=DEFAULT_HTTP_REQUEST_TYPE, request_body=None, user=DEFAULT_ADMIN_USER, password=None):
"""
@param url: REST URL
@param headers: Optional. Headers to be included in the REST API call
@param request_type: HTTP request type (GET/POST/PUT/DELETE). Use HTTP GET as the default.
@param request_body: Data to be submitted for HTTP POST and PUT requests
@param user: User for Ambari REST API authentication
@param password: Password for the user used to authenticate the Ambari REST API call
"""
curl_cmd_array = ["curl", "-v", "-u", "{0}:{1}".format(user,password), "-k", "-H", "X-Requested-By: ambari"]
for header in headers:
curl_cmd_array.append('-H')
curl_cmd_array.append(header)
curl_cmd_array.append('-s')
curl_cmd_array.append('-X')
curl_cmd_array.append(request_type)
if request_type == 'PUT' or request_type == 'POST':
if request_body:
curl_cmd_array.append("-d")
curl_cmd_array.append(request_body)
curl_cmd_array.append(url)
logger.debug('Curl command: {0}'.format(' '.join(curl_cmd_array)))
exeProcess = subprocess.Popen(curl_cmd_array, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = exeProcess.communicate()
exit_code = exeProcess.returncode
return out, err, exit_code
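# Illustrative sketch of the command assembled above for a default GET call
# (URL and credentials are hypothetical; quoting shown as it would appear in a shell):
#   curl -v -u admin:secret -k -H "X-Requested-By: ambari" -s -X GET \
#        https://ambari.example.com:8443/api/v1/requests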
def get_http_response_code(out):
for a_line in out.split('\n'):
a_line = a_line.strip()
if a_line.endswith('HTTP/1.1 200 OK'):
return HTTP_OK
elif a_line.endswith('HTTP/1.1 201 Created'):
return HTTP_CREATED
elif a_line.endswith('HTTP/1.1 400 Bad Request'):
return HTTP_BAD_REQUEST
elif a_line.endswith('HTTP/1.1 409 Conflict'):
return HTTP_CONFLICT
elif a_line.endswith('HTTP/1.1 403 Forbidden'):
return HTTP_FORBIDDEN
return -1
"""
Determine if Ambari Server responded with an error message for the REST API call
"""
def is_erroneous_response_by_server(json_str):
if not json_str:
return False, 0, ''
else:
response = json.loads(json_str)
status_code = response.get('status', -1)
message = response.get('message', None)
if -1 == status_code and not message:
return False, 0, ''
else:
return True, int(status_code), message
"""
Determine if Ambari Server has accepted the REST API call
"""
def is_request_accepted(json_str):
logger.debug("Checking request in {0}".format(json_str))
if not json_str:
return False
response = json.loads(json_str)
summary = response.get('Requests', {})
if summary:
status = summary.get('status', None)
return (STATUS_ACCEPTED == status)
else:
return False
def get_request_url(json_str, summary_only=True):
if not json_str:
return None
response = json.loads(json_str)
href = response.get('href', None)
if href:
url_str = str(href)
if summary_only:
return '{0}?fields=Requests'.format(url_str)
else:
return url_str
else:
logger.error("Failed to obtain request url. {0} does not contain 'href' section".format(json_str))
return None
"""
Determine if the request is finished
"""
def is_request_finished(json_str):
request_status = get_request_status(json_str)
is_finished = (request_status in LIST_FINISHED_REQUEST_STATUS)
is_finished_successfully = (STATUS_COMPLETED == request_status)
response = json.loads(json_str)
summary = response.get('Requests', {})
progress_percent = summary.get('progress_percent', '-1')
return is_finished, is_finished_successfully, int(progress_percent)
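# Illustrative sketch of the response fragment these request helpers expect
# (values are hypothetical, and STATUS_COMPLETED is assumed to be a member of
# LIST_FINISHED_REQUEST_STATUS):
#   {"Requests": {"request_status": "COMPLETED", "progress_percent": 100.0}}
# would make is_request_finished() return (True, True, 100).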
def is_request_finished_successfully(json_str):
request_status = get_request_status(json_str)
return STATUS_COMPLETED == request_status
def get_request_status(json_str):
if not json_str:
return None
response = json.loads(json_str)
summary = response.get('Requests', {})
request_status = summary.get('request_status', None)
if request_status:
return request_status
else:
logger.error("Failed to determin request state. {0} does not contain 'Requests' section".format(json_str))
return None
"""
Check request status based on the time interval
"""
def polling_request(url, user=None, password=None, timer=DEFAULT_POLLING_TIMER_REQUEST):
"""
@param url: Request URL returned by the Ambari Server
@param user: User for Ambari REST API authentication
@param password: Password for the user used to authenticate the Ambari REST API call
@param timer: Time interval between two check status REST API call. Default is 10 seconds.
"""
out=None
err=None
ec=0
request_in_progress = True
logger.debug("Polling status for {0} every {1} seconds".format(url, timer))
logger.debug("Polling started at {0}".format(str(get_current_time())))
previous_percentage = 0
while request_in_progress:
out, err, ec = execute_curl_command(url, user=user, password=password)
if CODE_SUCCESS != ec:
logger.debug('Server became offline')
request_in_progress = False
else:
is_finished, is_finished_successfully, percentage = is_request_finished(out)
if percentage >= 0:
if percentage != previous_percentage:
previous_percentage = percentage
logger.debug(' {0}%'.format(percentage))
else:
logger.debug('.')
if is_finished:
request_in_progress = False
else:
time.sleep(timer)
logger.debug("Stopped polling {0} at {1}. Request finished.".format(url, str(get_current_time())))
return out, err, ec
def get_host(json_str):
if not json_str:
return None
response = json.loads(json_str)
task_result = response.get('Tasks', {})
host_name = task_result.get('host_name', None)
return host_name
"""
Summarize results of all the tasks executed in the request
"""
def summarize_tasks_by_hosts(request_url, user, password):
"""
@param request_url: Request URL returned by the Ambari Server
@param user: User for Ambari REST API authentication
@param password: Password for the user used to authenticate the Ambari REST API call
"""
task_results_by_host = {}
results_to_print = []
out, err, ec = execute_curl_command(request_url, user=user, password=password)
if CODE_SUCCESS == ec:
if out:
is_erroneous_response, http_ec, http_err = is_erroneous_response_by_server(out)
if is_erroneous_response:
results_to_print=[{'key':'Error response from server', 'status':http_ec, 'error':[http_err]}]
else:
urls = get_tasks_urls(out)
if urls:
for task_url in urls:
task_out, err, ec = execute_curl_command(task_url, user=user, password=password)
logger.debug(task_out)
if CODE_SUCCESS == ec:
host = get_host(task_out)
if host:
task_results_by_host[host] = task_out
else:
results_to_print=[{'key':'Connection refused', 'status':STATUS_FAILED, 'error':[err]}]
break
else:
results_to_print=[{'key':'Empty task list', 'status':STATUS_FAILED}]
else:
results_to_print=[{'key':'Empty response from server', 'status':STATUS_FAILED}]
else:
results_to_print=[{'key':'Connection refused', 'status':STATUS_FAILED, 'error':[err]}]
return task_results_by_host, results_to_print
def get_tasks_urls(json_str):
response = json.loads(json_str)
tasks = response.get('tasks', [])
urls = set()
for task in tasks:
url = task.get('href',None)
if url:
urls.add(url)
return urls
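# Illustrative sketch (URL is hypothetical): a request detail response such as
#   {"tasks": [{"href": ".../api/v1/clusters/c1/requests/5/tasks/12"}, ...]}
# yields the set of task URLs that summarize_tasks_by_hosts() fetches one by one.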
"""
Check if the script can log in Ambari Server REST API via user and password provided
"""
def server_reachable_by_credentials_with_retry(server_url, user, password):
"""
@param server_url: Basic server url to connect and log in
@param user: User for Ambari REST API authentication
@param password: Password for the user used to authenticate the Ambari REST API call
"""
retry_counter = 0
out = None
ec = CODE_SUCCESS
while retry_counter < DEFAULT_MAX_COUNTER:
out, ec = server_reachable_by_credentials(server_url, user, password)
if CODE_CONNECTION_REFUSED == ec:
retry_counter = retry_counter + 1
logger.debug('Server may not have become fully online yet, will try to reconnect in {0} seconds'.format(DEFAULT_TIMER_LONG))
time.sleep(DEFAULT_TIMER_LONG)
else:
logger.debug('Connected to server.')
break
if CODE_CONNECTION_REFUSED == ec:
message = 'Server did not become fully online in {0} seconds.'.format(str(DEFAULT_MAX_COUNTER * DEFAULT_TIMER_LONG))
logger.debug(message)
return out, ec
"""
Check if the script can log in Ambari Server REST API via user and password provided
"""
def server_reachable_by_credentials(server_url, user, password):
"""
@param server_url: Basic server url to connect and log in
@param user: User for Ambari REST API authentication
@param password: Password for the user used to authenticate the Ambari REST API call
"""
url = '{0}/api/v1/requests'.format(server_url)
out, err, ec = execute_curl_command(url, user=user, password=password)
if ec != CODE_SUCCESS:
return err, ec
else:
is_erroneous_response, http_ec, http_err = is_erroneous_response_by_server(out)
if is_erroneous_response:
return http_err, http_ec
else:
return '', CODE_SUCCESS
"""
Obtain a list of Ambari Agents registered to the host via a REST API call
"""
def get_ambari_agent_nodes(server_url, user, password):
"""
@param server_url: Basic server url to connect and log in
@param user: User for Ambari REST API authentication
@param password: Password for the user used to authenticate the Ambari REST API call
"""
url = "{0}/api/v1/services/AMBARI/components/AMBARI_AGENT".format(server_url)
hosts = set()
out, err, ec = execute_curl_command(url, user=user, password=password)
is_erroneous_response, ec, err = is_erroneous_response_by_server(out)
if is_erroneous_response:
logger.error("HTTP {0}:{1}".format(ec, err))
return hosts
response = json.loads(out)
host_list = response.get('hostComponents', [])
for item in host_list:
host_summary = item.get('RootServiceHostComponents', {})
host_name = host_summary.get('host_name', None)
if host_name:
hosts.add(host_name)
return hosts
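# Illustrative sketch of the expected response shape (host name is hypothetical):
#   {"hostComponents": [{"RootServiceHostComponents": {"host_name": "node1.example.com"}}]}
# would make get_ambari_agent_nodes() return set(['node1.example.com']).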
"""
Run host checks
"""
def run_check(options, url, label_check, data):
"""
@param options: Parameters passed in from the command line
@param url: Ambari Server URL
@param label_check: Text to display for the check result section
@param data: Data to be submitted to the Ambari Server via a REST API call
"""
out, err, ec = execute_curl_command(url, request_type=HTTP_REQUEST_POST, request_body=data, user=options.user, password=options.password)
if CODE_SUCCESS != ec or not out:
logger.debug(out)
logger.debug(ec)
logger.debug(err)
print_check_result(label_check, ['Failed to connect to Ambari server'], ec)
return ec
else:
is_erroneous_response, http_ec, http_err = is_erroneous_response_by_server(out)
if is_erroneous_response:
print_check_result(label_check, [http_err], http_ec)
return http_ec
elif is_request_accepted(out):
request_url = get_request_url(out)
finalresult, err, ec = polling_request(request_url, options.user, options.password)
logger.debug(finalresult)
if is_request_finished_successfully(finalresult):
request_url = get_request_url(out, summary_only=False)
return summarize_tasks_by_hosts(request_url, options.user, options.password)
else:
print_check_result(label_check, [err], CODE_ERROR)
else:
print_check_result(label_check, [out], CODE_ERROR)
def basic_task_result_parser(json_str, results):
response = json.loads(json_str)
task_result = response.get('Tasks', {})
host_name = task_result.get('host_name', None)
status = task_result.get('status', None)
if STATUS_COMPLETED != status:
stderr = task_result.get('stderr', None)
results.append({'key':host_name, 'status':status, 'error':stderr})
return {}
else:
return task_result.get('structured_out', {})
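# Illustrative sketch (host name is hypothetical; STATUS_COMPLETED is assumed to
# be the string "COMPLETED"): a task response such as
#   {"Tasks": {"host_name": "node1.example.com", "status": "COMPLETED",
#              "structured_out": {"host_resolution_check": {"exit_code": 0}}}}
# returns the structured_out dict; any other status is appended to the results
# as a failure for that host and an empty dict is returned instead.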
def host_check_parser(task_results_by_hosts, results_to_print):
if not task_results_by_hosts:
return
for key in task_results_by_hosts:
json_str = task_results_by_hosts[key]
structured_out = basic_task_result_parser(json_str, results_to_print)
if structured_out:
check_result = structured_out.get('host_resolution_check', {})
ec = check_result.get('exit_code', -1)
if CODE_SUCCESS == ec:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
else:
results_to_print.append({'key':key, 'status':STATUS_FAILED, 'error':[check_result.get('message', None)]})
"""
Host name resolution check
"""
def run_host_checks(options, agents, server_url):
label_check = 'Host name resolution'
step(label_check)
url = '{0}/api/v1/requests'.format(server_url)
data = '{{"RequestInfo":{{"action":"check_host","context":"Check host","parameters":{{"check_execute_list":"host_resolution_check","jdk_location":"{0}/resources","threshold":"20","hosts":"{1}"}}}},"Requests/resource_filters":[{{"hosts":"{1}"}}]}}'.format(server_url, ','.join(agents))
logger.debug('Host resolution check data {0}'.format(data))
task_results_by_hosts, results_to_print = run_check(options, url, label_check, data)
host_check_parser(task_results_by_hosts, results_to_print)
print_check_results(results_to_print)
def java_home_check_parser(task_results_by_hosts, results_to_print):
if not task_results_by_hosts:
return
for key in task_results_by_hosts:
json_str = task_results_by_hosts[key]
structured_out = basic_task_result_parser(json_str, results_to_print)
if structured_out:
check_result = structured_out.get('java_home_check', {})
ec = check_result.get('exit_code', -1)
if CODE_SUCCESS == ec:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
else:
results_to_print.append({'key':key, 'status':STATUS_FAILED, 'error':[check_result.get('message', None)]})
"""
Java home path check
"""
def run_java_home_checks(options, agents, server_url):
label_check = 'Java Home location'
step(label_check)
url = '{0}/api/v1/requests'.format(server_url)
java_home = get_ambari_server_property('java.home')
logger.info('Ambari server java home: {0}'.format(java_home))
data = '{{"RequestInfo":{{"context":"Check hosts","action":"check_host","parameters":{{"threshold":"60","java_home":"{0}","jdk_location":"{1}/resources","check_execute_list":"java_home_check"}}}},"Requests/resource_filters":[{{"hosts":"{2}"}}]}}'.format(java_home, server_url, ','.join(agents))
logger.debug('Java home check data {0}'.format(data))
task_results_by_hosts, results_to_print = run_check(options, url, label_check, data)
java_home_check_parser(task_results_by_hosts, results_to_print)
print_check_results(results_to_print)
def thp_checks_parser(task_results_by_hosts, results_to_print):
if not task_results_by_hosts:
return
for key in task_results_by_hosts:
json_str = task_results_by_hosts[key]
structured_out = basic_task_result_parser(json_str, results_to_print)
if structured_out:
check_result = structured_out.get('transparentHugePage', {})
thp_message = check_result.get('message', None)
if thp_message == 'always':
results_to_print.append({'key':key, 'status':STATUS_WARNING, 'warning':['Transparent Huge Pages (THP) is enabled', 'THP should be disabled to avoid potential Hadoop performance issues.']})
else:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
def disk_space_checks_parser(host_info_by_host, results_to_print):
min_disk_space = DEFAULT_MINDISKSPACE * 1024 * 1024
min_disk_space_usrlib = DEFAULT_MINDISKSPACEUSRLIB * 1024 * 1024
for key in host_info_by_host:
host_summary = host_info_by_host[key]
info = host_summary.get('Hosts', {})
disk_info = info.get('disk_info', [])
for disk in disk_info:
errors = []
passes = 0
mountpoint = disk.get('mountpoint', None)
if '/' == mountpoint:
free_space = disk.get('available', -1)
if free_space == -1:
errors.append('Failed to obtain free space for mountpoint /')
elif free_space < min_disk_space:
errors.append('A minimum of {0} GB of free space is required for mountpoint /'.format(DEFAULT_MINDISKSPACE))
else:
passes += 1
elif '/usr' == mountpoint or '/usr/lib' == mountpoint:
free_space = disk.get('available', -1)
if free_space == -1:
errors.append('Failed to obtain free space for mountpoint /usr or /usr/lib')
elif free_space < min_disk_space_usrlib:
errors.append('A minimum of {0} GB of free space is required for mountpoint /usr or /usr/lib'.format(DEFAULT_MINDISKSPACEUSRLIB))
else:
passes += 1
if passes > 0:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
elif errors:
results_to_print.append({'key':key, 'status':STATUS_FAILED, 'error':errors})
def get_last_agent_env(host_info):
info = host_info.get('Hosts', {})
last_agent_env = info.get('last_agent_env', {})
return last_agent_env
def firewall_checks_parser(task_results_by_host, results_to_print):
for key in task_results_by_host:
structured_out = basic_task_result_parser(task_results_by_host[key], results_to_print)
if structured_out:
last_agent_env = structured_out.get('last_agent_env_check', {})
if 'firewallRunning' in last_agent_env:
firewall_running = last_agent_env['firewallRunning']
if firewall_running:
results_to_print.append({'key':key, 'status':STATUS_WARNING, 'warning':['Firewall is running on the host', 'Please configure the firewall to allow communications on the ports documented in the Configuring Ports section of the Ambari documentation.']})
else:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
else:
results_to_print.append({'key':key, 'status':STATUS_FAILED, 'error':['Failed to determine if firewall is running on the host']})
def java_process_checks_parser(task_results_by_host, results_to_print):
for key in task_results_by_host:
structured_out = basic_task_result_parser(task_results_by_host[key], results_to_print)
if structured_out:
last_agent_env = structured_out.get('last_agent_env_check', {})
host_health = last_agent_env.get('hostHealth', {})
active_java_processes = host_health.get('activeJavaProcs', [])
if active_java_processes:
warnings = []
for process in active_java_processes:
warnings.append('Process {0} under user {1} should not be running'.format(process['pid'], process['user']))
results_to_print.append({'key':key, 'status':STATUS_WARNING, 'warning':warnings})
else:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
def install_packages_checks_parser(task_results_by_host, results_to_print):
for key in task_results_by_host:
structured_out = basic_task_result_parser(task_results_by_host[key], results_to_print)
installed_packages = structured_out.get('installed_packages', [])
if installed_packages:
warnings = []
for package in installed_packages:
warnings.append('{0} (version {1}) is installed from repo {2}. It should be removed before deploying the cluster.'.format(package['name'], package['version'], package['repoName']))
results_to_print.append({'key':key, 'status':STATUS_WARNING, 'warning':warnings})
else:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
def file_and_folder_checks_parser(task_results_by_host, results_to_print):
for key in task_results_by_host:
structured_out = basic_task_result_parser(task_results_by_host[key], results_to_print)
last_agent_env = structured_out.get('last_agent_env_check', {})
stack_files_and_folders = last_agent_env.get('stackFoldersAndFiles',[])
if stack_files_and_folders:
warnings = []
for item in stack_files_and_folders:
warnings.append('{0} {1} should not exist.'.format(item['type'].title(), item['name']))
results_to_print.append({'key':key, 'status':STATUS_WARNING, 'warning':warnings})
else:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
def live_services_checks_parser(task_results_by_host, results_to_print):
for key in task_results_by_host:
structured_out = basic_task_result_parser(task_results_by_host[key], results_to_print)
last_agent_env = structured_out.get('last_agent_env_check', {})
host_health = last_agent_env.get('hostHealth', {})
live_services = host_health.get('liveServices', [])
if live_services:
warnings = []
for service in live_services:
if 'Unhealthy' == service['status']:
warnings.append('Service {0} should be up.'.format(service['name']))
if warnings:
results_to_print.append({'key':key, 'status':STATUS_WARNING, 'warning':warnings})
else:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
else:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
def default_user_ids_checks_parser(task_results_by_host, results_to_print):
for key in task_results_by_host:
structured_out = basic_task_result_parser(task_results_by_host[key], results_to_print)
last_agent_env = structured_out.get('last_agent_env_check', {})
existing_users = last_agent_env.get('existingUsers', [])
if existing_users:
messages = []
for user in existing_users:
messages.append('User {0} with home directory {1} exists.'.format(user['name'], user['homeDir']))
if messages:
results_to_print.append({'key':key, 'status':STATUS_WARNING, 'warning':messages})
else:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
def umask_checks_parser(task_results_by_host, results_to_print):
for key in task_results_by_host:
structured_out = basic_task_result_parser(task_results_by_host[key], results_to_print)
last_agent_env = structured_out.get('last_agent_env_check', {})
if 'umask' in last_agent_env:
umask = int(last_agent_env['umask'])
if umask > 23:
results_to_print.append({'key':key, 'status':STATUS_WARNING, 'warning':['Umask is {0}. Consider updating it.'.format(umask)]})
else:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
else:
results_to_print.append({'key':key, 'status':STATUS_FAILED, 'error':['Failed to obtain umask value on the host.']})
def alternatives_checks_parser(task_results_by_host, results_to_print):
for key in task_results_by_host:
structured_out = basic_task_result_parser(task_results_by_host[key], results_to_print)
last_agent_env = structured_out.get('last_agent_env_check', {})
alternatives = last_agent_env.get('alternatives', [])
if alternatives:
warnings = []
for alternative in alternatives:
warnings.append('Existing /etc/alternatives entry: {0} points to {1}'.format(alternative['name'], alternative['target']))
results_to_print.append({'key':key, 'status':STATUS_WARNING, 'warning':warnings})
else:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
def reverse_lookup_checks_parser(task_results_by_host, results_to_print):
for key in task_results_by_host:
structured_out = basic_task_result_parser(task_results_by_host[key], results_to_print)
last_agent_env = structured_out.get('last_agent_env_check', {})
if 'reverseLookup' in last_agent_env:
reverse_lookup = last_agent_env['reverseLookup']
if reverse_lookup:
results_to_print.append({'key':key, 'status':STATUS_PASSED})
else:
results_to_print.append({'key':key, 'status':STATUS_WARNING, 'warning':['The hostname was not found in the reverse DNS lookup', 'This may result in incorrect behavior. Please check the DNS setup and fix the issue.']})
else:
results_to_print.append({'key':key, 'status':STATUS_FAILED, 'error':['Failed to determine if DNS reverse lookup is configured on the host']})
"""
Agent last environment check
"""
def run_agent_checks(options, agents, server_url):
logger.info('')
logger.info('Prepare for Ambari Agent host check')
label_check = 'Ambari Agent host check'
url = '{0}/api/v1/requests'.format(server_url)
data = '{{"RequestInfo":{{"action":"check_host","context":"Check host","parameters":{{"check_execute_list":"last_agent_env_check,installed_packages,existing_repos,transparentHugePage","jdk_location":"{0}/resources","threshold":"20"}}}},"Requests/resource_filters":[{{"hosts":"{1}"}}]}}'.format(server_url, ','.join(agents))
logger.debug('Agent environment check data to submit {0}'.format(data))
task_results_by_host, results_to_print = run_check(options, url, label_check, data)
step('Transparent Huge Pages')
thp_checks_parser(task_results_by_host, results_to_print)
print_check_results(results_to_print)
host_info_url = '{0}/api/v1/hosts?fields=Hosts/total_mem,Hosts/cpu_count,Hosts/disk_info,Hosts/last_agent_env,Hosts/host_name,Hosts/os_type,Hosts/os_arch,Hosts/os_family,Hosts/ip'.format(server_url)
out, err, ec = execute_curl_command(host_info_url, user=options.user, password=options.password)
logger.debug('Agent host information {0}'.format(out))
host_info_by_host = {}
if out:
response = json.loads(out)
items = response.get('items', {})
for item in items:
info = item.get('Hosts', {})
host_name = info.get('host_name', None)
if host_name:
host_info_by_host[host_name]=item
if host_info_by_host:
step('Disk space')
results_to_print = []
disk_space_checks_parser(host_info_by_host, results_to_print)
print_check_results(results_to_print)
step('Firewall enabled')
results_to_print = []
firewall_checks_parser(task_results_by_host, results_to_print)
print_check_results(results_to_print)
step('Java processes')
results_to_print = []
java_process_checks_parser(task_results_by_host, results_to_print)
print_check_results(results_to_print)
step('Installed packages')
results_to_print = []
install_packages_checks_parser(task_results_by_host, results_to_print)
print_check_results(results_to_print)
step('Stack files and directories')
results_to_print = []
file_and_folder_checks_parser(task_results_by_host, results_to_print)
print_check_results(results_to_print)
step('Live services')
results_to_print = []
live_services_checks_parser(task_results_by_host, results_to_print)
print_check_results(results_to_print)
step('Default user names')
results_to_print = []
default_user_ids_checks_parser(task_results_by_host, results_to_print)
print_check_results(results_to_print)
step('Umask')
results_to_print = []
umask_checks_parser(task_results_by_host, results_to_print)
print_check_results(results_to_print)
step('Alternatives')
results_to_print = []
alternatives_checks_parser(task_results_by_host, results_to_print)
print_check_results(results_to_print)
step('Reverse lookup')
results_to_print = []
reverse_lookup_checks_parser(task_results_by_host, results_to_print)
print_check_results(results_to_print)
def run_validate_blueprint(options, server_url):
results_to_print = []
blueprint_file = options.blueprint
blueprint_name = None
label_check = 'Blueprint validation'
step(label_check)
logger.debug('Blueprint file to check {0}'.format(blueprint_file))
if os.path.isfile(blueprint_file):
"""Validate blueprint file is a valid json file"""
valid_json_file = False
try:
with open(blueprint_file) as data_file:
data = json.load(data_file)
valid_json_file = True
except ValueError as value_error:
results_to_print.append({'key':label_check, 'status':STATUS_FAILED, 'error':[str(value_error)]})
if valid_json_file:
"""Either a timestamp based name or the name defined in the blueprint"""
blueprint_metadata = data.get('Blueprints', {})
blueprint_name = blueprint_metadata.get('blueprint_name', None)
if not blueprint_name:
blueprint_name = 'blueprint_validation_{0}'.format(str(uuid.uuid4()))
logger.debug('Blueprint name used for server side validation: {0}'.format(blueprint_name))
url = '{0}/api/v1/blueprints/{1}'.format(server_url, blueprint_name)
out, err, ec = execute_curl_command(url, request_type=HTTP_REQUEST_POST, request_body="@{0}".format(blueprint_file), user=options.user, password=options.password)
logger.debug(out)
logger.debug(err)
if CODE_ERROR == ec:
results_to_print.append({'key':label_check, 'status':STATUS_FAILED, 'error':[err]})
else:
http_response_code = get_http_response_code(err)
logger.debug('HTTP response from the Ambari server: {0}'.format(http_response_code))
if http_response_code == HTTP_CREATED and not out :
results_to_print.append({'key':label_check, 'status':STATUS_PASSED})
else:
is_erroneous_response, http_ec, http_err = is_erroneous_response_by_server(out)
if is_erroneous_response:
results_to_print.append({'key':label_check, 'status':STATUS_FAILED, 'error':[http_err]})
else:
results_to_print.append({'key':label_check, 'status':STATUS_FAILED, 'error':[err]})
else:
results_to_print.append({'key':label_check, 'status':STATUS_FAILED, 'error':['{0} does not exist'.format(blueprint_file)]})
print_check_results(results_to_print)
if blueprint_name:
    deregister_temporary_blueprint(options, server_url, blueprint_name)
def deregister_temporary_blueprint(options, server_url, blueprint_name):
url = '{0}/api/v1/blueprints/{1}'.format(server_url, blueprint_name)
out, err, ec = execute_curl_command(url, request_type=HTTP_REQUEST_DELETE, user=options.user, password=options.password)
if CODE_ERROR == ec:
logger.error(out)
logger.error(err)
else:
logger.debug(out)
logger.debug(err)
http_response_code = get_http_response_code(err)
logger.debug('HTTP response from the Ambari server: {0}'.format(http_response_code))
if http_response_code == HTTP_OK and not out :
logger.debug("{0} deregistered".format(blueprint_name))
else:
is_erroneous_response, http_ec, http_err = is_erroneous_response_by_server(out)
if is_erroneous_response:
logger.error(http_err)
else:
logger.info(out)
if err:
logger.error(err)
"""
Execute the operation passed in from the command line
"""
def run(options):
global has_warnings
global has_errors
server_url = get_server_url(options.port)
label_check = 'Ambari server reachable by user credentials'
step(label_check)
out, ec = server_reachable_by_credentials_with_retry(server_url, options.user, options.password)
if CODE_SUCCESS == ec:
print_check_result(label_check, ['Ambari server reachable via {0}'.format(server_url)], ec)
elif CODE_ERROR == ec:
print_check_result(label_check, ['Failed to establish connection to {0}.'.format(server_url)], ec)
return ec
elif HTTP_FORBIDDEN == ec:
print_check_result(label_check, ['Wrong credentials provided.'], ec)
return ec
agents = get_ambari_agent_nodes(server_url, options.user, options.password)
logger.info('Total number of agents {0}'.format(len(agents)))
if not agents:
logger.error('No Ambari Agent registered to the Ambari Server. Install Ambari Agent first.')
return CODE_ERROR
if OPERATION_HOST_CHECK == options.operation:
run_host_checks(options, agents, server_url)
run_java_home_checks(options, agents, server_url)
run_agent_checks(options, agents, server_url)
elif OPERATION_VALIDATE_BLUEPRINT == options.operation:
run_validate_blueprint(options, server_url)
if has_errors:
logger.info('')
logger.error('Checks finished with errors')
return CODE_ERROR
elif has_warnings:
logger.info('')
logger.warning('Checks finished with warnings')
return CODE_WARNING
else:
logger.info('')
logger.info('Checks finished')
return CODE_SUCCESS
def main():
parser = optparse.OptionParser(usage="usage: %prog [option] arg ... [option] arg",)
init_parser_options(parser)
(options, args) = parser.parse_args()
backup_file, ec = backup_log(options.log)
init_logger(options)
if backup_file:
logger.info('Previous logs backed up as {0}'.format(backup_file))
out, ec = validate_options(options)
if CODE_ERROR == ec:
logger.error(out)
sys.exit(ec)
else:
dump_parameters_to_log(options)
try:
ec = run(options)
sys.exit(ec)
except Exception, e:
logger.exception(e)
sys.exit(CODE_ERROR)
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, EOFError):
print("Aborting ... Keyboard Interrupt.")
sys.exit(1)
|
|
"""Logic expressions handling
NOTE
----
At present this is mainly needed for facts.py; feel free, however, to improve
it for general use.
"""
from __future__ import print_function, division
def _fuzzy_group(args, quick_exit=False):
"""Return True if all args are True, None if there is any None else False
unless ``quick_exit`` is True (then return None as soon as a second False
is seen.
``_fuzzy_group`` is like ``fuzzy_and`` except that it is more
conservative in returning a False, waiting to make sure that all
arguments are True or False and returning None if any arguments are
None. It also has the capability of permitting only a single False and
returning None if more than one is seen. For example, the presence of a
single transcendental amongst rationals would indicate that the group is
no longer rational; but a second transcendental in the group would make the
determination impossible.
Examples
========
>>> from sympy.core.logic import _fuzzy_group
By default, multiple Falses mean the group is broken:
>>> _fuzzy_group([False, False, True])
False
If multiple Falses mean the group status is unknown, then set
`quick_exit` to True so None can be returned when the second False is seen:
>>> _fuzzy_group([False, False, True], quick_exit=True)
But if only a single False is seen then the group is known to
be broken:
>>> _fuzzy_group([False, True, True], quick_exit=True)
False
"""
saw_other = False
for a in args:
if a is True:
continue
if a is None:
return
if quick_exit and saw_other:
return
saw_other = True
return not saw_other
def fuzzy_bool(x):
"""Return True, False or None according to x.
Whereas bool(x) returns True or False, fuzzy_bool allows
for the None value.
"""
if x is None:
return None
return bool(x)
def fuzzy_and(args):
"""Return True (all True), False (any False) or None.
Examples
========
>>> from sympy.core.logic import fuzzy_and
>>> from sympy import Dummy
If you had a list of objects to test the commutativity of
and you want the fuzzy_and logic applied, passing an
iterator will allow the commutativity to only be computed
as many times as necessary. With this list, False can be
returned after analyzing the first symbol:
>>> syms = [Dummy(commutative=False), Dummy()]
>>> fuzzy_and(s.is_commutative for s in syms)
False
That False would require less work than if a list of pre-computed
items was sent:
>>> fuzzy_and([s.is_commutative for s in syms])
False
"""
rv = True
for ai in args:
ai = fuzzy_bool(ai)
if ai is False:
return False
if rv: # this will stop updating if a None is ever trapped
rv = ai
return rv
def fuzzy_not(v):
"""
Not in fuzzy logic
Return None if `v` is None else `not v`.
Examples
========
>>> from sympy.core.logic import fuzzy_not
>>> fuzzy_not(True)
False
>>> fuzzy_not(None)
>>> fuzzy_not(False)
True
"""
if v is None:
return v
else:
return not v
def fuzzy_or(args):
"""
Or in fuzzy logic. Returns True (any True), False (all False), or None
See the docstrings of fuzzy_and and fuzzy_not for more info. fuzzy_or is
related to the two by the standard De Morgan's law.
>>> from sympy.core.logic import fuzzy_or
>>> fuzzy_or([True, False])
True
>>> fuzzy_or([True, None])
True
>>> fuzzy_or([False, False])
False
>>> print(fuzzy_or([False, None]))
None
"""
return fuzzy_not(fuzzy_and(fuzzy_not(i) for i in args))
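# Worked example of the De Morgan definition above:
#   fuzzy_or([None, True])
#     == fuzzy_not(fuzzy_and([fuzzy_not(None), fuzzy_not(True)]))
#     == fuzzy_not(fuzzy_and([None, False]))
#     == fuzzy_not(False)
#     == True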
class Logic(object):
"""Logical expression"""
# {} 'op' -> LogicClass
op_2class = {}
def __new__(cls, *args):
obj = object.__new__(cls)
obj.args = args
return obj
def __getnewargs__(self):
return self.args
def __hash__(self):
return hash( (type(self).__name__,) + tuple(self.args) )
def __eq__(a, b):
if not isinstance(b, type(a)):
return False
else:
return a.args == b.args
def __ne__(a, b):
if not isinstance(b, type(a)):
return True
else:
return a.args != b.args
def __lt__(self, other):
if self.__cmp__(other) == -1:
return True
return False
def __cmp__(self, other):
if type(self) is not type(other):
a = str(type(self))
b = str(type(other))
else:
a = self.args
b = other.args
return (a > b) - (a < b)
def __str__(self):
return '%s(%s)' % (self.__class__.__name__, ', '.join(str(a) for a in self.args))
__repr__ = __str__
@staticmethod
def fromstring(text):
"""Logic from string with space around & and | but none after !.
e.g.
!a & b | c
"""
lexpr = None # current logical expression
schedop = None # scheduled operation
for term in text.split():
# operation symbol
if term in '&|':
if schedop is not None:
raise ValueError(
'double op forbidden: "%s %s"' % (term, schedop))
if lexpr is None:
raise ValueError(
'%s cannot be in the beginning of expression' % term)
schedop = term
continue
if '&' in term or '|' in term:
raise ValueError('& and | must have space around them')
if term[0] == '!':
if len(term) == 1:
raise ValueError('do not include space after "!"')
term = Not(term[1:])
# already scheduled operation, e.g. '&'
if schedop:
lexpr = Logic.op_2class[schedop](lexpr, term)
schedop = None
continue
# this should be atom
if lexpr is not None:
raise ValueError(
'missing op between "%s" and "%s"' % (lexpr, term))
lexpr = term
# let's check that we ended up in correct state
if schedop is not None:
raise ValueError('premature end-of-expression in "%s"' % text)
if lexpr is None:
raise ValueError('"%s" is empty' % text)
# everything looks good now
return lexpr
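# Illustrative sketch of fromstring (operators apply left to right, with no
# precedence; argument order inside And/Or may vary because args are sorted by hash):
#   Logic.fromstring('a & b')      -> And(a, b)
#   Logic.fromstring('!a & b | c') -> Or(And(Not(a), b), c)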
class AndOr_Base(Logic):
def __new__(cls, *args):
bargs = []
for a in args:
if a == cls.op_x_notx:
return a
elif a == (not cls.op_x_notx):
continue # skip this argument
bargs.append(a)
args = sorted(set(cls.flatten(bargs)), key=hash)
for a in args:
if Not(a) in args:
return cls.op_x_notx
if len(args) == 1:
return args.pop()
elif len(args) == 0:
return not cls.op_x_notx
return Logic.__new__(cls, *args)
@classmethod
def flatten(cls, args):
# quick-n-dirty flattening for And and Or
args_queue = list(args)
res = []
while True:
try:
arg = args_queue.pop(0)
except IndexError:
break
if isinstance(arg, Logic):
if isinstance(arg, cls):
args_queue.extend(arg.args)
continue
res.append(arg)
args = tuple(res)
return args
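# Illustrative sketch: nested instances of the same class are collapsed by
# flatten() via __new__, so And('a', And('b', 'c')) builds the same expression
# as And('a', 'b', 'c') (up to the hash-based argument ordering above).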
class And(AndOr_Base):
op_x_notx = False
def _eval_propagate_not(self):
# !(a&b&c ...) == !a | !b | !c ...
return Or( *[Not(a) for a in self.args] )
# (a|b|...) & c == (a&c) | (b&c) | ...
def expand(self):
# first locate Or
for i in range(len(self.args)):
arg = self.args[i]
if isinstance(arg, Or):
arest = self.args[:i] + self.args[i + 1:]
orterms = [And( *(arest + (a,)) ) for a in arg.args]
for j in range(len(orterms)):
if isinstance(orterms[j], Logic):
orterms[j] = orterms[j].expand()
res = Or(*orterms)
return res
else:
return self
class Or(AndOr_Base):
op_x_notx = True
def _eval_propagate_not(self):
# !(a|b|c ...) == !a & !b & !c ...
return And( *[Not(a) for a in self.args] )
class Not(Logic):
def __new__(cls, arg):
if isinstance(arg, str):
return Logic.__new__(cls, arg)
elif isinstance(arg, bool):
return not arg
elif isinstance(arg, Not):
return arg.args[0]
elif isinstance(arg, Logic):
# XXX this is a hack to expand right from the beginning
arg = arg._eval_propagate_not()
return arg
else:
raise ValueError('Not: unknown argument %r' % (arg,))
@property
def arg(self):
return self.args[0]
Logic.op_2class['&'] = And
Logic.op_2class['|'] = Or
Logic.op_2class['!'] = Not
|
|
"""
@package mi.instrument.seabird.sbe54tps.test.test_driver
@file mi/instrument/seabird/sbe54tps/test/test_driver.py
@author Roger Unwin
@brief Test cases for sbe54 driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u
$ bin/test_driver -i
$ bin/test_driver -q
* From pyon
$ bin/nosetests -s -v mi/instrument/seabird/sbe54tps/ooicore
$ bin/nosetests -s -v mi/instrument/seabird/sbe54tps/ooicore -a UNIT
$ bin/nosetests -s -v mi/instrument/seabird/sbe54tps/ooicore -a INT
$ bin/nosetests -s -v mi/instrument/seabird/sbe54tps/ooicore -a QUAL
"""
import copy
from nose.plugins.attrib import attr
from mock import Mock
import time
from mi.core.log import get_logger
from mi.core.time_tools import timegm_to_float
from mi.idk.unit_test import DriverTestMixin, DriverStartupConfigKey, InstrumentDriverTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.instrument.seabird.sbe54tps.driver import SBE54PlusInstrumentDriver
from mi.instrument.seabird.sbe54tps.driver import ScheduledJob
from mi.instrument.seabird.sbe54tps.driver import ProtocolState
from mi.instrument.seabird.sbe54tps.driver import Parameter
from mi.instrument.seabird.sbe54tps.driver import ProtocolEvent
from mi.instrument.seabird.sbe54tps.driver import Capability
from mi.instrument.seabird.sbe54tps.driver import Prompt
from mi.instrument.seabird.sbe54tps.driver import Protocol
from mi.instrument.seabird.sbe54tps.driver import InstrumentCmds
from mi.instrument.seabird.sbe54tps.driver import SBE54tpsStatusDataParticleKey
from mi.instrument.seabird.sbe54tps.driver import SBE54tpsEventCounterDataParticleKey
from mi.instrument.seabird.sbe54tps.driver import SBE54tpsSampleDataParticleKey
from mi.instrument.seabird.sbe54tps.driver import SBE54tpsHardwareDataParticleKey
from mi.instrument.seabird.sbe54tps.driver import SBE54tpsConfigurationDataParticleKey
from mi.instrument.seabird.sbe54tps.driver import SBE54tpsSampleRefOscDataParticleKey
from mi.instrument.seabird.sbe54tps.driver import DataParticleType
from mi.instrument.seabird.test.test_driver import SeaBirdUnitTest
from mi.instrument.seabird.test.test_driver import SeaBirdIntegrationTest
from mi.instrument.seabird.test.test_driver import SeaBirdQualificationTest
from mi.instrument.seabird.sbe54tps.test.sample_data import *
from mi.core.instrument.instrument_driver import ResourceAgentEvent
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.exceptions import InstrumentCommandException
from mi.core.instrument.chunker import StringChunker
__author__ = 'Roger Unwin'
__license__ = 'Apache 2.0'
log = get_logger()
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.seabird.sbe54tps.ooicore.driver',
driver_class="InstrumentDriver",
instrument_agent_resource_id='123xyz',
instrument_agent_preload_id='IA7',
instrument_agent_name='Agent007',
instrument_agent_packet_config=DataParticleType(),
driver_startup_config={
DriverStartupConfigKey.PARAMETERS: {
Parameter.SAMPLE_PERIOD: 15,
Parameter.ENABLE_ALERTS: 1,
},
DriverStartupConfigKey.SCHEDULER: {
ScheduledJob.ACQUIRE_STATUS: {},
ScheduledJob.STATUS_DATA: {},
ScheduledJob.HARDWARE_DATA: {},
ScheduledJob.EVENT_COUNTER_DATA: {},
ScheduledJob.CONFIGURATION_DATA: {},
ScheduledJob.CLOCK_SYNC: {}
}
}
)
class SeaBird54tpsMixin(DriverTestMixin):
"""
Mixin class used for storing data particle constants and common data assertion methods.
"""
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
###
# Parameter and Type Definitions
###
_driver_parameters = {
# Parameters defined in the IOS
Parameter.SAMPLE_PERIOD: {TYPE: int, READONLY: False, DA: True, STARTUP: True, DEFAULT: 15, VALUE: 15},
Parameter.TIME: {TYPE: str, READONLY: True, DA: False, STARTUP: False},
Parameter.BATTERY_TYPE: {TYPE: int, READONLY: True, DA: True, STARTUP: True, DEFAULT: 1, VALUE: 1},
Parameter.ENABLE_ALERTS: {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: 1},
}
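# Illustrative reading of one entry above: Parameter.SAMPLE_PERIOD is an int,
# writable (READONLY False), exposed for direct access, applied at startup with
# a default of 15; assert_driver_parameters() below checks the driver's reported
# parameters against these expectations.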
_driver_capabilities = {
# capabilities defined in the IOS
Capability.DISCOVER: {STATES: [ProtocolState.UNKNOWN]},
Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
Capability.CLOCK_SYNC: {STATES: [ProtocolState.COMMAND]},
Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
Capability.SAMPLE_REFERENCE_OSCILLATOR: {STATES: [ProtocolState.COMMAND]},
Capability.TEST_EEPROM: {STATES: [ProtocolState.COMMAND]},
}
_prest_real_time_parameters = {
SBE54tpsSampleDataParticleKey.SAMPLE_NUMBER: {TYPE: int, VALUE: 5947, REQUIRED: True},
SBE54tpsSampleDataParticleKey.SAMPLE_TYPE: {TYPE: unicode, VALUE: 'Pressure', REQUIRED: True},
SBE54tpsSampleDataParticleKey.INST_TIME: {TYPE: unicode, VALUE: '2012-11-07T12:21:25', REQUIRED: True},
SBE54tpsSampleDataParticleKey.PRESSURE: {TYPE: float, VALUE: 13.9669, REQUIRED: True},
SBE54tpsSampleDataParticleKey.PRESSURE_TEMP: {TYPE: float, VALUE: 18.9047, REQUIRED: True},
}
_prest_reference_oscillator_parameters = {
SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT: {TYPE: int, VALUE: 125000, REQUIRED: True},
SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT_MAX: {TYPE: int, VALUE: 150000, REQUIRED: True},
SBE54tpsSampleRefOscDataParticleKey.SET_TIMEOUT_ICD: {TYPE: int, VALUE: 150000, REQUIRED: True},
SBE54tpsSampleRefOscDataParticleKey.SAMPLE_NUMBER: {TYPE: int, VALUE: 1244, REQUIRED: True},
SBE54tpsSampleRefOscDataParticleKey.SAMPLE_TYPE: {TYPE: unicode, VALUE: 'RefOsc', REQUIRED: True},
SBE54tpsSampleRefOscDataParticleKey.SAMPLE_TIMESTAMP: {TYPE: unicode, VALUE: u'2013-01-30T15:36:53',
REQUIRED: True},
SBE54tpsSampleRefOscDataParticleKey.REF_OSC_FREQ: {TYPE: float, VALUE: 5999995.955, REQUIRED: True},
SBE54tpsSampleRefOscDataParticleKey.REF_ERROR_PPM: {TYPE: float, VALUE: 0.090, REQUIRED: True},
SBE54tpsSampleRefOscDataParticleKey.PCB_TEMP_RAW: {TYPE: int, VALUE: 18413, REQUIRED: True},
}
_prest_configuration_data_parameters = {
SBE54tpsConfigurationDataParticleKey.DEVICE_TYPE: {TYPE: unicode, VALUE: 'SBE54', REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.SERIAL_NUMBER: {TYPE: str, VALUE: '05400012', REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.ACQ_OSC_CAL_DATE: {TYPE: unicode, VALUE: '2012-02-20', REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.FRA0: {TYPE: float, VALUE: 5.999926E+06, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.FRA1: {TYPE: float, VALUE: 5.792290E-03, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.FRA2: {TYPE: float, VALUE: -1.195664E-07, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.FRA3: {TYPE: float, VALUE: 7.018589E-13, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PRESSURE_SERIAL_NUM: {TYPE: unicode, VALUE: '121451', REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PRESSURE_CAL_DATE: {TYPE: unicode, VALUE: '2011-06-01', REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PU0: {TYPE: float, VALUE: 5.820407E+00, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PY1: {TYPE: float, VALUE: -3.845374E+03, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PY2: {TYPE: float, VALUE: -1.078882E+04, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PY3: {TYPE: float, VALUE: 0.000000E+00, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PC1: {TYPE: float, VALUE: -2.700543E+04, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PC2: {TYPE: float, VALUE: -1.738438E+03, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PC3: {TYPE: float, VALUE: 7.629962E+04, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PD1: {TYPE: float, VALUE: 3.739600E-02, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PD2: {TYPE: float, VALUE: 0.000000E+00, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PT1: {TYPE: float, VALUE: 3.027306E+01, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PT2: {TYPE: float, VALUE: 2.231025E-01, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PT3: {TYPE: float, VALUE: 5.398972E+01, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PT4: {TYPE: float, VALUE: 1.455506E+02, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PRESSURE_OFFSET: {TYPE: float, VALUE: 0.000000E+00, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.PRESSURE_RANGE: {TYPE: float, VALUE: 6.000000E+03, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.BATTERY_TYPE: {TYPE: int, VALUE: 0, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.BAUD_RATE: {TYPE: int, VALUE: 9600, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.ENABLE_ALERTS: {TYPE: int, VALUE: 0, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.UPLOAD_TYPE: {TYPE: int, VALUE: 0, REQUIRED: True},
SBE54tpsConfigurationDataParticleKey.SAMPLE_PERIOD: {TYPE: int, VALUE: 15, REQUIRED: True}
}
_prest_device_status_parameters = {
SBE54tpsStatusDataParticleKey.DEVICE_TYPE: {TYPE: unicode, VALUE: 'SBE54', REQUIRED: True},
SBE54tpsStatusDataParticleKey.SERIAL_NUMBER: {TYPE: str, VALUE: '05400012', REQUIRED: True},
SBE54tpsStatusDataParticleKey.TIME: {TYPE: unicode, VALUE: '2012-11-06T10:55:44', REQUIRED: True},
SBE54tpsStatusDataParticleKey.EVENT_COUNT: {TYPE: int, VALUE: 573},
SBE54tpsStatusDataParticleKey.MAIN_SUPPLY_VOLTAGE: {TYPE: float, VALUE: 23.3, REQUIRED: True},
SBE54tpsStatusDataParticleKey.NUMBER_OF_SAMPLES: {TYPE: int, VALUE: 22618, REQUIRED: True},
SBE54tpsStatusDataParticleKey.BYTES_USED: {TYPE: int, VALUE: 341504, REQUIRED: True},
SBE54tpsStatusDataParticleKey.BYTES_FREE: {TYPE: int, VALUE: 133876224, REQUIRED: True},
}
_prest_event_counter_parameters = {
SBE54tpsEventCounterDataParticleKey.NUMBER_EVENTS: {TYPE: int, VALUE: 573},
SBE54tpsEventCounterDataParticleKey.MAX_STACK: {TYPE: int, VALUE: 354},
SBE54tpsEventCounterDataParticleKey.DEVICE_TYPE: {TYPE: unicode, VALUE: 'SBE54'},
SBE54tpsEventCounterDataParticleKey.SERIAL_NUMBER: {TYPE: str, VALUE: '05400012'},
SBE54tpsEventCounterDataParticleKey.POWER_ON_RESET: {TYPE: int, VALUE: 25},
SBE54tpsEventCounterDataParticleKey.POWER_FAIL_RESET: {TYPE: int, VALUE: 25},
SBE54tpsEventCounterDataParticleKey.SERIAL_BYTE_ERROR: {TYPE: int, VALUE: 9},
SBE54tpsEventCounterDataParticleKey.COMMAND_BUFFER_OVERFLOW: {TYPE: int, VALUE: 1},
SBE54tpsEventCounterDataParticleKey.SERIAL_RECEIVE_OVERFLOW: {TYPE: int, VALUE: 255},
SBE54tpsEventCounterDataParticleKey.LOW_BATTERY: {TYPE: int, VALUE: 255},
SBE54tpsEventCounterDataParticleKey.SIGNAL_ERROR: {TYPE: int, VALUE: 1},
SBE54tpsEventCounterDataParticleKey.ERROR_10: {TYPE: int, VALUE: 1},
SBE54tpsEventCounterDataParticleKey.ERROR_12: {TYPE: int, VALUE: 1},
}
_prest_hardware_data_parameters = {
SBE54tpsHardwareDataParticleKey.DEVICE_TYPE: {TYPE: unicode, VALUE: 'SBE54', REQUIRED: True},
SBE54tpsHardwareDataParticleKey.SERIAL_NUMBER: {TYPE: str, VALUE: '05400012', REQUIRED: True},
SBE54tpsHardwareDataParticleKey.MANUFACTURER: {TYPE: unicode, VALUE: 'Sea-Bird Electronics, Inc',
REQUIRED: True},
SBE54tpsHardwareDataParticleKey.FIRMWARE_VERSION: {TYPE: unicode, VALUE: 'SBE54 V1.3-6MHZ', REQUIRED: True},
SBE54tpsHardwareDataParticleKey.FIRMWARE_DATE: {TYPE: unicode, VALUE: 'Mar 22 2007', REQUIRED: True},
SBE54tpsHardwareDataParticleKey.HARDWARE_VERSION: {TYPE: list, VALUE: ['41477A.1', '41478A.1T'],
REQUIRED: True},
SBE54tpsHardwareDataParticleKey.PCB_SERIAL_NUMBER: {TYPE: list, VALUE: ['NOT SET', 'NOT SET'], REQUIRED: True},
SBE54tpsHardwareDataParticleKey.PCB_TYPE: {TYPE: unicode, VALUE: '1', REQUIRED: True},
SBE54tpsHardwareDataParticleKey.MANUFACTURE_DATE: {TYPE: unicode, VALUE: 'Jun 27 2007', REQUIRED: True},
}
###
# Driver Parameter Methods
###
def assert_driver_parameters(self, current_parameters, verify_values=False):
"""
Verify that all driver parameters are correct and potentially verify values.
@param current_parameters: driver parameters read from the driver instance
@param verify_values: should we verify values against definition?
"""
self.assert_parameters(current_parameters, self._driver_parameters, verify_values)
def assert_particle_real_time(self, data_particle, verify_values=False):
"""
Verify prest_real_time particle
@param data_particle: SBE54tpsSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(SBE54tpsSampleDataParticleKey, self._prest_real_time_parameters)
self.assert_data_particle_header(data_particle, DataParticleType.PREST_REAL_TIME)
self.assert_data_particle_parameters(data_particle, self._prest_real_time_parameters, verify_values)
def assert_particle_reference_oscillator(self, data_particle, verify_values=False):
"""
Verify prest_reference_oscillator particle
@param data_particle: SBE54tpsSampleRefOscDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(SBE54tpsSampleRefOscDataParticleKey, self._prest_reference_oscillator_parameters)
self.assert_data_particle_header(data_particle, DataParticleType.PREST_REFERENCE_OSCILLATOR)
self.assert_data_particle_parameters(data_particle, self._prest_reference_oscillator_parameters, verify_values)
def assert_particle_configuration_data(self, data_particle, verify_values=False):
"""
Verify prest_configuration_data particle
@param data_particle: SBE54tpsSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(SBE54tpsConfigurationDataParticleKey, self._prest_configuration_data_parameters)
self.assert_data_particle_header(data_particle, DataParticleType.PREST_CONFIGURATION_DATA)
self.assert_data_particle_parameters(data_particle, self._prest_configuration_data_parameters, verify_values)
def assert_particle_device_status(self, data_particle, verify_values=False):
"""
Verify prest_device_status particle
@param data_particle: SBE54tpsStatusDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(SBE54tpsStatusDataParticleKey, self._prest_device_status_parameters)
self.assert_data_particle_header(data_particle, DataParticleType.PREST_DEVICE_STATUS)
self.assert_data_particle_parameters(data_particle, self._prest_device_status_parameters, verify_values)
def assert_particle_event_counter(self, data_particle, verify_values=False):
"""
Verify prest_event_counter particle
@param data_particle: SBE54tpsEventCounterDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(SBE54tpsEventCounterDataParticleKey, self._prest_event_counter_parameters)
self.assert_data_particle_header(data_particle, DataParticleType.PREST_EVENT_COUNTER)
self.assert_data_particle_parameters(data_particle, self._prest_event_counter_parameters, verify_values)
def assert_particle_hardware_data(self, data_particle, verify_values=False):
"""
Verify prest_hardware_data particle
@param data_particle: SBE54tpsHardwareDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(SBE54tpsHardwareDataParticleKey, self._prest_hardware_data_parameters)
self.assert_data_particle_header(data_particle, DataParticleType.PREST_HARDWARE_DATA)
self.assert_data_particle_parameters(data_particle, self._prest_hardware_data_parameters, verify_values)
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
###############################################################################
@attr('UNIT', group='mi')
class SeaBird54PlusUnitTest(SeaBirdUnitTest, SeaBird54tpsMixin):
def setUp(self):
SeaBirdUnitTest.setUp(self)
def test_driver_enums(self):
"""
Verify that no driver enumeration has duplicate values that might cause confusion. Also
do a little extra validation for the Capabilities.
"""
self.assert_enum_has_no_duplicates(ScheduledJob())
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(InstrumentCmds())
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
self.assert_enum_has_no_duplicates(Parameter())
# Test capabilities for duplicates, then verify that capabilities are a subset of protocol events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_driver_schema(self):
"""
get the driver schema and verify it is configured properly
"""
driver = SBE54PlusInstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
def test_chunker(self):
"""
Test the chunker and verify the particles created.
"""
chunker = StringChunker(Protocol.sieve_function)
self.assert_chunker_sample(chunker, SAMPLE_GETSD)
self.assert_chunker_sample_with_noise(chunker, SAMPLE_GETSD)
self.assert_chunker_fragmented_sample(chunker, SAMPLE_GETSD, 32)
self.assert_chunker_combined_sample(chunker, SAMPLE_GETSD)
self.assert_chunker_sample(chunker, SAMPLE_GETCD)
self.assert_chunker_sample_with_noise(chunker, SAMPLE_GETCD)
self.assert_chunker_fragmented_sample(chunker, SAMPLE_GETCD, 32)
self.assert_chunker_combined_sample(chunker, SAMPLE_GETCD)
self.assert_chunker_sample(chunker, SAMPLE_GETEC)
self.assert_chunker_sample_with_noise(chunker, SAMPLE_GETEC)
self.assert_chunker_fragmented_sample(chunker, SAMPLE_GETEC, 32)
self.assert_chunker_combined_sample(chunker, SAMPLE_GETEC)
self.assert_chunker_sample(chunker, SAMPLE_GETHD)
self.assert_chunker_sample_with_noise(chunker, SAMPLE_GETHD)
self.assert_chunker_fragmented_sample(chunker, SAMPLE_GETHD, 32)
self.assert_chunker_combined_sample(chunker, SAMPLE_GETHD)
self.assert_chunker_sample(chunker, SAMPLE_SAMPLE)
self.assert_chunker_sample_with_noise(chunker, SAMPLE_SAMPLE)
self.assert_chunker_fragmented_sample(chunker, SAMPLE_SAMPLE, 32)
self.assert_chunker_combined_sample(chunker, SAMPLE_SAMPLE)
self.assert_chunker_sample(chunker, SAMPLE_REF_OSC)
self.assert_chunker_sample_with_noise(chunker, SAMPLE_REF_OSC)
self.assert_chunker_fragmented_sample(chunker, SAMPLE_REF_OSC, 32)
self.assert_chunker_combined_sample(chunker, SAMPLE_REF_OSC)
def test_got_data(self):
"""
Verify sample data passed through the got data method produces the correct data particles
"""
# Create and initialize the instrument driver with a mock port agent
driver = SBE54PlusInstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver)
self.assert_raw_particle_published(driver, True)
# Start validating data particles
self.assert_particle_published(driver, SAMPLE_GETSD, self.assert_particle_device_status, True)
self.assert_particle_published(driver, SAMPLE_GETCD, self.assert_particle_configuration_data, True)
self.assert_particle_published(driver, SAMPLE_GETEC, self.assert_particle_event_counter, True)
self.assert_particle_published(driver, SAMPLE_GETHD, self.assert_particle_hardware_data, True)
self.assert_particle_published(driver, SAMPLE_SAMPLE, self.assert_particle_real_time, True)
self.assert_particle_published(driver, SAMPLE_REF_OSC, self.assert_particle_reference_oscillator, True)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities.
Iterate through available capabilities, and verify that they can pass successfully through the filter.
Test silly made up capabilities to verify they are blocked by filter.
"""
my_event_callback = Mock(spec="UNKNOWN WHAT SHOULD GO HERE FOR evt_callback")
protocol = Protocol(Prompt, NEWLINE, my_event_callback)
driver_capabilities = Capability().list()
test_capabilities = Capability().list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(driver_capabilities, protocol._filter_capabilities(test_capabilities))
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in this dict must
also be defined in the protocol FSM.
"""
capabilities = {
ProtocolState.UNKNOWN: [ProtocolEvent.DISCOVER,
ProtocolEvent.START_DIRECT],
ProtocolState.COMMAND: [ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.CLOCK_SYNC,
ProtocolEvent.SCHEDULED_ACQUIRE_STATUS,
ProtocolEvent.SCHEDULED_CLOCK_SYNC,
ProtocolEvent.GET,
ProtocolEvent.SET,
ProtocolEvent.INIT_PARAMS,
ProtocolEvent.START_AUTOSAMPLE,
ProtocolEvent.START_DIRECT,
ProtocolEvent.SAMPLE_REFERENCE_OSCILLATOR,
ProtocolEvent.RECOVER_AUTOSAMPLE,
ProtocolEvent.TEST_EEPROM],
ProtocolState.OSCILLATOR: [ProtocolEvent.ACQUIRE_OSCILLATOR_SAMPLE],
ProtocolState.AUTOSAMPLE: [ProtocolEvent.GET,
ProtocolEvent.INIT_PARAMS,
ProtocolEvent.STOP_AUTOSAMPLE,
ProtocolEvent.SCHEDULED_CLOCK_SYNC,
ProtocolEvent.SCHEDULED_ACQUIRE_STATUS],
ProtocolState.DIRECT_ACCESS: [ProtocolEvent.STOP_DIRECT,
ProtocolEvent.EXECUTE_DIRECT]
}
driver = SBE54PlusInstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, capabilities)
###############################################################################
# INTEGRATION TESTS #
# Integration tests test the direct driver / instrument interaction          #
# by making direct calls via zeromq.                                          #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class SeaBird54PlusIntegrationTest(SeaBirdIntegrationTest, SeaBird54tpsMixin):
def setUp(self):
SeaBirdIntegrationTest.setUp(self)
def test_set(self):
"""
Test all set commands. Verify all exception cases.
"""
self.assert_initialize_driver()
# Sample Period. integer 1 - 240
self.assert_set(Parameter.SAMPLE_PERIOD, 1)
self.assert_set(Parameter.SAMPLE_PERIOD, 240)
self.assert_set_exception(Parameter.SAMPLE_PERIOD, 241)
self.assert_set_exception(Parameter.SAMPLE_PERIOD, 0)
self.assert_set_exception(Parameter.SAMPLE_PERIOD, -1)
self.assert_set_exception(Parameter.SAMPLE_PERIOD, 0.2)
self.assert_set_exception(Parameter.SAMPLE_PERIOD, "1")
# Read only parameters
self.assert_set_readonly(Parameter.BATTERY_TYPE, 1)
self.assert_set_readonly(Parameter.ENABLE_ALERTS, True)
def test_commands(self):
"""
Run instrument commands from both command and streaming mode.
"""
self.assert_initialize_driver()
####
# First test in command mode
####
self.assert_driver_command(ProtocolEvent.CLOCK_SYNC)
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'StatusData DeviceType')
self.assert_driver_command(ProtocolEvent.SAMPLE_REFERENCE_OSCILLATOR, regex=r'Ref osc warmup')
self.assert_driver_command(ProtocolEvent.SCHEDULED_CLOCK_SYNC)
####
# Test in streaming mode
####
# Put us in streaming
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
self.assert_driver_command(ProtocolEvent.SCHEDULED_CLOCK_SYNC)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
####
# Test a bad command
####
self.assert_driver_command_exception('ima_bad_command', exception_class=InstrumentCommandException)
def test_autosample(self):
"""
Verify that we can enter streaming and that all particles are produced
properly.
Because we have to test for three different data particles we can't use
the common assert_sample_autosample method
"""
self.assert_initialize_driver()
self.assert_set(Parameter.SAMPLE_PERIOD, 1)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
self.assert_async_particle_generation(DataParticleType.PREST_REAL_TIME, self.assert_particle_real_time,
timeout=120)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
def test_polled(self):
"""
Test that we can generate particles with commands
"""
self.assert_initialize_driver()
self.assert_particle_generation(ProtocolEvent.SAMPLE_REFERENCE_OSCILLATOR,
DataParticleType.PREST_REFERENCE_OSCILLATOR,
self.assert_particle_reference_oscillator)
def test_apply_startup_params(self):
"""
This test verifies that we can set the startup params
from autosample mode. It only verifies one parameter
change because all parameters are tested above.
"""
# Applying startup params happens for free when the driver fires up
self.assert_initialize_driver()
# Change something
self.assert_set(Parameter.SAMPLE_PERIOD, 10)
# Now try to apply params in Streaming
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE)
self.driver_client.cmd_dvr('apply_startup_params')
# All done. Verify the startup parameter has been reset
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND)
self.assert_get(Parameter.SAMPLE_PERIOD, 15)
def test_startup_params(self):
"""
Verify that startup parameters are applied correctly. Generally this
happens in the driver discovery method.
"""
# Explicitly verify these values after discover. They should match
# what the startup values should be
get_values = {
Parameter.SAMPLE_PERIOD: 15
}
# Change the values of these parameters to something before the
# driver is re-initialized. They should be blown away on reinit.
new_values = {
Parameter.SAMPLE_PERIOD: 5
}
self.assert_initialize_driver()
self.assert_startup_parameters(self.assert_driver_parameters, new_values, get_values)
# Start autosample and try again
self.assert_set_bulk(new_values)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
self.assert_startup_parameters(self.assert_driver_parameters)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_autosample_recovery(self):
"""
Test to ensure the driver will right itself when the instrument
times out in command mode and starts sampling again.
"""
# Puts the instrument into command mode.
self.assert_initialize_driver()
self.assert_set(Parameter.SAMPLE_PERIOD, 1)
# The instrument will return to streaming in 120 seconds. We
# will watch for 200.
timeout = time.time() + 200
while time.time() < timeout:
state = self.driver_client.cmd_dvr('get_resource_state')
if state == ProtocolState.AUTOSAMPLE:
return
log.debug("current state %s. recheck in 5" % state)
time.sleep(5)
self.assertFalse(True, msg="Failed to transition to streaming after 200 seconds")
def assert_acquire_status(self):
"""
Verify a status particle was generated
"""
self.clear_events()
self.assert_async_particle_generation(DataParticleType.PREST_DEVICE_STATUS, self.assert_particle_device_status,
timeout=120)
self.assert_async_particle_generation(DataParticleType.PREST_CONFIGURATION_DATA,
self.assert_particle_configuration_data, timeout=3)
self.assert_async_particle_generation(DataParticleType.PREST_EVENT_COUNTER, self.assert_particle_event_counter,
timeout=3)
self.assert_async_particle_generation(DataParticleType.PREST_HARDWARE_DATA, self.assert_particle_hardware_data,
timeout=3)
def test_scheduled_device_status_command(self):
"""
Verify the device status command can be triggered and run in command mode
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, self.assert_acquire_status, delay=120)
self.assert_current_state(ProtocolState.COMMAND)
def test_scheduled_acquire_status_autosample(self):
"""
Verify the device status command can be triggered and run in autosample mode
"""
self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, self.assert_acquire_status,
autosample_command=ProtocolEvent.START_AUTOSAMPLE, delay=120)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE)
def assert_clock_sync(self):
"""
Verify the clock is set to at least the current date
"""
self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
dt = self.assert_get(Parameter.TIME)
lt = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(timegm_to_float(time.localtime())))
self.assertTrue(lt[:12].upper() in dt.upper())
def test_scheduled_clock_sync_command(self):
"""
Verify the scheduled clock sync is triggered and functions as expected
"""
self.assert_scheduled_event(ScheduledJob.CLOCK_SYNC, self.assert_clock_sync, delay=90)
self.assert_current_state(ProtocolState.COMMAND)
def test_scheduled_clock_sync_autosample(self):
"""
Verify the scheduled clock sync is triggered and functions as expected
"""
self.assert_scheduled_event(ScheduledJob.CLOCK_SYNC, self.assert_clock_sync,
autosample_command=ProtocolEvent.START_AUTOSAMPLE, delay=90)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE)
def assert_cycle(self):
self.assert_current_state(ProtocolState.COMMAND)
self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE)
self.assert_current_state(ProtocolState.AUTOSAMPLE)
self.assert_async_particle_generation(DataParticleType.PREST_REAL_TIME, self.assert_particle_real_time,
timeout=120)
self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE)
self.assert_current_state(ProtocolState.COMMAND)
def test_discover(self):
"""
Verify we can discover from both command and auto sample modes
"""
self.assert_initialize_driver(final_state=ProtocolState.AUTOSAMPLE)
self.assert_cycle()
self.assert_cycle()
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class SeaBird54PlusQualificationTest(SeaBirdQualificationTest, SeaBird54tpsMixin):
def setUp(self):
SeaBirdQualificationTest.setUp(self)
def test_autosample(self):
"""
Verify autosample works and data particles are created
"""
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.SAMPLE_PERIOD, 1)
self.assert_sample_autosample(self.assert_particle_real_time, DataParticleType.PREST_REAL_TIME)
def test_poll(self):
"""
Verify that we can poll for a sample and take a sample from this instrument.
Also poll for other engineering data streams.
"""
self.assert_enter_command_mode()
self.assert_particle_polled(ProtocolEvent.SAMPLE_REFERENCE_OSCILLATOR,
self.assert_particle_reference_oscillator,
DataParticleType.PREST_REFERENCE_OSCILLATOR, sample_count=1, timeout=200)
def test_direct_access_telnet_mode_command(self):
"""
@brief This test verifies that the Instrument Driver
properly supports direct access to the physical
instrument. (telnet mode)
"""
###
# First test direct access and exit with a go command
# call. Also add a parameter change to verify DA
# parameters are restored on DA exit.
###
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.SAMPLE_PERIOD, 10)
# go into direct access, and muck up a setting.
self.assert_direct_access_start_telnet()
log.debug("DA Server Started. Adjust DA Parameter.")
self.tcp_client.send_data("%ssetsamplePeriod=15%s" % (NEWLINE, NEWLINE))
self.tcp_client.send_data("%sGetCD%s" % (NEWLINE, NEWLINE))
self.tcp_client.expect("samplePeriod='15'")
log.debug("DA Parameter Sample Interval Updated")
self.assert_direct_access_stop_telnet()
# verify the setting got restored.
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 10)
self.assert_get_parameter(Parameter.SAMPLE_PERIOD, 10)
###
# Test session timeout without activity
###
self.assert_direct_access_start_telnet(inactivity_timeout=120, session_timeout=30)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
###
# Test direct access session timeout with activity
###
self.assert_direct_access_start_telnet(inactivity_timeout=30, session_timeout=60)
# Send a little activity every 15 seconds to keep DA alive.
for i in range(3):
self.tcp_client.send_data(NEWLINE)
log.debug("Sending a little keep alive communication, sleeping for 15 seconds")
time.sleep(15)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 45)
###
# Test direct access disconnect
###
self.assert_direct_access_start_telnet()
self.tcp_client.disconnect()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 30)
def test_direct_access_telnet_mode_autosample(self):
"""
@brief This test verifies that the Instrument Driver
properly supports direct access to the physical
instrument. (telnet mode)
"""
###
# First test direct access and exit with a go command
# call. Also add a parameter change to verify DA
# parameters are restored on DA exit.
###
self.assert_enter_command_mode()
self.assert_set_parameter(Parameter.SAMPLE_PERIOD, 10)
# go into direct access, and muck up a setting.
self.assert_direct_access_start_telnet()
log.debug("DA Server Started. Adjust DA Parameter.")
self.tcp_client.send_data("%ssetsamplePeriod=15%s" % (NEWLINE, NEWLINE))
self.tcp_client.send_data("%sGetCD%s" % (NEWLINE, NEWLINE))
self.tcp_client.expect("samplePeriod='15'")
self.tcp_client.send_data("%sStart%s" % (NEWLINE, NEWLINE))
time.sleep(3)
log.debug("DA Parameter Sample Interval Updated")
self.assert_direct_access_stop_telnet()
# verify the setting got restored.
self.assert_state_change(ResourceAgentState.STREAMING, ProtocolState.AUTOSAMPLE, 10)
self.tcp_client.send_data("%sStart%s" % (NEWLINE, NEWLINE))
time.sleep(3)
self.assert_get_parameter(Parameter.SAMPLE_PERIOD, 10)
###
# Test session timeout without activity
###
self.assert_direct_access_start_telnet(inactivity_timeout=120, session_timeout=30)
self.tcp_client.send_data("%sStart%s" % (NEWLINE, NEWLINE))
time.sleep(3)
self.assert_state_change(ResourceAgentState.STREAMING, ProtocolState.AUTOSAMPLE, 60)
###
# Test direct access session timeout with activity
###
self.assert_direct_access_start_telnet(inactivity_timeout=30, session_timeout=60)
self.tcp_client.send_data("%sStart%s" % (NEWLINE, NEWLINE))
time.sleep(3)
# Send a little activity every 15 seconds to keep DA alive.
for i in range(3):
self.tcp_client.send_data(NEWLINE)
log.debug("Sending a little keep alive communication, sleeping for 15 seconds")
time.sleep(15)
self.assert_state_change(ResourceAgentState.STREAMING, ProtocolState.AUTOSAMPLE, 75)
###
# Test direct access disconnect
###
self.assert_direct_access_start_telnet()
self.tcp_client.send_data("%sStart%s" % (NEWLINE, NEWLINE))
time.sleep(3)
self.tcp_client.disconnect()
self.assert_state_change(ResourceAgentState.STREAMING, ProtocolState.AUTOSAMPLE, 30)
def test_direct_access_telnet_timeout(self):
"""
Verify that DA times out as expected and transitions back to command mode.
"""
self.assert_enter_command_mode()
# go into direct access, and muck up a setting.
self.assert_direct_access_start_telnet(timeout=30)
self.assertTrue(self.tcp_client)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 90)
def test_direct_access_telnet_disconnect(self):
"""
Verify that a disconnection from the DA server transitions the agent back to
command mode.
"""
self.assert_enter_command_mode()
# go into direct access, and muck up a setting.
self.assert_direct_access_start_telnet(timeout=600)
self.assertTrue(self.tcp_client)
self.tcp_client.disconnect()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 30)
def test_direct_access_telnet_autosample(self):
"""
Verify we can handle an instrument state change while in DA
"""
self.assert_enter_command_mode()
# go into direct access, and muck up a setting.
self.assert_direct_access_start_telnet(timeout=600)
self.tcp_client.send_data("%s%s" % (InstrumentCmds.START_LOGGING, NEWLINE))
self.tcp_client.expect("S>")
self.assert_sample_async(self.assert_particle_real_time, DataParticleType.PREST_REAL_TIME, 15)
self.tcp_client.disconnect()
self.assert_state_change(ResourceAgentState.STREAMING, ProtocolState.AUTOSAMPLE, 30)
def test_sample_interval_set(self):
"""
https://jira.oceanobservatories.org/tasks/browse/CISWMI-147
There was an issue raised that the sample interval set wasn't behaving
consistently. This test is intended to replicate the error and verify
the fix.
"""
self.assert_enter_command_mode()
log.debug("getting ready to set some parameters! Start watching the sniffer")
time.sleep(30)
self.assert_set_parameter(Parameter.SAMPLE_PERIOD, 15)
self.assert_start_autosample()
self.assert_stop_autosample()
self.assert_get_parameter(Parameter.SAMPLE_PERIOD, 15)
self.assert_direct_access_start_telnet()
self.assert_direct_access_stop_telnet()
self.assert_get_parameter(Parameter.SAMPLE_PERIOD, 15)
self.assert_set_parameter(Parameter.SAMPLE_PERIOD, 10)
self.assert_start_autosample()
self.assert_stop_autosample()
self.assert_get_parameter(Parameter.SAMPLE_PERIOD, 10)
self.assert_direct_access_start_telnet()
self.assert_direct_access_stop_telnet()
self.assert_get_parameter(Parameter.SAMPLE_PERIOD, 10)
self.assert_set_parameter(Parameter.SAMPLE_PERIOD, 20)
self.assert_start_autosample()
self.assert_stop_autosample()
self.assert_get_parameter(Parameter.SAMPLE_PERIOD, 20)
self.assert_direct_access_start_telnet()
self.assert_direct_access_stop_telnet()
self.assert_get_parameter(Parameter.SAMPLE_PERIOD, 20)
def test_startup_params_first_pass(self):
"""
Verify that startup parameters are applied correctly. Generally this
happens in the driver discovery method. We have two identical versions
of this test so it is run twice. First time to check and CHANGE, then
the second time to check again.
Since nose orders the tests by ascii value this should run first.
"""
self.assert_enter_command_mode()
self.assert_get_parameter(Parameter.SAMPLE_PERIOD, 15)
# Change these values anyway just in case it ran first.
self.assert_set_parameter(Parameter.SAMPLE_PERIOD, 5)
def test_startup_params_second_pass(self):
"""
Verify that startup parameters are applied correctly. Generally this
happens in the driver discovery method. We have two identical versions
of this test so it is run twice. First time to check and CHANGE, then
the second time to check again.
since nose orders the tests by ascii value this should run second.
"""
self.assert_enter_command_mode()
self.assert_get_parameter(Parameter.SAMPLE_PERIOD, 15)
# Change these values anyway just in case it ran first.
self.assert_set_parameter(Parameter.SAMPLE_PERIOD, 5)
def test_autosample_recovery(self):
"""
@brief Verify that when the instrument automatically starts autosample
the states are updated correctly
"""
self.assert_enter_command_mode()
self.assert_state_change(ResourceAgentState.STREAMING, ProtocolState.AUTOSAMPLE, 145)
def test_get_capabilities(self):
"""
@brief Verify that the correct capabilities are returned from get_capabilities
at various driver/agent states.
"""
self.assert_enter_command_mode()
##################
# Command Mode
##################
capabilities = {
AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
AgentCapabilityType.RESOURCE_COMMAND: [
ProtocolEvent.START_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.CLOCK_SYNC,
ProtocolEvent.SAMPLE_REFERENCE_OSCILLATOR,
ProtocolEvent.TEST_EEPROM,
],
AgentCapabilityType.RESOURCE_INTERFACE: None,
AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
}
self.assert_capabilities(capabilities)
##################
# DA Mode
##################
da_capabilities = copy.deepcopy(capabilities)
da_capabilities[AgentCapabilityType.AGENT_COMMAND] = [ResourceAgentEvent.GO_COMMAND]
da_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
# Test direct access disconnect
self.assert_direct_access_start_telnet(timeout=10)
self.assertTrue(self.tcp_client)
self.assert_capabilities(da_capabilities)
self.tcp_client.disconnect()
# Now do it again, but use the event to stop DA
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_direct_access_start_telnet(timeout=10)
self.assert_capabilities(da_capabilities)
self.assert_direct_access_stop_telnet()
##################
# Command Mode
##################
# We should be back in command mode from DA.
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
##################
# Streaming Mode
##################
st_capabilities = copy.deepcopy(capabilities)
st_capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
st_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
ProtocolEvent.STOP_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
]
self.assert_start_autosample()
self.assert_capabilities(st_capabilities)
self.assert_stop_autosample()
##################
# Command Mode
##################
# We should be back in command mode from streaming.
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
#######################
# Streaming Recovery
#######################
# Command mode times out after 120 seconds. This test will verify the agent states are correct
self.assert_set_parameter(Parameter.SAMPLE_PERIOD, 1)
self.assert_state_change(ResourceAgentState.STREAMING, ProtocolState.AUTOSAMPLE, 200)
self.assert_capabilities(st_capabilities)
self.assert_stop_autosample()
##################
# Command Mode
##################
# We should be back in command mode from streaming.
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
self.assert_capabilities(capabilities)
#######################
# Uninitialized Mode
#######################
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
self.assert_reset()
self.assert_capabilities(capabilities)
|
|
"""
A simple test server for integration tests.
Only understands stdio.
Uses the asyncio module and mypy types, so you'll need a modern Python.
To make this server reply to requests, send the $test/setResponse notification.
To await a method that this server should eventually (or already has) received,
send the $test/getReceived request. If the method was already received, it will
return None immediately. Otherwise, it will wait for the method. You should
have a timeout in your tests to ensure your tests won't hang forever.
To make server send out a notification, send the $test/sendNotification request
with expected notification method in params['method'] and params in params['params'].
Tests can await this request to make sure that they receive the notification before code
resumes (since the response to the request will arrive after the requested notification).
TODO: Untested on Windows.
TODO: It should also understand TCP, both as slave and master.
"""
from argparse import ArgumentParser
from enum import IntEnum
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union, Iterable
import asyncio
import json
import os
import sys
__package__ = "server"
__version__ = "0.9.3"
if sys.version_info < (3, 6):
print("only works for python3.6 and higher")
exit(1)
StringDict = Dict[str, Any]
PayloadLike = Union[List[StringDict], StringDict, None]
ENCODING = "utf-8"
class ErrorCode(IntEnum):
# Defined by JSON RPC
ParseError = -32700
InvalidRequest = -32600
MethodNotFound = -32601
InvalidParams = -32602
InternalError = -32603
serverErrorStart = -32099
serverErrorEnd = -32000
ServerNotInitialized = -32002
UnknownErrorCode = -32001
# Defined by the protocol
RequestCancelled = -32800
ContentModified = -32801
class Error(Exception):
def __init__(self, code: ErrorCode, message: str) -> None:
super().__init__(message)
self.code = code
def to_lsp(self) -> StringDict:
return {"code": self.code, "message": super().__str__()}
def __str__(self) -> str:
return f"{super().__str__()} ({self.code})"
def jsonrpc() -> StringDict:
return {"jsonrpc": "2.0"}
def make_response(request_id: int, params: PayloadLike) -> StringDict:
return {**jsonrpc(), "id": request_id, "result": params}
def make_error_response(request_id: int, err: Error) -> StringDict:
return {**jsonrpc(), "id": request_id, "error": err.to_lsp()}
def make_notification(method: str, params: PayloadLike) -> StringDict:
return {**jsonrpc(), "method": method, "params": params}
def make_request(method: str, request_id: int, params: PayloadLike) -> StringDict:
return {**jsonrpc(), "method": method, "id": request_id, "params": params}
def dump(payload: PayloadLike) -> bytes:
return json.dumps(
payload,
check_circular=False,
ensure_ascii=False,
separators=(",", ":")).encode(ENCODING)
def content_length(line: bytes) -> Optional[int]:
if line.startswith(b'Content-Length: '):
_, value = line.split(b'Content-Length: ')
value = value.strip()
try:
return int(value)
except ValueError:
raise ValueError("Invalid Content-Length header: {}".format(value))
return None
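# Illustrative behaviour of content_length (derived from the parsing above):
#   content_length(b"Content-Length: 44\r\n")             -> 44
#   content_length(b"Content-Type: application/json\r\n") -> None
#   content_length(b"Content-Length: abc\r\n")            -> raises ValueError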
class MessageType:
error = 1
warning = 2
info = 3
log = 4
class StopLoopException(Exception):
pass
class Session:
def __init__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
self._reader = reader
self._writer = writer
self._response_handlers: Dict[int, Tuple[Callable, Callable]] = {}
self._request_handlers: Dict[str,
Callable[[PayloadLike], PayloadLike]] = {}
self._notification_handlers: Dict[str,
Callable[[PayloadLike], None]] = {}
# initialize/shutdown/exit dance
self._received_shutdown = False
# properties used for testing purposes
self._responses: Dict[str, PayloadLike] = {}
self._received: Set[str] = set()
self._received_cv = asyncio.Condition()
self._install_handlers()
def _log(self, message: str) -> None:
self._notify("window/logMessage",
{"type": MessageType.info, "message": message})
def _notify(self, method: str, params: PayloadLike) -> None:
asyncio.get_event_loop().create_task(self._send_payload(
make_notification(method, params)))
def _reply(self, request_id: int, params: PayloadLike) -> None:
asyncio.get_event_loop().create_task(self._send_payload(
make_response(request_id, params)))
def _error(self, request_id: int, err: Error) -> None:
asyncio.get_event_loop().create_task(self._send_payload(
make_error_response(request_id, err)))
async def _send_payload(self, payload: StringDict) -> None:
body = dump(payload)
content = (
f"Content-Length: {len(body)}\r\n".encode(ENCODING),
"Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n\r\n".encode(ENCODING),
body)
self._writer.writelines(content)
await self._writer.drain()
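# Wire format produced by _send_payload (illustrative; N is the body length):
#   Content-Length: N\r\n
#   Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n
#   \r\n
#   <N bytes of UTF-8 encoded JSON>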
async def _receive_payload(self, payload: StringDict) -> None:
try:
if "method" in payload:
if "id" in payload:
await self._handle("request", payload, self._request_handlers, int(payload["id"]))
else:
await self._handle("notification", payload, self._notification_handlers, None)
elif "id" in payload:
await self._response_handler(payload)
else:
self._log(f"Unknown payload type: {payload}")
except Exception as err:
self._log(f"Error handling server payload: {err}")
async def _response_handler(self, response: StringDict) -> None:
request_id = int(response["id"])
handler, error_handler = self._response_handlers.pop(
request_id, (None, None))
assert handler
if "result" in response and "error" not in response:
if handler:
await handler(response["result"])
else:
self._log(f"no response for request {request_id}")
elif "result" not in response and "error" in response:
error = response["error"]
if error_handler:
await error_handler(error)
def _on_request(self, request_method: str, handler: Callable) -> None:
self._request_handlers[request_method] = handler
def _on_notification(self, notification_method: str, handler: Callable) -> None:
self._notification_handlers[notification_method] = handler
async def _handle(self, typestr: str, message: 'Dict[str, Any]', handlers: Dict[str, Callable],
request_id: Optional[int]) -> None:
method = message.get("method", "")
params = message.get("params")
unhandled = True
if not method.startswith("$test/"):
self._received.add(method)
async with self._received_cv:
self._received_cv.notify_all()
unhandled = False
handler = handlers.get(method)
if handler is None:
if method in self._responses:
assert request_id is not None
self._reply(request_id, self._responses.pop(method))
elif request_id is not None:
self._error(request_id, Error(
ErrorCode.MethodNotFound, "method not found"))
else:
if unhandled:
self._log(f"unhandled {typestr} {method}")
elif request_id is not None:
# handle request
try:
self._reply(request_id, await handler(params))
except Error as ex:
self._error(request_id, ex)
except Exception as ex:
self._error(request_id, Error(
ErrorCode.InternalError, str(ex)))
else:
# handle notification
try:
await handler(params)
except asyncio.CancelledError:
return
except Exception as ex:
if not self._received_shutdown:
self._notify("window/logMessage", {"type": MessageType.error, "message": str(ex)})
async def _handle_body(self, body: bytes) -> None:
try:
await self._receive_payload(json.loads(body))
except IOError as ex:
self._log(f"malformed {ENCODING}: {ex}")
except UnicodeDecodeError as ex:
self._log(f"malformed {ENCODING}: {ex}")
except json.JSONDecodeError as ex:
self._log(f"malformed JSON: {ex}")
async def run_forever(self) -> bool:
try:
while not self._reader.at_eof():
line = await self._reader.readline()
if not line:
continue
try:
num_bytes = content_length(line)
except ValueError:
continue
if num_bytes is None:
continue
while line and line.strip():
line = await self._reader.readline()
if not line:
continue
body = await self._reader.readexactly(num_bytes)
asyncio.get_event_loop().create_task(self._handle_body(body))
except (BrokenPipeError, ConnectionResetError, StopLoopException):
pass
return self._received_shutdown
def _install_handlers(self) -> None:
self._on_request("initialize", self._initialize)
self._on_request("shutdown", self._shutdown)
self._on_notification("exit", self._on_exit)
self._on_request("$test/getReceived", self._get_received)
self._on_request("$test/sendNotification", self._send_notification)
self._on_notification("$test/setResponse", self._on_set_response)
async def _on_set_response(self, params: PayloadLike) -> None:
if isinstance(params, dict):
self._responses[params["method"]] = params["response"]
async def _send_notification(self, params: PayloadLike) -> None:
method, payload = self._validate_request_params(params)
self._notify(method, payload)
return None
async def _get_received(self, params: PayloadLike) -> PayloadLike:
method, payload = self._validate_request_params(params)
async with self._received_cv:
while True:
try:
self._received.remove(method)
return None
except KeyError:
pass
await self._received_cv.wait()
def _validate_request_params(self, params: PayloadLike) -> Tuple[str, Optional[Union[Dict, List]]]:
if not isinstance(params, dict):
raise Error(ErrorCode.InvalidParams, "expected params to be a dictionary")
if "method" not in params:
raise Error(ErrorCode.InvalidParams, 'expected "method" key')
if not isinstance(params["method"], str):
raise Error(ErrorCode.InvalidParams, 'expected "method" key to be a string')
return (params["method"], params.get('params'))
async def _initialize(self, params: PayloadLike) -> PayloadLike:
if not isinstance(params, dict):
raise Error(ErrorCode.InvalidParams,
"expected params to be a dictionary")
init_options = params.get("initializationOptions", {})
if not isinstance(init_options, dict):
raise Error(ErrorCode.InvalidParams,
"expected initializationOptions to be a dictionary")
return init_options.get("serverResponse", {})
async def _shutdown(self, params: PayloadLike) -> PayloadLike:
if params is not None:
raise Error(ErrorCode.InvalidParams, "expected shutdown params to be null")
self._received_shutdown = True
return None
async def _on_exit(self, params: PayloadLike) -> None:
if params is not None:
raise Error(ErrorCode.InvalidParams, "expected exit params to be null")
self._reader.set_exception(StopLoopException())
# START: https://stackoverflow.com/a/52702646/990142
async def stdio() -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]:
loop = asyncio.get_event_loop()
if sys.platform == 'win32':
return _win32_stdio(loop)
else:
return await _unix_stdio(loop)
async def _unix_stdio(loop: asyncio.AbstractEventLoop) -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]:
reader = asyncio.StreamReader(loop=loop)
def reader_factory() -> asyncio.StreamReaderProtocol:
return asyncio.StreamReaderProtocol(reader)
def writer_factory() -> asyncio.streams.FlowControlMixin:
return asyncio.streams.FlowControlMixin()
await loop.connect_read_pipe(reader_factory, sys.stdin)
pipe = os.fdopen(sys.stdout.fileno(), 'wb')
writer_transport, writer_protocol = await loop.connect_write_pipe(writer_factory, pipe)
writer = asyncio.streams.StreamWriter(writer_transport, writer_protocol, None, loop)
return reader, writer
def _win32_stdio(loop: asyncio.AbstractEventLoop) -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]:
# no support for asyncio stdio yet on Windows, see https://bugs.python.org/issue26832
# use an executor to read from stdin and write to stdout
# note: if nothing ever drains the writer explicitly, no flushing ever takes place!
class Reader:
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
self.loop = loop
self.stdin = sys.stdin.buffer
self.__exception: Optional[Exception] = None
def at_eof(self) -> bool:
return self.__exception is not None
def set_exception(self, exception: Exception) -> None:
self.__exception = exception
def __check(self) -> None:
if self.__exception is not None:
raise self.__exception
async def readline(self) -> bytes:
self.__check()
# a single call to sys.stdin.readline() is thread-safe
return await self.loop.run_in_executor(None, self.stdin.readline)
async def readexactly(self, n: int) -> bytes:
self.__check()
return await self.loop.run_in_executor(None, self.stdin.read, n)
class Writer:
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
self.loop = loop
self.buffer: List[bytes] = []
self.stdout = sys.stdout.buffer
def write(self, data: bytes) -> None:
self.buffer.append(data)
def writelines(self, lines: Iterable[bytes]) -> None:
self.buffer.extend(lines)
async def drain(self) -> None:
data, self.buffer = self.buffer, []
def do_blocking_drain() -> None:
self.stdout.write(b''.join(data))
self.stdout.flush()
await self.loop.run_in_executor(None, do_blocking_drain)
return Reader(loop), Writer(loop) # type: ignore
# END: https://stackoverflow.com/a/52702646/990142
async def main() -> bool:
reader, writer = await stdio()
session = Session(reader, writer)
return await session.run_forever()
if __name__ == '__main__':
parser = ArgumentParser(prog=__package__, description=__doc__)
parser.add_argument("-v", "--version", action="store_true", help="print version and exit")
args = parser.parse_args()
if args.version:
print(__package__, __version__)
exit(0)
loop = asyncio.get_event_loop()
shutdown_received = False
try:
shutdown_received = loop.run_until_complete(main())
except KeyboardInterrupt:
pass
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
exit(0 if shutdown_received else 1)
|
|
"""
Kernel dump configuration files
===============================
This module contains the following parsers:
KDumpConf - file ``/etc/kdump.conf``
------------------------------------
KexecCrashLoaded - file ``/sys/kernel/kexec_crash_loaded``
----------------------------------------------------------
SysconfigKdump - file ``/etc/sysconfig/kdump``
----------------------------------------------
"""
import re
from urlparse import urlparse
from .. import Parser, parser
from insights.specs import kdump_conf
from insights.specs import kexec_crash_loaded
from insights.specs import kexec_crash_size
@parser(kdump_conf)
class KDumpConf(Parser):
"""
A dictionary like object for the values of the kdump.conf file.
Attributes::
lines (list): raw lines from the file, in order
data (dict): a dictionary of options set in the data.
comments(list): fully commented lines
inline_comments(list): lines containing inline comments
The ``data`` property has two special behaviours:
* If an option - e.g. ``blacklist`` - is repeated, its values are
collected together in a list. Options that only appear once have
their values stored as is.
* The ``options`` option is special - it appears in the form ``option
module value``. The ``options`` key in the data dictionary is therefore
stored as a dictionary, keyed on the ``module`` name.
Main helper functions:
* ``options`` - the ``options`` value in the data(see above).
Sample ``/etc/kdump.conf`` file::
path /var/crash
core_collector makedumpfile -c --message-level 1 -d 24
default shell
Examples:
>>> kd = shared[KDumpConf]
>>> kd.using_local_disk
True
>>> kd.is_ssh()
False
>>> 'path' in kd.data
True
"""
NET_COMMANDS = set(['nfs', 'net', 'ssh'])
def parse_content(self, content):
lines = list(content)
opt_kw = 'options'
items = {opt_kw: {}}
# Paul Wayper - 2017-03-27 - why do we care about comments?
comments = []
inline_comments = []
for _line in content:
line = _line.strip()
if not line:
continue
# Ignore lines that are entirely comments
if line.startswith('#'):
comments.append(_line)
continue
# Remove comments
if '#' in line:
comment_start = line.index('#')
inline_comments.append(_line)
line = line[0:comment_start]
# Settings of the form 'option value' where value is the rest of
# the line. No equals is expected here.
lineparts = [s.strip() for s in line.split(None, 1)]
# All options must have a value
if len(lineparts) < 2:
continue
opt, value = (lineparts)
if opt != opt_kw:
# Some items can be repeated - if they are, create a list of
# their values
if opt in items:
# Append to the list, creating if necessary
if not isinstance(items[opt], list):
items[opt] = [items[opt]]
items[opt].append(value)
else:
items[opt] = value
else:
# 'options' is special - it becomes a dictionary
mod, rest = value.split(None, 1)
items[opt_kw][mod] = rest.strip()
self.lines = lines
self.data = items
self.comments = comments
self.inline_comments = inline_comments
def options(self, module):
"""
Returns the options for this module in the settings.
Arguments:
module(str): The module name
Returns:
(str) The module's options, or '' if either ``options`` or
``module`` is not found.
"""
return self.get('options', {}).get(module, '')
def _network_lines(self, net_commands=NET_COMMANDS):
"""
A list of all the options in the given list of commands, defaulting
to the list of network destinations for kernel dumps (i.e. 'ssh',
'nfs' and 'net').
"""
return filter(None, [self.get(n) for n in net_commands])
def get_ip(self, net_commands=NET_COMMANDS):
"""
Find the first IP address in the given list of commands. Uses
``_network_lines`` above to find the list of commands. The first
line that lists an IP address is returned, otherwise None is returned.
"""
ip_re = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
for l in self._network_lines(net_commands):
matched_ip = ip_re.search(l)
if matched_ip:
return matched_ip.group()
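# Illustrative example (hypothetical config): with ``net [email protected]:/export/crash``
# in kdump.conf, get_ip() would return '10.0.0.1', and get_hostname() below would
# also return '10.0.0.1'.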
def is_ssh(self):
"""
Is the destination of the kernel dump an ssh connection?
"""
return 'ssh' in self or ('net' in self and '@' in self['net'])
def is_nfs(self):
"""
Is the destination of the kernel dump a NFS or NFSv4 connection?
"""
return (
('nfs' in self or 'nfs4' in self) or
('net' in self and '@' not in self['net'])
)
def get_hostname(self, net_commands=NET_COMMANDS):
"""
Find the first host name in the given list of commands. Uses
``_network_lines`` above to find the list of commands. The host name
of the first such line (as interpreted by ``urlparse``) is returned,
or None if there are no network destination lines.
"""
for l in self._network_lines(net_commands):
# required for urlparse to interpret as host instead of
# relative path
if '//' not in l:
l = '//' + l
netloc = urlparse(l).netloc
# strip user:pass@
i = netloc.find('@')
if i != -1:
netloc = netloc[i + 1:]
# strip port
return netloc.rsplit(':', 1)[0]
@property
def ip(self):
"""
Uses get_ip() above to give the first IP address found in the list of
crash dump destinations.
"""
return self.get_ip()
@property
def hostname(self):
"""
Uses get_hostname() above to give the first host name found in the
list of crash dump destinations.
"""
return self.get_hostname()
@property
def using_local_disk(self):
"""
Is kdump configured to only use local disk?
The logic used here is the first of these conditions:
* If 'raw' is given as an option, then the dump is local.
* If 'ssh', 'net', 'nfs', or 'nfs4' is given, then the dump is NOT local.
* Otherwise, the dump is local.
"""
# The previous version used iteration across self.data.keys(), which
# is of course non-repeatable because hash key ordering may change.
# So we reverted to logic.
return ('raw' in self.data) or (not (
'ssh' in self.data or 'net' in self.data or
'nfs' in self.data or 'nfs4' in self.data)
)
def __getitem__(self, key):
"""
Return the configuration option of this key. Integer keys are
explicitly not supported.
"""
if isinstance(key, int):
raise TypeError("Parser does not support integer indexes")
return self.data[key]
def get(self, key, default=None):
"""
Return the configuration option of this key, or the default if the
key is not found and the default is defined, otherwise None.
"""
return self[key] if key in self else default
def __contains__(self, key):
"""
Is the given key a configuration option in the file?
"""
return key in self.data
@parser(kexec_crash_loaded)
class KexecCrashLoaded(Parser):
"""
A simple parser to determine if a crash kernel (i.e. a second kernel
capable of capturing the machine state should the main kernel crash) is
present.
This simply sets the ``is_loaded`` attribute to whether the
``/sys/kernel/kexec_crash_loaded`` file has the value ``1``.
"""
def parse_content(self, content):
if len(content) == 0:
self.is_loaded = False
return
line = list(content)[0].strip()
self.is_loaded = line == '1'
@parser(kexec_crash_size)
class KexecCrashSize(Parser):
"""
Parses the `/sys/kernel/kexec_crash_size` file which tells the
reserved memory size for the crash kernel.
Attributes
----------
size (int): reserved memory size for the crash kernel, or 0 if not found.
"""
def parse_content(self, content):
self.size = 0
if len(content) == 0:
return
size = list(content)[0].strip()
if size.isdigit():
self.size = int(size)
|
|
import pytest
import unittest
class TestGatherFilepathList(unittest.TestCase):
def setUp(self):
# setup
import os
import pkg_resources as p
from qap.script_utils import gather_filepath_list
self.gather_filepath_list = gather_filepath_list
# inputs
self.data_folder = \
p.resource_filename("qap", os.path.join("test_data",
"data_folder"))
# outputs
self.ref_path_list = [
"site_1/sub_01/ses_01/anat_1/anatomical_scan.nii.gz",
"site_1/sub_01/ses_01/rest_1/functional_scan.nii.gz"]
def test_custom_filepaths(self):
test_path_list = self.gather_filepath_list(self.data_folder)
self.assertListEqual(self.ref_path_list, test_path_list)
@pytest.mark.long
class TestPullS3Sublist(unittest.TestCase):
# will fail if no internet connection
# use this test fixture periodically
def setUp(self):
# setup
from qap.script_utils import pull_s3_sublist
self.pull_s3_sublist = pull_s3_sublist
# inputs
self.bids_path = "s3://fcp-indi/data/Projects/CORR/RawDataBIDS"
self.custom_path = "s3://fcp-indi/data/Projects/CORR/RawData"
self.invalid_bucket_path = "s3://fcp--indi/data/Projects/CORR/RawDataBIDS"
self.invalid_dir_path = "s3://fcp-indi/data/Projects/corr/RawDataBIDS"
# outputs
self.bids_s3_list = [
'BMB_1/T1w.json',
'BMB_1/sub-0003001/ses-1/anat/sub-0003001_ses-1_run-1_T1w.nii.gz',
'BMB_1/sub-0003001/ses-1/func/sub-0003001_ses-1_task-rest_run-1_bold.nii.gz',
'BMB_1/sub-0003001/ses-1/func/sub-0003001_ses-1_task-rest_run-2_bold.nii.gz',
'BMB_1/sub-0003001/sub-0003001_sessions.tsv']
self.custom_s3_list = [
'BMB_1/0003001/session_1/anat_1/anat.nii.gz',
'BMB_1/0003001/session_1/rest_1/rest.nii.gz',
'BMB_1/0003001/session_1/rest_2/rest.nii.gz',
'BMB_1/0003002/session_1/anat_1/anat.nii.gz',
'BMB_1/0003002/session_1/rest_1/rest.nii.gz']
def test_BIDS(self):
test_bids_s3_list = self.pull_s3_sublist(self.bids_path)
self.assertListEqual(self.bids_s3_list, test_bids_s3_list[0:5])
def test_custom(self):
test_custom_s3_list = self.pull_s3_sublist(self.custom_path)
self.assertListEqual(self.custom_s3_list, test_custom_s3_list[0:5])
def test_invalid_bucket_name(self):
with self.assertRaises(Exception):
self.pull_s3_sublist(self.invalid_bucket_path)
def test_wrong_dirpath(self):
test_wrong_list = self.pull_s3_sublist(self.invalid_dir_path)
self.assertEquals(0, len(test_wrong_list))
def test_invalid_creds_path(self):
with self.assertRaises(Exception):
self.pull_s3_sublist(self.bids_path, "/path/to/nothing.csv")
class TestParseRawDataList(unittest.TestCase):
# for non-BIDS data directory formats
def setUp(self):
# setup
from qap.script_utils import parse_raw_data_list
self.parse_raw_data_list = parse_raw_data_list
# inputs
self.local_data_folder = "/data/dir"
self.local_file_list = ["site_1/sub_01/ses_01/anat_1/mprage.nii.gz",
"site_1/sub_02/ses_01/func_1/rest.nii.gz"]
self.wrong_file_list = ["site_1/sub_01/anat_1/mprage.nii.gz"]
self.s3_data_folder = "s3://data/Projects/RawData"
self.s3_file_list = ["site_1/sub_01/ses_01/anat_1/mprage.nii.gz",
"site_1/sub_02/ses_01/func_1/rest.nii.gz"]
# outputs
self.ref_local_subdict = {
'sub_01': {
'ses_01': {
'anatomical_scan': {
'anat_1': '/data/dir/site_1/sub_01/ses_01/anat_1/mprage.nii.gz'},
'site_name': 'site_1'}},
'sub_02': {
'ses_01': {
'functional_scan': {
'func_1': '/data/dir/site_1/sub_02/ses_01/func_1/rest.nii.gz'},
'site_name': 'site_1'}}}
def test_local_filepaths(self):
test_local = self.parse_raw_data_list(self.local_file_list,
self.local_data_folder)
self.assertDictEqual(self.ref_local_subdict, test_local)
def test_s3_filepaths(self):
# TODO
pass
def test_inclusion(self):
ref_subdict = self.ref_local_subdict
del ref_subdict["sub_02"]
test_inc = self.parse_raw_data_list(self.local_file_list,
self.local_data_folder,
inclusion_list=["sub_01"])
self.assertDictEqual(ref_subdict, test_inc)
def test_wrong_dir_format(self):
# only comes out empty because there's only one entry in the input
# list
with self.assertRaisesRegexp(Exception, "came out empty"):
self.parse_raw_data_list(self.wrong_file_list,
self.local_data_folder)
class TestCheckCSVMissingSubs(unittest.TestCase):
def setUp(self):
# setup
import os
import pandas as pd
import pkg_resources as p
self.maxDiff = None
from qap.script_utils import check_csv_missing_subs
self.check_csv = check_csv_missing_subs
# inputs
anat_csv = \
p.resource_filename("qap", os.path.join("test_data",
"qap_anatomical_spatial_5rows.csv"))
func_csv = \
p.resource_filename("qap", os.path.join("test_data",
"qap_functional_spatial_5subs.csv"))
short_anat_csv = \
p.resource_filename("qap", os.path.join("test_data",
"qap_anatomical_spatial_3rows.csv"))
short_func_csv = \
p.resource_filename("qap", os.path.join("test_data",
"qap_functional_spatial_3subs.csv"))
self.anat_df = pd.read_csv(anat_csv, dtype={"Participant": str})
self.func_df = pd.read_csv(func_csv, dtype={"Participant": str})
self.short_anat_df = pd.read_csv(short_anat_csv,
dtype={"Participant": str})
self.short_func_df = pd.read_csv(short_func_csv,
dtype={"Participant": str})
self.data_dict = {
'0003001': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003001/session_1/anat_1/anat.nii.gz'},
"functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003001/session_1/rest_1/rest.nii.gz',
"rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003001/session_1/rest_2/rest.nii.gz'},
"site_name": "BMB_1"}},
'0003002': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003002/session_1/anat_1/anat.nii.gz'},
"functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003002/session_1/rest_1/rest.nii.gz',
"rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003002/session_1/rest_2/rest.nii.gz'},
"site_name": "BMB_1"}},
'0003004': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003004/session_1/anat_1/anat.nii.gz'},
"functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003004/session_1/rest_1/rest.nii.gz',
"rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003004/session_1/rest_2/rest.nii.gz'},
"site_name": "BMB_1"}},
'0003006': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003006/session_1/anat_1/anat.nii.gz'},
"functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003006/session_1/rest_1/rest.nii.gz',
"rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003006/session_1/rest_2/rest.nii.gz'},
"site_name": "BMB_1"}},
'0003007': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003007/session_1/anat_1/anat.nii.gz'},
"functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003007/session_1/rest_1/rest.nii.gz',
"rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003007/session_1/rest_2/rest.nii.gz'},
"site_name": "BMB_1"}}}
# outputs
self.anat_missing_dict = {
'0003006': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003006/session_1/anat_1/anat.nii.gz'}}},
'0003007': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003007/session_1/anat_1/anat.nii.gz'}}}}
self.func_missing_dict = {
'0003006': {"session_1": {"functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003006/session_1/rest_1/rest.nii.gz',
"rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003006/session_1/rest_2/rest.nii.gz'}}},
'0003007': {"session_1": {"functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003007/session_1/rest_1/rest.nii.gz',
"rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003007/session_1/rest_2/rest.nii.gz'}}}}
def test_anat_no_missing(self):
ret = self.check_csv(self.anat_df, self.data_dict, "anat")
self.assertEquals(ret, None)
def test_anat_missing(self):
ret = self.check_csv(self.short_anat_df, self.data_dict, "anat")
self.assertDictEqual(ret, self.anat_missing_dict)
def test_func_no_missing(self):
ret = self.check_csv(self.func_df, self.data_dict, "func")
self.assertEquals(ret, None)
def test_func_missing(self):
ret = self.check_csv(self.short_func_df, self.data_dict, "func")
self.assertDictEqual(ret, self.func_missing_dict)
@pytest.mark.quick
def test_gather_custom_raw_data():
from qap.script_utils import gather_custom_raw_data
# we are starting in the directory containing the site folders!
site_folder = "/home/data"
format = "/{site}/{participant}/{session}/{series}"
anatomical_keywords = "mprage"
functional_keywords = "rest func"
filepath_list = [
"/home/data/site01/sub01/sess01/anat_1/mprage.nii.gz",
"/home/data/site01/sub01/sess02/anat_1/mprage.nii.gz",
"/home/data/site01/sub02/sess01/anat_1/mprage.nii.gz",
"/home/data/site01/sub02/sess02/anat_1/mprage.nii.gz",
"/home/data/site01/sub01/sess01/rest_1/rest.nii.gz",
"/home/data/site01/sub01/sess01/rest_2/rest.nii.gz",
"/home/data/site01/sub01/sess02/rest_1/func.nii.gz",
"/home/data/site01/sub02/sess01/rest_1/rest.nii.gz",
"/home/data/site01/sub02/sess01/rest_2/rest.nii.gz",
"/home/data/site01/sub02/sess02/rest_1/func.nii.gz",
]
# include sites
ref_sub_dict = {
'sub01': {'sess01': {'anatomical_scan': {'anat_1': '/home/data/site01/sub01/sess01/anat_1/mprage.nii.gz'},
'functional_scan': {'rest_1': '/home/data/site01/sub01/sess01/rest_1/rest.nii.gz',
'rest_2': '/home/data/site01/sub01/sess01/rest_2/rest.nii.gz'},
'site_name': 'site01'},
'sess02': {'anatomical_scan': {'anat_1': '/home/data/site01/sub01/sess02/anat_1/mprage.nii.gz'},
'functional_scan': {'rest_1': '/home/data/site01/sub01/sess02/rest_1/func.nii.gz'},
'site_name': 'site01'}},
'sub02': {'sess01': {'anatomical_scan': {'anat_1': '/home/data/site01/sub02/sess01/anat_1/mprage.nii.gz'},
'functional_scan': {'rest_1': '/home/data/site01/sub02/sess01/rest_1/rest.nii.gz',
'rest_2': '/home/data/site01/sub02/sess01/rest_2/rest.nii.gz'},
'site_name': 'site01'},
'sess02': {'anatomical_scan': {'anat_1': '/home/data/site01/sub02/sess02/anat_1/mprage.nii.gz'},
'functional_scan': {'rest_1': '/home/data/site01/sub02/sess02/rest_1/func.nii.gz'},
'site_name': 'site01'}}}
sub_dict = gather_custom_raw_data(filepath_list, site_folder, format,
anatomical_keywords, functional_keywords)
assert ref_sub_dict == sub_dict
@pytest.mark.quick
def test_gather_custom_raw_data_scans_folder():
from qap.script_utils import gather_custom_raw_data
# we are starting in the directory containing the site folders!
site_folder = "/home/data"
format = "/{site}/{participant}/{session}/scans/{series}"
anatomical_keywords = "mprage"
functional_keywords = "rest func"
# inclusion of a "scans" folder in between the session and scan folders
filepath_list = [
"/home/data/site01/sub01/sess01/scans/anat_1/mprage.nii.gz",
"/home/data/site01/sub01/sess02/scans/anat_1/mprage.nii.gz",
"/home/data/site01/sub02/sess01/scans/anat_1/mprage.nii.gz",
"/home/data/site01/sub02/sess02/scans/anat_1/mprage.nii.gz",
"/home/data/site01/sub01/sess01/scans/rest_1/rest.nii.gz",
"/home/data/site01/sub01/sess01/scans/rest_2/rest.nii.gz",
"/home/data/site01/sub01/sess02/scans/rest_1/func.nii.gz",
"/home/data/site01/sub02/sess01/scans/rest_1/rest.nii.gz",
"/home/data/site01/sub02/sess01/scans/rest_2/rest.nii.gz",
"/home/data/site01/sub02/sess02/scans/rest_1/func.nii.gz",
]
# include sites
ref_sub_dict = {
'sub01': {'sess01': {'anatomical_scan': {'anat_1': '/home/data/site01/sub01/sess01/scans/anat_1/mprage.nii.gz'},
'functional_scan': {'rest_1': '/home/data/site01/sub01/sess01/scans/rest_1/rest.nii.gz',
'rest_2': '/home/data/site01/sub01/sess01/scans/rest_2/rest.nii.gz'},
'site_name': 'site01'},
'sess02': {'anatomical_scan': {'anat_1': '/home/data/site01/sub01/sess02/scans/anat_1/mprage.nii.gz'},
'functional_scan': {'rest_1': '/home/data/site01/sub01/sess02/scans/rest_1/func.nii.gz'},
'site_name': 'site01'}},
'sub02': {'sess01': {'anatomical_scan': {'anat_1': '/home/data/site01/sub02/sess01/scans/anat_1/mprage.nii.gz'},
'functional_scan': {'rest_1': '/home/data/site01/sub02/sess01/scans/rest_1/rest.nii.gz',
'rest_2': '/home/data/site01/sub02/sess01/scans/rest_2/rest.nii.gz'},
'site_name': 'site01'},
'sess02': {'anatomical_scan': {'anat_1': '/home/data/site01/sub02/sess02/scans/anat_1/mprage.nii.gz'},
'functional_scan': {'rest_1': '/home/data/site01/sub02/sess02/scans/rest_1/func.nii.gz'},
'site_name': 'site01'}}}
sub_dict = gather_custom_raw_data(filepath_list, site_folder, format,
anatomical_keywords, functional_keywords)
assert ref_sub_dict == sub_dict
|
|
# Authors: Dirko Coetsee
# License: 3-clause BSD
""" Implements feature extraction methods to use with HACRF models. """
import numpy as np
import functools
import itertools
class PairFeatureExtractor(object):
"""Extract features from sequence pairs.
    For each feature, a grid is constructed for a sequence pair. The
features are stacked, producing a 3 dimensional matrix of
dimensions:
(length of sequence 1) X (length of sequence 2) X (number of features)
For example, a 'beginning' character feature grid for the sequences,
'kaas' and 'cheese' could look like this.
c h e e s e
k 1 1 1 1 1 1
a 1 0 0 0 0 0
a 1 0 0 0 0 0
s 1 0 0 0 0 0
These grids are made from two different types of feature
functions: real and sparse.
Real features are functions of the form:
def some_feature_function(array1, array2):
...
return feature_grid
    Given two sequences, s1 and s2, return a numpy.array with dimensions
(length of array1) X (length of array2).
For performance reasons, we take advantage of numpy broadcasting, and
array1 is a column array and array2 is a row array.
For a 'matching character' feature between 'kaas' and 'cheese', the
sequences are transformed and then we use broadcasting
> array1 = numpy.array([['k'],
['a'],
['a'],
['s']])
    > array2 = numpy.array([['c', 'h', 'e', 'e', 's', 'e']])
> array1 == array2
numpy.array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0]])
    When writing your own real feature functions, you can assume that
the arrays will come in with the right shape.
Sparse feature functions look similar:
def some_feature_function(i, j, s1, s2):
...
return some_index, total_vector_length
but they always return two ints. The first is the index of the
    element that should be 1 and the second is the total length of the
    vector. So for example if (4, 5) is returned, then the feature
vector [0, 0, 0, 0, 1] is constructed.
Parameters
----------
real: list: optional (default=[])
List of functions of the form
            def some_feature_function(array1, array2):
                ...
                return feature_grid
sparse: list: optional (default=[])
List of functions of the form
def some_feature_function(i, j, s1, s2):
...
return some_index, total_vector_length
"""
def __init__(self, real=None, sparse=None):
self._binary_features = []
if real:
self._binary_features = real
self._sparse_features = []
if sparse:
self._sparse_features = sparse
self.K = (len(self._binary_features)
+ sum(num_feats for _, num_feats in self._sparse_features))
def fit_transform(self, raw_X, y=None):
"""Like transform. Transform sequence pairs to feature arrays that can be used as input to `Hacrf` models.
Parameters
----------
raw_X : List of (sequence1_n, sequence2_n) pairs, one for each training example n.
y : (ignored)
Returns
-------
X : List of numpy ndarrays, each with shape = (I_n, J_n, K), where I_n is the length of sequence1_n, J_n is the
length of sequence2_n, and K is the number of features.
Feature matrix list, for use with estimators or further transformers.
"""
return self.transform(raw_X)
def transform(self, raw_X, y=None):
"""Transform sequence pairs to feature arrays that can be used as input to `Hacrf` models.
Parameters
----------
raw_X : List of (sequence1_n, sequence2_n) pairs, one for each training example n.
y : (ignored)
Returns
-------
X : List of numpy ndarrays, each with shape = (I_n, J_n, K), where I_n is the length of sequence1_n, J_n is the
length of sequence2_n, and K is the number of features.
Feature matrix list, for use with estimators or further transformers.
"""
return [self._extract_features(self._to_array(sequence1).T,
self._to_array(sequence2))
for sequence1, sequence2 in raw_X]
def _extract_features(self, array1, array2):
""" Helper to extract features for one data point. """
feature_array = np.zeros((array1.size, array2.size, self.K),
dtype='float64')
for k, feature_function in enumerate(self._binary_features):
feature_array[..., k] = feature_function(array1, array2)
if self._sparse_features:
array1 = array1.T[0]
array2 = array2[0]
n_binary_features = len(self._binary_features)
for i, j in np.ndindex(array1.size, array2.size):
k = n_binary_features
for feature_function, num_features in self._sparse_features:
feature_array[i, j, k + feature_function(i, j, array1, array2)] = 1.0
k += num_features
return feature_array
def _to_array(self, sequence):
return np.array(tuple(sequence), ndmin=2)
class StringPairFeatureExtractor(PairFeatureExtractor):
""" Extract features from sequence pairs.
A grid is constructed for each sequence pair, for example for ("kaas", "cheese"):
s * . . . @ .
a * . . . . .
a * . . . . .
k * * * * * *
c h e e s e
For each element in the grid, a feature vector is constructed. The elements in the feature
vector are determined by which features are active at that position in the grid. So for the
example above, the 'match' feature will be 0 in every vector in every position except the
position indicated with '@', where it will be 1. The 'start' feature will be 1 in all the
positions with '*' and 0 everywhere else.
Parameters
----------
bias: float: optional (default=1.0)
A bias term that is always added to every position in the lattice.
start: boolean: optional
Binary feature that activates at the start of either sequence.
end: boolean: optional
Binary feature that activates at the end of either sequence.
match: boolean: optional
Binary feature that activates when elements at a position are equal.
numeric: boolean, optional
Binary feature that activates when all elements at a position are numerical.
transition: boolean, optional
Adds binary features for pairs of (lower case) input characters.
"""
# Constants
CHARACTERS = 'abcdefghijklmnopqrstuvwxyz0123456789,./;\'\-=<>?:"|_+!@#$%^&*() '
def __init__(self, bias=1.0, start=False, end=False, match=False, numeric=False, transition=False):
# TODO: For longer strings, tokenize and use Levenshtein
# distance up until a lattice position. Other (possibly)
# useful features might be whether characters are consonant or
# vowel, punctuation, case.
binary_features_active = [True, start, end, match, numeric]
binary_features = [functools.partial(biases, bias=bias),
starts,
ends,
matches,
digits]
self._binary_features = [feature
for feature, active
in zip(binary_features,
binary_features_active)
if active]
self._sparse_features = []
if transition:
characters_to_index = {character: index for index, character in enumerate(self.CHARACTERS)}
curried_charIndex = functools.partial(charIndex,
char2index = characters_to_index)
self._sparse_features.append((curried_charIndex,
len(characters_to_index) ** 2))
self.K = (len(self._binary_features)
+ sum(num_feats for _, num_feats in self._sparse_features))
def _to_array(self, sequence):
return np.asarray(tuple(sequence)).reshape(1, -1)
def charIndex(i, j, s1, s2, char2index=None) :
char_i, char_j = s1[i].lower(), s2[j].lower()
index = char2index[char_j] + char2index[char_i] * len(char2index)
return index
def biases(s1, s2, bias=1.0) :
return np.full((s1.size, s2.size), bias)
def starts(s1, s2) :
M = np.zeros((s1.size, s2.size))
M[0,...] = 1
M[...,0] = 1
return M
def ends(s1, s2) :
M = np.zeros((s1.size, s2.size))
M[(s1.size-1),...] = 1
M[...,(s2.size-1)] = 1
return M
def matches(s1, s2) :
return (s1 == s2)
def digits(s1, s2) :
return np.char.isdigit(s1) & np.char.isdigit(s2)
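# A minimal usage sketch (not part of the original module's examples): extract a
# bias + match feature grid for the ('kaas', 'cheese') pair used in the docstrings
# above. The result is one array of shape (len('kaas'), len('cheese'), 2), with the
# match feature set to 1 wherever the characters agree.
if __name__ == '__main__':
    extractor = StringPairFeatureExtractor(match=True)
    X = extractor.fit_transform([('kaas', 'cheese')])
    print(X[0].shape)      # (4, 6, 2)
    print(X[0][3, 4, 1])   # 1.0 -- 's' in 'kaas' matches 's' in 'cheese'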
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utils for rebalanced tests.
"""
import sys
from datetime import datetime
from enum import IntEnum
from itertools import chain, product
from typing import NamedTuple
# pylint: disable=W0622
from ducktape.errors import TimeoutError
# pylint: disable=too-many-arguments
from ignitetest.services.ignite import IgniteService
from ignitetest.services.ignite_app import IgniteApplicationService
from ignitetest.services.utils.ignite_configuration import IgniteConfiguration, DataStorageConfiguration
from ignitetest.services.utils.ignite_configuration.data_storage import DataRegionConfiguration
from ignitetest.utils.enum import constructible
from ignitetest.utils.version import IgniteVersion
NUM_NODES = 4
DEFAULT_DATA_REGION_SZ = 1 << 30
@constructible
class TriggerEvent(IntEnum):
"""
Rebalance trigger event.
"""
NODE_JOIN = 0
NODE_LEFT = 1
class RebalanceParams(NamedTuple):
"""
Rebalance parameters
"""
trigger_event: TriggerEvent = TriggerEvent.NODE_JOIN
backups: int = 1
cache_count: int = 1
entry_count: int = 15_000
entry_size: int = 50_000
preloaders: int = 1
thread_pool_size: int = None
batch_size: int = None
batches_prefetch_count: int = None
throttle: int = None
persistent: bool = False
jvm_opts: list = None
@property
def data_region_max_size(self):
"""
Max size for DataRegionConfiguration.
"""
        return max(self.cache_count * self.entry_count * self.entry_size * (self.backups + 1), DEFAULT_DATA_REGION_SZ)
@property
def entry_count_per_preloader(self):
"""
Entry count per preloader.
"""
return int(self.entry_count / self.preloaders)
class RebalanceMetrics(NamedTuple):
"""
Rebalance metrics
"""
received_bytes: int = 0
start_time: int = 0
end_time: int = 0
duration: int = 0
node: str = None
# pylint: disable=too-many-arguments, too-many-locals
def start_ignite(test_context, ignite_version: str, rebalance_params: RebalanceParams) -> IgniteService:
"""
Start IgniteService:
:param test_context: Test context.
:param ignite_version: Ignite version.
:param rebalance_params: Rebalance parameters.
:return: IgniteService.
"""
node_count = len(test_context.cluster) - rebalance_params.preloaders
if rebalance_params.persistent:
data_storage = DataStorageConfiguration(
max_wal_archive_size=2 * rebalance_params.data_region_max_size,
default=DataRegionConfiguration(
persistent=True,
max_size=rebalance_params.data_region_max_size
)
)
else:
data_storage = DataStorageConfiguration(
default=DataRegionConfiguration(max_size=rebalance_params.data_region_max_size)
)
node_config = IgniteConfiguration(
version=IgniteVersion(ignite_version),
data_storage=data_storage,
metric_exporter="org.apache.ignite.spi.metric.jmx.JmxMetricExporterSpi",
rebalance_thread_pool_size=rebalance_params.thread_pool_size,
rebalance_batch_size=rebalance_params.batch_size,
rebalance_batches_prefetch_count=rebalance_params.batches_prefetch_count,
rebalance_throttle=rebalance_params.throttle)
ignites = IgniteService(test_context, config=node_config,
num_nodes=node_count if rebalance_params.trigger_event else node_count - 1,
jvm_opts=rebalance_params.jvm_opts)
ignites.start()
return ignites
# pylint: disable=R0914
def preload_data(context, config, rebalance_params: RebalanceParams, timeout=3600):
"""
Puts entry_count of key-value pairs of entry_size bytes to cache_count caches.
:param context: Test context.
:param config: Ignite configuration.
:param rebalance_params: Rebalance parameters.
:param timeout: Timeout in seconds for application finished.
:return: Time taken for data preloading.
"""
assert rebalance_params.preloaders > 0
assert rebalance_params.cache_count > 0
assert rebalance_params.entry_count > 0
assert rebalance_params.entry_size > 0
apps = []
def start_app(_from, _to):
app = IgniteApplicationService(
context,
config=config,
java_class_name="org.apache.ignite.internal.ducktest.tests.rebalance.DataGenerationApplication",
params={
"backups": rebalance_params.backups,
"cacheCount": rebalance_params.cache_count,
"entrySize": rebalance_params.entry_size,
"from": _from,
"to": _to
},
shutdown_timeout_sec=timeout)
app.start_async()
apps.append(app)
count = rebalance_params.entry_count_per_preloader
end = 0
for _ in range(rebalance_params.preloaders - 1):
start = end
end += count
start_app(start, end)
start_app(end, rebalance_params.entry_count)
for app in apps:
app.await_stopped()
return (max(map(lambda app: app.get_finish_time(), apps)) -
min(map(lambda app: app.get_init_time(), apps))).total_seconds()
def await_rebalance_start(service: IgniteService, timeout: int = 30):
"""
Awaits rebalance starting on any test-cache on any node.
:param service: IgniteService in which rebalance start will be awaited.
:param timeout: Rebalance start await timeout.
    :return: Time of the first "Starting rebalance routine" event detected on any node.
"""
for node in service.alive_nodes:
try:
rebalance_start_time = service.get_event_time_on_node(
node,
"Starting rebalance routine",
timeout=timeout)
except TimeoutError:
continue
else:
return rebalance_start_time
raise RuntimeError("Rebalance start was not detected on any node")
# pylint: disable=W0640
def aggregate_rebalance_stats(nodes, cache_count):
"""
Aggregates rebalance stats for specified nodes and cache count.
:param nodes: Nodes list.
:param cache_count: Cache count.
:return: Aggregated rebalance stats dictionary.
"""
def __stats(cache_idx):
cache_name = "test-cache-%d" % (cache_idx + 1)
stats = {
"cache": cache_name,
"start_time": {},
"end_time": {},
"duration": {},
"received_bytes": {}
}
metrics = list(map(lambda node: get_rebalance_metrics(node, cache_name), nodes))
for prop, func in chain(product(['start_time', 'end_time'], [min, max]),
product(['duration', 'received_bytes'], [min, max, sum])):
if func.__name__ == 'sum':
val = func(map(lambda item: getattr(item, prop), metrics))
else:
val = func(map(lambda item: [item.node, getattr(item, prop)], metrics), key=lambda tup: tup[1])
if prop in ['start_time', 'end_time']:
val[1] = to_time_format(val[1])
if prop == 'duration':
if func.__name__ == 'sum':
val = f'{round(val / 1000, 3)} s.'
else:
val[1] = f'{round(val[1] / 1000, 3)} s.'
stats[prop][func.__name__] = val
return stats
return list(map(__stats, range(cache_count)))
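# The chain(product(...)) loop above pairs each metric name with the reductions that
# make sense for it: min/max for the time bounds, min/max/sum for received_bytes and
# duration. A simplified standalone sketch of that aggregation pattern, using
# stand-in metrics instead of live JMX data (illustrative only, not the stats format
# returned by aggregate_rebalance_stats):
def _demo_aggregate(metrics):
    """Hypothetical helper mirroring the reduction loop in __stats()."""
    result = {}
    for prop, func in chain(product(['start_time', 'end_time'], [min, max]),
                            product(['duration', 'received_bytes'], [min, max, sum])):
        values = [getattr(m, prop) for m in metrics]
        result.setdefault(prop, {})[func.__name__] = func(values)
    return result

if __name__ == '__main__':
    sample = [RebalanceMetrics(received_bytes=10, start_time=100, end_time=150, duration=50, node='n1'),
              RebalanceMetrics(received_bytes=30, start_time=110, end_time=170, duration=60, node='n2')]
    print(_demo_aggregate(sample)['received_bytes'])  # {'min': 10, 'max': 30, 'sum': 40}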
def get_rebalance_metrics(node, cache_group):
"""
Gets rebalance metrics for specified node and cache group.
:param node: Ignite node.
:param cache_group: Cache group.
:return: RebalanceMetrics instance.
"""
mbean = node.jmx_client().find_mbean('.*group=cacheGroups.*name="%s"' % cache_group)
start_time = int(next(mbean.RebalancingStartTime))
end_time = int(next(mbean.RebalancingEndTime))
return RebalanceMetrics(
received_bytes=int(next(mbean.RebalancingReceivedBytes)),
start_time=start_time,
end_time=end_time,
duration=(end_time - start_time) if start_time != -1 and end_time != -1 else 0,
node=node.name)
def to_time_format(timestamp: int, fmt: str = '%Y-%m-%d %H:%M:%S'):
"""
Convert timestamp to string using format.
:param timestamp: Timestamp in ms
:param fmt: Format.
:return:
"""
return datetime.fromtimestamp(int(timestamp) // 1000).strftime(fmt)
# pylint: disable=too-many-arguments, too-many-locals
def get_result(rebalance_nodes: list, preload_time: int, cache_count: int, entry_count: int, entry_size: int) -> dict:
"""
:param rebalance_nodes: Ignite nodes in which rebalance will be awaited.
:param preload_time: Preload time.
:param cache_count: Cache count.
:param entry_count: Cache entry count.
:param entry_size: Cache entry size.
:return: Rebalance result with aggregated rebalance stats dictionary
"""
stats = aggregate_rebalance_stats(rebalance_nodes, cache_count)
return {
"rebalance_nodes": len(rebalance_nodes),
"rebalance_stats": stats,
"preload_time_sec": int(preload_time),
"preloaded_bytes": cache_count * entry_count * entry_size
}
def check_type_of_rebalancing(rebalance_nodes: list, is_full: bool = True):
"""
Check the type of rebalancing on node.
:param rebalance_nodes: Ignite nodes in which rebalance will be awaited.
:param is_full: Expected type of rebalancing.
"""
for node in rebalance_nodes:
output = node.account.ssh_output(f'grep "Starting rebalance routine" {node.log_file}', allow_fail=False,
combine_stderr=False) \
.decode(sys.getdefaultencoding()) \
.splitlines()
msg = 'histPartitions=[]' if is_full else 'fullPartitions=[]'
for i in output:
assert msg in i, i
return output
|
|
import functools
from importlib import import_module
from inspect import getfullargspec
from django.utils.html import conditional_escape
from django.utils.itercompat import is_iterable
from .base import Node, Template, token_kwargs
from .exceptions import TemplateSyntaxError
class InvalidTemplateLibrary(Exception):
pass
class Library:
"""
A class for registering template tags and filters. Compiled filter and
template tag functions are stored in the filters and tags attributes.
The filter, simple_tag, and inclusion_tag methods provide a convenient
way to register callables as tags.
"""
def __init__(self):
self.filters = {}
self.tags = {}
def tag(self, name=None, compile_function=None):
if name is None and compile_function is None:
# @register.tag()
return self.tag_function
elif name is not None and compile_function is None:
if callable(name):
# @register.tag
return self.tag_function(name)
else:
# @register.tag('somename') or @register.tag(name='somename')
def dec(func):
return self.tag(name, func)
return dec
elif name is not None and compile_function is not None:
# register.tag('somename', somefunc)
self.tags[name] = compile_function
return compile_function
else:
raise ValueError(
"Unsupported arguments to Library.tag: (%r, %r)" %
(name, compile_function),
)
def tag_function(self, func):
self.tags[getattr(func, "_decorated_function", func).__name__] = func
return func
def filter(self, name=None, filter_func=None, **flags):
"""
Register a callable as a template filter. Example:
@register.filter
def lower(value):
return value.lower()
"""
if name is None and filter_func is None:
# @register.filter()
def dec(func):
return self.filter_function(func, **flags)
return dec
elif name is not None and filter_func is None:
if callable(name):
# @register.filter
return self.filter_function(name, **flags)
else:
# @register.filter('somename') or @register.filter(name='somename')
def dec(func):
return self.filter(name, func, **flags)
return dec
elif name is not None and filter_func is not None:
# register.filter('somename', somefunc)
self.filters[name] = filter_func
for attr in ('expects_localtime', 'is_safe', 'needs_autoescape'):
if attr in flags:
value = flags[attr]
# set the flag on the filter for FilterExpression.resolve
setattr(filter_func, attr, value)
# set the flag on the innermost decorated function
# for decorators that need it, e.g. stringfilter
if hasattr(filter_func, "_decorated_function"):
setattr(filter_func._decorated_function, attr, value)
filter_func._filter_name = name
return filter_func
else:
raise ValueError(
"Unsupported arguments to Library.filter: (%r, %r)" %
(name, filter_func),
)
def filter_function(self, func, **flags):
name = getattr(func, "_decorated_function", func).__name__
return self.filter(name, func, **flags)
def simple_tag(self, func=None, takes_context=None, name=None):
"""
Register a callable as a compiled template tag. Example:
@register.simple_tag
def hello(*args, **kwargs):
return 'world'
"""
def dec(func):
params, varargs, varkw, defaults, _, _, _ = getfullargspec(func)
function_name = (name or getattr(func, '_decorated_function', func).__name__)
@functools.wraps(func)
def compile_func(parser, token):
bits = token.split_contents()[1:]
target_var = None
if len(bits) >= 2 and bits[-2] == 'as':
target_var = bits[-1]
bits = bits[:-2]
args, kwargs = parse_bits(
parser, bits, params, varargs, varkw, defaults,
takes_context, function_name
)
return SimpleNode(func, takes_context, args, kwargs, target_var)
self.tag(function_name, compile_func)
return func
if func is None:
# @register.simple_tag(...)
return dec
elif callable(func):
# @register.simple_tag
return dec(func)
else:
raise ValueError("Invalid arguments provided to simple_tag")
def inclusion_tag(self, filename, func=None, takes_context=None, name=None):
"""
Register a callable as an inclusion tag:
@register.inclusion_tag('results.html')
def show_results(poll):
choices = poll.choice_set.all()
return {'choices': choices}
"""
def dec(func):
params, varargs, varkw, defaults, _, _, _ = getfullargspec(func)
function_name = (name or getattr(func, '_decorated_function', func).__name__)
@functools.wraps(func)
def compile_func(parser, token):
bits = token.split_contents()[1:]
args, kwargs = parse_bits(
parser, bits, params, varargs, varkw, defaults,
takes_context, function_name,
)
return InclusionNode(
func, takes_context, args, kwargs, filename,
)
self.tag(function_name, compile_func)
return func
return dec
class TagHelperNode(Node):
"""
Base class for tag helper nodes such as SimpleNode and InclusionNode.
Manages the positional and keyword arguments to be passed to the decorated
function.
"""
def __init__(self, func, takes_context, args, kwargs):
self.func = func
self.takes_context = takes_context
self.args = args
self.kwargs = kwargs
def get_resolved_arguments(self, context):
resolved_args = [var.resolve(context) for var in self.args]
if self.takes_context:
resolved_args = [context] + resolved_args
resolved_kwargs = {k: v.resolve(context) for k, v in self.kwargs.items()}
return resolved_args, resolved_kwargs
class SimpleNode(TagHelperNode):
def __init__(self, func, takes_context, args, kwargs, target_var):
super().__init__(func, takes_context, args, kwargs)
self.target_var = target_var
def render(self, context):
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
output = self.func(*resolved_args, **resolved_kwargs)
if self.target_var is not None:
context[self.target_var] = output
return ''
if context.autoescape:
output = conditional_escape(output)
return output
class InclusionNode(TagHelperNode):
def __init__(self, func, takes_context, args, kwargs, filename):
super().__init__(func, takes_context, args, kwargs)
self.filename = filename
def render(self, context):
"""
Render the specified template and context. Cache the template object
in render_context to avoid reparsing and loading when used in a for
loop.
"""
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
_dict = self.func(*resolved_args, **resolved_kwargs)
t = context.render_context.get(self)
if t is None:
if isinstance(self.filename, Template):
t = self.filename
elif isinstance(getattr(self.filename, 'template', None), Template):
t = self.filename.template
elif not isinstance(self.filename, str) and is_iterable(self.filename):
t = context.template.engine.select_template(self.filename)
else:
t = context.template.engine.get_template(self.filename)
context.render_context[self] = t
new_context = context.new(_dict)
# Copy across the CSRF token, if present, because inclusion tags are
# often used for forms, and we need instructions for using CSRF
# protection to be as simple as possible.
csrf_token = context.get('csrf_token')
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
return t.render(new_context)
def parse_bits(parser, bits, params, varargs, varkw, defaults,
takes_context, name):
"""
Parse bits for template tag helpers simple_tag and inclusion_tag, in
particular by detecting syntax errors and by extracting positional and
keyword arguments.
"""
if takes_context:
if params[0] == 'context':
params = params[1:]
else:
raise TemplateSyntaxError(
"'%s' is decorated with takes_context=True so it must "
"have a first argument of 'context'" % name)
args = []
kwargs = {}
unhandled_params = list(params)
for bit in bits:
# First we try to extract a potential kwarg from the bit
kwarg = token_kwargs([bit], parser)
if kwarg:
# The kwarg was successfully extracted
param, value = kwarg.popitem()
if param not in params and varkw is None:
# An unexpected keyword argument was supplied
raise TemplateSyntaxError(
"'%s' received unexpected keyword argument '%s'" %
(name, param))
elif param in kwargs:
# The keyword argument has already been supplied once
raise TemplateSyntaxError(
"'%s' received multiple values for keyword argument '%s'" %
(name, param))
else:
# All good, record the keyword argument
kwargs[str(param)] = value
if param in unhandled_params:
# If using the keyword syntax for a positional arg, then
# consume it.
unhandled_params.remove(param)
else:
if kwargs:
raise TemplateSyntaxError(
"'%s' received some positional argument(s) after some "
"keyword argument(s)" % name)
else:
# Record the positional argument
args.append(parser.compile_filter(bit))
try:
# Consume from the list of expected positional arguments
unhandled_params.pop(0)
except IndexError:
if varargs is None:
raise TemplateSyntaxError(
"'%s' received too many positional arguments" %
name)
if defaults is not None:
# Consider the last n params handled, where n is the
# number of defaults.
unhandled_params = unhandled_params[:-len(defaults)]
if unhandled_params:
# Some positional arguments were not supplied
raise TemplateSyntaxError(
"'%s' did not receive value(s) for the argument(s): %s" %
(name, ", ".join("'%s'" % p for p in unhandled_params)))
return args, kwargs
def import_library(name):
"""
Load a Library object from a template tag module.
"""
try:
module = import_module(name)
except ImportError as e:
raise InvalidTemplateLibrary(
"Invalid template library specified. ImportError raised when "
"trying to load '%s': %s" % (name, e)
)
try:
return module.register
except AttributeError:
raise InvalidTemplateLibrary(
"Module %s does not have a variable named 'register'" % name,
)
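# A short usage sketch (typical Django template-tag module layout, not part of this
# file): a tag library exposes a module-level `register` Library instance and
# decorates callables with it, which is exactly what import_library() looks up above.
if __name__ == '__main__':
    register = Library()

    @register.filter
    def lower(value):
        return value.lower()

    @register.simple_tag(name='hello')
    def hello_tag(*args, **kwargs):
        return 'world'

    print(sorted(register.filters))  # ['lower']
    print(sorted(register.tags))     # ['hello']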
|
|
# -*- coding: utf-8 -*-
# coding=utf-8
# Copyright 2019 The SGNMT Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the bigram greedy search strategy """
import copy
import logging
import operator
from cam.sgnmt import utils
from cam.sgnmt.decoding.core import Decoder, PartialHypothesis
from cam.sgnmt.misc.trie import SimpleTrie
class BigramGreedyDecoder(Decoder):
"""The bigram greedy decoder collects bigram statistics at each
node expansions. After each decoding pass, it constructs a new
hypothesis to rescore by greedily selecting bigrams and gluing
them together. Afterwards, the new hypothesis is rescored and new
bigram statistics are collected.
Note that this decoder does not support the ``max_length``
parameter as it is designed for fixed length decoding problems.
Also note that this decoder works only for bag-of-words problems.
Do not use the bow predictor in combination with this decoder as
it will hide the EOS scores which are important to estimate bigram
scores.
"""
def __init__(self, decoder_args):
"""Creates a new bigram greedy decoder. Do not use this decoder
in combination with the bow predictor as it inherently already
        satisfies the bag-of-words constraints. The following values
are fetched from `decoder_args`:
trg_test (string): Path to a plain text file which
defines the bag of words
max_node_expansions (int): Maximum number of node expansions
for inadmissible pruning.
early_stopping (boolean): Activates admissible pruning
Args:
decoder_args (object): Decoder configuration passed through
from the configuration API.
"""
super(BigramGreedyDecoder, self).__init__(decoder_args)
self.max_expansions_param = decoder_args.max_node_expansions
self.early_stopping = decoder_args.early_stopping
with open(decoder_args.trg_test) as f:
self.lines = f.read().splitlines()
def _greedy_decode(self):
"""Performs greedy decoding from the start node. Used to obtain
initial bigram statistics.
"""
hypo = PartialHypothesis()
hypos = []
posteriors = []
score_breakdowns = []
bag = dict(self.full_bag)
while bag:
posterior,score_breakdown = self.apply_predictors()
hypo.predictor_states = copy.deepcopy(self.get_predictor_states())
bag_posterior = {w: posterior[w] for w in self.full_bag_with_eos}
bag_breakdown = {w: score_breakdown[w]
for w in self.full_bag_with_eos}
posteriors.append(bag_posterior)
score_breakdowns.append(bag_breakdown)
hypos.append(hypo)
best_word = utils.argmax({w: bag_posterior[w] for w in bag})
bag[best_word] -= 1
if bag[best_word] < 1:
del bag[best_word]
self.consume(best_word)
hypo = hypo.expand(best_word,
None,
bag_posterior[best_word],
score_breakdown[best_word])
posterior,score_breakdown = self.apply_predictors()
hypo.predictor_states = copy.deepcopy(self.get_predictor_states())
bag_posterior = {w: posterior[w] for w in self.full_bag_with_eos}
bag_breakdown = {w: score_breakdown[w] for w in self.full_bag_with_eos}
posteriors.append(bag_posterior)
score_breakdowns.append(bag_breakdown)
hypos.append(hypo)
hypo = hypo.cheap_expand(utils.EOS_ID,
bag_posterior[utils.EOS_ID],
score_breakdown[utils.EOS_ID])
logging.debug("Greedy hypo (%f): %s" % (
hypo.score,
' '.join([str(w) for w in hypo.trgt_sentence])))
self._process_new_hypos(hypos, posteriors, score_breakdowns, hypo)
def _process_new_hypos(self,
hypos,
posteriors,
score_breakdowns,
complete_hypo = None):
"""This method is called after a decoding pass. It updates
bigram statistics, stores partial hypotheses for restarting
from them later, and creates full hypotheses if a hypo
ends with EOS
"""
if complete_hypo:
self.best_score = max(self.best_score, complete_hypo.score)
self.add_full_hypo(complete_hypo.generate_full_hypothesis())
for idx,hypo in enumerate(hypos):
posterior = posteriors[idx]
prefix = hypo.trgt_sentence
self._register_bigram_scores(prefix[-1] if prefix else utils.GO_ID,
posterior)
self.posteriors.add(prefix, posterior)
self.score_breakdowns.add(prefix, score_breakdowns[idx])
self.hypos.add(prefix, hypo)
self._sort_bigram_scores()
def _get_next_sentence(self):
"""Get the next sentence to rescore
"""
bag0 = dict(self.full_bag)
bag0[utils.GO_ID] = 1
bag1 = dict(self.full_bag_with_eos)
return self._get_next_sentence_recursive([], bag0, bag1)
def _get_next_sentence_recursive(self,
bigrams,
remaining_bag0,
remaining_bag1):
"""Recursive helper function for _get_next_sentence
Args:
bigrams (list): List of already selected bigrams
remaining_bag0 (dict): Remaining words in the bag for the
first word in the bigram
remaining_bag1 (dict): Remaining words in the bag for the
second word in the bigram
Returns:
Tuple. hypo, sen tuple where sen is an unexplored sentence
and hypo corresponds to the largest explored prefix of sen.
Returns None if no consistent sentence was found
"""
if len(bigrams) == self.num_words + 1: # Collected enough bigrams
sens = self._get_sentences_from_bigrams(bigrams)
if not sens: # Bigrams are not consistent
return None
for sen in sens:
hypo = self._get_largest_prefix_hypo(sen)
if hypo and hypo.score > self.best_score:
return hypo, sen
return None
for bigram in self.sorted_bigrams:
if remaining_bag0[bigram[0]] > 0 and remaining_bag1[bigram[1]] > 0:
remaining_bag0[bigram[0]] -= 1
remaining_bag1[bigram[1]] -= 1
ret = self._get_next_sentence_recursive(bigrams + [bigram],
remaining_bag0,
remaining_bag1)
if ret:
return ret
remaining_bag0[bigram[0]] += 1
remaining_bag1[bigram[1]] += 1
return None
def _get_largest_prefix_hypo(self, sen):
"""Get the explored hypothesis with the largest common prefix
with ``sen``.
"""
prefix = self.hypos.get_prefix(sen)
if len(prefix) == len(sen): # hypo is already fully explored
return None
hypo = self.hypos.get(prefix)
posterior = self.posteriors.get(prefix)
score_breakdown = self.score_breakdowns.get(prefix)
next_word = sen[len(prefix)]
return hypo.cheap_expand(next_word,
posterior[next_word],
score_breakdown[next_word])
def _get_sentences_from_bigrams(self, bigrams):
"""Constructs all full consistent sentences from a list of
bigrams. The search is implemented as BFS. """
candidates = [([utils.GO_ID], bigrams)]
for _ in range(len(bigrams)):
next_candidates = []
for candidate in candidates:
# Select the next consistent bigram
cand_sen,cand_bigrams = candidate
last_word = cand_sen[-1]
for idx,bigram in enumerate(cand_bigrams):
if bigram[0] == last_word: # Consistent
new_bigrams = list(cand_bigrams)
del new_bigrams[idx]
next_candidates.append((cand_sen + [bigram[1]],
new_bigrams))
candidates = next_candidates
if not candidates:
break
return [candidate[0][1:] for candidate in candidates]
def _forced_decode(self, start_hypo, sen):
"""Performs forced decoding from a the node in the search tree.
Args:
start_hypo (PartialHypothesis): This is a partial hypothesis
for a prefix of sen from
which we start decoding
sen (list): Sentence to rescore
"""
logging.debug("best=%f prefix=%s prefix_score=%f sen=%s" % (
self.best_score,
start_hypo.trgt_sentence,
start_hypo.score,
sen))
self.set_predictor_states(copy.deepcopy(start_hypo.predictor_states))
        if start_hypo.word_to_consume is not None:  # Consume if cheap expand
self.consume(start_hypo.word_to_consume)
hypos = []
posteriors = []
score_breakdowns = []
hypo = start_hypo
cancelled = False
for forced_w in sen[len(start_hypo.trgt_sentence):]:
posterior,score_breakdown = self.apply_predictors()
hypo.predictor_states = copy.deepcopy(self.get_predictor_states())
bag_posterior = {w: posterior[w] for w in self.full_bag_with_eos}
bag_breakdown = {w: score_breakdown[w]
for w in self.full_bag_with_eos}
posteriors.append(bag_posterior)
score_breakdowns.append(bag_breakdown)
hypos.append(hypo)
hypo = hypo.expand(forced_w,
None,
bag_posterior[forced_w],
score_breakdown[forced_w])
if self.early_stopping and hypo.score < self.best_score:
cancelled = True
break
self.consume(forced_w)
self._process_new_hypos(hypos, posteriors, score_breakdowns,
hypo if not cancelled else None)
def _load_bag(self):
"""Load the current bag of words """
self.full_bag = {}
for w in self.lines[self.current_sen_id].strip().split():
int_w = int(w)
self.full_bag[int_w] = self.full_bag.get(int_w, 0) + 1
self.num_words = sum(self.full_bag.values())
self.full_bag_with_eos = dict(self.full_bag)
self.full_bag_with_eos[utils.EOS_ID] = 1
def _register_bigram_scores(self, last_word, posterior):
for w,score in utils.common_iterable(posterior):
self.bigram_scores[last_word][w] = min(
self.bigram_scores[last_word][w], score)
def _sort_bigram_scores(self):
self.sorted_bigrams = []
for w1,scores in self.bigram_scores.items():
self.sorted_bigrams.extend([(w1, w2, score)
for w2,score in scores.items()])
self.sorted_bigrams.sort(key=operator.itemgetter(2), reverse=True)
def _initialize_bigram_scores(self):
default_scores = {w: 0.0 for w in self.full_bag_with_eos}
self.bigram_scores = {w: dict(default_scores) for w in self.full_bag}
self.bigram_scores[utils.GO_ID] = default_scores
def decode(self, src_sentence):
"""Decodes a single source sentence with the flip decoder """
self.initialize_predictors(src_sentence)
self.max_expansions = self.get_max_expansions(self.max_expansions_param,
src_sentence)
self._load_bag()
self.hypos = SimpleTrie()
self.posteriors = SimpleTrie()
self.score_breakdowns = SimpleTrie()
self.best_score = self.get_lower_score_bound()
self._initialize_bigram_scores()
self._greedy_decode()
while self.max_expansions > self.apply_predictors_count:
ret = self._get_next_sentence()
if not ret:
break
self._forced_decode(ret[0], ret[1])
return self.get_full_hypos_sorted()
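# A self-contained sketch of the BFS used by _get_sentences_from_bigrams() above:
# starting from a GO symbol, repeatedly append any remaining bigram whose first
# element matches the last word of the partial sentence. Plain ints stand in for
# word IDs and 0 for utils.GO_ID (illustrative only, independent of the SGNMT classes).
def _demo_sentences_from_bigrams(bigrams, go_id=0):
    candidates = [([go_id], list(bigrams))]
    for _ in range(len(bigrams)):
        next_candidates = []
        for sen, remaining in candidates:
            for idx, (first, second) in enumerate(remaining):
                if first == sen[-1]:
                    next_candidates.append((sen + [second],
                                            remaining[:idx] + remaining[idx + 1:]))
        candidates = next_candidates
        if not candidates:
            break
    return [sen[1:] for sen, _ in candidates]

if __name__ == '__main__':
    # bigrams (0, 5), (5, 7), (7, 9) glue together into the sentence [5, 7, 9]
    print(_demo_sentences_from_bigrams([(5, 7), (0, 5), (7, 9)]))  # [[5, 7, 9]]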
|
|
"""This module contains a class for representing the gameboard."""
from random import randint
from collections import namedtuple
import json
import pandas as pd
from .constants import Constants as C
from .gameboard_delegate import GameBoardDelegate
from .combiners import combine_left, combine_right
from .combiners import combine_left_mapped, combine_right_mapped
# a named tuple for the history points of a gameboard
GameBoardSnapshot = namedtuple('GameBoardSnapshot',
'board_bits board_json score move')
class GameBoard(object):
"""
This class models a 2048 game board with a table of tiles holding numbers.
The board is modeled by a 64 bit integer and all tile, row, column lookups
are performed using bit-shift operations.
"""
# the board to put the tiles on in row-major ordering this means the first
# index refers to the row and the second index to the column.
_board = 0
# the score of the game
_score = 0
# the history of metadata throughout the game
_history = []
# the delegate to send events to
_delegate = None
def __init__(self, delegate=None):
"""
Create a new gameboard.
- parameters:
- delegate: {GameBoardEventHandler} the delegate to pass events to.
"""
# reset the board (i.e. score and board set to 0)
self.reset()
# assign the delegate
if delegate and not isinstance(delegate, GameBoardDelegate):
msg = 'delegate must be of type None or GameBoardDelegate'
raise ValueError(msg)
self._delegate = delegate
@property
def copy(self):
"""Return a copy of this object."""
copy = GameBoard()
# copy the board from the other board, use the private setters, no
# need to run the verification logic as the board should always be
# in a valid state when copy is called and copy should be fast.
copy._board = self.board
# copy the score
copy._score = self.score
# setup the move map
# copy._move_map = self._move_map
return copy
def reset(self):
"""Reset the board and score."""
# reset the score
self._score = 0
# create the tile objects by iterating over the array
self._board = 0
# restore the history
self._history = []
@property
def history(self):
"""Return the history of this gameboard."""
return self._history
@property
def data_frame(self):
"""
Return the history as a `pandas` DataFrame.
- returns: {pandas.DataFrame} the history as a data frame
"""
columns = ['64-bit board', 'JSON board', 'score', 'move']
return pd.DataFrame.from_records(self._history, columns=columns)
@property
def board(self):
"""Return the board encoded as a 64 bit unsigned int."""
return self._board
@property
def board_json(self):
"""Return the board encoded into json."""
rows = []
        for row in range(0, C.rows()):
            rows.append(self.get_row_array(row))
return json.dumps({'board': rows})
@board.setter
def board(self, new_board):
"""
Set the board to a new value.
- parameters:
- new_board: {long} the new board to set
"""
self._board = new_board
@property
def board_array(self):
"""Return the board as a 2D array of bytes."""
board = []
for row in range(0, C.rows()):
# add a column list to the array
board.append([])
for column in range(0, C.columns()):
# append the tile in the column to the array
board[row].append(self.get_tile(row, column))
return board
@property
def score(self):
"""Return the current score of the game."""
return self._score
@score.setter
def score(self, new_score):
"""
Set the score to a new value.
- parameters:
- new_score: {number} the new score for the game, must be positive.
"""
if new_score < 0:
raise ValueError('scores cannot be negative')
self._score = new_score
def is_equal(self, other):
"""
        Return a boolean determining whether this board differs from the given.
        - parameters:
            - other: {GameBoard} the gameboard to compare against
        - returns: {boolean} false if the board and score match, true otherwise
"""
# if the boards are the same, there was no change
if self.board == other.board and self.score == other.score:
return False
return True
def get_tile(self, row, column):
"""
Get a tile at a given row column index.
- parameters:
- row: {int} the row to get the tile from
- column: {int} the column to get the tile from
- return: {int} the tile as a nibble
"""
# verify the bound of the row
if row < 0 or row >= C.rows():
msg = 'row must be in [0, %i)'
raise ValueError(msg % C.rows())
# verify the bounds of the column
if column < 0 or column >= C.columns():
msg = 'column must be in [0, %i)'
raise ValueError(msg % C.columns())
# get the tiles unwrapped index
index = (row * C.rows()) + column
return self.get_tile_at(index)
def get_tile_at(self, index):
"""
Get the tile at the given index in the bitmap.
- parameters:
- index: {number} the index of the tile to get
- returns: {int} a nibble representing a single tiles exponent value
"""
# verify the bound of the row
if index < 0 or index >= C.tiles():
msg = 'index must be in [0, %s)'
raise ValueError(msg % C.tiles())
# get the offset based on index
row_offset = (C.rows() * index)
offset = C.board_size() - C.tile_size() - row_offset
# shift the board and mask it to get the value
return (self._board >> offset) & C.tile_value_mask()
def place_tile(self, row, column, value):
"""
Place a tile at the given row column index with the given value.
- parameters:
- row: {int} the index of the row to place at
- column: {int} the index of the column to place at
- value: {int} the value to place in [0, 15]
"""
# verify the bounds of the row
if row < 0 or row >= C.rows():
msg = 'row must be in [0, %i)'
raise ValueError(msg % C.columns())
# verify the bounds of the column
if column < 0 or column >= C.columns():
msg = 'column must be in [0, %i)'
raise ValueError(msg % C.columns())
# get the index
index = (row * C.rows()) + column
# place the tile
self.place_tile_at(index, value)
def place_tile_at(self, index, value):
"""
Place a new tile at the unwrapped index.
- parameters:
- index: {int} the index to place the value at
- value: {long} the value to place in the tile
"""
# verify the bound of the index
if index < 0 or index >= C.tiles():
msg = 'tile must be in [0, %i)'
raise ValueError(msg % C.tiles())
if value < 0 or value > 15:
raise ValueError('value must be in [0, 15]')
# calculate the offset
offset = C.rows() * index
# shift the zero to delete the old value
self._board = self._board & ~(C.tile_mask() >> offset)
# zero out garbage and shift value for placement, then add the new
# value to the board
mask = C.board_size() - C.tile_size() - offset
masked_value = ((value & C.tile_value_mask()) << mask)
self._board = self._board + masked_value
def place_random_tile(self):
"""Randomly place a new tile on the board."""
# make sure there is a place for the tile
        if len(self.active_tiles) == C.tiles():
return
# get a list of the inactive tiles
inactive_tiles = self.inactive_tiles
value = 1 # 2 ^ 1 = 2
        probability = randint(1, int(1 / C.chance_of_four()))
        if probability == 1:
value = 2 # 2 ^ 2 = 4
# get a random index in the list
index = randint(0, len(inactive_tiles) - 1)
# place the tile on the board
self.place_tile_at(inactive_tiles[index], value)
# pass the event to the handler
if self._delegate is not None:
tile_index = inactive_tiles[index]
            row = tile_index // C.rows()
            column = tile_index % C.rows()
# self._delegate.didPlaceNewTile(row, column, value)
def place_row(self, index, value):
"""
Place a row onto the board.
- parameters:
- index: {int} the index of the row to replace
- value: {short} the value of the row to put into the board
"""
# cross out the row to make room for the new one
self._board = self._board & ~(C.row_mask() >> int(C.row_size() * index))
# cast the row and mask any sign extension, shift the row, then add the row to the board
self._board = self._board + (((value) & C.short_mask()) << int((C.rows() - 1 - index) * C.row_size()))
def get_row(self, index):
"""
Return the encoded row value for a given index.
- parameters:
- index: {int} the index of the row to select [0, 3]
- returns: {short} the row as 16 bits such that each 4 represents a
tile
"""
shifted_board = self.board >> int((C.rows() - 1 - index) * C.row_size())
return shifted_board & C.short_mask()
def place_row_array(self, index, row_array):
"""
Place a row array on the board.
- parameters:
- index: {int} the index of the row to place
- row_array: {byte[]} the list of tiles
"""
for column in range(0, C.columns()):
self.place_tile(index, column, row_array[column])
def get_row_array(self, row):
"""
Get the tiles in a row as an array of bytes.
- parameters:
- row: {int} the index of the row to get a tile array of
- returns: {byte[]} an array of tiles
"""
row_tiles = []
for column in range(0, C.columns()):
row_tiles.append(self.get_tile(row, column))
return row_tiles
def place_column(self, index, value):
"""
Place a column on the board.
- parameters:
- index: {int} the index to place the column at
- value: {short} the value of the column to place
"""
# shift the mask to the given column, flip the bits, then & with the board to zero the
# column.
self._board = self._board & ~(C.column_mask() >> int(C.tile_size() * index))
# first get the pieces of the board shifted into column positions, then shift the column
# itself into place and add it to the board.
self._board = self._board + (
(
# get the pieces of the board shifted into column positions as a long
( ((value >> C.tile_shift()[0]) & C.tile_value_mask()) << C.column_shift()[0] ) +
( ((value >> C.tile_shift()[1]) & C.tile_value_mask()) << C.column_shift()[1] ) +
( ((value >> C.tile_shift()[2]) & C.tile_value_mask()) << C.column_shift()[2] ) +
( value & C.tile_value_mask() )
#then shift the column
) << (C.rows() - 1 - index) * C.tile_size()
)
def get_column(self, index):
"""
Get the encoded column value for a given column.
- parameters:
- index: {int} the index of the column to select [0, 3]
- returns: {short} the column as 16 bits such that each 4 represents a
tile
"""
return ((self.get_tile(0, index) << C.tile_shift()[0]) +
(self.get_tile(1, index) << C.tile_shift()[1]) +
(self.get_tile(2, index) << C.tile_shift()[2]) +
(self.get_tile(3, index)))
def place_column_array(self, index, column_array):
"""
Place a column array on the board.
- parameters:
- index: {int} the index of the column to place
- column_array: {byte[]} the list of tiles
"""
for row in range(0, C.rows()):
self.place_tile(row, index, column_array[row])
def get_column_array(self, column):
"""
Get the tiles in a column as an array of bytes.
- parameters:
- column: {int} the index of the row to get a tile array of
- returns: {byte[]} an array of tiles
"""
column_tiles = []
# iterate over the rows
for row in range(0, C.rows()):
column_tiles.append(self.get_tile(row, column))
return column_tiles
@property
def inactive_tiles(self):
"""Return a list containing pointers to the inactive tiles."""
inactive_tiles = []
for index in range(0, C.tiles()):
            if self.get_tile_at(index) == 0:
inactive_tiles.append(index)
return inactive_tiles
@property
def active_tiles(self):
"""Return a list containing pointers to the active tiles."""
active_tiles = []
for index in range(0, C.tiles()):
            if self.get_tile_at(index) != 0:
active_tiles.append(index)
return active_tiles
# MARK: Movement
def _move(self, length, combine, get, place):
"""
Perform a move function dynamically on a row / col array.
- parameters:
- length: {int} the number of row / col to perform a move action on
- combine: {function} a combine function that returns a new board
and score as a tuple
- get: {function} the function that will return a row / col
- place: {function} the function that will place a new row / col
"""
for index in range(0, length):
combination = combine(get(index))
place(index, combination[0])
self._score = self._score + combination[1]
def _move_up(self):
"""
Move the pieces up on the game board and increment the score.
- precondition: assume the move is legal
"""
self._move(C.rows(), combine_left,
self.get_column_array, self.place_column_array)
def _move_down(self):
"""
Move the pieces down on the game board and increment the score.
- precondition: assume the move is legal
"""
self._move(C.rows(), combine_right,
self.get_column_array, self.place_column_array)
def _move_left(self):
"""
Move the pieces left on the game board and increment the score.
- precondition: assume the move is legal
"""
self._move(C.columns(), combine_left,
self.get_row_array, self.place_row_array)
def _move_right(self):
"""
Move the pieces right on the game board and increment the score.
- precondition: assume the move is legal
"""
self._move(C.columns(), combine_right,
self.get_row_array, self.place_row_array)
# MARK: Can move
@property
def can_move_up(self):
"""Return a boolean determining whether a move up is possible."""
copy = self.copy
copy._move_up()
return self.is_equal(copy)
@property
def can_move_down(self):
"""Return a boolean determining whether a move down is possible."""
copy = self.copy
copy._move_down()
return self.is_equal(copy)
@property
def can_move_left(self):
"""Return a boolean determining whether a move left is possible."""
copy = self.copy
copy._move_left()
return self.is_equal(copy)
@property
def can_move_right(self):
"""Return a boolean determining whether a move right is possible."""
copy = self.copy
copy._move_right()
return self.is_equal(copy)
@property
def can_move(self):
"""Return a boolean determining if there are any moves on the board."""
return self.can_move_up or self.can_move_down or self.can_move_left or self.can_move_right
# Guarded Movement
def _try_move(self, can_move, move, did_move=None):
if can_move:
# get the matrix of moves
# int[][] moveMatrix = LikeValueIndexMapper.getUpMovementMatrixFor(self)
# store the old score (for the handler)
old_score = self.score
# make the move
move()
# take a snapshot
snapshot = GameBoardSnapshot(board_bits=self._board,
board_json=self.board_json,
score=self._score,
move=str(move.__name__))
self._history.append(snapshot)
# if there is a handler, pass the event
if did_move:
did_score = self.score > old_score
# did_move(did_score, moveMatrix)
return True
return False
def move_up(self):
"""
Try to move the pieces up on the game board.
If the move isn't possible then return false and leave the game in
its current state.
- returns: {boolean} true if the move occured, false otherwise
"""
return self._try_move(self.can_move_up, self._move_up)
def move_down(self):
"""
Try to move the pieces down on the game board.
If the move isn't possible then return false and leave the game in
its current state.
- returns: {boolean} true if the move occured, false otherwise
"""
return self._try_move(self.can_move_down, self._move_down)
def move_left(self):
"""
Try to move the pieces left on the game board.
If the move isn't possible then return false and leave the game in
its current state.
- returns: {boolean} true if the move occured, false otherwise
"""
return self._try_move(self.can_move_left, self._move_left)
def move_right(self):
"""
Try to move the pieces right on the game board.
If the move isn't possible then return false and leave the game in
its current state.
- returns: {boolean} true if the move occured, false otherwise
"""
return self._try_move(self.can_move_right, self._move_right)
# MARK: Description
@property
def description(self):
"""Return a human readable version of this gameboard."""
description = ""
# iterate over all the tiles
for i in range(0, C.tiles()):
# get the tile at this index
value = self.get_tile_at(i)
            if value == 0:
tile = '|{0: >6} |'.format('')
else:
tile = '|{0: >6} |'.format(2**value)
# add the tile to the string
description += tile
# if the index is about to be a multiple of four, add a new line
            if (i + 1) % 4 == 0:
description += "\n"
return description
def __repr__(self):
"""Return a string representation of this object."""
return self.description
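# A self-contained sketch of the 64-bit board encoding described in the class
# docstring: each of the 16 tiles is a 4-bit exponent nibble, so a tile lookup is a
# shift plus a mask. The constants here assume the usual 4x4 board and are
# illustrative only (the real class reads them from Constants).
def _demo_get_tile(board, row, column, rows=4, columns=4, tile_bits=4):
    index = row * columns + column
    offset = rows * columns * tile_bits - tile_bits - index * tile_bits
    return (board >> offset) & 0xF

def _demo_place_tile(board, row, column, value, rows=4, columns=4, tile_bits=4):
    index = row * columns + column
    offset = rows * columns * tile_bits - tile_bits - index * tile_bits
    board &= ~(0xF << offset)                 # clear the old nibble
    return board | ((value & 0xF) << offset)  # write the new exponent

if __name__ == '__main__':
    demo_board = _demo_place_tile(0, 1, 2, 5)  # place exponent 5 (tile 32) at row 1, col 2
    print(_demo_get_tile(demo_board, 1, 2))    # 5
    print(_demo_get_tile(demo_board, 0, 0))    # 0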
|
|
from yargy.visitor import Visitor
from yargy.dot import (
style,
DotTransformator,
BLUE,
ORANGE,
RED,
PURPLE,
GREEN,
DARKGRAY
)
from yargy.predicates import is_predicate
from .constructors import (
is_rule,
Production,
EmptyProduction,
Rule,
OrRule,
OptionalRule,
RepeatableRule,
BoundedRule,
MinBoundedRule,
MaxBoundedRule,
MinMaxBoundedRule,
RepeatableOptionalRule,
NamedRule,
InterpretationRule,
RelationRule,
ForwardRule,
)
class InplaceRuleTransformator(Visitor):
def __call__(self, root):
for item in root.walk(types=(Rule, Production)):
self.visit(item)
return self.visit(root)
def visit_term(self, item):
return item
def visit_Production(self, item):
item.terms = [self.visit_term(_) for _ in item.terms]
return item
def visit_EmptyProduction(self, item):
return item
def visit_PipelineProduction(self, item):
return item
def visit_Rule(self, item):
return item
class RuleTransformator(Visitor):
def __init__(self):
self.visited = {}
def __call__(self, root):
for item in root.walk(types=ForwardRule):
if item.rule:
item.define(self.visit(item.rule))
return self.visit(root)
def visit(self, item):
item_id = id(item)
if item_id in self.visited:
return self.visited[item_id]
else:
item = self.resolve_method(item)(item)
self.visited[item_id] = item
return item
def visit_term(self, item):
if is_rule(item):
return self.visit(item)
else:
return item
def visit_Production(self, item):
return Production(
[self.visit_term(_) for _ in item.terms],
item.main
)
def visit_EmptyProduction(self, item):
return item
def visit_PipelineProduction(self, item):
return item
def visit_Rule(self, item):
return Rule([self.visit(_) for _ in item.productions])
def visit_OrRule(self, item):
return OrRule([self.visit(_) for _ in item.rules])
def visit_OptionalRule(self, item):
        return OptionalRule(self.visit(item.rule), item.reverse)
def visit_RepeatableRule(self, item):
        return RepeatableRule(self.visit(item.rule), item.reverse)
def visit_RepeatableOptionalRule(self, item):
        return RepeatableOptionalRule(
            self.visit(item.rule),
            item.reverse_repeatable,
            item.reverse_optional
        )
def visit_MinBoundedRule(self, item):
return MinBoundedRule(self.visit(item.rule), item.min, item.reverse)
def visit_MaxBoundedRule(self, item):
return MaxBoundedRule(self.visit(item.rule), item.max, item.reverse)
def visit_MinMaxBoundedRule(self, item):
return MinMaxBoundedRule(
self.visit(item.rule),
item.min, item.max, item.reverse
)
def visit_NamedRule(self, item):
return NamedRule(self.visit(item.rule), item.name)
def visit_InterpretationRule(self, item):
return InterpretationRule(self.visit(item.rule), item.interpretator)
def visit_RelationRule(self, item):
return RelationRule(self.visit(item.rule), item.relation)
def visit_ForwardRule(self, item):
return item
def visit_EmptyRule(self, item):
return item
def visit_PipelineRule(self, item):
return item
class ActivateTransformator(InplaceRuleTransformator):
def __init__(self, context):
super(ActivateTransformator, self).__init__()
self.context = context
def visit_term(self, item):
if is_predicate(item):
return item.activate(self.context)
else:
return item
def visit_PipelineRule(self, item):
item.pipeline = item.pipeline.activate(self.context)
return item
class SquashExtendedTransformator(RuleTransformator):
def visit_RepeatableRule(self, item):
child = item.rule
if isinstance(child, OptionalRule):
return self.visit(RepeatableOptionalRule(
child.rule,
item.reverse,
child.reverse
))
elif isinstance(child, RepeatableOptionalRule):
return self.visit(RepeatableOptionalRule(
child.rule,
item.reverse,
child.reverse_optional
))
elif isinstance(child, (RepeatableRule, BoundedRule)):
return self.visit(RepeatableRule(child.rule, item.reverse))
else:
return RepeatableRule(self.visit(child), item.reverse)
def visit_OptionalRule(self, item):
child = item.rule
if isinstance(child, RepeatableRule):
return self.visit(RepeatableOptionalRule(
child.rule,
child.reverse,
item.reverse
))
elif isinstance(child, RepeatableOptionalRule):
return self.visit(RepeatableOptionalRule(
child.rule,
child.reverse_repeatable,
item.reverse
))
elif isinstance(child, OptionalRule):
return self.visit(OptionalRule(child.rule, item.reverse))
else:
return OptionalRule(self.visit(child), item.reverse)
def visit_RepeatableOptionalRule(self, item):
child = item.rule
if isinstance(child, (RepeatableRule, BoundedRule,
OptionalRule, RepeatableOptionalRule)):
return self.visit(RepeatableOptionalRule(
child.rule,
item.reverse_repeatable,
item.reverse_optional
))
else:
return RepeatableOptionalRule(
self.visit(child),
item.reverse_repeatable,
item.reverse_optional
)
def visit_BoundedRule(self, item):
child = item.rule
if isinstance(child, RepeatableRule):
return self.visit(RepeatableRule(child.rule, child.reverse))
elif isinstance(child, RepeatableOptionalRule):
return self.visit(RepeatableOptionalRule(
child.rule,
child.reverse_repeatable,
child.reverse_optional
))
raise TypeError
def visit_MinBoundedRule(self, item):
child = item.rule
if isinstance(child, (RepeatableRule, RepeatableOptionalRule)):
return self.visit_BoundedRule(item)
elif isinstance(child, OptionalRule):
return self.visit(OptionalRule(
MinBoundedRule(
child.rule, item.min, item.reverse
),
child.reverse
))
else:
return MinBoundedRule(self.visit(child), item.min, item.reverse)
def visit_MaxBoundedRule(self, item):
child = item.rule
if isinstance(child, (RepeatableRule, RepeatableOptionalRule)):
return self.visit_BoundedRule(item)
elif isinstance(child, OptionalRule):
return self.visit(OptionalRule(
MaxBoundedRule(
child.rule, item.max, item.reverse
),
child.reverse
))
else:
return MaxBoundedRule(self.visit(child), item.max, item.reverse)
def visit_MinMaxBoundedRule(self, item):
child = item.rule
if isinstance(child, (RepeatableRule, RepeatableOptionalRule)):
return self.visit_BoundedRule(item)
elif isinstance(child, OptionalRule):
return self.visit(OptionalRule(
MinMaxBoundedRule(
child.rule, item.min, item.max, item.reverse
),
child.reverse
))
else:
return MinMaxBoundedRule(
self.visit(child),
item.min, item.max, item.reverse
)
class FlattenTransformator(RuleTransformator):
def visit_term(self, item):
if type(item) is Rule:
productions = item.productions
if len(productions) == 1:
terms = productions[0].terms
if len(terms) == 1:
term = terms[0]
return self.visit_term(term)
return super(FlattenTransformator, self).visit_term(item)
def visit_Production(self, item):
terms = item.terms
if len(terms) == 1:
term = terms[0]
if type(term) is Rule:
productions = term.productions
if len(productions) == 1:
production = productions[0]
return self.visit(production)
return super(FlattenTransformator, self).visit_Production(item)
class ReplaceOrTransformator(RuleTransformator):
def visit_OrRule(self, item):
return Rule([Production([self.visit(_)]) for _ in item.rules])
class ReplaceEmptyTransformator(RuleTransformator):
def visit_EmptyRule(self, item):
return Rule([EmptyProduction()])
def max_bound(item, count, reverse=False):
from yargy.api import rule, or_
if count == 1:
return item
else:
a = rule(
item,
max_bound(item, count - 1, reverse)
)
b = item
if reverse:
a, b = b, a
return or_(a, b)
def repeatable(item, reverse=False):
from yargy.api import forward, or_, rule
temp = forward()
a = rule(item, temp)
b = item
if reverse:
a, b = b, a
return temp.define(
or_(
a,
b
)
)
def optional(item, reverse=False):
from yargy.api import or_, empty
a = empty()
b = item
if reverse:
a, b = b, a
return or_(a, b)
def repeatable_optional(item, reverse_repeatable=False, reverse_optional=False):
from yargy.api import forward, or_, rule, empty
temp = forward()
a = empty()
b = rule(item, temp)
c = item
if reverse_repeatable:
b, c = c, b
if reverse_optional:
a, b, c = b, c, a
return temp.define(
or_(
a,
b,
c
)
)
def repeat(item, count):
return [item for _ in range(count)]
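# --- Illustration (not part of the original module) --------------------------
# A minimal sketch, assuming only the yargy api already used in this file
# (rule, or_, forward, empty) plus the public `eq` predicate, of what the
# helpers above expand the extended operators into:
#   optional(A)             ->  empty | A                     i.e.  A?
#   repeatable(A)           ->  R where R -> A R | A          i.e.  A+
#   repeatable_optional(A)  ->  R where R -> empty | A R | A  i.e.  A*
#   max_bound(A, n)         ->  nested (A, then optionally more A) i.e. A{1,n}
# The `reverse` flags only swap the order of the alternatives. The function
# below is illustrative and is never called by the library.
def _demo_extended_helpers():
    from yargy.api import rule
    from yargy.predicates import eq
    word = rule(eq('a'))
    return optional(word), repeatable(word), max_bound(word, 3)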
class ReplaceExtendedTransformator(RuleTransformator):
def visit_RepeatableRule(self, item):
child = self.visit(item.rule)
return repeatable(child, item.reverse)
def visit_OptionalRule(self, item):
child = self.visit(item.rule)
return optional(child, item.reverse)
def visit_RepeatableOptionalRule(self, item):
child = self.visit(item.rule)
return repeatable_optional(
child,
item.reverse_repeatable,
item.reverse_optional
)
def visit_MinBoundedRule(self, item):
child = self.visit(item.rule)
items = repeat(child, item.min - 1)
items.append(repeatable(child, item.reverse))
from yargy.api import rule
return rule(*items)
def visit_MaxBoundedRule(self, item):
child = self.visit(item.rule)
return max_bound(child, item.max, item.reverse)
def visit_MinMaxBoundedRule(self, item):
rule, min, max, reverse = item
child = self.visit(rule)
items = repeat(child, min - 1)
items.append(max_bound(child, max - min + 1, reverse))
from yargy.api import rule
return rule(*items)
class DotRuleTransformator(DotTransformator, InplaceRuleTransformator):
def visit_Predicate(self, item):
self.style(
item,
style(label=item.label)
)
def visit_Production(self, item):
self.graph.add_node(
item,
style(label='Production', fillcolor=BLUE)
)
for index, child in enumerate(item.children):
styling = (
style(color=DARKGRAY)
if item.main > 0 and item.main == index
else None
)
self.graph.add_edge(
item, child,
style=styling
)
def visit_EmptyProduction(self, item):
self.style(
item,
style(label='EmptyProduction')
)
def visit_PipelineProduction(self, item):
self.style(
item,
style(label='PipelineProduction', fillcolor=BLUE)
)
def visit_Rule(self, item):
self.style(
item,
style(label='Rule', fillcolor=BLUE)
)
def visit_OrRule(self, item):
self.style(
item,
style(label='Or', fillcolor=BLUE)
)
def visit_OptionalRule(self, item):
self.style(
item,
style(label='Optional', fillcolor=ORANGE)
)
def visit_RepeatableRule(self, item):
self.style(
item,
style(label='Repeatable', fillcolor=ORANGE)
)
def visit_RepeatableOptionalRule(self, item):
self.style(
item,
style(label='RepeatableOptional', fillcolor=ORANGE)
)
def visit_MinBoundedRule(self, item):
self.style(
item,
style(label='MinBounded >=%d' % item.min, fillcolor=ORANGE)
)
def visit_MaxBoundedRule(self, item):
self.style(
item,
style(label='MaxBounded <=%d' % item.max, fillcolor=ORANGE)
)
def visit_MinMaxBoundedRule(self, item):
self.style(
item,
style(
label='MinMaxBounded [{item.min}, {item.max}]'.format(item=item),
fillcolor=ORANGE
)
)
def visit_NamedRule(self, item):
self.style(
item,
style(label=item.name, fillcolor=RED)
)
def visit_InterpretationRule(self, item):
self.style(
item,
style(label=item.interpretator.label, fillcolor=GREEN)
)
def visit_RelationRule(self, item):
self.style(
item,
style(label=item.relation.label, fillcolor=PURPLE)
)
def visit_ForwardRule(self, item):
self.style(
item,
style(label='Forward', fillcolor=PURPLE)
)
def visit_EmptyRule(self, item):
self.style(
item,
style(label='Empty')
)
def visit_PipelineRule(self, item):
self.style(
item,
style(label=item.pipeline.label, fillcolor=PURPLE)
)
def visit_BNFRule(self, item):
self.style(
item,
style(label=item.label, fillcolor=GREEN)
)
|
|
#----------------------------------------------------------------------
# Copyright (c) 2011-2013 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
The GPO Reference Aggregate Manager v2, showing how to implement
the GENI AM API version 2. This AggregateManager has only fake resources.
Invoked from gcf-am.py
The GENI AM API is defined in the AggregateManager class.
"""
import base64
import datetime
import dateutil.parser
import dateutil.tz
import logging
import os
import uuid
import xml.dom.minidom as minidom
import xmlrpclib
import zlib
import geni
from geni.util.urn_util import publicid_to_urn
from geni.SecureXMLRPCServer import SecureXMLRPCServer
from resource import Resource
from aggregate import Aggregate
from fakevm import FakeVM
# See sfa/trust/rights.py
# These are names of operations
# from the rights.py privilege_table
# Credentials may list privileges that
# map to these operations, giving the caller permission
# to perform the functions
RENEWSLIVERPRIV = 'renewsliver'
CREATESLIVERPRIV = 'createsliver'
DELETESLIVERPRIV = 'deleteslice'
SLIVERSTATUSPRIV = 'getsliceresources'
SHUTDOWNSLIVERPRIV = 'shutdown'
# Publicid format resource namespace. EG Resource URNs
# will be <namespace>:resource:<resourcetype>_<resourceid>
# This is something like the name of your AM
# See gen-certs.CERT_AUTHORITY
RESOURCE_NAMESPACE = 'geni//gpo//gcf'
REFAM_MAXLEASE_DAYS = 365
class Slice(object):
"""A slice has a URN, a list of resources, and an expiration time in UTC."""
def __init__(self, urn, expiration):
self.id = str(uuid.uuid4())
self.urn = urn
self.expiration = expiration
self.resources = dict()
def status(self, resources):
"""Determine the status of the sliver by examining the status
of each resource in the sliver.
"""
# If any resource is 'shutdown', the sliver is 'shutdown'
# Else if any resource is 'failed', the sliver is 'failed'
# Else if any resource is 'configuring', the sliver is 'configuring'
# Else if all resources are 'ready', the sliver is 'ready'
# Else the sliver is 'unknown'
rstat = [res.status for res in resources]
if Resource.STATUS_SHUTDOWN in rstat:
return Resource.STATUS_SHUTDOWN
elif Resource.STATUS_FAILED in rstat:
return Resource.STATUS_FAILED
elif Resource.STATUS_CONFIGURING in rstat:
return Resource.STATUS_CONFIGURING
elif rstat == [Resource.STATUS_READY for res in self.resources.values()]:
# All resources report status of ready
return Resource.STATUS_READY
else:
return Resource.STATUS_UNKNOWN
class ReferenceAggregateManager(object):
'''A reference Aggregate Manager that manages fake resources.'''
# root_cert is a single cert or dir of multiple certs
# that are trusted to sign credentials
def __init__(self, root_cert, urn_authority, url):
self._url = url
self._api_version = 2
self._slices = dict()
self._agg = Aggregate()
self._agg.add_resources([FakeVM(self._agg) for _ in range(3)])
self._cred_verifier = geni.CredentialVerifier(root_cert)
self._urn_authority = urn_authority
self._my_urn = publicid_to_urn("%s %s %s" % (self._urn_authority, 'authority', 'am'))
self.max_lease = datetime.timedelta(days=REFAM_MAXLEASE_DAYS)
self.logger = logging.getLogger('gcf.am2')
def GetVersion(self, options):
'''Specify version information about this AM. That could
include API version information, RSpec format and version
information, etc. Return a dict.'''
self.logger.info("Called GetVersion")
reqver = [dict(type="geni",
version="3",
schema="http://www.geni.net/resources/rspec/3/request.xsd",
namespace="http://www.geni.net/resources/rspec/3",
extensions=[])]
adver = [dict(type="geni",
version="3",
schema="http://www.geni.net/resources/rspec/3/ad.xsd",
namespace="http://www.geni.net/resources/rspec/3",
extensions=[])]
api_versions = dict()
api_versions[str(self._api_version)] = self._url
versions = dict(geni_api=2,
geni_api_versions=api_versions,
geni_request_rspec_versions=reqver,
geni_ad_rspec_versions=adver)
return dict(geni_api=versions['geni_api'],
code=dict(geni_code=0,
am_type="gcf2",
am_code=0),
value=versions,
output="")
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def ListResources(self, credentials, options):
'''Return an RSpec of resources managed at this AM.
If a geni_slice_urn
is given in the options, then only return resources assigned
to that slice. If geni_available is specified in the options,
then only report available resources. And if geni_compressed
option is specified, then compress the result.'''
self.logger.info('ListResources(%r)' % (options))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
# could require list or listnodes?
privileges = ()
# Note that verify throws an exception on failure.
# Use the client PEM format cert as retrieved
# from the https connection by the SecureXMLRPCServer
# to identify the caller.
try:
self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
None,
privileges)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
# If we get here, the credentials give the caller
# all needed privileges to act on the given target.
if 'geni_rspec_version' not in options:
# This is a required option, so error out with bad arguments.
self.logger.error('No geni_rspec_version supplied to ListResources.')
return self.errorResult(1, 'Bad Arguments: option geni_rspec_version was not supplied.')
if 'type' not in options['geni_rspec_version']:
self.logger.error('ListResources: geni_rspec_version does not contain a type field.')
return self.errorResult(1, 'Bad Arguments: option geni_rspec_version does not have a type field.')
if 'version' not in options['geni_rspec_version']:
self.logger.error('ListResources: geni_rspec_version does not contain a version field.')
return self.errorResult(1, 'Bad Arguments: option geni_rspec_version does not have a version field.')
# Look to see what RSpec version the client requested
# Error-check that the input value is supported.
rspec_type = options['geni_rspec_version']['type']
if isinstance(rspec_type, str):
rspec_type = rspec_type.lower().strip()
rspec_version = options['geni_rspec_version']['version']
if rspec_type != 'geni':
self.logger.error('ListResources: Unknown RSpec type %s requested', rspec_type)
return self.errorResult(4, 'Bad Version: requested RSpec type %s is not a valid option.' % (rspec_type))
if rspec_version != '3':
self.logger.error('ListResources: Unknown RSpec version %s requested', rspec_version)
            return self.errorResult(4, 'Bad Version: requested RSpec version %s is not a valid option.' % (rspec_version))
self.logger.info("ListResources requested RSpec %s (%s)", rspec_type, rspec_version)
if 'geni_slice_urn' in options:
slice_urn = options['geni_slice_urn']
if slice_urn in self._slices:
result = self.manifest_rspec(slice_urn)
else:
# return an empty rspec
return self._no_such_slice(slice_urn)
else:
all_resources = self._agg.catalog(None)
available = 'geni_available' in options and options['geni_available']
resource_xml = ""
for r in all_resources:
if available and not r.available:
continue
resource_xml = resource_xml + self.advert_resource(r)
result = self.advert_header() + resource_xml + self.advert_footer()
self.logger.debug("Result is now \"%s\"", result)
# Optionally compress the result
if 'geni_compressed' in options and options['geni_compressed']:
try:
result = base64.b64encode(zlib.compress(result))
except Exception, exc:
import traceback
self.logger.error("Error compressing and encoding resource list: %s", traceback.format_exc())
raise Exception("Server error compressing resource list", exc)
return dict(code=dict(geni_code=0,
am_type="gcf2",
am_code=0),
value=result,
output="")
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def CreateSliver(self, slice_urn, credentials, rspec, users, options):
"""Create a sliver with the given URN from the resources in
the given RSpec.
Return an RSpec of the actually allocated resources.
users argument provides extra information on configuring the resources
for runtime access.
"""
self.logger.info('CreateSliver(%r)' % (slice_urn))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
privileges = (CREATESLIVERPRIV,)
# Note that verify throws an exception on failure.
# Use the client PEM format cert as retrieved
# from the https connection by the SecureXMLRPCServer
# to identify the caller.
try:
creds = self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
# If we get here, the credentials give the caller
# all needed privileges to act on the given target.
if slice_urn in self._slices:
self.logger.error('Slice %s already exists.', slice_urn)
return self.errorResult(17, 'Slice %s already exists' % (slice_urn))
rspec_dom = None
try:
rspec_dom = minidom.parseString(rspec)
except Exception, exc:
self.logger.error("Cant create sliver %s. Exception parsing rspec: %s" % (slice_urn, exc))
return self.errorResult(1, 'Bad Args: RSpec is unparseable')
# Look at the version of the input request RSpec
# Make sure it is supported
# Then make sure that you return an RSpec in the same format
# EG if both V1 and V2 are supported, and the user gives V2 request,
# then you must return a V2 request and not V1
allresources = self._agg.catalog()
allrdict = dict()
for r in allresources:
if r.available:
allrdict[r.id] = r
# Note: This only handles unbound nodes. Any attempt by the client
# to specify a node is ignored.
resources = dict()
unbound = list()
for elem in rspec_dom.documentElement.getElementsByTagName('node'):
unbound.append(elem)
for elem in unbound:
client_id = elem.getAttribute('client_id')
keys = allrdict.keys()
if keys:
rid = keys[0]
resources[client_id] = allrdict[rid]
del allrdict[rid]
else:
return self.errorResult(6, 'Too Big: insufficient resources to fulfill request')
# determine max expiration time from credentials
# do not create a sliver that will outlive the slice!
expiration = datetime.datetime.utcnow() + self.max_lease
for cred in creds:
credexp = self._naiveUTC(cred.expiration)
if credexp < expiration:
expiration = credexp
newslice = Slice(slice_urn, expiration)
self._agg.allocate(slice_urn, resources.values())
for cid, r in resources.items():
newslice.resources[cid] = r.id
r.status = Resource.STATUS_READY
self._slices[slice_urn] = newslice
self.logger.info("Created new slice %s" % slice_urn)
result = self.manifest_rspec(slice_urn)
self.logger.debug('Result = %s', result)
return dict(code=dict(geni_code=0,
am_type="gcf2",
am_code=0),
value=result,
output="")
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def DeleteSliver(self, slice_urn, credentials, options):
'''Stop and completely delete the named sliver, and return True.'''
self.logger.info('DeleteSliver(%r)' % (slice_urn))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
privileges = (DELETESLIVERPRIV,)
# Note that verify throws an exception on failure.
# Use the client PEM format cert as retrieved
# from the https connection by the SecureXMLRPCServer
# to identify the caller.
try:
self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
# If we get here, the credentials give the caller
# all needed privileges to act on the given target.
if slice_urn in self._slices:
sliver = self._slices[slice_urn]
resources = self._agg.catalog(slice_urn)
if sliver.status(resources) == Resource.STATUS_SHUTDOWN:
self.logger.info("Sliver %s not deleted because it is shutdown",
slice_urn)
return self.errorResult(11, "Unavailable: Slice %s is unavailable." % (slice_urn))
self._agg.deallocate(slice_urn, None)
for r in resources:
r.status = Resource.STATUS_UNKNOWN
del self._slices[slice_urn]
self.logger.info("Sliver %r deleted" % slice_urn)
return self.successResult(True)
else:
return self._no_such_slice(slice_urn)
def SliverStatus(self, slice_urn, credentials, options):
'''Report as much as is known about the status of the resources
in the sliver. The AM may not know.
Return a dict of sliver urn, status, and a list of dicts resource
statuses.'''
# Loop over the resources in a sliver gathering status.
self.logger.info('SliverStatus(%r)' % (slice_urn))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
privileges = (SLIVERSTATUSPRIV,)
try:
self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
if slice_urn in self._slices:
theSlice = self._slices[slice_urn]
# Now calculate the status of the sliver
res_status = list()
resources = self._agg.catalog(slice_urn)
for res in resources:
self.logger.debug('Resource = %s', str(res))
# Gather the status of all the resources
# in the sliver. This could be actually
# communicating with the resources, or simply
# reporting the state of initialized, started, stopped, ...
res_status.append(dict(geni_urn=self.resource_urn(res),
geni_status=res.status,
geni_error=''))
self.logger.info("Calculated and returning slice %s status", slice_urn)
result = dict(geni_urn=slice_urn,
geni_status=theSlice.status(resources),
geni_resources=res_status)
return dict(code=dict(geni_code=0,
am_type="gcf2",
am_code=0),
value=result,
output="")
else:
return self._no_such_slice(slice_urn)
def RenewSliver(self, slice_urn, credentials, expiration_time, options):
'''Renew the local sliver that is part of the named Slice
until the given expiration time (in UTC with a TZ per RFC3339).
Requires at least one credential that is valid until then.
Return False on any error, True on success.'''
self.logger.info('RenewSliver(%r, %r)' % (slice_urn, expiration_time))
privileges = (RENEWSLIVERPRIV,)
try:
creds = self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
# All the credentials we just got are valid
if slice_urn in self._slices:
# If any credential will still be valid at the newly
# requested time, then we can do this.
resources = self._agg.catalog(slice_urn)
sliver = self._slices.get(slice_urn)
if sliver.status(resources) == Resource.STATUS_SHUTDOWN:
self.logger.info("Sliver %s not renewed because it is shutdown",
slice_urn)
return self.errorResult(11, "Unavailable: Slice %s is unavailable." % (slice_urn))
requested = dateutil.parser.parse(str(expiration_time))
# Per the AM API, the input time should be TZ-aware
# But since the slice cred may not (per ISO8601), convert
# it to naiveUTC for comparison
requested = self._naiveUTC(requested)
maxexp = datetime.datetime.min
for cred in creds:
credexp = self._naiveUTC(cred.expiration)
if credexp > maxexp:
maxexp = credexp
if credexp >= requested:
sliver.expiration = requested
self.logger.info("Sliver %r now expires on %r", slice_urn, expiration_time)
return self.successResult(True)
else:
self.logger.debug("Valid cred %r expires at %r before %r", cred, credexp, requested)
# Fell through then no credential expires at or after
# newly requested expiration time
self.logger.info("Can't renew sliver %r until %r because none of %d credential(s) valid until then (latest expires at %r)", slice_urn, expiration_time, len(creds), maxexp)
# FIXME: raise an exception so the client knows what
# really went wrong?
return self.errorResult(19, "Out of range: Expiration %s is out of range (past last credential expiration of %s)." % (expiration_time, maxexp))
else:
return self._no_such_slice(slice_urn)
def Shutdown(self, slice_urn, credentials, options):
'''For Management Authority / operator use: shut down a badly
behaving sliver, without deleting it to allow for forensics.'''
self.logger.info('Shutdown(%r)' % (slice_urn))
privileges = (SHUTDOWNSLIVERPRIV,)
try:
self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
if slice_urn in self._slices:
resources = self._agg.catalog(slice_urn)
for resource in resources:
resource.status = Resource.STATUS_SHUTDOWN
self.logger.info("Sliver %r shut down" % slice_urn)
return self.successResult(True)
else:
self.logger.info("Shutdown: No such slice: %s.", slice_urn)
return self._no_such_slice(slice_urn)
def successResult(self, value):
code_dict = dict(geni_code=0,
am_type="gcf2",
am_code=0)
return dict(code=code_dict,
value=value,
output="")
def _no_such_slice(self, slice_urn):
return self.errorResult(12, 'Search Failed: no slice "%s" found' % (slice_urn))
def errorResult(self, code, output, am_code=None):
code_dict = dict(geni_code=code, am_type="gcf2")
if am_code is not None:
code_dict['am_code'] = am_code
return dict(code=code_dict,
value="",
output=output)
def _naiveUTC(self, dt):
"""Converts dt to a naive datetime in UTC.
if 'dt' has a timezone then
convert to UTC
strip off timezone (make it "naive" in Python parlance)
"""
if dt.tzinfo:
tz_utc = dateutil.tz.tzutc()
dt = dt.astimezone(tz_utc)
dt = dt.replace(tzinfo=None)
return dt
def advert_resource(self, resource):
tmpl = ''' <node component_manager_id="%s"
component_name="%s"
component_id="%s"
exclusive="%s">
<available now="%s"/>
</node>
'''
resource_id = str(resource.id)
resource_exclusive = str(False).lower()
resource_available = str(resource.available).lower()
resource_urn = self.resource_urn(resource)
return tmpl % (self._my_urn,
resource_id,
resource_urn,
resource_exclusive,
resource_available)
def advert_header(self):
header = '''<?xml version="1.0" encoding="UTF-8"?>
<rspec xmlns="http://www.geni.net/resources/rspec/3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/ad.xsd"
type="advertisement">'''
return header
def advert_footer(self):
return '</rspec>'
def manifest_header(self):
header = '''<?xml version="1.0" encoding="UTF-8"?>
<rspec xmlns="http://www.geni.net/resources/rspec/3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/manifest.xsd"
type="manifest">'''
return header
def manifest_slice(self, slice_urn):
tmpl = '<node client_id="%s"/>'
result = ""
for cid in self._slices[slice_urn].resources.keys():
result = result + tmpl % (cid)
return result
def manifest_footer(self):
return '</rspec>'
def manifest_rspec(self, slice_urn):
return self.manifest_header() + self.manifest_slice(slice_urn) + self.manifest_footer()
def resource_urn(self, resource):
urn = publicid_to_urn("%s %s %s" % (self._urn_authority,
str(resource.type),
str(resource.id)))
return urn
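# --- Illustration (not part of the original module) --------------------------
# A minimal sketch of the AM API v2 result triple that every method of
# ReferenceAggregateManager above returns via successResult()/errorResult():
# a 'code' dict (geni_code plus am_type/am_code), a 'value', and an 'output'
# string. Illustrative only; never called.
def _demo_result_shape():
    ok = dict(code=dict(geni_code=0, am_type="gcf2", am_code=0),
              value=True, output="")
    err = dict(code=dict(geni_code=12, am_type="gcf2"),
               value="", output='Search Failed: no slice "urn:example" found')
    return ok, err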
class AggregateManager(object):
"""The public API for a GENI Aggregate Manager. This class provides the
XMLRPC interface and invokes a delegate for all the operations.
"""
def __init__(self, delegate):
self._delegate = delegate
self.logger = logging.getLogger('gcf.am2')
def _exception_result(self, exception):
output = str(exception)
self.logger.warning(output)
# XXX Code for no slice here?
return dict(code=dict(geni_code=102,
am_type="gcf2",
am_code=0),
value="",
output=output)
def GetVersion(self, options=dict()):
'''Specify version information about this AM. That could
include API version information, RSpec format and version
information, etc. Return a dict.'''
try:
return self._delegate.GetVersion(options)
except Exception as e:
self.logger.exception("Error in GetVersion:")
return self._exception_result(e)
def ListResources(self, credentials, options):
'''Return an RSpec of resources managed at this AM.
If a geni_slice_urn
is given in the options, then only return resources assigned
to that slice. If geni_available is specified in the options,
then only report available resources. And if geni_compressed
option is specified, then compress the result.'''
try:
return self._delegate.ListResources(credentials, options)
except Exception as e:
self.logger.exception("Error in ListResources:")
return self._exception_result(e)
def CreateSliver(self, slice_urn, credentials, rspec, users, options):
"""Create a sliver with the given URN from the resources in
the given RSpec.
Return an RSpec of the actually allocated resources.
users argument provides extra information on configuring the resources
for runtime access.
"""
try:
return self._delegate.CreateSliver(slice_urn, credentials, rspec,
users, options)
except Exception as e:
self.logger.exception("Error in CreateSliver:")
return self._exception_result(e)
def DeleteSliver(self, slice_urn, credentials, options):
"""Delete the given sliver. Return true on success."""
try:
return self._delegate.DeleteSliver(slice_urn, credentials, options)
except Exception as e:
self.logger.exception("Error in DeleteSliver:")
return self._exception_result(e)
def SliverStatus(self, slice_urn, credentials, options):
'''Report as much as is known about the status of the resources
in the sliver. The AM may not know.'''
try:
return self._delegate.SliverStatus(slice_urn, credentials, options)
except Exception as e:
self.logger.exception("Error in SliverStatus:")
return self._exception_result(e)
def RenewSliver(self, slice_urn, credentials, expiration_time, options):
"""Extend the life of the given sliver until the given
expiration time. Return False on error."""
try:
return self._delegate.RenewSliver(slice_urn, credentials,
expiration_time, options)
except Exception as e:
self.logger.exception("Error in RenewSliver:")
return self._exception_result(e)
def Shutdown(self, slice_urn, credentials, options):
'''For Management Authority / operator use: shut down a badly
behaving sliver, without deleting it to allow for forensics.'''
try:
return self._delegate.Shutdown(slice_urn, credentials, options)
except Exception as e:
self.logger.exception("Error in Shutdown:")
return self._exception_result(e)
class AggregateManagerServer(object):
"""An XMLRPC Aggregate Manager Server. Delegates calls to given delegate,
or the default printing AM."""
def __init__(self, addr, keyfile=None, certfile=None,
trust_roots_dir=None,
ca_certs=None, base_name=None):
# ca_certs arg here must be a file of concatenated certs
if ca_certs is None:
raise Exception('Missing CA Certs')
elif not os.path.isfile(os.path.expanduser(ca_certs)):
raise Exception('CA Certs must be an existing file of accepted root certs: %s' % ca_certs)
# Decode the addr into a URL. Is there a pythonic way to do this?
server_url = "https://%s:%d/" % addr
delegate = ReferenceAggregateManager(trust_roots_dir, base_name,
server_url)
# FIXME: set logRequests=true if --debug
self._server = SecureXMLRPCServer(addr, keyfile=keyfile,
certfile=certfile, ca_certs=ca_certs)
self._server.register_instance(AggregateManager(delegate))
# Set the server on the delegate so it can access the
# client certificate.
delegate._server = self._server
        if base_name is not None:
global RESOURCE_NAMESPACE
RESOURCE_NAMESPACE = base_name
def serve_forever(self):
self._server.serve_forever()
def register_instance(self, instance):
# Pass the AM instance to the generic XMLRPC server,
# which lets it know what XMLRPC methods to expose
self._server.register_instance(instance)
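# --- Illustration (not part of the original module) --------------------------
# A minimal sketch of how this server is typically constructed and started
# (normally done by gcf-am.py). The address and file paths below are
# hypothetical placeholders; ca_certs must point at an existing file of
# concatenated root certs, as checked in __init__ above. Never called here.
def _demo_run_server():
    ams = AggregateManagerServer(('localhost', 8001),
                                 keyfile='am-key.pem',
                                 certfile='am-cert.pem',
                                 trust_roots_dir='trusted_roots',
                                 ca_certs='trusted_roots/CATedCACerts.pem',
                                 base_name='geni//gpo//gcf')
    ams.serve_forever()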
|
|
# -*- coding: utf-8 -*-
"""
flaskbb.management.models
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains all management related models.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from wtforms import (TextField, IntegerField, FloatField, BooleanField,
SelectField, SelectMultipleField, validators)
from flask_wtf import Form
from flaskbb._compat import max_integer, text_type, iteritems
from flaskbb.extensions import db, cache
from flaskbb.utils.database import CRUDMixin
class SettingsGroup(db.Model, CRUDMixin):
__tablename__ = "settingsgroup"
key = db.Column(db.String(255), primary_key=True)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=False)
settings = db.relationship("Setting", lazy="dynamic", backref="group",
cascade="all, delete-orphan")
class Setting(db.Model, CRUDMixin):
__tablename__ = "settings"
key = db.Column(db.String(255), primary_key=True)
value = db.Column(db.PickleType, nullable=False)
settingsgroup = db.Column(db.String,
db.ForeignKey('settingsgroup.key',
use_alter=True,
name="fk_settingsgroup"),
nullable=False)
# The name (displayed in the form)
name = db.Column(db.String(200), nullable=False)
# The description (displayed in the form)
description = db.Column(db.Text, nullable=False)
# Available types: string, integer, float, boolean, select, selectmultiple
value_type = db.Column(db.String(20), nullable=False)
    # Extra attributes, e.g. validation constraints (min, max length, ...)
# For Select*Fields required: choices
extra = db.Column(db.PickleType)
@classmethod
def get_form(cls, group):
"""Returns a Form for all settings found in :class:`SettingsGroup`.
:param group: The settingsgroup name. It is used to get the settings
which are in the specified group.
"""
class SettingsForm(Form):
pass
# now parse the settings in this group
for setting in group.settings:
field_validators = []
if setting.value_type in ("integer", "float"):
validator_class = validators.NumberRange
elif setting.value_type == "string":
validator_class = validators.Length
# generate the validators
if "min" in setting.extra:
# Min number validator
field_validators.append(
validator_class(min=setting.extra["min"])
)
if "max" in setting.extra:
# Max number validator
field_validators.append(
validator_class(max=setting.extra["max"])
)
# Generate the fields based on value_type
# IntegerField
if setting.value_type == "integer":
setattr(
SettingsForm, setting.key,
IntegerField(setting.name, validators=field_validators,
description=setting.description)
)
# FloatField
elif setting.value_type == "float":
setattr(
SettingsForm, setting.key,
FloatField(setting.name, validators=field_validators,
description=setting.description)
)
# TextField
elif setting.value_type == "string":
setattr(
SettingsForm, setting.key,
TextField(setting.name, validators=field_validators,
description=setting.description)
)
# SelectMultipleField
elif setting.value_type == "selectmultiple":
# if no coerce is found, it will fallback to unicode
if "coerce" in setting.extra:
coerce_to = setting.extra['coerce']
else:
coerce_to = text_type
setattr(
SettingsForm, setting.key,
SelectMultipleField(
setting.name,
choices=setting.extra['choices'](),
coerce=coerce_to,
description=setting.description
)
)
# SelectField
elif setting.value_type == "select":
# if no coerce is found, it will fallback to unicode
if "coerce" in setting.extra:
coerce_to = setting.extra['coerce']
else:
coerce_to = text_type
setattr(
SettingsForm, setting.key,
SelectField(
setting.name,
coerce=coerce_to,
choices=setting.extra['choices'](),
description=setting.description)
)
# BooleanField
elif setting.value_type == "boolean":
setattr(
SettingsForm, setting.key,
BooleanField(setting.name, description=setting.description)
)
return SettingsForm
@classmethod
def get_all(cls):
return cls.query.all()
@classmethod
def update(cls, settings, app=None):
"""Updates the cache and stores the changes in the
database.
:param settings: A dictionary with setting items.
"""
# update the database
for key, value in iteritems(settings):
setting = cls.query.filter(Setting.key == key.lower()).first()
setting.value = value
db.session.add(setting)
db.session.commit()
cls.invalidate_cache()
@classmethod
def get_settings(cls, from_group=None):
"""This will return all settings with the key as the key for the dict
and the values are packed again in a dict which contains
the remaining attributes.
:param from_group: Optionally - Returns only the settings from a group.
"""
result = None
if from_group is not None:
result = from_group.settings
else:
result = cls.query.all()
settings = {}
for setting in result:
settings[setting.key] = {
'name': setting.name,
'description': setting.description,
'value': setting.value,
'value_type': setting.value_type,
'extra': setting.extra
}
return settings
@classmethod
@cache.memoize(timeout=max_integer)
def as_dict(cls, from_group=None, upper=True):
"""Returns all settings as a dict. This method is cached. If you want
to invalidate the cache, simply execute ``self.invalidate_cache()``.
:param from_group: Returns only the settings from the group as a dict.
:param upper: If upper is ``True``, the key will use upper-case
                      letters. Defaults to ``True``.
"""
settings = {}
result = None
if from_group is not None:
result = SettingsGroup.query.filter_by(key=from_group).\
first_or_404()
result = result.settings
else:
print(Setting.query)
result = cls.query.all()
for setting in result:
if upper:
setting_key = setting.key.upper()
else:
setting_key = setting.key
settings[setting_key] = setting.value
return settings
@classmethod
def invalidate_cache(cls):
"""Invalidates this objects cached metadata."""
cache.delete_memoized(cls.as_dict, cls)
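# --- Illustration (not part of the original module) ---------------------------
# A minimal sketch, assuming an application context with the database and the
# cache extension initialized, of how the classmethods above are typically
# used together. The 'project_title' key is a hypothetical setting key.
# Illustrative only; nothing here runs on import.
def _demo_settings_roundtrip():
    before = Setting.as_dict()                     # cached, upper-cased keys
    Setting.update({"project_title": "My Forum"})  # writes the DB, clears cache
    return before, Setting.as_dict()               # re-queried and re-cached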
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Encrypt/decrypt files with symmetric AES cipher-block chaining (CBC) mode.
Usage:
File Encryption:
aescrypt.py [-f] infile [outfile]
File decryption:
aescrypt.py -d [-f] infile [outfile]
This script is derived from an answer to this StackOverflow question:
http://stackoverflow.com/questions/16761458/
I changed the key derivation function to use PBKDF2.
"""
from __future__ import print_function, unicode_literals
__all__ = ('encrypt', 'decrypt')
import argparse
import os
import struct
import sys
from getpass import getpass
from os.path import exists, splitext
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from pbkdf2 import PBKDF2
SALT_MARKER = b'$'
ITERATIONS = 1000
def encrypt(infile, outfile, password, key_size=32, salt_marker=SALT_MARKER,
kdf_iterations=ITERATIONS, hashmod=SHA256):
"""Encrypt infile and write it to outfile using password to generate key.
The encryption algorithm used is symmetric AES in cipher-block chaining
(CBC) mode.
``key_size`` may be 16, 24 or 32 (default).
The key is derived via the PBKDF2 key derivation function (KDF) from the
password and a random salt of 16 bytes (the AES block size) minus the
length of the salt header (see below).
    The hash function used by PBKDF2 is SHA256 by default. You can pass a
    different hash function module via the ``hashmod`` argument. The module
    must adhere to the Python API for Cryptographic Hash Functions (PEP 247).
    PBKDF2 uses a number of iterations of the hash function to derive the key,
    which can be set via the ``kdf_iterations`` keyword argument. The default
    number is 1000 and the maximum 65535.
The header and the salt are written to the first block of the encrypted
    file. The header consists of the number of KDF iterations encoded as a
    big-endian 16-bit word, wrapped by ``salt_marker`` on both sides. With the
default value of ``salt_marker = b'$'``, the header size is thus 4 and the
salt 12 bytes. The salt marker must be a byte string of 1-6 bytes length.
The last block of the encrypted file is padded with up to 16 bytes, all
having the value of the length of the padding.
"""
if not 1 <= len(salt_marker) <= 6:
raise ValueError('The salt_marker must be one to six bytes long.')
elif not isinstance(salt_marker, bytes):
raise TypeError('salt_marker must be a bytes instance.')
if kdf_iterations >= 65536:
raise ValueError('kdf_iterations must be <= 65535.')
bs = AES.block_size
header = salt_marker + struct.pack('>H', kdf_iterations) + salt_marker
salt = os.urandom(bs - len(header))
kdf = PBKDF2(password, salt, min(kdf_iterations, 65535), hashmod)
key = kdf.read(key_size)
iv = os.urandom(bs)
cipher = AES.new(key, AES.MODE_CBC, iv)
outfile.write(header + salt)
outfile.write(iv)
finished = False
while not finished:
chunk = infile.read(1024 * bs)
if len(chunk) == 0 or len(chunk) % bs != 0:
padding_length = (bs - len(chunk) % bs) or bs
chunk += (padding_length * chr(padding_length)).encode()
finished = True
outfile.write(cipher.encrypt(chunk))
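# --- Illustration (not part of the original script) ---------------------------
# A minimal sketch of the header layout documented in encrypt() above, using
# the module defaults (salt_marker=b'$', 1000 KDF iterations). Illustrative
# only; never called by encrypt()/decrypt().
def _demo_header_layout():
    header = SALT_MARKER + struct.pack('>H', ITERATIONS) + SALT_MARKER
    assert header == b'$\x03\xe8$'           # 4-byte header
    salt_len = AES.block_size - len(header)  # 16 - 4 = 12 random salt bytes
    return header, salt_len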
def decrypt(infile, outfile, password, key_size=32, salt_marker=SALT_MARKER,
hashmod=SHA256):
"""Decrypt infile and write it to outfile using password to derive key.
See `encrypt` for documentation of the encryption algorithm and parameters.
"""
mlen = len(salt_marker)
hlen = mlen * 2 + 2
if not 1 <= mlen <= 6:
raise ValueError('The salt_marker must be one to six bytes long.')
elif not isinstance(salt_marker, bytes):
raise TypeError('salt_marker must be a bytes instance.')
bs = AES.block_size
salt = infile.read(bs)
if salt[:mlen] == salt_marker and salt[mlen + 2:hlen] == salt_marker:
kdf_iterations = struct.unpack('>H', salt[mlen:mlen + 2])[0]
salt = salt[hlen:]
else:
kdf_iterations = ITERATIONS
if kdf_iterations >= 65536:
raise ValueError('kdf_iterations must be <= 65535.')
iv = infile.read(bs)
kdf = PBKDF2(password, salt, kdf_iterations, hashmod)
key = kdf.read(key_size)
cipher = AES.new(key, AES.MODE_CBC, iv)
next_chunk = b''
finished = False
while not finished:
chunk, next_chunk = next_chunk, cipher.decrypt(infile.read(1024 * bs))
if not next_chunk:
padlen = chunk[-1]
if isinstance(padlen, str):
padlen = ord(padlen)
padding = padlen * chr(padlen)
else:
padding = (padlen * chr(chunk[-1])).encode()
if padlen < 1 or padlen > bs:
raise ValueError("bad decrypt pad (%d)" % padlen)
# all the pad-bytes must be the same
if chunk[-padlen:] != padding:
# this is similar to the bad decrypt:evp_enc.c
# from openssl program
raise ValueError("bad decrypt")
chunk = chunk[:-padlen]
finished = True
outfile.write(chunk)
def main(args=None):
ap = argparse.ArgumentParser(description=__doc__.splitlines()[0])
ap.add_argument('-d', '--decrypt', action="store_true",
help="Decrypt input file")
ap.add_argument('-f', '--force', action="store_true",
help="Overwrite output file if it exists")
ap.add_argument('infile', help="Input file")
ap.add_argument('outfile', nargs='?', help="Output file")
args = ap.parse_args(args if args is not None else sys.argv[1:])
if not args.outfile:
if args.decrypt:
args.outfile = splitext(args.infile)[0]
else:
args.outfile = args.infile + '.enc'
if args.outfile == args.infile:
print("Input and output file must not be the same.")
return 1
if exists(args.outfile) and not args.force:
print("Output file '%s' exists. "
"Use option -f to override." % args.outfile)
return 1
with open(args.infile, 'rb') as infile, \
open(args.outfile, 'wb') as outfile:
if args.decrypt:
decrypt(infile, outfile, getpass("Enter decryption password: "))
else:
try:
while True:
passwd = getpass("Enter encryption password: ")
passwd2 = getpass("Verify password: ")
if passwd != passwd2:
print("Password mismatch!")
else:
break
except (EOFError, KeyboardInterrupt):
return 1
encrypt(infile, outfile, passwd)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]) or 0)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapt TF-agents parallel process environments to adversarial env setting.
Note that the environments had to be copied and modified rather than overridden
because of the way parent processes are called for multiprocessing.
Adds two new functions: reset_agent, and step_adversary in addition to usual
RL env functions. Therefore we have the following environment functions:
env.reset(): completely resets the environment and removes anything the
adversary has built.
env.reset_agent(): resets the position of the agent, but does not
remove the obstacles the adversary has created when building the env.
env.step(): steps the agent as before in the environment. i.e. if the agent
passes action 'left' it will move left.
env.step_adversary(): processes an adversary action, which involves choosing
the location of the agent, goal, or an obstacle.
Adds additional functions for logging metrics related to the generated
environments, like the shortest path length to the goal.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import sys
import traceback
from absl import logging
import cloudpickle
import gin
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.environments import py_environment
from tf_agents.system import system_multiprocessing
from tf_agents.utils import nest_utils
# Import needed to trigger env registration, so pylint: disable=unused-import
from social_rl import gym_multigrid
# Worker polling period in seconds.
_POLLING_PERIOD = 0.1
@gin.configurable
class AdversarialParallelPyEnvironment(py_environment.PyEnvironment):
"""Batch together environments and simulate them in external processes.
The environments are created in external processes by calling the provided
callables. This can be an environment class, or a function creating the
environment and potentially wrapping it. The returned environment should not
access global variables.
"""
def __init__(self, env_constructors, start_serially=True, blocking=False,
flatten=False):
"""Batch together environments and simulate them in external processes.
The environments can be different but must use the same action and
observation specs.
Args:
env_constructors: List of callables that create environments.
start_serially: Whether to start environments serially or in parallel.
blocking: Whether to step environments one after another.
flatten: Boolean, whether to use flatten action and time_steps during
communication to reduce overhead.
Raises:
ValueError: If the action or observation specs don't match.
"""
super(AdversarialParallelPyEnvironment, self).__init__()
self._envs = [AdversarialProcessPyEnvironment(ctor, flatten=flatten)
for ctor in env_constructors]
self._num_envs = len(env_constructors)
self._blocking = blocking
self._start_serially = start_serially
self.start()
self._action_spec = self._envs[0].action_spec()
self._observation_spec = self._envs[0].observation_spec()
self._time_step_spec = self._envs[0].time_step_spec()
self._parallel_execution = True
if any(env.action_spec() != self._action_spec for env in self._envs):
raise ValueError('All environments must have the same action spec.')
if any(env.time_step_spec() != self._time_step_spec for env in self._envs):
raise ValueError('All environments must have the same time_step_spec.')
self._flatten = flatten
self.adversary_action_spec = self._envs[0].adversary_action_spec
self.adversary_observation_spec = self._envs[0].adversary_observation_spec
self.adversary_time_step_spec = self._envs[0].adversary_time_step_spec
def start(self):
logging.info('Spawning all processes.')
for env in self._envs:
env.start(wait_to_start=self._start_serially)
if not self._start_serially:
logging.info('Waiting for all processes to start.')
for env in self._envs:
env.wait_start()
logging.info('All processes started.')
@property
def batched(self):
return True
@property
def batch_size(self):
return self._num_envs
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return self._action_spec
def time_step_spec(self):
return self._time_step_spec
def _reset(self):
"""Reset all environments and combine the resulting observation.
Returns:
Time step with batch dimension.
"""
time_steps = [env.reset(self._blocking) for env in self._envs]
if not self._blocking:
time_steps = [promise() for promise in time_steps]
return self._stack_time_steps(time_steps)
def _step(self, actions):
"""Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action, possibly nested, to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags.
"""
time_steps = [
env.step(action, self._blocking)
for env, action in zip(self._envs, self._unstack_actions(actions))]
# When blocking is False we get promises that need to be called.
if not self._blocking:
time_steps = [promise() for promise in time_steps]
return self._stack_time_steps(time_steps)
def reset_agent(self):
"""Reset all environments and combine the resulting observation.
Returns:
Time step with batch dimension.
"""
time_steps = [env.reset_agent(self._blocking) for env in self._envs]
if not self._blocking:
time_steps = [promise() for promise in time_steps]
self._current_time_step = self._stack_time_steps(time_steps)
return self._current_time_step
def reset_random(self):
"""Reset all environments randomly and combine the resulting observation.
Returns:
Time step with batch dimension.
"""
time_steps = [env.reset_random(self._blocking) for env in self._envs]
if not self._blocking:
time_steps = [promise() for promise in time_steps]
self._current_time_step = self._stack_time_steps(time_steps)
return self._current_time_step
def step_adversary(self, actions):
"""Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action, possibly nested, to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags.
"""
time_steps = [
env.step_adversary(action, self._blocking)
for env, action in zip(self._envs, self._unstack_actions(actions))]
# When blocking is False we get promises that need to be called.
if not self._blocking:
time_steps = [promise() for promise in time_steps]
return self._stack_time_steps(time_steps)
def get_num_blocks(self):
if self._num_envs == 1:
return nest_utils.batch_nested_array(
tf.cast(self._envs[0].n_clutter_placed, tf.float32))
else:
return tf.stack(
[tf.cast(env.n_clutter_placed, tf.float32) for env in self._envs])
def get_distance_to_goal(self):
if self._num_envs == 1:
return nest_utils.batch_nested_array(
tf.cast(self._envs[0].distance_to_goal, tf.float32))
else:
return tf.stack(
[tf.cast(env.distance_to_goal, tf.float32) for env in self._envs])
def get_deliberate_placement(self):
if self._num_envs == 1:
return nest_utils.batch_nested_array(
tf.cast(self._envs[0].deliberate_agent_placement, tf.float32))
else:
return tf.stack(
[tf.cast(env.deliberate_agent_placement,
tf.float32) for env in self._envs])
def get_goal_x(self):
if self._num_envs == 1:
return nest_utils.batch_nested_array(
tf.cast(self._envs[0].get_goal_x(), tf.float32))
else:
return tf.stack(
[tf.cast(env.get_goal_x(), tf.float32) for env in self._envs])
def get_goal_y(self):
if self._num_envs == 1:
return nest_utils.batch_nested_array(
tf.cast(self._envs[0].get_goal_y(), tf.float32))
else:
return tf.stack(
[tf.cast(env.get_goal_y(), tf.float32) for env in self._envs])
def get_passable(self):
if self._num_envs == 1:
return nest_utils.batch_nested_array(
tf.cast(self._envs[0].passable, tf.float32))
else:
return tf.stack(
[tf.cast(env.passable, tf.float32) for env in self._envs])
def get_shortest_path_length(self):
if self._num_envs == 1:
return nest_utils.batch_nested_array(
tf.cast(self._envs[0].shortest_path_length, tf.float32))
else:
return tf.stack(
[tf.cast(env.shortest_path_length, tf.float32) for env in self._envs])
def close(self):
"""Close all external process."""
logging.info('Closing all processes.')
for env in self._envs:
env.close()
logging.info('All processes closed.')
def _stack_time_steps(self, time_steps):
"""Given a list of TimeStep, combine to one with a batch dimension."""
if self._flatten:
return nest_utils.fast_map_structure_flatten(
lambda *arrays: np.stack(arrays), self._time_step_spec, *time_steps)
else:
return nest_utils.fast_map_structure(
lambda *arrays: np.stack(arrays), *time_steps)
def _unstack_actions(self, batched_actions):
"""Returns a list of actions from potentially nested batch of actions."""
flattened_actions = tf.nest.flatten(batched_actions)
if self._flatten:
unstacked_actions = zip(*flattened_actions)
else:
unstacked_actions = [
tf.nest.pack_sequence_as(batched_actions, actions)
for actions in zip(*flattened_actions)
]
return unstacked_actions
def seed(self, seeds):
"""Seeds the parallel environments."""
if len(seeds) != len(self._envs):
raise ValueError(
'Number of seeds should match the number of parallel_envs.')
promises = [env.call('seed', seed) for seed, env in zip(seeds, self._envs)]
# Block until all envs are seeded.
return [promise() for promise in promises]
class AdversarialProcessPyEnvironment(object):
"""Step a single env in a separate process for lock free paralellism."""
# Message types for communication via the pipe.
_READY = 1
_ACCESS = 2
_CALL = 3
_RESULT = 4
_EXCEPTION = 5
_CLOSE = 6
def __init__(self, env_constructor, flatten=False):
"""Step environment in a separate process for lock free paralellism.
The environment is created in an external process by calling the provided
callable. This can be an environment class, or a function creating the
environment and potentially wrapping it. The returned environment should
not access global variables.
Args:
env_constructor: Callable that creates and returns a Python environment.
flatten: Boolean, whether to assume flattened actions and time_steps
during communication to avoid overhead.
Attributes:
observation_spec: The cached observation spec of the environment.
action_spec: The cached action spec of the environment.
time_step_spec: The cached time step spec of the environment.
"""
# NOTE(ebrevdo): multiprocessing uses the standard py3 pickler which does
# not support anonymous lambdas. Folks usually pass anonymous lambdas as
# env constructors. Here we work around this by manually pickling
# the constructor using cloudpickle; which supports these. In the
# new process, we'll unpickle this constructor and run it.
self._pickled_env_constructor = cloudpickle.dumps(env_constructor)
self._flatten = flatten
self._observation_spec = None
self._action_spec = None
self._time_step_spec = None
def start(self, wait_to_start=True):
"""Start the process.
Args:
wait_to_start: Whether the call should wait for an env initialization.
"""
mp_context = system_multiprocessing.get_context()
self._conn, conn = mp_context.Pipe()
self._process = mp_context.Process(target=self._worker, args=(conn,))
atexit.register(self.close)
self._process.start()
if wait_to_start:
self.wait_start()
def wait_start(self):
"""Wait for the started process to finish initialization."""
result = self._conn.recv()
if isinstance(result, Exception):
self._conn.close()
self._process.join(5)
raise result
assert result == self._READY, result
def observation_spec(self):
if not self._observation_spec:
self._observation_spec = self.call('observation_spec')()
return self._observation_spec
def action_spec(self):
if not self._action_spec:
self._action_spec = self.call('action_spec')()
return self._action_spec
def time_step_spec(self):
if not self._time_step_spec:
self._time_step_spec = self.call('time_step_spec')()
return self._time_step_spec
def __getattr__(self, name):
"""Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
This method is only called if the attribute is not found in the dictionary
of `ParallelPyEnvironment`'s definition.
Args:
name: Attribute to access.
Returns:
Value of the attribute.
"""
# Accessed by multiprocessing Pickler or this function.
if name.startswith('_'):
return super(AdversarialProcessPyEnvironment, self).__getattribute__(name)
# All other requests get sent to the worker.
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that blocks and provides the return value when called.
"""
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
if self._process.is_alive():
self._process.join(5)
def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
time step when blocking, otherwise callable that returns the time step.
"""
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
def reset(self, blocking=True):
"""Reset the environment.
Args:
blocking: Whether to wait for the result.
Returns:
New observation when blocking, otherwise callable that returns the new
observation.
"""
promise = self.call('reset')
if blocking:
return promise()
else:
return promise
def step_adversary(self, action, blocking=True):
promise = self.call('step_adversary', action)
if blocking:
return promise()
else:
return promise
def reset_agent(self, blocking=True):
promise = self.call('reset_agent')
if blocking:
return promise()
else:
return promise
def reset_random(self, blocking=True):
promise = self.call('reset_random')
if blocking:
return promise()
else:
return promise
def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
"""
message, payload = self._conn.recv()
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
self.close()
raise KeyError('Received message of unexpected type {}'.format(message))
def _worker(self, conn):
"""The process waits for actions and sends back environment results.
Args:
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
"""
try:
env = cloudpickle.loads(self._pickled_env_constructor)()
action_spec = env.action_spec()
conn.send(self._READY) # Ready.
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
if self._flatten and name == 'step':
args = [tf.nest.pack_sequence_as(action_spec, args[0])]
elif self._flatten and name == 'step_adversary':
args = [tf.nest.pack_sequence_as(
env.adversary_action_spec, args[0])]
result = getattr(env, name)(*args, **kwargs)
if self._flatten and name in [
'step', 'reset', 'step_adversary', 'reset_agent', 'reset_random']:
result = tf.nest.flatten(result)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
env.close()
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception: # pylint: disable=broad-except
etype, evalue, tb = sys.exc_info()
stacktrace = ''.join(traceback.format_exception(etype, evalue, tb))
message = 'Error in environment process: {}'.format(stacktrace)
logging.error(message)
conn.send((self._EXCEPTION, stacktrace))
finally:
conn.close()
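# Illustrative usage sketch (not part of the original module). The class name and
# method signatures come from the code above; `make_env` and `action` are
# hypothetical placeholders for a real environment factory and a valid action.
#
#     env = AdversarialProcessPyEnvironment(make_env, flatten=False)
#     env.start(wait_to_start=True)        # fork the worker and wait for READY
#
#     first_ts = env.reset()               # blocking: returns the time step
#
#     promise = env.step(action, blocking=False)   # non-blocking: returns a promise
#     ts = promise()                       # resolves by reading the pipe
#
#     env.close()                          # send _CLOSE and join the worker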
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from oslo_utils import strutils
import six
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
def create_upload_form_attributes(prefix, input_type, name):
"""Creates attribute dicts for the switchable upload form
:type prefix: str
:param prefix: prefix (environment, template) of field
:type input_type: str
:param input_type: field type (file, raw, url)
:type name: str
:param name: translated text label to display to user
:rtype: dict
:return: an attribute set to pass to form build
"""
attributes = {'class': 'switched', 'data-switch-on': prefix + 'source'}
attributes['data-' + prefix + 'source-' + input_type] = name
return attributes
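# Illustrative example (not in the original source): calling
#     create_upload_form_attributes('template', 'file', _('Template File'))
# returns
#     {'class': 'switched',
#      'data-switch-on': 'templatesource',
#      'data-templatesource-file': 'Template File'}
# which Horizon's switchable-field JavaScript uses to show the field only while
# the matching "templatesource" choice (here, File) is selected.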
class TemplateForm(forms.SelfHandlingForm):
class Meta(object):
name = _('Select Template')
help_text = _('Select a template to launch a stack.')
# TODO(jomara) - update URL choice for template & environment files
# w/ client side download when applicable
base_choices = [('file', _('File')),
('raw', _('Direct Input'))]
url_choice = [('url', _('URL'))]
attributes = {'class': 'switchable', 'data-slug': 'templatesource'}
template_source = forms.ChoiceField(label=_('Template Source'),
choices=base_choices + url_choice,
widget=forms.Select(attrs=attributes))
attributes = create_upload_form_attributes(
'template',
'file',
_('Template File'))
template_upload = forms.FileField(
label=_('Template File'),
help_text=_('A local template to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'url',
_('Template URL'))
template_url = forms.URLField(
label=_('Template URL'),
help_text=_('An external (HTTP) URL to load the template from.'),
widget=forms.TextInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'raw',
_('Template Data'))
template_data = forms.CharField(
label=_('Template Data'),
help_text=_('The raw contents of the template.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
attributes = {'data-slug': 'envsource', 'class': 'switchable'}
environment_source = forms.ChoiceField(
label=_('Environment Source'),
choices=base_choices,
widget=forms.Select(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'file',
_('Environment File'))
environment_upload = forms.FileField(
label=_('Environment File'),
help_text=_('A local environment to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'raw',
_('Environment Data'))
environment_data = forms.CharField(
label=_('Environment Data'),
help_text=_('The raw contents of the environment file.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(TemplateForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned = super(TemplateForm, self).clean()
files = self.request.FILES
self.clean_uploaded_files('template', _('template'), cleaned, files)
self.clean_uploaded_files('environment',
_('environment'),
cleaned,
files)
# Validate the template and get back the params.
kwargs = {}
if cleaned['template_data']:
kwargs['template'] = cleaned['template_data']
else:
kwargs['template_url'] = cleaned['template_url']
if cleaned['environment_data']:
kwargs['environment'] = cleaned['environment_data']
try:
validated = api.heat.template_validate(self.request, **kwargs)
cleaned['template_validate'] = validated
except Exception as e:
raise forms.ValidationError(unicode(e))
return cleaned
def clean_uploaded_files(self, prefix, field_label, cleaned, files):
"""Cleans Template & Environment data from form upload.
Does some of the crunchy bits for processing uploads vs raw
data depending on what the user specified. Identical process
for environment data & template data.
:type prefix: str
:param prefix: prefix (environment, template) of field
:type field_label: str
:param field_label: translated prefix str for messages
:type cleaned: dict
:param cleaned: existing cleaned fields from form
:type files: dict
:param files: files uploaded with the request
:rtype: dict
:return: cleaned dict including environment & template data
"""
upload_str = prefix + "_upload"
data_str = prefix + "_data"
url = cleaned.get(prefix + '_url')
data = cleaned.get(prefix + '_data')
has_upload = upload_str in files
# Uploaded file handler
if has_upload and not url:
log_template_name = files[upload_str].name
LOG.info('got upload %s', log_template_name)
tpl = files[upload_str].read()
if tpl.startswith('{'):
try:
json.loads(tpl)
except Exception as e:
msg = _('There was a problem parsing the'
' %(prefix)s: %(error)s')
msg = msg % {'prefix': prefix, 'error': e}
raise forms.ValidationError(msg)
cleaned[data_str] = tpl
# URL handler
elif url and (has_upload or data):
msg = _('Please specify a %s using only one source method.')
msg = msg % field_label
raise forms.ValidationError(msg)
elif prefix == 'template':
# Check for raw template input - blank environment allowed
if not url and not data:
msg = _('You must specify a template via one of the '
'available sources.')
raise forms.ValidationError(msg)
def create_kwargs(self, data):
kwargs = {'parameters': data['template_validate'],
'environment_data': data['environment_data'],
'template_data': data['template_data'],
'template_url': data['template_url']}
if data.get('stack_id'):
kwargs['stack_id'] = data['stack_id']
return kwargs
def handle(self, request, data):
kwargs = self.create_kwargs(data)
# NOTE (gabriel): This is a bit of a hack, essentially rewriting this
# request so that we can chain it as an input to the next view...
# but hey, it totally works.
request.method = 'GET'
return self.next_view.as_view()(request, **kwargs)
class ChangeTemplateForm(TemplateForm):
class Meta(object):
name = _('Edit Template')
help_text = _('Select a new template to re-launch a stack.')
stack_id = forms.CharField(
label=_('Stack ID'),
widget=forms.widgets.HiddenInput)
stack_name = forms.CharField(
label=_('Stack Name'),
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
class CreateStackForm(forms.SelfHandlingForm):
param_prefix = '__param_'
class Meta(object):
name = _('Create Stack')
template_data = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
template_url = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
environment_data = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
parameters = forms.CharField(
widget=forms.widgets.HiddenInput)
stack_name = forms.RegexField(
max_length=255,
label=_('Stack Name'),
help_text=_('Name of the stack to create.'),
regex=r"^[a-zA-Z][a-zA-Z0-9_.-]*$",
error_messages={'invalid':
_('Name must start with a letter and may '
'only contain letters, numbers, underscores, '
'periods and hyphens.')})
timeout_mins = forms.IntegerField(
initial=60,
label=_('Creation Timeout (minutes)'),
help_text=_('Stack creation timeout in minutes.'))
enable_rollback = forms.BooleanField(
label=_('Rollback On Failure'),
help_text=_('Enable rollback on create/update failure.'),
required=False)
def __init__(self, *args, **kwargs):
parameters = kwargs.pop('parameters')
# special case: load template data from API, not passed in params
if kwargs.get('validate_me'):
parameters = kwargs.pop('validate_me')
super(CreateStackForm, self).__init__(*args, **kwargs)
self._build_parameter_fields(parameters)
def _build_parameter_fields(self, template_validate):
self.fields['password'] = forms.CharField(
label=_('Password for user "%s"') % self.request.user.username,
help_text=_('This is required for operations to be performed '
'throughout the lifecycle of the stack'),
widget=forms.PasswordInput())
self.help_text = template_validate['Description']
params = template_validate.get('Parameters', {})
if template_validate.get('ParameterGroups'):
params_in_order = []
for group in template_validate['ParameterGroups']:
for param in group.get('parameters', []):
if param in params:
params_in_order.append((param, params[param]))
else:
# no parameter groups, simply sorted to make the order fixed
params_in_order = sorted(params.items())
for param_key, param in params_in_order:
field = None
field_key = self.param_prefix + param_key
field_args = {
'initial': param.get('Default', None),
'label': param.get('Label', param_key),
'help_text': param.get('Description', ''),
'required': param.get('Default', None) is None
}
param_type = param.get('Type', None)
hidden = strutils.bool_from_string(param.get('NoEcho', 'false'))
if 'AllowedValues' in param:
choices = map(lambda x: (x, x), param['AllowedValues'])
field_args['choices'] = choices
field = forms.ChoiceField(**field_args)
elif param_type in ('CommaDelimitedList', 'String', 'Json'):
if 'MinLength' in param:
field_args['min_length'] = int(param['MinLength'])
field_args['required'] = param.get('MinLength', 0) > 0
if 'MaxLength' in param:
field_args['max_length'] = int(param['MaxLength'])
if hidden:
field_args['widget'] = forms.PasswordInput()
field = forms.CharField(**field_args)
elif param_type == 'Number':
if 'MinValue' in param:
field_args['min_value'] = int(param['MinValue'])
if 'MaxValue' in param:
field_args['max_value'] = int(param['MaxValue'])
field = forms.IntegerField(**field_args)
# heat-api currently returns the boolean type in lowercase
# (see https://bugs.launchpad.net/heat/+bug/1361448)
# so for better compatibility both are checked here
elif param_type in ('Boolean', 'boolean'):
field = forms.BooleanField(**field_args)
if field:
self.fields[field_key] = field
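# Illustrative mapping (not in the original source; the parameter is hypothetical):
# a validated template parameter such as
#     'DBPassword': {'Type': 'String', 'MinLength': '1', 'NoEcho': 'true',
#                    'Description': 'Database password'}
# has no Default, so the loop above produces a required CharField stored as
# self.fields['__param_DBPassword'] with min_length=1, help text taken from the
# Description, and a PasswordInput widget because NoEcho is true.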
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'password': data.get('password')
}
if data.get('template_data'):
fields['template'] = data.get('template_data')
else:
fields['template_url'] = data.get('template_url')
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
try:
api.heat.stack_create(self.request, **fields)
messages.success(request, _("Stack creation started."))
return True
except Exception:
exceptions.handle(request)
class EditStackForm(CreateStackForm):
class Meta(object):
name = _('Update Stack Parameters')
stack_id = forms.CharField(
label=_('Stack ID'),
widget=forms.widgets.HiddenInput)
stack_name = forms.CharField(
label=_('Stack Name'),
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
stack_id = data.get('stack_id')
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'password': data.get('password')
}
# if the user went directly to this form, resubmit the existing
# template data. otherwise, submit what they had from the first form
if data.get('template_data'):
fields['template'] = data.get('template_data')
elif data.get('template_url'):
fields['template_url'] = data.get('template_url')
elif data.get('parameters'):
fields['template'] = data.get('parameters')
try:
api.heat.stack_update(self.request, stack_id=stack_id, **fields)
messages.success(request, _("Stack update started."))
return True
except Exception:
exceptions.handle(request)
|
|
#!/usr/bin/env python
'''
Controls or queries the web components of gpperfmon.
'''
import os
import stat
import sys
import signal
import time
import socket
import subprocess
import shutil
import ConfigParser
import re
import getpass
import psutil
from gppylib.db import dbconn
GPPERFMONHOME=os.getenv('GPPERFMONHOME')
if not GPPERFMONHOME:
sys.exit('ERROR: GPPERFMONHOME environment variable is not set. Please check that you have sourced gpperfmon_path.sh.')
sys.path.append(os.path.join(GPPERFMONHOME, 'lib', 'python'))
try:
from gppylib.gpparseopts import OptParser
from gppylib.gpparseopts import OptChecker
from gppylib.userinput import *
except ImportError, e:
sys.exit('ERROR: Cannot import modules. Please check that you have sourced gpperfmon_path.sh. Detail: ' + str(e))
script_name = os.path.split(__file__)[-1]
script_version = 'main build 29200'
# consts
STOPPED = 0
RUNNING = 1
STRANDED_GPMONWS = 2
STRANDED_LIGHT = 3
ONLY_LIGHT = 4
RESTART_SUCCESS = 1
RESTART_FAILED = 0
LIGHTY_BIN = os.path.join(GPPERFMONHOME, 'bin', 'lighttpd')
OPENSSL_BIN = os.path.join(GPPERFMONHOME, 'bin', 'openssl')
OPENSSL_CNF = os.path.join(GPPERFMONHOME, 'etc', 'openssl.cnf')
# Custom input validators
len_validator = lambda str, ignore1, ignore2: str if len(str) > 0 else None
len_nospace_validator = lambda str, ignore1, ignore2: str if len(str) > 0 and str.find(' ') == -1 else None
_usage = """{ --start | --stop | --restart | --status | --setup | --upgrade} ["instance name"]
"""
_description = ("""
Controls and configures the Greenplum Performance Monitor web server.
""")
_help = ("""
""")
#################
def version():
print '%s version %s' % (script_name, script_version)
print 'lighttpd version: %s' % lighty_version()
#################
def parse_command_line():
parser = OptParser(option_class=OptChecker,
description=' '.join(_description.split()))
parser.setHelp(_help)
parser.set_usage('%prog ' + _usage)
parser.remove_option('-h')
parser.add_option('--start', action='store_true',
help='Start the Greenplum Performance Monitor web server.')
parser.add_option('--stop', action='store_true',
help='Stop the Greenplum Performance Monitor web server.')
parser.add_option('--restart', action='store_true',
help='Restart the Greenplum Performance Monitor web server.')
parser.add_option('--status', action='store_true',
help='Display the status of the Greenplum Performance Monitor web server.')
parser.add_option('--setup', action='store_true',
help='Setup the Greenplum Performance Monitor web server.')
parser.add_option('--version', action='store_true',
help='Display version information')
parser.add_option('--upgrade', action='store_true',
help='Upgrade a previous installation of the Greenplum Performance Monitor web UI')
parser.set_defaults(verbose=False,filters=[], slice=(None, None))
# Parse the command line arguments
(options, args) = parser.parse_args()
if options.version:
version()
sys.exit(0)
# check for too many options
opt_count = 0
if options.start:
opt_count+=1
if options.stop:
opt_count+=1
if options.setup:
opt_count+=1
if options.upgrade:
opt_count+=1
if options.status:
opt_count+=1
if opt_count > 1:
parser.print_help()
parser.exit()
return options, args
#################
def start_application(cmd):
pid = os.fork()
if not pid:
os.execve('/bin/sh', ['sh', '-c', cmd], os.environ)
#################
def get_instance_info(instance):
instance_info = {}
instance_info['name'] = instance
instance_info['root_dir'] = os.path.join(GPPERFMONHOME, 'instances', instance)
instance_info['sessions_dir'] = os.path.join(GPPERFMONHOME, 'instances', instance, 'sessions')
instance_info['temp_dir'] = os.path.join(GPPERFMONHOME, 'instances', instance, 'tmp')
instance_info['log_dir'] = os.path.join(GPPERFMONHOME, 'instances', instance, 'logs')
instance_info['ui_conf_dir'] = os.path.join(GPPERFMONHOME, 'instances', instance, 'conf')
instance_info['ui_conf_file'] = os.path.join(GPPERFMONHOME, 'instances', instance, 'conf', 'gpperfmonui.conf')
instance_info['lighttpd_conf_file'] = os.path.join(GPPERFMONHOME, 'instances', instance, 'conf', 'lighttpd.conf')
instance_info['lighttpd_pid_file'] = os.path.join(GPPERFMONHOME, 'instances', instance, 'lighttpd.pid')
instance_info['web_root_dir'] = os.path.join(GPPERFMONHOME, 'instances', instance, 'web')
instance_info['web_lib_dir'] = os.path.join(GPPERFMONHOME, 'instances', instance, 'web', 'lib')
instance_info['web_static_dir'] = os.path.join(GPPERFMONHOME, 'instances', instance, 'web', 'static')
instance_info['web_templates_dir'] = os.path.join(GPPERFMONHOME, 'instances', instance, 'web', 'templates')
return instance_info
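# Illustrative layout (not in the original source): for an instance named
# 'default' the paths above resolve to
#     $GPPERFMONHOME/instances/default/conf/gpperfmonui.conf   (web UI config)
#     $GPPERFMONHOME/instances/default/conf/lighttpd.conf      (web server config)
#     $GPPERFMONHOME/instances/default/lighttpd.pid            (pid file)
#     $GPPERFMONHOME/instances/default/web/{lib,static,templates} (symlinks into www/)
# so each configured instance is self-contained under instances/<name>/.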
#################
def lighty_version():
ver_string = 'Unknown'
try:
FILE = os.popen('%s -v' % LIGHTY_BIN)
if FILE:
ver_string = FILE.readline().split(' ')[0]
FILE.close()
except:
pass
return ver_string
#################
def lighty_start(instance):
res = 0
cfg = ConfigParser.SafeConfigParser()
pgpass = dbconn.Pgpass()
if not pgpass.pgpass_valid():
print 'Error: .pgpass file not valid. Unable to start instance'
return 0
instance_info = get_instance_info(instance)
if lighty_status(instance) == STOPPED:
# Read in the config so we can set env for remote connections if needed
try:
# parse configuration file
cfg.readfp(open(instance_info['ui_conf_file']))
except:
print 'Error loading configuration file %s for instance %s' % (instance_info['ui_conf_file'], instance)
return 0
# save off original values
old_pghost = os.getenv('PGHOST', '')
old_pgport = os.getenv('PGPORT', '')
old_pgpassword = os.getenv('PGPASSWORD', '')
# see if there are values in conf file
master_hostname = ''
master_port = ''
###########################
try:
master_hostname = cfg.get('WEB APP', 'master_hostname')
except:
pass
if master_hostname == '':
if old_pghost:
master_hostname = old_pghost
else:
master_hostname = '127.0.0.1'
os.environ['PGHOST'] = master_hostname
###########################
###########################
try:
master_port = cfg.get('WEB APP', 'master_port')
except:
pass
if master_port == '':
if old_pgport:
master_port = old_pgport
else:
master_port = '5432'
os.environ['PGPORT'] = master_port
###########################
password = pgpass.get_password('gpmon', master_hostname, master_port, 'gpperfmon')
if not password:
password = old_pgpassword
os.environ['PGPASSWORD'] = password
start_application('%s -f "%s" -m "%s"' % (LIGHTY_BIN, instance_info['lighttpd_conf_file'], os.path.join(GPPERFMONHOME, 'lib')))
time.sleep(1)
# restore the original values
os.environ['PGHOST'] = old_pghost
os.environ['PGPORT'] = old_pgport
os.environ['PGPASSWORD'] = old_pgpassword
return lighty_status(instance)
#################
def lighty_stop(instance):
light_bin = "bin/lighttpd"
instance_conf = "instances/%s/conf/lighttpd.conf" % instance
instance_info = get_instance_info(instance)
status = lighty_status(instance)
if status == RUNNING or status == STRANDED_LIGHT or status == ONLY_LIGHT:
try:
for process in psutil.process_iter():
try:
process_args = process.cmdline()
except:
continue
# we do not have privilege to view/kill this process, so skip it
if len(process_args) < 3:
continue
if not re.search(light_bin, process_args[0]):
continue
if not re.search(instance_conf, process_args[2]):
continue
print "killing pid %d" % process.pid
os.kill(process.pid, signal.SIGTERM)
# lighty shuts down quickly, but give it a little bit of time.
time.sleep(1)
except:
return lighty_status(instance)
# clean up session info
try:
for dir in os.listdir(instance_info['sessions_dir']):
shutil.rmtree(os.path.join(instance_info['sessions_dir'], dir))
except Exception, msg:
pass
# clean up lighttpd's folder used for compression
try:
shutil.rmtree(os.path.join(instance_info['temp_dir'], 'lighttpd'))
except:
pass
return lighty_status(instance)
#################
def kill_orphaned_cgi_scripts():
"Search for any gpmonws.py that are stranded and do not belong to lighttpd"
for process in psutil.process_iter():
try:
process_args = process.cmdline()
except:
continue
# we do not have privilege to view/kill this process, so skip it
if len(process_args) >= 2:
if process_args[0] == 'python':
if re.search("gpmonws.py", process_args[1]):
if process.ppid() == 1:
print "Killing stranded gpmonws.py process with pid %d" % process.pid
os.kill(process.pid, signal.SIGTERM)
#################
def lighty_restart(instance):
if lighty_status(instance) == RUNNING:
status = lighty_stop(instance)
if status != STOPPED:
print 'Failed to stop gpperfmon instance %s during restart' % instance
return RESTART_FAILED
status = lighty_start(instance)
if status != RUNNING:
return RESTART_FAILED
return RESTART_SUCCESS
#################
def lighty_status(instance):
foundLighthttp = False
foundGpmonws = False
foundStrandedLight = False
foundStrandedPython = False
instance_conf = "instances/%s/conf/lighttpd.conf" % instance
instance_mon = "instances/%s/web/gpmonws.py" % instance
instance_info = get_instance_info(instance)
light_bin = "bin/lighttpd"
lightpid = 0
try:
FILE = open(instance_info['lighttpd_pid_file'], 'r')
lightpid = int(FILE.readline())
FILE.close()
except:
pass
try:
for process in psutil.process_iter():
try:
process_args = process.cmdline()
except:
continue
# we do not have privilege to view/kill this process, so skip it
if len(process_args) < 1:
continue
# lighttpd process
if re.search(light_bin, process_args[0]):
if lightpid != 0 and process.pid == lightpid:
foundLighthttp = True
elif len(process_args) >= 3:
if re.search(instance_conf, process_args[2]):
foundStrandedLight = True
# gpmonws.py process
elif re.search("python", process_args[0]):
if len(process_args) < 2:
continue
if re.search(instance_mon, process_args[1]):
if lightpid != 0 and process.ppid() == lightpid:
foundGpmonws = True
else:
foundStrandedPython = True
except:
pass
if foundStrandedLight:
return STRANDED_LIGHT
if foundStrandedPython:
return STRANDED_GPMONWS
if foundLighthttp and foundGpmonws:
return RUNNING
elif foundLighthttp:
return ONLY_LIGHT
else:
return STOPPED
#################
def webui_setup():
help = """An instance name is used by the Greenplum Performance monitor as
a way to uniquely identify a Greenplum Database that has the monitoring
components installed and configured. This name is also used to control
specific instances of the Greenplum Performance monitors web UI. Instance
names cannot contain spaces."""
instance_name = ask_input(help, 'Please enter a new instance name. Entering an existing\n'
'instance name will reconfigure that instance', '',
'default', len_nospace_validator, None)
instance_info = get_instance_info(instance_name)
reconfigure = os.path.exists(instance_info['root_dir'])
# defaults for webapi
server_name_default = '[server name to display]'
allow_trust_logon_default = '[no|yes] - setting to yes is insecure and only for testing'
config = ConfigParser.ConfigParser()
config.add_section('WEB APP')
config.set('WEB APP', '#allow_trust_logon', allow_trust_logon_default)
help = """The web component of the Greenplum Performance Monitor can connect to a
monitor database on a remote Greenplum Database."""
yn = ask_yesno(help, '\nIs the master host for the Greenplum Database remote?', 'N')
if yn:
config.set('WEB APP', 'remote', True)
master_hostname = ask_input(None, 'What is the hostname of the master', '', '', len_validator, None)
config.set('WEB APP', 'master_hostname', master_hostname)
config.set('WEB APP', 'server_name', master_hostname)
else:
help = """The display name is shown in the web interface and does not need to be
a hostname.
"""
display_name = ask_input(help, 'What would you like to use for the display name for this instance',
'', 'Greenplum Database', len_validator, None)
config.set('WEB APP', 'server_name', display_name)
pgport = int(os.getenv('PGPORT', 5432))
master_port = ask_int(None, 'What port does the Greenplum Database use?', '', pgport, 1, 65535)
config.set('WEB APP', 'master_port', master_port)
setup_instance_directory(instance_name)
enable_ssl = 'disable'
use_existing_cert = False
#TODO: check available ports?
lighty_port = 28080
#lighty conf
lighty_access_log = os.path.join(instance_info['log_dir'], 'lighttpd-access.log')
lighty_error_log = os.path.join(instance_info['log_dir'], 'lighttpd-error.log')
ipv6accesslog = os.path.join(instance_info['log_dir'], 'lighttpd-access-ipv6.log')
ipv6errorlog = os.path.join(instance_info['log_dir'], 'lighttpd-error-ipv6.log')
help = """The Greenplum Performance Monitor runs a small web server for the UI and web API.
This web server by default runs on port 28080, but you may specify any available port."""
lighty_port = ask_int(help, 'What port would you like the web server to use for this instance?', '', 28080, 1, 65534)
ssl_cert = ''
help = """Users logging in to the Performance Monitor must provide database user
credentials. In order to protect user names and passwords, it is recommended
that SSL be enabled."""
yn = ask_yesno(help, '\nDo you want to enable SSL for the Web API', 'Y')
if yn:
enable_ssl = 'enable'
if os.path.exists(os.path.join(instance_info['ui_conf_dir'], 'cert.pem')):
use_existing_cert = ask_yesno(None, '\nFound an existing SSL certificate. Do you want to use the existing certificate?', 'Y')
if enable_ssl == 'enable':
if not use_existing_cert:
ret = generate_cert(os.path.join(instance_info['ui_conf_dir'], 'cert.pem'))
if not ret:
print 'Failed to generate SSL certificate. SSL will be disabled.'
enable_ssl = 'disable'
ssl_cert = os.path.join(instance_info['ui_conf_dir'], 'cert.pem')
yn = ask_yesno(None, '\nDo you want to enable ipV6 for the Web API', 'N')
if yn:
useipv6 = True
else:
useipv6 = False
lighty_conf = generate_lighty_conf(instance_info, lighty_port, lighty_access_log, lighty_error_log,
os.path.join(instance_info['web_root_dir'], 'gpmonws.py'),
enable_ssl, ssl_cert, useipv6, ipv6accesslog, ipv6errorlog)
try:
FILE = open(instance_info['lighttpd_conf_file'], 'w')
FILE.writelines(lighty_conf)
FILE.close()
print '\nDone writing lighttpd configuration to %s' % instance_info['lighttpd_conf_file']
except IOError, (errno, errstr):
print 'Error: Failed to write lighttpd configuration file to %s' % instance_info['lighttpd_conf_file']
print errstr
sys.exit(1)
try:
FILE = open(instance_info['ui_conf_file'], 'w')
config.write(FILE)
FILE.close()
print 'Done writing web UI configuration to %s' % instance_info['ui_conf_file']
except IOError, (errno, errstr):
print 'Error: Failed to write gpperfmon configuration to %s' % instance_info['ui_conf_file']
print errstr
sys.exit(1)
webui_url = ''
if enable_ssl == 'enable':
webui_url = 'https://'
else:
webui_url = 'http://'
webui_url = webui_url + socket.gethostname() + ':' + str(lighty_port) + '/'
print '\nGreenplum Performance Monitor UI configuration is now complete. If'
print 'at a later date you want to change certain parameters, you can '
print 'either re-run \'gpperfmon --setup\' or edit the configuration file'
print 'located at ' + instance_info['ui_conf_file'] + '.'
if not reconfigure:
print '\nThe web UI for this instance is available at %s' % webui_url
print '\nYou can now start the web UI for this instance by running: gpperfmon --start ' + instance_info['name']
else:
print '\nRestarting web UI instance %s...' % instance_info['name']
status = lighty_restart(instance_info['name'])
if status == RESTART_SUCCESS:
print 'Done.'
print 'The web UI for this instance is available at %s' % webui_url
else:
print '\nThere was an error restarting web UI instance %s...' % instance_info['name']
#################
def setup_instance_directory(instance_name):
instance_info = get_instance_info(instance_name)
try:
os.mkdir(instance_info['root_dir'])
except OSError:
pass #dir exists
try:
os.mkdir(instance_info['sessions_dir'])
except OSError:
pass #dir exists
try:
os.mkdir(instance_info['temp_dir'])
except OSError:
pass #dir exists
try:
os.mkdir(instance_info['log_dir'])
except OSError:
pass #dir exists
try:
os.mkdir(instance_info['ui_conf_dir'])
except OSError:
pass #dir exists
try:
os.mkdir(instance_info['web_root_dir'])
except OSError:
pass #dir exists
try:
if os.path.islink(instance_info['web_lib_dir']):
os.unlink(instance_info['web_lib_dir'])
os.symlink(os.path.join(GPPERFMONHOME, 'www', 'lib'), instance_info['web_lib_dir'])
except OSError, detail:
print 'Error linking www/lib directory: %s' % detail
sys.exit(1)
try:
if os.path.islink(instance_info['web_static_dir']):
os.unlink(instance_info['web_static_dir'])
os.symlink(os.path.join(GPPERFMONHOME, 'www', 'static'), instance_info['web_static_dir'])
except OSError, detail:
print 'Error linking www/static directory: %s' % detail
sys.exit(1)
try:
if os.path.islink(instance_info['web_templates_dir']):
os.unlink(instance_info['web_templates_dir'])
os.symlink(os.path.join(GPPERFMONHOME, 'www', 'templates'), instance_info['web_templates_dir'])
except OSError, detail:
print 'Error linking www/templates directory: %s' % detail
sys.exit(1)
try:
if os.path.islink(os.path.join(instance_info['web_root_dir'], 'gpmonws.py')):
os.unlink(os.path.join(instance_info['web_root_dir'], 'gpmonws.py'))
os.symlink(os.path.join(GPPERFMONHOME,'www', 'gpmonws.py'), os.path.join(instance_info['web_root_dir'], 'gpmonws.py'))
except OSError, detail:
print 'Error linking www/gpmonws.py: %s' % detail
sys.exit(1)
#################
def generate_lighty_conf(instance_info, port, lighty_access_log,
lighty_err_log, webpybin, usessl, certpath, useipv6, ipv6accesslog, ipv6errorlog):
# TODO: by instance
fcgisocket = os.path.join(instance_info['root_dir'], 'perfmon.fastcgi.socket')
fileString = '''server.modules = (
"mod_rewrite",
"mod_fastcgi",
"mod_compress",
"mod_accesslog" )
server.document-root = "%s"
server.pid-file = "%s"
server.errorlog = "%s"
mimetype.assign = (
".pdf" => "application/pdf",
".sig" => "application/pgp-signature",
".spl" => "application/futuresplash",
".class" => "application/octet-stream",
".ps" => "application/postscript",
".torrent" => "application/x-bittorrent",
".dvi" => "application/x-dvi",
".gz" => "application/x-gzip",
".pac" => "application/x-ns-proxy-autoconfig",
".swf" => "application/x-shockwave-flash",
".tar.gz" => "application/x-tgz",
".tgz" => "application/x-tgz",
".tar" => "application/x-tar",
".zip" => "application/zip",
".mp3" => "audio/mpeg",
".m3u" => "audio/x-mpegurl",
".wma" => "audio/x-ms-wma",
".wax" => "audio/x-ms-wax",
".ogg" => "application/ogg",
".wav" => "audio/x-wav",
".gif" => "image/gif",
".jpg" => "image/jpeg",
".jpeg" => "image/jpeg",
".png" => "image/png",
".xbm" => "image/x-xbitmap",
".xpm" => "image/x-xpixmap",
".xwd" => "image/x-xwindowdump",
".css" => "text/css",
".html" => "text/html",
".htm" => "text/html",
".js" => "text/javascript",
".asc" => "text/plain",
".c" => "text/plain",
".cpp" => "text/plain",
".log" => "text/plain",
".conf" => "text/plain",
".text" => "text/plain",
".txt" => "text/plain",
".dtd" => "text/xml",
".xml" => "text/xml",
".mpeg" => "video/mpeg",
".mpg" => "video/mpeg",
".mov" => "video/quicktime",
".qt" => "video/quicktime",
".avi" => "video/x-msvideo",
".asf" => "video/x-ms-asf",
".asx" => "video/x-ms-asf",
".wmv" => "video/x-ms-wmv",
".bz2" => "application/x-bzip",
".tbz" => "application/x-bzip-compressed-tar",
".tar.bz2" => "application/x-bzip-compressed-tar"
)
accesslog.filename = "%s"
$HTTP["url"] =~ "\.pdf$" {
server.range-requests = "disable"
}
static-file.exclude-extensions = ( ".php", ".pl", ".fcgi", ".py" )
server.port = %d
compress.cache-dir = "%s"
compress.filetype = ("text/plain", "text/html", "text/xml")
fastcgi.server = ( "/gpmonws.py" =>
((
"socket" => "%s",
"bin-path" => "%s",
"max-procs" => 1,
"bin-environment" => (
"REAL_SCRIPT_NAME" => ""
),
"check-local" => "disable"
))
)
url.rewrite-once = (
"^/favicon.ico$" => "/static/favicon.ico",
"^/static/(.*)$" => "/static/$1",
"^/(.*)$" => "/gpmonws.py/$1",
)
''' % (instance_info['web_root_dir'],
instance_info['lighttpd_pid_file'],
lighty_err_log, lighty_access_log,
port, instance_info['temp_dir'],
fcgisocket, webpybin)
if useipv6:
fileString += '''
$SERVER["socket"] == "[::]:%d" {
accesslog.filename = "%s"
server.errorlog = "%s"
}
''' % (port, ipv6accesslog, ipv6errorlog)
if usessl == 'enable':
fileString += '''
#### SSL engine
ssl.engine = "%s"
ssl.pemfile = "%s"
''' % (usessl, certpath)
return fileString
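# Illustrative note (not in the original source): the generated configuration
# serves /static/ and /favicon.ico directly, while the url.rewrite-once rules
# funnel every other request path through the /gpmonws.py FastCGI handler, so
# the Python web application behind gpmonws.py serves all dynamic pages.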
#################
def webui_upgrade():
master_data_directory = os.getenv('MASTER_DATA_DIRECTORY')
if not master_data_directory:
print 'Error - MASTER_DATA_DIRECTORY environment variable not set.'
sys.exit(1)
if not os.path.exists(os.path.join(master_data_directory, 'gpperfmon')):
print 'Error - Unable to locate a previously installed version of gpperfmon.'
print ' The gpperfmon directory does not exist in the MASTER_DATA_DIRECTORY'
sys.exit(1)
gpperfmon_loc = os.path.dirname(os.path.abspath(os.path.join(sys.argv[0], '..')))
if gpperfmon_loc != GPPERFMONHOME:
print 'Error - GPPERFMONHOME does not match the location of the new version'
print ' Make sure that gpperfmon_path.sh from the new installation'
print ' has been sourced correctly and try running the upgrade again.'
sys.exit(1)
# setup a new instance
setup_instance_directory('default')
instance_info = get_instance_info('default')
# copy needed file from previous installation
gpperfmon_dir = os.path.join(master_data_directory, 'gpperfmon')
try:
shutil.rmtree(instance_info['ui_conf_dir'])
shutil.copytree(os.path.join(gpperfmon_dir, 'conf'), instance_info['ui_conf_dir'])
except Exception, msg:
print 'Error - Failed to copy configuration directory from previous installation:'
print ' %s' % msg
sys.exit(1)
# Rewrite the copied lighttpd.conf for the new layout: collapse duplicate
# slashes, replace paths under the old gpperfmon directory with the new
# instance root, and point the fastcgi socket at the instance directory;
# the result is written to lighttpd.conf.new and moved into place below.
sed_cmd = 'sed "s/\/\/*/\//g" %s | sed "s/%s/%s/g" | sed "s/\/tmp.*fastcgi.socket/%s/g" > %s' % (
instance_info['lighttpd_conf_file'],
gpperfmon_dir.replace('/', '\/'),
instance_info['root_dir'].replace('/', '\/'),
os.path.join(instance_info['root_dir'], 'perfmon.fastcgi.socket').replace('/', '\/'),
instance_info['lighttpd_conf_file'] + '.new')
ret = os.system(sed_cmd)
if ret:
print 'Error - There was an error updating the lighttpd configuration file.'
sys.exit(1)
shutil.move(os.path.join(instance_info['ui_conf_dir'], 'lighttpd.conf.new'), instance_info['lighttpd_conf_file'])
# in this version the conf file name has changed.
shutil.move(os.path.join(instance_info['ui_conf_dir'], 'gpperfmon.conf'), instance_info['ui_conf_file'])
# cleanup unneeded files from the MASTER_DATA_DIRECTORY
try:
os.unlink(os.path.join(master_data_directory, 'gpperfmon', 'conf', 'lighttpd.conf'))
except Exception, msg:
print 'Warning - Error while trying to delete %s' % os.path.join(master_data_directory, 'gpperfmon', 'conf', 'lighttpd.conf')
print msg
try:
os.unlink(os.path.join(master_data_directory, 'gpperfmon', 'conf', 'cert.pem'))
except Exception, msg:
print 'Warning - could not delete %s' % os.path.join(master_data_directory, 'gpperfmon', 'conf', 'cert.pem')
print msg
print 'Upgrade finished. The Greenplum Performance Monitor web UI can be started'
print 'by running gpperfmon \'--start\'.'
#################
def generate_cert(destfile):
cert_gen_cmd = '%s req -config %s -new -x509 -keyout %s -out %s -days 3650 -nodes' % (OPENSSL_BIN, OPENSSL_CNF, destfile, destfile)
res = os.system(cert_gen_cmd)
return (res == 0)
#################
def get_instances():
instances = []
instance_dir = os.path.join(GPPERFMONHOME, 'instances')
for item in os.listdir(instance_dir):
if os.path.isdir(os.path.join(instance_dir, item)):
instance_info = get_instance_info(item)
if os.path.isfile(instance_info['ui_conf_file']):
instances.append(item)
return instances
#################
# script begin
currentUser = getpass.getuser()
if currentUser == "root":
print "This utility can not be run as 'root'"
sys.exit(1)
# read in instances that have been configured
valid_instances = get_instances()
# parse the command line
options, instances = parse_command_line()
# if we weren't given a specific instance, do the op on all of them.
if not instances:
instances = valid_instances
# validate the instance names given
for instance in instances:
if instance not in valid_instances:
print 'Error: %s is an invalid instance name' % instance
sys.exit(1)
try:
if options.start:
# look for stranded gpmonws.py processes
kill_orphaned_cgi_scripts()
for instance in instances:
print 'Starting instance %s...' % instance,
sys.stdout.flush()
result = lighty_start(instance)
if result == RUNNING:
print 'Done.'
else:
print 'Failed to start gpperfmon instance %s' % instance
elif options.stop:
# look for stranded gpmonws.py processes
kill_orphaned_cgi_scripts()
for instance in instances:
print 'Stopping instance %s...' % instance,
sys.stdout.flush()
result = lighty_stop(instance)
if result == STOPPED:
print 'Done.'
else:
print 'Failed to stop gpperfmon instance %s' % instance
elif options.restart:
# look for stranded gpmonws.py processes
kill_orphaned_cgi_scripts()
for instance in instances:
print 'Restarting instance %s...' % instance,
sys.stdout.flush()
status = lighty_restart(instance)
if status == RESTART_SUCCESS:
print 'Done.'
else:
print 'There was an error restarting instance %s.' % instance
# look for stranded gpmonws.py processes
kill_orphaned_cgi_scripts()
elif options.status:
for instance in instances:
status = lighty_status(instance)
if status == RUNNING:
print 'Greenplum Performance Monitor UI for instance \'%s\' - [RUNNING]' % instance
elif status == STRANDED_GPMONWS:
print 'Greenplum Performance Monitor UI for instance \'%s\' - [INCONSISTENT: Stranded gpmonws.py process found]' % instance
elif status == STRANDED_LIGHT:
print 'Greenplum Performance Monitor UI for instance \'%s\' - [INCONSISTENT: Stranded lighttpd process found]' % instance
elif status == ONLY_LIGHT:
print 'Greenplum Performance Monitor UI for instance \'%s\' - [INCONSISTENT: gpmonws.py not found]' % instance
else:
print 'Greenplum Performance Monitor UI for instance \'%s\' - [STOPPED]' % instance
elif options.setup:
webui_setup()
elif options.upgrade:
webui_upgrade()
except KeyboardInterrupt:
sys.exit('User canceled')
|
|
#!/usr/bin/python3
'''Manages virtual machines.'''
import argparse
import getpass
import json
import logging
import os.path
import shlex
import subprocess
import sys
from typing import Any, List, Mapping, Optional
def _run(args: List[str],
check: bool = True) -> 'subprocess.CompletedProcess[str]':
'''A small subprocess.run wrapper with logging.'''
logging.debug('Running %s', ' '.join(shlex.quote(arg) for arg in args))
result = subprocess.run(['azure'] + args,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
universal_newlines=True,
shell=False,
check=check)
logging.debug('Result %d: %s', result.returncode, result.stdout)
return result
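# Illustrative (not in the original source): every Azure helper below shells out
# through _run, which prepends 'azure' (the pre-`az` cross-platform CLI, judging
# by the command names and the --json flag). With placeholder subscription 'SUB'
# and location 'westus', for example,
#     Azure('SUB', 'omegaup-v2-runner', 'westus').network_nsg_show()
# runs roughly
#     azure network nsg show --json --subscription SUB \
#         --resource-group omegaup-v2-runner --name omegaup-v2-runner-westus-nsg
# and parses the JSON printed on stdout.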
class Azure:
'''Abstracts away the Azure CLI API.'''
def __init__(self, subscription: str, resource_group: str, location: str):
self._subscription = subscription
self._resource_group = resource_group
self._location = location
def _nsg_name(self) -> str:
'''Returns the network security group name.'''
return '%s-%s-nsg' % (self._resource_group, self._location)
def _vnet_name(self) -> str:
'''Returns the virtual network name.'''
return '%s-%s-vnet' % (self._resource_group, self._location)
def _nic_name(self, vm_name: str) -> str:
'''Returns the network interface card name.'''
# pylint: disable=no-self-use
return '%s-nic' % vm_name
def network_nsg_show(self) -> Optional[Mapping[str, Any]]:
'''Returns the network security group information.'''
result = _run([
'network', 'nsg', 'show', '--json', '--subscription',
self._subscription, '--resource-group', self._resource_group,
'--name',
self._nsg_name()
])
if result.returncode != 0:
return None
show_result: Mapping[str, Any] = json.loads(result.stdout)
return show_result
def network_nsg_create(self) -> Mapping[str, Any]:
'''Creates a network security group.'''
result: Mapping[str, Any] = json.loads(
_run([
'network', 'nsg', 'create', '--json', '--subscription',
self._subscription, '--resource-group', self._resource_group,
'--location', self._location, '--name',
self._nsg_name()
]).stdout)
return result
def network_nsg_rule_create(self, protocol: str, port: int,
priority: int) -> Any:
'''Creates a network security group rule.'''
return json.loads(
_run([
'network', 'nsg', 'rule', 'create', '--json', '--subscription',
self._subscription, '--resource-group', self._resource_group,
'--nsg-name',
self._nsg_name(), '--name',
'allow-%s-port-%d' % (protocol, port), '--protocol', protocol,
'--destination-port-range',
str(port), '--priority',
str(priority)
]).stdout)
def network_vnet_show(self) -> Optional[Mapping[str, Any]]:
'''Returns the virtual network information.'''
result = _run([
'network', 'vnet', 'show', '--json', '--subscription',
self._subscription, '--resource-group', self._resource_group,
'--name',
self._vnet_name()
])
if result.returncode != 0:
return None
show_result: Mapping[str, Any] = json.loads(result.stdout)
return show_result
def network_vnet_create(self) -> Mapping[str, Any]:
'''Creates a virtual network.'''
result: Mapping[str, Any] = json.loads(
_run([
'network', 'vnet', 'create', '--json', '--subscription',
self._subscription, '--resource-group', self._resource_group,
'--location', self._location, '--name',
self._vnet_name()
]).stdout)
return result
def network_vnet_subnet_create(self) -> Any:
'''Creates a virtual network subnet.'''
return json.loads(
_run([
'network', 'vnet', 'subnet', 'create', '--json',
'--subscription', self._subscription, '--resource-group',
self._resource_group, '--vnet-name',
self._vnet_name(), '--name', 'default', '--address-prefix',
'10.0.0.0/24'
]).stdout)
def network_nic_show(self, vm_name: str) -> Optional[Mapping[str, Any]]:
'''Returns the network interface card information.'''
result: Optional[Mapping[str, Any]] = json.loads(
_run([
'network', 'nic', 'show', '--json', '--subscription',
self._subscription, '--resource-group', self._resource_group,
'--name',
'%s-nic' % vm_name
]).stdout)
return result
def network_nic_create(self, vm_name: str) -> Any:
'''Creates a network interface card.'''
return json.loads(
_run([
'network', 'nic', 'create', '--json', '--subscription',
self._subscription, '--resource-group', self._resource_group,
'--location', self._location, '--name',
self._nic_name(vm_name), '--subnet-vnet-name',
self._vnet_name(), '--subnet-name', 'default',
'--network-security-group-name',
self._nsg_name()
]).stdout)
def vm_list(self) -> List[Mapping[str, Any]]:
'''Lists the virtual machines.'''
result: List[Mapping[str, Any]] = json.loads(
_run(
['vm', 'list', '--json', '--subscription',
self._subscription]).stdout)
return result
def vm_show(self, vm_name: str) -> Optional[Mapping[str, Any]]:
'''Returns the virtual machine information.'''
result: Optional[Mapping[str, Any]] = json.loads(
_run([
'vm', 'show', '--json', '--subscription', self._subscription,
'--resource-group', self._resource_group, '--name', vm_name
]).stdout)
return result
# pylint: disable=too-many-arguments
def vm_create(self,
vm_name: str,
admin_username: str,
ssh_publickey_file: str,
os_type: str = 'Linux',
image_urn: str = 'Canonical:UbuntuServer:16.04-LTS:latest',
vm_size: str = 'Standard_A1_v2') -> None:
'''Creates a virtual machine.'''
_run(
[
'vm', 'create', '--json', '--subscription', self._subscription,
'--resource-group', self._resource_group, '--location',
self._location, '--name', vm_name, '--admin-username',
admin_username, '--ssh-publickey-file', ssh_publickey_file,
'--nic-name',
self._nic_name(vm_name), '--public-ip-name', vm_name,
'--public-ip-domain-name', vm_name, '--os-type', os_type,
'--image-urn', image_urn, '--vm-size', vm_size
],
check=True,
)
def vm_destroy(self, vm_name: str) -> None:
'''Destroys a virtual machine.'''
_run(
[
'vm', 'destroy', '--json', '--subscription',
self._subscription, '--resource-group', self._resource_group,
'--location', self._location, '--name', vm_name
],
check=True,
)
def _deploy(azure: Azure, args: argparse.Namespace) -> None:
deploy_runner_args = [
os.path.join(os.path.dirname(sys.argv[0]), 'deploy_runner.py')
]
if args.verbose:
deploy_runner_args.append('--verbose')
runner_hostname = '%s.%s.cloudapp.azure.com' % (args.vm_name,
args.location)
vm = azure.vm_show(args.vm_name)
if not vm:
nsg = azure.network_nsg_show()
if not nsg:
nsg = azure.network_nsg_create()
missing_ports = set(args.ports)
for rule in nsg['securityRules']:
missing_ports.remove(
'%s:%s:%s' % (rule['protocol'].lower(),
rule['destinationPortRange'], rule['priority']))
for port in missing_ports:
protocol, port, priority = port.split(':')
azure.network_nsg_rule_create(protocol, int(port), int(priority))
vnet = azure.network_vnet_show()
if not vnet:
vnet = azure.network_vnet_create()
if not vnet['subnets']:
azure.network_vnet_subnet_create()
nic = azure.network_nic_show(args.vm_name)
if not nic:
azure.network_nic_create(args.vm_name)
azure.vm_create(args.vm_name, args.username, args.pubkey_file)
# Remove any old SSH keys associated with that hostname.
subprocess.check_call([
'/usr/bin/ssh-keygen', '-f',
os.path.expanduser('~/.ssh/known_hosts'), '-R', runner_hostname
])
# And accept the new SSH key.
subprocess.check_call([
'/usr/bin/ssh', '-o', 'StrictHostKeyChecking=no', runner_hostname,
'/bin/true'
])
deploy_runner_args.append('--upgrade')
deploy_runner_args.extend(['--certroot', args.certroot, runner_hostname])
subprocess.check_call(deploy_runner_args)
def _destroy(azure: Azure, args: argparse.Namespace) -> None:
vm = azure.vm_show(args.vm_name)
if vm:
azure.vm_destroy(args.vm_name)
def main() -> None:
'''Main entrypoint.'''
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--subscription', required=True)
parser.add_argument('--resource-group', default='omegaup-v2-runner')
subparsers = parser.add_subparsers(dest='command')
deploy = subparsers.add_parser('deploy')
deploy.add_argument('--username', default=getpass.getuser())
deploy.add_argument('--port',
dest='ports',
metavar='PORT',
nargs='+',
default=['tcp:22:1000', 'tcp:6060:1010'])
deploy.add_argument('--pubkey-file',
default=os.path.expanduser('~/.ssh/azure.pub'))
deploy.add_argument('--certroot', required=True)
deploy.add_argument('location')
deploy.add_argument('vm_name', metavar='vm-name')
destroy = subparsers.add_parser('destroy')
destroy.add_argument('location')
destroy.add_argument('vm_name', metavar='vm-name')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
azure = Azure(args.subscription, args.resource_group, args.location)
if args.command == 'deploy':
_deploy(azure, args)
elif args.command == 'destroy':
_destroy(azure, args)
if __name__ == '__main__':
main()
# vim: expandtab shiftwidth=4 tabstop=4
|
|
"""Support for LIFX lights."""
import asyncio
from datetime import timedelta
from functools import partial
import logging
import math
import aiolifx as aiolifx_module
import aiolifx_effects as aiolifx_effects_module
import voluptuous as vol
from homeassistant import util
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_NAME,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
ATTR_XY_COLOR,
COLOR_GROUP,
DOMAIN,
LIGHT_TURN_ON_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_TRANSITION,
VALID_BRIGHTNESS,
VALID_BRIGHTNESS_PCT,
LightEntity,
preprocess_turn_on_alternatives,
)
from homeassistant.const import ATTR_ENTITY_ID, ATTR_MODE, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.event import async_track_point_in_utc_time
import homeassistant.util.color as color_util
from . import (
CONF_BROADCAST,
CONF_PORT,
CONF_SERVER,
DATA_LIFX_MANAGER,
DOMAIN as LIFX_DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
DISCOVERY_INTERVAL = 60
MESSAGE_TIMEOUT = 1.0
MESSAGE_RETRIES = 8
UNAVAILABLE_GRACE = 90
SERVICE_LIFX_SET_STATE = "set_state"
ATTR_INFRARED = "infrared"
ATTR_ZONES = "zones"
ATTR_POWER = "power"
LIFX_SET_STATE_SCHEMA = cv.make_entity_service_schema(
{
**LIGHT_TURN_ON_SCHEMA,
ATTR_INFRARED: vol.All(vol.Coerce(int), vol.Clamp(min=0, max=255)),
ATTR_ZONES: vol.All(cv.ensure_list, [cv.positive_int]),
ATTR_POWER: cv.boolean,
}
)
SERVICE_EFFECT_PULSE = "effect_pulse"
SERVICE_EFFECT_COLORLOOP = "effect_colorloop"
SERVICE_EFFECT_STOP = "effect_stop"
ATTR_POWER_ON = "power_on"
ATTR_PERIOD = "period"
ATTR_CYCLES = "cycles"
ATTR_SPREAD = "spread"
ATTR_CHANGE = "change"
PULSE_MODE_BLINK = "blink"
PULSE_MODE_BREATHE = "breathe"
PULSE_MODE_PING = "ping"
PULSE_MODE_STROBE = "strobe"
PULSE_MODE_SOLID = "solid"
PULSE_MODES = [
PULSE_MODE_BLINK,
PULSE_MODE_BREATHE,
PULSE_MODE_PING,
PULSE_MODE_STROBE,
PULSE_MODE_SOLID,
]
LIFX_EFFECT_SCHEMA = {
vol.Optional(ATTR_POWER_ON, default=True): cv.boolean,
}
LIFX_EFFECT_PULSE_SCHEMA = cv.make_entity_service_schema(
{
**LIFX_EFFECT_SCHEMA,
ATTR_BRIGHTNESS: VALID_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT: VALID_BRIGHTNESS_PCT,
vol.Exclusive(ATTR_COLOR_NAME, COLOR_GROUP): cv.string,
vol.Exclusive(ATTR_RGB_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_XY_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.small_float, cv.small_float)), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_HS_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence(
(
vol.All(vol.Coerce(float), vol.Range(min=0, max=360)),
vol.All(vol.Coerce(float), vol.Range(min=0, max=100)),
)
),
vol.Coerce(tuple),
),
vol.Exclusive(ATTR_COLOR_TEMP, COLOR_GROUP): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Exclusive(ATTR_KELVIN, COLOR_GROUP): cv.positive_int,
ATTR_PERIOD: vol.All(vol.Coerce(float), vol.Range(min=0.05)),
ATTR_CYCLES: vol.All(vol.Coerce(float), vol.Range(min=1)),
ATTR_MODE: vol.In(PULSE_MODES),
}
)
LIFX_EFFECT_COLORLOOP_SCHEMA = cv.make_entity_service_schema(
{
**LIFX_EFFECT_SCHEMA,
ATTR_BRIGHTNESS: VALID_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT: VALID_BRIGHTNESS_PCT,
ATTR_PERIOD: vol.All(vol.Coerce(float), vol.Clamp(min=0.05)),
ATTR_CHANGE: vol.All(vol.Coerce(float), vol.Clamp(min=0, max=360)),
ATTR_SPREAD: vol.All(vol.Coerce(float), vol.Clamp(min=0, max=360)),
ATTR_TRANSITION: cv.positive_float,
}
)
LIFX_EFFECT_STOP_SCHEMA = cv.make_entity_service_schema({})
def aiolifx():
"""Return the aiolifx module."""
return aiolifx_module
def aiolifx_effects():
"""Return the aiolifx_effects module."""
return aiolifx_effects_module
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the LIFX light platform. Obsolete."""
_LOGGER.warning("LIFX no longer works with light platform configuration")
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up LIFX from a config entry."""
# Priority 1: manual config
interfaces = hass.data[LIFX_DOMAIN].get(DOMAIN)
if not interfaces:
# Priority 2: scanned interfaces
lifx_ip_addresses = await aiolifx().LifxScan(hass.loop).scan()
interfaces = [{CONF_SERVER: ip} for ip in lifx_ip_addresses]
if not interfaces:
# Priority 3: default interface
interfaces = [{}]
platform = entity_platform.async_get_current_platform()
lifx_manager = LIFXManager(hass, platform, async_add_entities)
hass.data[DATA_LIFX_MANAGER] = lifx_manager
for interface in interfaces:
lifx_manager.start_discovery(interface)
return True
def lifx_features(bulb):
"""Return a feature map for this bulb, or a default map if unknown."""
return aiolifx().products.features_map.get(
bulb.product
) or aiolifx().products.features_map.get(1)
def find_hsbk(hass, **kwargs):
"""Find the desired color from a number of possible inputs."""
hue, saturation, brightness, kelvin = [None] * 4
preprocess_turn_on_alternatives(hass, kwargs)
if ATTR_HS_COLOR in kwargs:
hue, saturation = kwargs[ATTR_HS_COLOR]
elif ATTR_RGB_COLOR in kwargs:
hue, saturation = color_util.color_RGB_to_hs(*kwargs[ATTR_RGB_COLOR])
elif ATTR_XY_COLOR in kwargs:
hue, saturation = color_util.color_xy_to_hs(*kwargs[ATTR_XY_COLOR])
if hue is not None:
hue = int(hue / 360 * 65535)
saturation = int(saturation / 100 * 65535)
kelvin = 3500
if ATTR_COLOR_TEMP in kwargs:
kelvin = int(
color_util.color_temperature_mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
)
saturation = 0
if ATTR_BRIGHTNESS in kwargs:
brightness = convert_8_to_16(kwargs[ATTR_BRIGHTNESS])
hsbk = [hue, saturation, brightness, kelvin]
return None if hsbk == [None] * 4 else hsbk
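# Illustrative example (not in the original source): a call with
# hs_color=(120, 50) and no brightness or color_temp yields
#     hsbk == [21845, 32767, None, 3500]
# i.e. hue and saturation scaled to LIFX's 16-bit range, brightness left as None
# (keep the current level), and the 3500 K default kelvin for a saturated color.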
def merge_hsbk(base, change):
"""Copy change on top of base, except when None."""
if change is None:
return None
return [b if c is None else c for b, c in zip(base, change)]
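# Illustrative example (not in the original source):
#     merge_hsbk([21845, 32767, 65535, 3500], [None, 0, None, 4000])
# returns [21845, 0, 65535, 4000]: only the non-None entries of `change`
# override the bulb's current state.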
class LIFXManager:
"""Representation of all known LIFX entities."""
def __init__(self, hass, platform, async_add_entities):
"""Initialize the light."""
self.entities = {}
self.hass = hass
self.platform = platform
self.async_add_entities = async_add_entities
self.effects_conductor = aiolifx_effects().Conductor(hass.loop)
self.discoveries = []
self.cleanup_unsub = self.hass.bus.async_listen(
EVENT_HOMEASSISTANT_STOP, self.cleanup
)
self.register_set_state()
self.register_effects()
def start_discovery(self, interface):
"""Start discovery on a network interface."""
kwargs = {"discovery_interval": DISCOVERY_INTERVAL}
broadcast_ip = interface.get(CONF_BROADCAST)
if broadcast_ip:
kwargs["broadcast_ip"] = broadcast_ip
lifx_discovery = aiolifx().LifxDiscovery(self.hass.loop, self, **kwargs)
kwargs = {}
listen_ip = interface.get(CONF_SERVER)
if listen_ip:
kwargs["listen_ip"] = listen_ip
listen_port = interface.get(CONF_PORT)
if listen_port:
kwargs["listen_port"] = listen_port
lifx_discovery.start(**kwargs)
self.discoveries.append(lifx_discovery)
@callback
def cleanup(self, event=None):
"""Release resources."""
self.cleanup_unsub()
for discovery in self.discoveries:
discovery.cleanup()
for service in (
SERVICE_LIFX_SET_STATE,
SERVICE_EFFECT_STOP,
SERVICE_EFFECT_PULSE,
SERVICE_EFFECT_COLORLOOP,
):
self.hass.services.async_remove(LIFX_DOMAIN, service)
def register_set_state(self):
"""Register the LIFX set_state service call."""
self.platform.async_register_entity_service(
SERVICE_LIFX_SET_STATE, LIFX_SET_STATE_SCHEMA, "set_state"
)
def register_effects(self):
"""Register the LIFX effects as hass service calls."""
async def service_handler(service):
"""Apply a service, i.e. start an effect."""
entities = await self.platform.async_extract_from_service(service)
if entities:
await self.start_effect(entities, service.service, **service.data)
self.hass.services.async_register(
LIFX_DOMAIN,
SERVICE_EFFECT_PULSE,
service_handler,
schema=LIFX_EFFECT_PULSE_SCHEMA,
)
self.hass.services.async_register(
LIFX_DOMAIN,
SERVICE_EFFECT_COLORLOOP,
service_handler,
schema=LIFX_EFFECT_COLORLOOP_SCHEMA,
)
self.hass.services.async_register(
LIFX_DOMAIN,
SERVICE_EFFECT_STOP,
service_handler,
schema=LIFX_EFFECT_STOP_SCHEMA,
)
async def start_effect(self, entities, service, **kwargs):
"""Start a light effect on entities."""
bulbs = [light.bulb for light in entities]
if service == SERVICE_EFFECT_PULSE:
effect = aiolifx_effects().EffectPulse(
power_on=kwargs.get(ATTR_POWER_ON),
period=kwargs.get(ATTR_PERIOD),
cycles=kwargs.get(ATTR_CYCLES),
mode=kwargs.get(ATTR_MODE),
hsbk=find_hsbk(self.hass, **kwargs),
)
await self.effects_conductor.start(effect, bulbs)
elif service == SERVICE_EFFECT_COLORLOOP:
preprocess_turn_on_alternatives(self.hass, kwargs)
brightness = None
if ATTR_BRIGHTNESS in kwargs:
brightness = convert_8_to_16(kwargs[ATTR_BRIGHTNESS])
effect = aiolifx_effects().EffectColorloop(
power_on=kwargs.get(ATTR_POWER_ON),
period=kwargs.get(ATTR_PERIOD),
change=kwargs.get(ATTR_CHANGE),
spread=kwargs.get(ATTR_SPREAD),
transition=kwargs.get(ATTR_TRANSITION),
brightness=brightness,
)
await self.effects_conductor.start(effect, bulbs)
elif service == SERVICE_EFFECT_STOP:
await self.effects_conductor.stop(bulbs)
@callback
def register(self, bulb):
"""Handle aiolifx detected bulb."""
self.hass.async_create_task(self.register_new_bulb(bulb))
async def register_new_bulb(self, bulb):
"""Handle newly detected bulb."""
if bulb.mac_addr in self.entities:
entity = self.entities[bulb.mac_addr]
entity.registered = True
_LOGGER.debug("%s register AGAIN", entity.who)
await entity.update_hass()
else:
_LOGGER.debug("%s register NEW", bulb.ip_addr)
# Read initial state
ack = AwaitAioLIFX().wait
# Used to populate sw_version
# no need to wait as we do not
# need it until later
bulb.get_hostfirmware()
color_resp = await ack(bulb.get_color)
if color_resp:
version_resp = await ack(bulb.get_version)
if color_resp is None or version_resp is None:
_LOGGER.error("Failed to initialize %s", bulb.ip_addr)
bulb.registered = False
else:
bulb.timeout = MESSAGE_TIMEOUT
bulb.retry_count = MESSAGE_RETRIES
bulb.unregister_timeout = UNAVAILABLE_GRACE
if lifx_features(bulb)["multizone"]:
entity = LIFXStrip(bulb, self.effects_conductor)
elif lifx_features(bulb)["color"]:
entity = LIFXColor(bulb, self.effects_conductor)
else:
entity = LIFXWhite(bulb, self.effects_conductor)
_LOGGER.debug("%s register READY", entity.who)
self.entities[bulb.mac_addr] = entity
self.async_add_entities([entity], True)
@callback
def unregister(self, bulb):
"""Handle aiolifx disappearing bulbs."""
if bulb.mac_addr in self.entities:
entity = self.entities[bulb.mac_addr]
_LOGGER.debug("%s unregister", entity.who)
entity.registered = False
entity.async_write_ha_state()
class AwaitAioLIFX:
"""Wait for an aiolifx callback and return the message."""
def __init__(self):
"""Initialize the wrapper."""
self.message = None
self.event = asyncio.Event()
@callback
def callback(self, bulb, message):
"""Handle responses."""
self.message = message
self.event.set()
async def wait(self, method):
"""Call an aiolifx method and wait for its response."""
self.message = None
self.event.clear()
method(callb=self.callback)
await self.event.wait()
return self.message
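# Hedged usage sketch (editor's addition): aiolifx request methods report results
# through a callb= callback; AwaitAioLIFX adapts that callback style into a single
# awaitable call. "bulb" below stands for an aiolifx Light object.
async def _example_query_bulb(bulb):  # hypothetical helper, illustration only
    ack = AwaitAioLIFX().wait
    color_msg = await ack(bulb.get_color)  # resolves when the callback fires; may be None on failure
    return color_msg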
def convert_8_to_16(value):
"""Scale an 8 bit level into 16 bits."""
return (value << 8) | value
def convert_16_to_8(value):
"""Scale a 16 bit level into 8 bits."""
return value >> 8
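# Hedged example (editor's addition): duplicating the byte maps the 8-bit range onto
# the full 16-bit range (255 -> 65535 rather than 65280), and the conversion
# round-trips exactly through convert_16_to_8.
def _example_brightness_scaling():  # hypothetical helper, illustration only
    assert convert_8_to_16(0x00) == 0x0000
    assert convert_8_to_16(0xFF) == 0xFFFF
    assert convert_16_to_8(convert_8_to_16(0x7F)) == 0x7F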
class LIFXLight(LightEntity):
"""Representation of a LIFX light."""
def __init__(self, bulb, effects_conductor):
"""Initialize the light."""
self.bulb = bulb
self.effects_conductor = effects_conductor
self.registered = True
self.postponed_update = None
self.lock = asyncio.Lock()
@property
def device_info(self):
"""Return information about the device."""
info = {
"identifiers": {(LIFX_DOMAIN, self.unique_id)},
"name": self.name,
"connections": {(dr.CONNECTION_NETWORK_MAC, self.bulb.mac_addr)},
"manufacturer": "LIFX",
}
version = self.bulb.host_firmware_version
if version is not None:
info["sw_version"] = version
product_map = aiolifx().products.product_map
model = product_map.get(self.bulb.product) or self.bulb.product
if model is not None:
info["model"] = model
return info
@property
def available(self):
"""Return the availability of the bulb."""
return self.registered
@property
def unique_id(self):
"""Return a unique ID."""
return self.bulb.mac_addr
@property
def name(self):
"""Return the name of the bulb."""
return self.bulb.label
@property
def who(self):
"""Return a string identifying the bulb."""
return f"{self.bulb.ip_addr} ({self.name})"
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
kelvin = lifx_features(self.bulb)["max_kelvin"]
return math.floor(color_util.color_temperature_kelvin_to_mired(kelvin))
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
kelvin = lifx_features(self.bulb)["min_kelvin"]
return math.ceil(color_util.color_temperature_kelvin_to_mired(kelvin))
@property
def supported_features(self):
"""Flag supported features."""
support = SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION | SUPPORT_EFFECT
bulb_features = lifx_features(self.bulb)
if bulb_features["min_kelvin"] != bulb_features["max_kelvin"]:
support |= SUPPORT_COLOR_TEMP
return support
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
fade = self.bulb.power_level / 65535
return convert_16_to_8(int(fade * self.bulb.color[2]))
@property
def color_temp(self):
"""Return the color temperature."""
_, sat, _, kelvin = self.bulb.color
if sat:
return None
return color_util.color_temperature_kelvin_to_mired(kelvin)
@property
def is_on(self):
"""Return true if light is on."""
return self.bulb.power_level != 0
@property
def effect(self):
"""Return the name of the currently running effect."""
effect = self.effects_conductor.effect(self.bulb)
if effect:
return f"lifx_effect_{effect.name}"
return None
async def update_hass(self, now=None):
"""Request new status and push it to hass."""
self.postponed_update = None
await self.async_update()
self.async_write_ha_state()
async def update_during_transition(self, when):
"""Update state at the start and end of a transition."""
if self.postponed_update:
self.postponed_update()
# Transition has started
await self.update_hass()
# Transition has ended
if when > 0:
self.postponed_update = async_track_point_in_utc_time(
self.hass,
self.update_hass,
util.dt.utcnow() + timedelta(milliseconds=when),
)
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
kwargs[ATTR_POWER] = True
self.hass.async_create_task(self.set_state(**kwargs))
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
kwargs[ATTR_POWER] = False
self.hass.async_create_task(self.set_state(**kwargs))
async def set_state(self, **kwargs):
"""Set a color on the light and turn it on/off."""
async with self.lock:
bulb = self.bulb
await self.effects_conductor.stop([bulb])
if ATTR_EFFECT in kwargs:
await self.default_effect(**kwargs)
return
if ATTR_INFRARED in kwargs:
bulb.set_infrared(convert_8_to_16(kwargs[ATTR_INFRARED]))
if ATTR_TRANSITION in kwargs:
fade = int(kwargs[ATTR_TRANSITION] * 1000)
else:
fade = 0
# These are both False if ATTR_POWER is not set
power_on = kwargs.get(ATTR_POWER, False)
power_off = not kwargs.get(ATTR_POWER, True)
hsbk = find_hsbk(self.hass, **kwargs)
# Send messages, waiting for ACK each time
ack = AwaitAioLIFX().wait
if not self.is_on:
if power_off:
await self.set_power(ack, False)
# If fading on with color, set color immediately
if hsbk and power_on:
await self.set_color(ack, hsbk, kwargs)
await self.set_power(ack, True, duration=fade)
elif hsbk:
await self.set_color(ack, hsbk, kwargs, duration=fade)
elif power_on:
await self.set_power(ack, True, duration=fade)
else:
if power_on:
await self.set_power(ack, True)
if hsbk:
await self.set_color(ack, hsbk, kwargs, duration=fade)
if power_off:
await self.set_power(ack, False, duration=fade)
# Avoid state ping-pong by holding off updates as the state settles
await asyncio.sleep(0.3)
# Update when the transition starts and ends
await self.update_during_transition(fade)
async def set_power(self, ack, pwr, duration=0):
"""Send a power change to the bulb."""
await ack(partial(self.bulb.set_power, pwr, duration=duration))
async def set_color(self, ack, hsbk, kwargs, duration=0):
"""Send a color change to the bulb."""
hsbk = merge_hsbk(self.bulb.color, hsbk)
await ack(partial(self.bulb.set_color, hsbk, duration=duration))
async def default_effect(self, **kwargs):
"""Start an effect with default parameters."""
service = kwargs[ATTR_EFFECT]
data = {ATTR_ENTITY_ID: self.entity_id}
await self.hass.services.async_call(
LIFX_DOMAIN, service, data, context=self._context
)
async def async_update(self):
"""Update bulb status."""
if self.available and not self.lock.locked():
await AwaitAioLIFX().wait(self.bulb.get_color)
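# Hedged illustration (editor's addition): in set_state above, power_on/power_off are
# derived so that both stay False when ATTR_POWER is absent, letting a plain color
# change leave the power state alone.
def _example_power_flags(**kwargs):  # hypothetical helper mirroring set_state's derivation
    power_on = kwargs.get(ATTR_POWER, False)
    power_off = not kwargs.get(ATTR_POWER, True)
    return power_on, power_off
# _example_power_flags()                      -> (False, False)
# _example_power_flags(**{ATTR_POWER: True})  -> (True, False)
# _example_power_flags(**{ATTR_POWER: False}) -> (False, True)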
class LIFXWhite(LIFXLight):
"""Representation of a white-only LIFX light."""
@property
def effect_list(self):
"""Return the list of supported effects for this light."""
return [SERVICE_EFFECT_PULSE, SERVICE_EFFECT_STOP]
class LIFXColor(LIFXLight):
"""Representation of a color LIFX light."""
@property
def supported_features(self):
"""Flag supported features."""
support = super().supported_features
support |= SUPPORT_COLOR
return support
@property
def effect_list(self):
"""Return the list of supported effects for this light."""
return [SERVICE_EFFECT_COLORLOOP, SERVICE_EFFECT_PULSE, SERVICE_EFFECT_STOP]
@property
def hs_color(self):
"""Return the hs value."""
hue, sat, _, _ = self.bulb.color
hue = hue / 65535 * 360
sat = sat / 65535 * 100
return (hue, sat) if sat else None
class LIFXStrip(LIFXColor):
"""Representation of a LIFX light strip with multiple zones."""
async def set_color(self, ack, hsbk, kwargs, duration=0):
"""Send a color change to the bulb."""
bulb = self.bulb
num_zones = len(bulb.color_zones)
zones = kwargs.get(ATTR_ZONES)
if zones is None:
# Fast track: setting all zones to the same brightness and color
# can be treated as a single-zone bulb.
if hsbk[2] is not None and hsbk[3] is not None:
await super().set_color(ack, hsbk, kwargs, duration)
return
zones = list(range(0, num_zones))
else:
zones = [x for x in set(zones) if x < num_zones]
# Zone brightness is not reported when powered off
if not self.is_on and hsbk[2] is None:
await self.set_power(ack, True)
await asyncio.sleep(0.3)
await self.update_color_zones()
await self.set_power(ack, False)
await asyncio.sleep(0.3)
# Send new color to each zone
for index, zone in enumerate(zones):
zone_hsbk = merge_hsbk(bulb.color_zones[zone], hsbk)
apply = 1 if (index == len(zones) - 1) else 0
set_zone = partial(
bulb.set_color_zones,
start_index=zone,
end_index=zone,
color=zone_hsbk,
duration=duration,
apply=apply,
)
await ack(set_zone)
async def async_update(self):
"""Update strip status."""
if self.available and not self.lock.locked():
await super().async_update()
await self.update_color_zones()
async def update_color_zones(self):
"""Get updated color information for each zone."""
zone = 0
top = 1
while self.available and zone < top:
# Each get_color_zones can update 8 zones at once
resp = await AwaitAioLIFX().wait(
partial(self.bulb.get_color_zones, start_index=zone)
)
if resp:
zone += 8
top = resp.count
# We only await multizone responses so don't ask for just one
if zone == top - 1:
zone -= 1
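# Hedged note (editor's addition): in LIFXStrip.set_color above, every zone message is
# sent with apply=0 (buffer the change) except the last, which carries apply=1 so the
# whole strip updates in one step. A hypothetical sketch of that flag sequence:
def _example_apply_flags(zones):  # illustration only, not used by the integration
    return [1 if index == len(zones) - 1 else 0 for index, _zone in enumerate(zones)]
# _example_apply_flags([0, 1, 2]) -> [0, 0, 1]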
|
|
"""
This file must not depend on any other CuPy modules.
"""
import ctypes
import json
import os
import os.path
import shutil
import sys
import warnings
# '' for uninitialized, None for non-existing
_cuda_path = ''
_nvcc_path = ''
_rocm_path = ''
_hipcc_path = ''
_cub_path = ''
"""
Library Preloading
------------------
Wheel packages are built against specific versions of CUDA libraries
(cuTENSOR/NCCL/cuDNN).
To avoid loading wrong version, these shared libraries are manually
preloaded.
# TODO(kmaehashi): Support NCCL
Example of `_preload_config` is as follows:
{
# installation source
'packaging': 'pip',
# CUDA version string
'cuda': '11.0',
'cudnn': {
# cuDNN version string
'version': '8.0.0',
# names of the shared library
'filenames': ['libcudnn.so.X.Y.Z'] # or `cudnn64_X.dll` for Windows
}
}
The configuration file is intended solely for internal purposes and
not expected to be parsed by end-users.
"""
_preload_config = None
_preload_libs = {
'cudnn': None,
'nccl': None,
'cutensor': None,
}
_preload_logs = []
def _log(msg):
# TODO(kmaehashi): replace with the standard logging
_preload_logs.append(msg)
def get_cuda_path():
# Returns the CUDA installation path or None if not found.
global _cuda_path
if _cuda_path == '':
_cuda_path = _get_cuda_path()
return _cuda_path
def get_nvcc_path():
# Returns the path to the nvcc command or None if not found.
global _nvcc_path
if _nvcc_path == '':
_nvcc_path = _get_nvcc_path()
return _nvcc_path
def get_rocm_path():
# Returns the ROCm installation path or None if not found.
global _rocm_path
if _rocm_path == '':
_rocm_path = _get_rocm_path()
return _rocm_path
def get_hipcc_path():
# Returns the path to the hipcc command or None if not found.
global _hipcc_path
if _hipcc_path == '':
_hipcc_path = _get_hipcc_path()
return _hipcc_path
def get_cub_path():
# Returns the CUB header path or None if not found.
global _cub_path
if _cub_path == '':
_cub_path = _get_cub_path()
return _cub_path
def _get_cuda_path():
# Use environment variable
cuda_path = os.environ.get('CUDA_PATH', '') # Nvidia default on Windows
if os.path.exists(cuda_path):
return cuda_path
# Use nvcc path
nvcc_path = shutil.which('nvcc')
if nvcc_path is not None:
return os.path.dirname(os.path.dirname(nvcc_path))
# Use typical path
if os.path.exists('/usr/local/cuda'):
return '/usr/local/cuda'
return None
def _get_nvcc_path():
# Honor the "NVCC" env var
nvcc_path = os.environ.get('NVCC', None)
if nvcc_path is not None:
return nvcc_path
# Lookup <CUDA>/bin
cuda_path = get_cuda_path()
if cuda_path is None:
return None
return shutil.which('nvcc', path=os.path.join(cuda_path, 'bin'))
def _get_rocm_path():
# Use environment variable
rocm_path = os.environ.get('ROCM_HOME', '')
if os.path.exists(rocm_path):
return rocm_path
# Use hipcc path
hipcc_path = shutil.which('hipcc')
if hipcc_path is not None:
return os.path.dirname(os.path.dirname(hipcc_path))
# Use typical path
if os.path.exists('/opt/rocm'):
return '/opt/rocm'
return None
def _get_hipcc_path():
# TODO(leofang): Introduce an env var HIPCC?
# Lookup <ROCM>/bin
rocm_path = get_rocm_path()
if rocm_path is None:
return None
return shutil.which('hipcc', path=os.path.join(rocm_path, 'bin'))
def _get_cub_path():
# runtime discovery of CUB headers
from cupy_backends.cuda.api import runtime
current_dir = os.path.dirname(os.path.abspath(__file__))
if not runtime.is_hip:
cuda_path = get_cuda_path()
if os.path.isdir(os.path.join(current_dir, '_core/include/cupy/cub')):
_cub_path = '<bundle>'
elif cuda_path is not None and os.path.isdir(
os.path.join(cuda_path, 'include/cub')):
# use built-in CUB for CUDA 11+
_cub_path = '<CUDA>'
else:
_cub_path = None
else:
# the bundled CUB does not work in ROCm
rocm_path = get_rocm_path()
if rocm_path is not None and os.path.isdir(
os.path.join(rocm_path, 'include/hipcub')):
# use hipCUB
_cub_path = '<ROCm>'
else:
_cub_path = None
return _cub_path
def _setup_win32_dll_directory():
# Setup DLL directory to load CUDA Toolkit libs and shared libraries
# added during the build process.
if sys.platform.startswith('win32'):
is_conda = ((os.environ.get('CONDA_PREFIX') is not None)
or (os.environ.get('CONDA_BUILD_STATE') is not None))
# Path to the CUDA Toolkit binaries
cuda_path = get_cuda_path()
if cuda_path is not None:
if is_conda:
cuda_bin_path = cuda_path
else:
cuda_bin_path = os.path.join(cuda_path, 'bin')
else:
cuda_bin_path = None
warnings.warn(
'CUDA path could not be detected.'
' Set CUDA_PATH environment variable if CuPy fails to load.')
_log('CUDA_PATH: {}'.format(cuda_path))
# Path to shared libraries in wheel
wheel_libdir = os.path.join(
get_cupy_install_path(), 'cupy', '.data', 'lib')
if os.path.isdir(wheel_libdir):
_log('Wheel shared libraries: {}'.format(wheel_libdir))
else:
_log('Not wheel distribution ({} not found)'.format(
wheel_libdir))
wheel_libdir = None
if (3, 8) <= sys.version_info:
if cuda_bin_path is not None:
_log('Adding DLL search path: {}'.format(cuda_bin_path))
os.add_dll_directory(cuda_bin_path)
if wheel_libdir is not None:
_log('Adding DLL search path: {}'.format(wheel_libdir))
os.add_dll_directory(wheel_libdir)
else:
# Users are responsible for adding `%CUDA_PATH%/bin` to PATH.
if wheel_libdir is not None:
_log('Adding to PATH: {}'.format(wheel_libdir))
path = os.environ.get('PATH', '')
os.environ['PATH'] = wheel_libdir + os.pathsep + path
def get_cupy_install_path():
# Path to the directory where the package is installed.
return os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
def get_cupy_cuda_lib_path():
"""Returns the directory where CUDA external libraries are installed.
    The CUPY_CUDA_LIB_PATH environment variable only affects wheel installations.
Shared libraries are looked up from
`$CUPY_CUDA_LIB_PATH/$CUDA_VER/$LIB_NAME/$LIB_VER/{lib,lib64,bin}`,
e.g., `~/.cupy/cuda_lib/11.2/cudnn/8.1.1/lib64/libcudnn.so.8.1.1`.
The default $CUPY_CUDA_LIB_PATH is `~/.cupy/cuda_lib`.
"""
cupy_cuda_lib_path = os.environ.get('CUPY_CUDA_LIB_PATH', None)
if cupy_cuda_lib_path is None:
return os.path.expanduser('~/.cupy/cuda_lib')
return os.path.abspath(cupy_cuda_lib_path)
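# Hedged illustration (editor's addition; the version numbers are made up): how a
# preload candidate path is assembled from the layout described in the docstring
# above, mirroring what _preload_library does further below.
def _example_preload_candidate():  # hypothetical helper, never called by CuPy
    root = get_cupy_cuda_lib_path()  # defaults to ~/.cupy/cuda_lib
    return os.path.join(root, '11.2', 'cudnn', '8.1.1', 'lib64', 'libcudnn.so.8.1.1')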
def get_preload_config():
global _preload_config
if _preload_config is None:
config_path = os.path.join(
get_cupy_install_path(), 'cupy', '.data', '_wheel.json')
if not os.path.exists(config_path):
return None
with open(config_path) as f:
_preload_config = json.load(f)
return _preload_config
def _can_attempt_preload(lib: str) -> bool:
"""Returns if the preload can be attempted."""
config = get_preload_config()
if (config is None) or (config['packaging'] == 'conda'):
# We don't do preload if CuPy is installed from Conda-Forge, as we
# cannot guarantee the version pinned in _wheel.json, which is
# encoded in config[lib]['filenames'], is always available on
# Conda-Forge. See here for the configuration files used in
# Conda-Forge distributions.
# https://github.com/conda-forge/cupy-feedstock/blob/master/recipe/preload_config/
_log(f'Cannot preload {lib} as this is not a wheel installation')
return False
if lib not in _preload_libs:
raise AssertionError(f'Unknown preload library: {lib}')
if lib not in config:
_log(f'Preload {lib} not configured in wheel')
return False
if _preload_libs[lib] is not None:
_log(f'Preload already attempted: {lib}')
return False
return True
def _preload_library(lib):
"""Preload dependent shared libraries.
The preload configuration file (cupy/.data/_wheel.json) will be added
during the wheel build process.
"""
_log(f'Preloading triggered for library: {lib}')
if not _can_attempt_preload(lib):
return
_preload_libs[lib] = {}
config = get_preload_config()
cuda_version = config['cuda']
_log('CuPy wheel package built for CUDA {}'.format(cuda_version))
cupy_cuda_lib_path = get_cupy_cuda_lib_path()
_log('CuPy CUDA library directory: {}'.format(cupy_cuda_lib_path))
version = config[lib]['version']
filenames = config[lib]['filenames']
for filename in filenames:
_log(f'Looking for {lib} version {version} ({filename})')
# "lib": cuTENSOR (Linux/Windows) / NCCL (Linux)
# "lib64": cuDNN (Linux)
# "bin": cuDNN (Windows)
libpath_cands = [
os.path.join(
cupy_cuda_lib_path, config['cuda'], lib, version, x,
filename)
for x in ['lib', 'lib64', 'bin']]
for libpath in libpath_cands:
if not os.path.exists(libpath):
_log('Rejected candidate (not found): {}'.format(libpath))
continue
try:
_log(f'Trying to load {libpath}')
# Keep reference to the preloaded module.
_preload_libs[lib][libpath] = ctypes.CDLL(libpath)
_log('Loaded')
break
except Exception as e:
e_type = type(e).__name__ # NOQA
msg = (
f'CuPy failed to preload library ({libpath}): '
f'{e_type} ({e})')
_log(msg)
warnings.warn(msg)
else:
_log('File {} could not be found'.format(filename))
# Lookup library with fully-qualified version (e.g.,
# `libcudnn.so.X.Y.Z`).
_log(f'Trying to load {filename} from default search path')
try:
_preload_libs[lib][filename] = ctypes.CDLL(filename)
_log('Loaded')
except Exception as e:
# Fallback to the standard shared library lookup which only
# uses the major version (e.g., `libcudnn.so.X`).
_log(f'Library {lib} could not be preloaded: {e}')
def _get_preload_logs():
return '\n'.join(_preload_logs)
def _preload_warning(lib, exc):
config = get_preload_config()
if config is not None and lib in config:
msg = '''
{lib} library could not be loaded.
Reason: {exc_type} ({exc})
You can install the library by:
'''
if config['packaging'] == 'pip':
msg += '''
$ python -m cupyx.tools.install_library --library {lib} --cuda {cuda}
'''
elif config['packaging'] == 'conda':
msg += '''
$ conda install -c conda-forge {lib}
'''
else:
raise AssertionError
msg = msg.format(
lib=lib, exc_type=type(exc).__name__, exc=str(exc),
cuda=config['cuda'])
warnings.warn(msg)
def _detect_duplicate_installation():
# importlib.metadata only available in Python 3.8+.
if sys.version_info < (3, 8):
return
import importlib.metadata
# List of all CuPy packages, including out-dated ones.
known = [
'cupy',
'cupy-cuda80',
'cupy-cuda90',
'cupy-cuda91',
'cupy-cuda92',
'cupy-cuda100',
'cupy-cuda101',
'cupy-cuda102',
'cupy-cuda110',
'cupy-cuda111',
'cupy-cuda112',
'cupy-cuda113',
'cupy-cuda114',
'cupy-cuda115',
'cupy-cuda116',
'cupy-rocm-4-0',
'cupy-rocm-4-1',
'cupy-rocm-4-2',
'cupy-rocm-4-3',
'cupy-rocm-5-0',
]
cupy_installed = [
name for name in known
if list(importlib.metadata.distributions(name=name))]
if 1 < len(cupy_installed):
cupy_packages_list = ', '.join(sorted(cupy_installed))
warnings.warn(f'''
--------------------------------------------------------------------------------
CuPy may not function correctly because multiple CuPy packages are installed
in your environment:
{cupy_packages_list}
Follow these steps to resolve this issue:
1. For all packages listed above, run the following command to remove all
existing CuPy installations:
$ pip uninstall <package_name>
If you previously installed CuPy via conda, also run the following:
$ conda uninstall cupy
2. Install the appropriate CuPy package.
Refer to the Installation Guide for detailed instructions.
https://docs.cupy.dev/en/stable/install.html
--------------------------------------------------------------------------------
''')
def _diagnose_import_error() -> str:
# TODO(kmaehashi): provide better diagnostics.
return '''\
Failed to import CuPy.
If you installed CuPy via wheels (cupy-cudaXXX or cupy-rocm-X-X), make sure that the package matches with the version of CUDA or ROCm installed.
On Linux, you may need to set LD_LIBRARY_PATH environment variable depending on how you installed CUDA/ROCm.
On Windows, try setting CUDA_PATH environment variable.
Check the Installation Guide for details:
https://docs.cupy.dev/en/latest/install.html''' # NOQA
|
|
import feedparser
import json
import logging
import sys
import plugin
from utils import str_utils
FAIL_MESSAGE = (
"Unable to download or parse feed. Remove unused feeds using "
"the !listfeed and !removefeed commands."
)
HELP_MESSAGE = (
"!addfeed url [fetch time [custom title]] where:\n"
"url - is the url of the atom or rss feed\n"
"fetch time - is the number of minutes between each request\n"
"custom title - is the title used for this feed.\n"
"If no title is given, the default title parsed from the "
"feed will be used instead."
)
REMOVING_FEED_MESSAGE = u"Removing: #{} - {}"
LIST_FEED_ITEM_MESSAGE = u"#{}: {}"
NO_FEED_MESSAGE = u"No feeds"
DEFAULT_FETCH_TIME = 10 * 60
def FeedItemToString(title, link, feed_title=""):
return str_utils.sanitize_string(u"{}: {} <{}>".format(feed_title, title, link))
# Simple polling class that fetches the feed at a regular interval and passes
# new entries on to the registered callbacks
class Feedpoller:
def __init__(self, feed, on_created, on_entry, on_error):
self.feed = feed
self.feed["title"] = str_utils.sanitize_string(self.feed["title"])
self.last_entry = None
self.consecutive_fails = 0
self.update_count = 0
self.on_created = on_created
self.on_entry = on_entry
self.on_error = on_error
parsed = self.read(feed["url"])
if parsed.bozo == 0:
self._set_last(parsed.entries)
if self.feed["title"] == "":
self.feed["title"] = str_utils.sanitize_string(parsed.feed.title)
on_created(self.feed)
else:
self.modified = ""
raise Exception("Could not parse feed")
def read(self, url, modified=None, etag=None):
parsed = feedparser.parse(url, modified=modified, etag=etag)
if parsed.bozo == 0:
self.modified = parsed.get("modified", None)
self.etag = parsed.get("etag", None)
return parsed
def update(self):
self.update_count += 1
if self.update_count < self.feed["frequency"]:
return
self.update_count = 0
self.update_now()
def update_now(self):
parsed = self.read(self.feed["url"], self.modified, self.etag)
if parsed.bozo == 1:
self.consecutive_fails += 1
if self.consecutive_fails % 10 == 0:
self.on_error(self.feed, FAIL_MESSAGE)
return
for entry in parsed.entries:
# TODO: Check id, link, etc
            # Maybe save the entire data.entries and remove all duplicates when
            # a new update happens?
if self.last_entry is not None:
if "published_parsed" in entry:
if entry.published_parsed <= self.last_entry.published_parsed:
break
else:
if entry.title == self.last_entry.title:
break
self.on_entry(self.feed, entry)
self._set_last(parsed.entries)
self.consecutive_fails = 0
def _set_last(self, entries):
if len(entries) > 0:
self.last_entry = entries[0]
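# Hedged note (editor's addition): update() is presumably driven by a periodic tick
# (see Feedretriever.update below); a feed with frequency N only hits the network
# every N ticks, and read() passes the stored ETag/modified values so unchanged
# feeds can be answered cheaply by the server.
def _example_drive_poller(poller, ticks):  # hypothetical helper, illustration only
    for _ in range(ticks):
        poller.update()  # cheap counter bump until the configured frequency is reached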
# Aggregator class for adding and handling feeds
class Feedretriever(plugin.Plugin):
def __init__(self):
plugin.Plugin.__init__(self, "feedretriever")
self.feeds = []
def started(self, settings):
logging.info("Feedretriever.started %s", settings)
self.settings = json.loads(settings)
logging.info("Feedretriever.started %s", self.settings)
for feed in self.settings["feeds"]:
self.add_feed(feed, new=False)
def add_feed(self, feed, new=True):
def on_created(feed):
self.privmsg(
feed["server"], feed["channel"], "Added feed: " + feed["title"]
)
self.settings["feeds"].append(feed)
self._save_settings(json.dumps(self.settings))
def on_entry(feed, entry):
self.privmsg(
feed["server"],
feed["channel"],
FeedItemToString(entry.title, entry.link, feed["title"]),
)
def on_error(feed, message):
self.privmsg(
feed["server"], feed["channel"], feed["title"] + ": " + message
)
try:
poller = Feedpoller(
feed,
on_created=on_created if new else lambda *a, **kw: None,
on_entry=on_entry,
on_error=on_error,
)
self.feeds.append(poller)
except Exception as e:
logging.info("Failed to add feed: %r", e)
self.privmsg(
feed["server"], feed["channel"], "Failed to add: " + feed["url"]
)
def remove_feed(self, feed):
self.feeds.remove(feed)
self.settings["feeds"].remove(feed.feed)
self._save_settings(json.dumps(self.settings))
def on_pubmsg(self, server, user, channel, message):
if message.startswith("!feed") or message.startswith("!addfeed"):
_, url, frequency, title = str_utils.split(message, " ", 4)
if url == "":
self.privmsg(server, channel, HELP_MESSAGE)
return
try:
frequency = int(frequency) * 60
except ValueError:
frequency = DEFAULT_FETCH_TIME
feed = {
"url": url,
"title": title,
"server": server,
"channel": channel,
"frequency": frequency,
}
self.add_feed(feed)
elif message.startswith("!removefeed"):
feeds = list(
filter(
lambda f: f.feed["server"] == server
and f.feed["channel"] == channel,
self.feeds,
)
)
feeds_to_remove = []
for i in message.split(" "):
i = int(i) if i.isdecimal() else -1
if i >= 0 and i < len(feeds):
feeds_to_remove.append(i)
for i in sorted(feeds_to_remove, reverse=True):
self.privmsg(
server,
channel,
REMOVING_FEED_MESSAGE.format(i, feeds[i].feed["title"]),
)
self.remove_feed(feeds[i])
logging.info("Removed feed: %d", i)
elif message.startswith("!listfeed"):
feeds = list(
filter(
lambda f: f.feed["server"] == server
and f.feed["channel"] == channel,
self.feeds,
)
)
if len(feeds) == 0:
self.privmsg(server, channel, NO_FEED_MESSAGE)
for i, feed in enumerate(feeds):
self.privmsg(
server,
channel,
LIST_FEED_ITEM_MESSAGE.format(i, feed.feed["title"]),
)
def update(self):
for feed in self.feeds:
feed.update()
if __name__ == "__main__":
sys.exit(Feedretriever.run())
|
|
class CpuStoppedCall(Exception):
pass
instruction_map = {}
instruction_names = {}
DEBUG = False
def instruction(alt=None):
def decorator(func):
number = 110 + len(instruction_map)
instruction_map[number] = func
instruction_names[alt or func.__name__] = number
return func
return decorator
def exception_wrapper(func):
def decorator(*args):
try:
if DEBUG:
print("OPcode: {.__name__}".format(func))
return func(*args)
except TypeError as e:
raise Exception("Instruction received invalid amount of arguments",
"expected {}, recieved {}".format(func.__code__.co_argcount, len(args)), e)
except ValueError as e: # we assume a value error is caused by attempting to treat an absolute value as a mutable object
raise Exception(
"Possible attempt to use absolute value (#) as mutable type", e)
decorator.__name__ = func.__name__
decorator.__doc__ = func.__doc__
return decorator
class InstructionSet:
"""Container for cpu instructions
Arguments
---------
cpu: <cpu object> CPU object to use to execute commands
Not needed if only being used to compile a program
Functions
---------
run_encoded( command, *args ): Execute a decoded instruction
    encode_name( command_name ): Return the numeric ID of an instruction, or None if the instruction does not exist
"""
def __init__(self, cpu=None): # no cpu needed for compiler
self.encoded_commands = instruction_map.copy()
self.instruction_names = instruction_names.copy()
self.cpu = cpu
def run_encoded(self, command, *args):
"""Run an encoded instruction
Arguments
---------
command: <int> Decoded command to execute
*args: <str> Operands to run instruction with"""
        handler = self.encoded_commands.get(command)
        if handler:
            handler(self, *args)
        else:
            raise CpuStoppedCall(
                "Invalid command, ID was: {}. Arguments were: {}".format(command, args))
def encode_name(self, command_name):
return self.instruction_names.get(command_name)
@instruction()
@exception_wrapper
def add(self, value):
self.cpu.registers["acc"] += self.cpu.interpret_read_address(value)
@instruction()
@exception_wrapper
def sub(self, value):
self.cpu.registers["acc"] -= self.cpu.interpret_read_address(value)
@instruction()
@exception_wrapper
def mul(self, value):
self.cpu.registers["acc"] *= self.cpu.interpret_read_address(value)
@instruction()
@exception_wrapper
def div(self, value):
self.cpu.registers["acc"] /= self.cpu.interpret_read_address(value)
@instruction()
@exception_wrapper
def set(self, value):
self.cpu.registers["acc"] = self.cpu.interpret_read_address(value)
@instruction()
@exception_wrapper
def mov(self, from_loc, to_loc):
if to_loc.startswith("@") and to_loc[1:] in self.cpu.registers.registers.keys():
self.cpu.registers[to_loc.lstrip(
"@")] = self.cpu.interpret_read_address(from_loc)
else:
self.cpu.memory[self.cpu.interpret_read_address(
to_loc)] = self.cpu.interpret_read_address(from_loc)
@instruction()
@exception_wrapper
def cmp(self, a, b=0):
av = self.cpu.interpret_read_address(a)
# print("comp interpreted as {}".format(av))
bv = self.cpu.interpret_read_address(b) if b else 0
functions = [
(lambda a, b: a < b),
(lambda a, b: a > b),
(lambda a, b: a <= b),
(lambda a, b: a >= b),
(lambda a, b: a == b),
(lambda a, b: a != b)
]
self.cpu.registers["cmp"] = "".join(
[str(1 if i(av, bv) else 0) for i in functions])
# jumps to memory address provided, no interpreting
def _internal_jump(self, location):
self.cpu.registers["cur"] = location
@instruction()
@exception_wrapper
def jump(self, jump):
self._internal_jump(self.cpu.interpret_read_address(jump))
def _test_cmp(self, index):
return int(self.cpu.registers["cmp"][index])
@instruction()
@exception_wrapper
def lje(self, jump): # less than
if self._test_cmp(0):
self.jump(jump)
@instruction()
@exception_wrapper
def mje(self, jump): # more than
if self._test_cmp(1):
self.jump(jump)
@instruction()
@exception_wrapper
    def leje(self, jump):  # less than or equal
if self._test_cmp(2):
self.jump(jump)
@instruction()
@exception_wrapper
    def meje(self, jump):  # greater than or equal
if self._test_cmp(3):
self.jump(jump)
@instruction()
@exception_wrapper
def eqje(self, jump): # equal
if self._test_cmp(4):
self.jump(jump)
@instruction()
@exception_wrapper
def nqje(self, jump): # not equal
if self._test_cmp(5):
self.jump(jump)
@instruction()
@exception_wrapper
def prntint(self, memloc):
val = str(self.cpu.interpret_read_address(memloc))
print(val, end="")
self.cpu.stdout[-1] += val
@instruction()
@exception_wrapper
def prntstr(self, memloc):
val = chr(self.cpu.interpret_read_address(memloc))
print(val, end='')
self.cpu.stdout[-1] += val
@instruction()
@exception_wrapper
def prntnl(self):
print("\n")
self.cpu.stdout.append("")
@instruction(alt="input")
@exception_wrapper
def inp(self, memloc):
if memloc.startswith("@") and memloc[1:] in self.cpu.registers.registers.keys():
self.cpu.registers[memloc.strip("@").lower()] = int(
input("Enter number: "))
else:
self.cpu.memory[self.cpu.interpret_read_address(
memloc)] = int(input("Enter number: "))
# like anything wrong could happen here
@instruction()
@exception_wrapper
def halt(self):
raise CpuStoppedCall("CPU halt triggered")
@instruction()
@exception_wrapper
def pop(self, memloc=None):
if self.cpu.registers["stk"] > self.cpu.memory.size:
raise Exception("Stack underflow, attempt to pop from empty stack")
if memloc is not None:
if memloc.startswith("@") and memloc[1:] in self.cpu.registers.registers.keys():
self.cpu.registers[memloc.lstrip("@")] = self.cpu.memory[
self.cpu.registers["stk"]]
else:
self.cpu.memory[self.cpu.interpret_read_address(memloc)] = self.cpu.memory[
self.cpu.registers["stk"]]
self.cpu.registers["stk"] += 1 # stack descends upwardas
@instruction()
@exception_wrapper
def push(self, value):
# decrement first since last push will leave us one below
self.cpu.registers["stk"] -= 1
self.cpu.memory[self.cpu.registers["stk"]
] = self.cpu.interpret_read_address(value)
@instruction()
@exception_wrapper
def call(self, function_location, *args):
collected_args = [self.cpu.interpret_read_address(i) for i in args]
# since stack position will change once we push return location
self._push_stk_py(self.cpu.registers["cur"])
# push return address to stack
for i in collected_args:
self._push_stk_py(i) # push vars to stack
self.jump(function_location)
def _push_stk_py(self, value):
self.cpu.registers["stk"] -= 1
self.cpu.memory[self.cpu.registers["stk"]] = value
def _pop_stk_py(self):
if self.cpu.registers["stk"] > self.cpu.memory.size:
return 0
pre = self.cpu.memory[self.cpu.registers["stk"]]
self.cpu.registers["stk"] += 1
return pre
@instruction()
@exception_wrapper
def ret(self, retval=None):
ret_loc = self._pop_stk_py()
if retval is not None:
self._push_stk_py(self.cpu.interpret_read_address(retval))
self._internal_jump(ret_loc)
@instruction()
@exception_wrapper
def nop(self):
pass
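# Hedged usage sketch (editor's addition): instruction IDs start at 110 and follow
# registration order, so encode_name() is how a compiler maps mnemonics to opcodes
# and run_encoded() is how a CPU dispatches them.
def _example_encode(iset=None):  # hypothetical helper, illustration only
    iset = iset or InstructionSet()   # no CPU needed just to encode
    opcode = iset.encode_name("add")  # first registered instruction
    assert opcode == 110              # holds as long as only this module registers instructions
    assert iset.encode_name("no_such_op") is None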
|
|
"""
pygments.lexers.hdl
~~~~~~~~~~~~~~~~~~~
Lexers for hardware descriptor languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, using, this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer']
class VerilogLexer(RegexLexer):
"""
For verilog source code with preprocessor directives.
.. versionadded:: 1.4
"""
name = 'verilog'
aliases = ['verilog', 'v']
filenames = ['*.v']
mimetypes = ['text/x-verilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^\s*`define', Comment.Preproc, 'macro'),
(r'\s+', Whitespace),
(r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
(r'([0-9]+)|(\'b)[01]+', Number.Bin),
(r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
(r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
(r'\'[01xz]', Number),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;\']', Punctuation),
(r'`[a-zA-Z_]\w*', Name.Constant),
(r'^(\s*)(package)(\s+)', bygroups(Whitespace, Keyword.Namespace, Text)),
(r'^(\s*)(import)(\s+)', bygroups(Whitespace, Keyword.Namespace, Text),
'import'),
(words((
'always', 'always_comb', 'always_ff', 'always_latch', 'and',
'assign', 'automatic', 'begin', 'break', 'buf', 'bufif0', 'bufif1',
'case', 'casex', 'casez', 'cmos', 'const', 'continue', 'deassign',
'default', 'defparam', 'disable', 'do', 'edge', 'else', 'end', 'endcase',
'endfunction', 'endgenerate', 'endmodule', 'endpackage', 'endprimitive',
'endspecify', 'endtable', 'endtask', 'enum', 'event', 'final', 'for',
'force', 'forever', 'fork', 'function', 'generate', 'genvar', 'highz0',
'highz1', 'if', 'initial', 'inout', 'input', 'integer', 'join', 'large',
'localparam', 'macromodule', 'medium', 'module', 'nand', 'negedge',
'nmos', 'nor', 'not', 'notif0', 'notif1', 'or', 'output', 'packed',
'parameter', 'pmos', 'posedge', 'primitive', 'pull0', 'pull1',
'pulldown', 'pullup', 'rcmos', 'ref', 'release', 'repeat', 'return',
'rnmos', 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 'scalared', 'signed',
'small', 'specify', 'specparam', 'strength', 'string', 'strong0',
'strong1', 'struct', 'table', 'task', 'tran', 'tranif0', 'tranif1',
'type', 'typedef', 'unsigned', 'var', 'vectored', 'void', 'wait',
'weak0', 'weak1', 'while', 'xnor', 'xor'), suffix=r'\b'),
Keyword),
(words((
'accelerate', 'autoexpand_vectornets', 'celldefine', 'default_nettype',
'else', 'elsif', 'endcelldefine', 'endif', 'endprotect', 'endprotected',
'expand_vectornets', 'ifdef', 'ifndef', 'include', 'noaccelerate',
'noexpand_vectornets', 'noremove_gatenames', 'noremove_netnames',
'nounconnected_drive', 'protect', 'protected', 'remove_gatenames',
'remove_netnames', 'resetall', 'timescale', 'unconnected_drive',
'undef'), prefix=r'`', suffix=r'\b'),
Comment.Preproc),
(words((
'bits', 'bitstoreal', 'bitstoshortreal', 'countdrivers', 'display', 'fclose',
'fdisplay', 'finish', 'floor', 'fmonitor', 'fopen', 'fstrobe', 'fwrite',
'getpattern', 'history', 'incsave', 'input', 'itor', 'key', 'list', 'log',
'monitor', 'monitoroff', 'monitoron', 'nokey', 'nolog', 'printtimescale',
'random', 'readmemb', 'readmemh', 'realtime', 'realtobits', 'reset',
'reset_count', 'reset_value', 'restart', 'rtoi', 'save', 'scale', 'scope',
'shortrealtobits', 'showscopes', 'showvariables', 'showvars', 'sreadmemb',
'sreadmemh', 'stime', 'stop', 'strobe', 'time', 'timeformat', 'write'),
prefix=r'\$', suffix=r'\b'),
Name.Builtin),
(words((
'byte', 'shortint', 'int', 'longint', 'integer', 'time',
'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand',
                'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wor',
                'shortreal', 'real', 'realtime'), suffix=r'\b'),
Keyword.Type),
(r'[a-zA-Z_]\w*:(?!:)', Name.Label),
(r'\$?[a-zA-Z_]\w*', Name),
(r'\\(\S+)', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Whitespace, '#pop'),
],
'import': [
(r'[\w:]+\*?', Name.Namespace, '#pop')
]
}
def analyse_text(text):
"""Verilog code will use one of reg/wire/assign for sure, and that
is not common elsewhere."""
result = 0
if 'reg' in text:
result += 0.1
if 'wire' in text:
result += 0.1
if 'assign' in text:
result += 0.1
return result
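# Hedged usage sketch (editor's addition): a quick way to exercise the lexer and the
# analyse_text() heuristic above. NullFormatter discards styling, which keeps the
# example dependency-free.
def _example_highlight_verilog():  # hypothetical helper, illustration only
    from pygments import highlight
    from pygments.formatters import NullFormatter
    code = "module t; reg r; wire w; assign w = r; endmodule\n"
    assert VerilogLexer.analyse_text(code) > 0.2  # 'reg', 'wire' and 'assign' all present
    return highlight(code, VerilogLexer(), NullFormatter())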
class SystemVerilogLexer(RegexLexer):
"""
Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
1800-2009 standard.
.. versionadded:: 1.5
"""
name = 'systemverilog'
aliases = ['systemverilog', 'sv']
filenames = ['*.sv', '*.svh']
mimetypes = ['text/x-systemverilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^(\s*)(`define)', bygroups(Whitespace, Comment.Preproc), 'macro'),
(r'^(\s*)(package)(\s+)', bygroups(Whitespace, Keyword.Namespace, Whitespace)),
(r'^(\s*)(import)(\s+)', bygroups(Whitespace, Keyword.Namespace, Whitespace), 'import'),
(r'\s+', Whitespace),
(r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([1-9][_0-9]*)?\s*\'[sS]?[bB]\s*[xXzZ?01][_xXzZ?01]*',
Number.Bin),
(r'([1-9][_0-9]*)?\s*\'[sS]?[oO]\s*[xXzZ?0-7][_xXzZ?0-7]*',
Number.Oct),
(r'([1-9][_0-9]*)?\s*\'[sS]?[dD]\s*[xXzZ?0-9][_xXzZ?0-9]*',
Number.Integer),
(r'([1-9][_0-9]*)?\s*\'[sS]?[hH]\s*[xXzZ?0-9a-fA-F][_xXzZ?0-9a-fA-F]*',
Number.Hex),
(r'\'[01xXzZ]', Number),
(r'[0-9][_0-9]*', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(words(('inside', 'dist'), suffix=r'\b'), Operator.Word),
(r'[()\[\],.;\'$]', Punctuation),
(r'`[a-zA-Z_]\w*', Name.Constant),
(words((
'accept_on', 'alias', 'always', 'always_comb', 'always_ff',
'always_latch', 'and', 'assert', 'assign', 'assume', 'automatic',
'before', 'begin', 'bind', 'bins', 'binsof', 'break', 'buf',
'bufif0', 'bufif1', 'case', 'casex', 'casez', 'cell',
'checker', 'clocking', 'cmos', 'config',
'constraint', 'context', 'continue', 'cover', 'covergroup',
'coverpoint', 'cross', 'deassign', 'default', 'defparam', 'design',
'disable', 'do', 'edge', 'else', 'end', 'endcase',
'endchecker', 'endclocking', 'endconfig', 'endfunction',
'endgenerate', 'endgroup', 'endinterface', 'endmodule', 'endpackage',
'endprimitive', 'endprogram', 'endproperty', 'endsequence',
'endspecify', 'endtable', 'endtask', 'enum', 'eventually',
'expect', 'export', 'extern', 'final', 'first_match',
'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin', 'function',
'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff',
'ifnone', 'ignore_bins', 'illegal_bins', 'implies', 'implements', 'import',
'incdir', 'include', 'initial', 'inout', 'input',
'instance', 'interconnect', 'interface', 'intersect', 'join',
'join_any', 'join_none', 'large', 'let', 'liblist', 'library',
'local', 'localparam', 'macromodule', 'matches',
'medium', 'modport', 'module', 'nand', 'negedge', 'nettype', 'new', 'nexttime',
'nmos', 'nor', 'noshowcancelled', 'not', 'notif0', 'notif1', 'null',
'or', 'output', 'package', 'packed', 'parameter', 'pmos', 'posedge',
'primitive', 'priority', 'program', 'property', 'protected', 'pull0',
'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect',
'pulsestyle_onevent', 'pure', 'rand', 'randc', 'randcase',
'randsequence', 'rcmos', 'ref',
'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos',
'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually',
's_nexttime', 's_until', 's_until_with', 'scalared', 'sequence',
'showcancelled', 'small', 'soft', 'solve',
'specify', 'specparam', 'static', 'strong', 'strong0',
'strong1', 'struct', 'super', 'sync_accept_on',
'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout',
'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1',
'typedef', 'union', 'unique', 'unique0', 'until',
'until_with', 'untyped', 'use', 'vectored',
'virtual', 'wait', 'wait_order', 'weak', 'weak0',
'weak1', 'while', 'wildcard', 'with', 'within',
'xnor', 'xor'),
suffix=r'\b'),
Keyword),
(r'(class)(\s+)([a-zA-Z_]\w*)',
bygroups(Keyword.Declaration, Whitespace, Name.Class)),
(r'(extends)(\s+)([a-zA-Z_]\w*)',
bygroups(Keyword.Declaration, Whitespace, Name.Class)),
(r'(endclass\b)(?:(\s*)(:)(\s*)([a-zA-Z_]\w*))?',
bygroups(Keyword.Declaration, Whitespace, Punctuation, Whitespace, Name.Class)),
(words((
# Variable types
'bit', 'byte', 'chandle', 'const', 'event', 'int', 'integer',
'logic', 'longint', 'real', 'realtime', 'reg', 'shortint',
'shortreal', 'signed', 'string', 'time', 'type', 'unsigned',
'var', 'void',
# Net types
'supply0', 'supply1', 'tri', 'triand', 'trior', 'trireg',
'tri0', 'tri1', 'uwire', 'wand', 'wire', 'wor'),
suffix=r'\b'),
Keyword.Type),
(words((
'`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine',
'`default_nettype', '`define', '`else', '`elsif', '`end_keywords',
'`endcelldefine', '`endif', '`ifdef', '`ifndef', '`include',
'`line', '`nounconnected_drive', '`pragma', '`resetall',
'`timescale', '`unconnected_drive', '`undef', '`undefineall'),
suffix=r'\b'),
Comment.Preproc),
(words((
# Simulation control tasks (20.2)
'$exit', '$finish', '$stop',
# Simulation time functions (20.3)
'$realtime', '$stime', '$time',
# Timescale tasks (20.4)
'$printtimescale', '$timeformat',
# Conversion functions
'$bitstoreal', '$bitstoshortreal', '$cast', '$itor',
'$realtobits', '$rtoi', '$shortrealtobits', '$signed',
'$unsigned',
# Data query functions (20.6)
'$bits', '$isunbounded', '$typename',
# Array query functions (20.7)
'$dimensions', '$high', '$increment', '$left', '$low', '$right',
'$size', '$unpacked_dimensions',
# Math functions (20.8)
'$acos', '$acosh', '$asin', '$asinh', '$atan', '$atan2',
'$atanh', '$ceil', '$clog2', '$cos', '$cosh', '$exp', '$floor',
'$hypot', '$ln', '$log10', '$pow', '$sin', '$sinh', '$sqrt',
'$tan', '$tanh',
# Bit vector system functions (20.9)
'$countbits', '$countones', '$isunknown', '$onehot', '$onehot0',
# Severity tasks (20.10)
'$info', '$error', '$fatal', '$warning',
# Assertion control tasks (20.12)
'$assertcontrol', '$assertfailoff', '$assertfailon',
'$assertkill', '$assertnonvacuouson', '$assertoff', '$asserton',
'$assertpassoff', '$assertpasson', '$assertvacuousoff',
# Sampled value system functions (20.13)
'$changed', '$changed_gclk', '$changing_gclk', '$falling_gclk',
'$fell', '$fell_gclk', '$future_gclk', '$past', '$past_gclk',
'$rising_gclk', '$rose', '$rose_gclk', '$sampled', '$stable',
'$stable_gclk', '$steady_gclk',
# Coverage control functions (20.14)
'$coverage_control', '$coverage_get', '$coverage_get_max',
'$coverage_merge', '$coverage_save', '$get_coverage',
'$load_coverage_db', '$set_coverage_db_name',
# Probabilistic distribution functions (20.15)
'$dist_chi_square', '$dist_erlang', '$dist_exponential',
'$dist_normal', '$dist_poisson', '$dist_t', '$dist_uniform',
'$random',
# Stochastic analysis tasks and functions (20.16)
'$q_add', '$q_exam', '$q_full', '$q_initialize', '$q_remove',
# PLA modeling tasks (20.17)
'$async$and$array', '$async$and$plane', '$async$nand$array',
'$async$nand$plane', '$async$nor$array', '$async$nor$plane',
'$async$or$array', '$async$or$plane', '$sync$and$array',
'$sync$and$plane', '$sync$nand$array', '$sync$nand$plane',
'$sync$nor$array', '$sync$nor$plane', '$sync$or$array',
'$sync$or$plane',
# Miscellaneous tasks and functions (20.18)
'$system',
# Display tasks (21.2)
'$display', '$displayb', '$displayh', '$displayo', '$monitor',
'$monitorb', '$monitorh', '$monitoro', '$monitoroff',
'$monitoron', '$strobe', '$strobeb', '$strobeh', '$strobeo',
'$write', '$writeb', '$writeh', '$writeo',
# File I/O tasks and functions (21.3)
'$fclose', '$fdisplay', '$fdisplayb', '$fdisplayh',
'$fdisplayo', '$feof', '$ferror', '$fflush', '$fgetc', '$fgets',
'$fmonitor', '$fmonitorb', '$fmonitorh', '$fmonitoro', '$fopen',
'$fread', '$fscanf', '$fseek', '$fstrobe', '$fstrobeb',
'$fstrobeh', '$fstrobeo', '$ftell', '$fwrite', '$fwriteb',
'$fwriteh', '$fwriteo', '$rewind', '$sformat', '$sformatf',
'$sscanf', '$swrite', '$swriteb', '$swriteh', '$swriteo',
'$ungetc',
# Memory load tasks (21.4)
'$readmemb', '$readmemh',
# Memory dump tasks (21.5)
'$writememb', '$writememh',
# Command line input (21.6)
'$test$plusargs', '$value$plusargs',
# VCD tasks (21.7)
'$dumpall', '$dumpfile', '$dumpflush', '$dumplimit', '$dumpoff',
'$dumpon', '$dumpports', '$dumpportsall', '$dumpportsflush',
'$dumpportslimit', '$dumpportsoff', '$dumpportson', '$dumpvars',
), suffix=r'\b'),
Name.Builtin),
(r'[a-zA-Z_]\w*:(?!:)', Name.Label),
(r'\$?[a-zA-Z_]\w*', Name),
(r'\\(\S+)', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?$', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Whitespace, '#pop'),
],
'import': [
(r'[\w:]+\*?', Name.Namespace, '#pop')
]
}
class VhdlLexer(RegexLexer):
"""
For VHDL source code.
.. versionadded:: 1.5
"""
name = 'vhdl'
aliases = ['vhdl']
filenames = ['*.vhdl', '*.vhd']
mimetypes = ['text/x-vhdl']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Whitespace),
(r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
(r'--.*?$', Comment.Single),
(r"'(U|X|0|1|Z|W|L|H|-)'", String.Char),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r"'[a-z_]\w*", Name.Attribute),
(r'[()\[\],.;\']', Punctuation),
(r'"[^\n\\"]*"', String),
(r'(library)(\s+)([a-z_]\w*)',
bygroups(Keyword, Whitespace, Name.Namespace)),
(r'(use)(\s+)(entity)', bygroups(Keyword, Whitespace, Keyword)),
(r'(use)(\s+)([a-z_][\w.]*\.)(all)',
bygroups(Keyword, Whitespace, Name.Namespace, Keyword)),
(r'(use)(\s+)([a-z_][\w.]*)',
bygroups(Keyword, Whitespace, Name.Namespace)),
(r'(std|ieee)(\.[a-z_]\w*)',
bygroups(Name.Namespace, Name.Namespace)),
(words(('std', 'ieee', 'work'), suffix=r'\b'),
Name.Namespace),
(r'(entity|component)(\s+)([a-z_]\w*)',
bygroups(Keyword, Whitespace, Name.Class)),
(r'(architecture|configuration)(\s+)([a-z_]\w*)(\s+)'
r'(of)(\s+)([a-z_]\w*)(\s+)(is)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace, Keyword, Whitespace,
Name.Class, Whitespace, Keyword)),
(r'([a-z_]\w*)(:)(\s+)(process|for)',
bygroups(Name.Class, Operator, Whitespace, Keyword)),
(r'(end)(\s+)', bygroups(using(this), Whitespace), 'endblock'),
include('types'),
include('keywords'),
include('numbers'),
(r'[a-z_]\w*', Name),
],
'endblock': [
include('keywords'),
(r'[a-z_]\w*', Name.Class),
(r'\s+', Whitespace),
(r';', Punctuation, '#pop'),
],
'types': [
(words((
'boolean', 'bit', 'character', 'severity_level', 'integer', 'time',
'delay_length', 'natural', 'positive', 'string', 'bit_vector',
'file_open_kind', 'file_open_status', 'std_ulogic', 'std_ulogic_vector',
'std_logic', 'std_logic_vector', 'signed', 'unsigned'), suffix=r'\b'),
Keyword.Type),
],
'keywords': [
(words((
'abs', 'access', 'after', 'alias', 'all', 'and',
'architecture', 'array', 'assert', 'attribute', 'begin', 'block',
'body', 'buffer', 'bus', 'case', 'component', 'configuration',
'constant', 'disconnect', 'downto', 'else', 'elsif', 'end',
'entity', 'exit', 'file', 'for', 'function', 'generate',
'generic', 'group', 'guarded', 'if', 'impure', 'in',
'inertial', 'inout', 'is', 'label', 'library', 'linkage',
'literal', 'loop', 'map', 'mod', 'nand', 'new',
'next', 'nor', 'not', 'null', 'of', 'on',
'open', 'or', 'others', 'out', 'package', 'port',
'postponed', 'procedure', 'process', 'pure', 'range', 'record',
'register', 'reject', 'rem', 'return', 'rol', 'ror', 'select',
'severity', 'signal', 'shared', 'sla', 'sll', 'sra',
'srl', 'subtype', 'then', 'to', 'transport', 'type',
'units', 'until', 'use', 'variable', 'wait', 'when',
'while', 'with', 'xnor', 'xor'), suffix=r'\b'),
Keyword),
],
'numbers': [
(r'\d{1,2}#[0-9a-f_]+#?', Number.Integer),
(r'\d+', Number.Integer),
(r'(\d+\.\d*|\.\d+|\d+)E[+-]?\d+', Number.Float),
(r'X"[0-9a-f_]+"', Number.Hex),
(r'O"[0-7_]+"', Number.Oct),
(r'B"[01_]+"', Number.Bin),
],
}
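# Hedged usage sketch (editor's addition): the filenames patterns declared above are
# what pygments' registry uses to resolve a lexer for a given file name.
def _example_lookup_hdl_lexers():  # hypothetical helper, illustration only
    from pygments.lexers import get_lexer_for_filename
    assert isinstance(get_lexer_for_filename("counter.vhd"), VhdlLexer)
    assert isinstance(get_lexer_for_filename("alu.sv"), SystemVerilogLexer)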
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.videointelligence_v1.types import video_intelligence
from .transports.base import VideoIntelligenceServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import VideoIntelligenceServiceGrpcTransport
from .transports.grpc_asyncio import VideoIntelligenceServiceGrpcAsyncIOTransport
class VideoIntelligenceServiceClientMeta(type):
"""Metaclass for the VideoIntelligenceService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[VideoIntelligenceServiceTransport]]
_transport_registry["grpc"] = VideoIntelligenceServiceGrpcTransport
_transport_registry["grpc_asyncio"] = VideoIntelligenceServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[VideoIntelligenceServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
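# Hedged illustration (editor's addition): transport selection as implemented above --
# an explicit label picks that entry from the registry, while no label falls back to
# the first registered transport ("grpc"). VideoIntelligenceServiceClient is defined
# just below; the helper only resolves it when called.
def _example_pick_transport(label=None):  # hypothetical helper, illustration only
    return VideoIntelligenceServiceClient.get_transport_class(label)
# _example_pick_transport("grpc_asyncio") -> VideoIntelligenceServiceGrpcAsyncIOTransport
# _example_pick_transport()               -> VideoIntelligenceServiceGrpcTransport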
class VideoIntelligenceServiceClient(metaclass=VideoIntelligenceServiceClientMeta):
"""Service that implements the Video Intelligence API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
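# Illustrative behaviour of _get_default_mtls_endpoint (values are examples only):
#   _get_default_mtls_endpoint("videointelligence.googleapis.com")
#       -> "videointelligence.mtls.googleapis.com"
#   _get_default_mtls_endpoint("videointelligence.sandbox.googleapis.com")
#       -> "videointelligence.mtls.sandbox.googleapis.com"
#   Endpoints that already contain ".mtls." or that are not *.googleapis.com
#   are returned unchanged.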
DEFAULT_ENDPOINT = "videointelligence.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VideoIntelligenceServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VideoIntelligenceServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> VideoIntelligenceServiceTransport:
"""Returns the transport used by the client instance.
Returns:
VideoIntelligenceServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
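# Illustrative round trip for the path helpers above (names are placeholders):
#   common_location_path("my-project", "us-east1")
#       -> "projects/my-project/locations/us-east1"
#   parse_common_location_path("projects/my-project/locations/us-east1")
#       -> {"project": "my-project", "location": "us-east1"}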
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
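# Summary of the resolution above (a sketch, not additional behaviour):
#   - client cert source: only consulted when GOOGLE_API_USE_CLIENT_CERTIFICATE
#     is "true"; an explicit client_options.client_cert_source wins over the
#     default certificate discovered by the google.auth mtls helpers.
#   - endpoint: an explicit client_options.api_endpoint always wins; otherwise
#     "always" -> DEFAULT_MTLS_ENDPOINT, "never" -> DEFAULT_ENDPOINT, and
#     "auto" -> DEFAULT_MTLS_ENDPOINT only when a cert source was found.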
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, VideoIntelligenceServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, VideoIntelligenceServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (automatically switch to the
default mTLS endpoint if a client certificate is present; this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, VideoIntelligenceServiceTransport):
# transport is a VideoIntelligenceServiceTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def annotate_video(
self,
request: Union[video_intelligence.AnnotateVideoRequest, dict] = None,
*,
input_uri: str = None,
features: Sequence[video_intelligence.Feature] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Performs asynchronous video annotation. Progress and results can
be retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains
``AnnotateVideoProgress`` (progress). ``Operation.response``
contains ``AnnotateVideoResponse`` (results).
.. code-block:: python
from google.cloud import videointelligence_v1
def sample_annotate_video():
# Create a client
client = videointelligence_v1.VideoIntelligenceServiceClient()
# Initialize request argument(s)
request = videointelligence_v1.AnnotateVideoRequest(
features="PERSON_DETECTION",
)
# Make the request
operation = client.annotate_video(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.videointelligence_v1.types.AnnotateVideoRequest, dict]):
The request object. Video annotation request.
input_uri (str):
Input video location. Currently, only `Cloud
Storage <https://cloud.google.com/storage/>`__ URIs are
supported. URIs must be specified in the following
format: ``gs://bucket-id/object-id`` (other URI formats
return
[google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]).
For more information, see `Request
URIs <https://cloud.google.com/storage/docs/request-endpoints>`__.
To identify multiple videos, a video URI may include
wildcards in the ``object-id``. Supported wildcards: '*'
to match 0 or more characters; '?' to match 1 character.
If unset, the input video should be embedded in the
request as ``input_content``. If set, ``input_content``
must be unset.
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
features (Sequence[google.cloud.videointelligence_v1.types.Feature]):
Required. Requested video annotation
features.
This corresponds to the ``features`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.videointelligence_v1.types.AnnotateVideoResponse` Video annotation response. Included in the response
field of the Operation returned by the GetOperation
call of the google::longrunning::Operations service.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([input_uri, features])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a video_intelligence.AnnotateVideoRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, video_intelligence.AnnotateVideoRequest):
request = video_intelligence.AnnotateVideoRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if input_uri is not None:
request.input_uri = input_uri
if features is not None:
request.features = features
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.annotate_video]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
video_intelligence.AnnotateVideoResponse,
metadata_type=video_intelligence.AnnotateVideoProgress,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-videointelligence",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("VideoIntelligenceServiceClient",)
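# Hedged usage sketch (not part of the generated client): one way to start an
# annotation job with the flattened arguments. The bucket URI is a placeholder
# and ambient Application Default Credentials are assumed.
if __name__ == "__main__":  # pragma: no cover
    client = VideoIntelligenceServiceClient()
    op = client.annotate_video(
        input_uri="gs://example-bucket/example-video.mp4",  # placeholder URI
        features=["LABEL_DETECTION"],
    )
    print("Waiting for operation to complete...")
    print(op.result())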
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'ActiveNodes.ActiveNode.Ltrace.AllocationParams' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.Ltrace.AllocationParams',
False,
[
_MetaInfoClassMember('mode', REFERENCE_ENUM_CLASS, 'InfraLtraceModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_ltrace_cfg', 'InfraLtraceModeEnum',
[], [],
''' Select an allocation mode (static:1, dynamic
:2)
''',
'mode',
'Cisco-IOS-XR-infra-ltrace-cfg', False),
_MetaInfoClassMember('scale-factor', REFERENCE_ENUM_CLASS, 'InfraLtraceScaleEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_ltrace_cfg', 'InfraLtraceScaleEnum',
[], [],
''' Select a scaling down factor
''',
'scale_factor',
'Cisco-IOS-XR-infra-ltrace-cfg', False),
],
'Cisco-IOS-XR-infra-ltrace-cfg',
'allocation-params',
_yang_ns._namespaces['Cisco-IOS-XR-infra-ltrace-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.Ltrace' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.Ltrace',
False,
[
_MetaInfoClassMember('allocation-params', REFERENCE_CLASS, 'AllocationParams' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.Ltrace.AllocationParams',
[], [],
''' Select Ltrace mode and scale-factor
''',
'allocation_params',
'Cisco-IOS-XR-infra-ltrace-cfg', False),
],
'Cisco-IOS-XR-infra-ltrace-cfg',
'ltrace',
_yang_ns._namespaces['Cisco-IOS-XR-infra-ltrace-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps.Np' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps.Np',
False,
[
_MetaInfoClassMember('id1', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' none
''',
'id1',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', True),
_MetaInfoClassMember('rate', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Packets per second
''',
'rate',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'np',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps',
False,
[
_MetaInfoClassMember('np', REFERENCE_LIST, 'Np' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps.Np',
[], [],
''' Table of NP names
''',
'np',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'nps',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable',
False,
[
_MetaInfoClassMember('id1', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' none
''',
'id1',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', True),
_MetaInfoClassMember('nps', REFERENCE_CLASS, 'Nps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps',
[], [],
''' NP name
''',
'nps',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'ipolicer-local-table',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables',
False,
[
_MetaInfoClassMember('ipolicer-local-table', REFERENCE_LIST, 'IpolicerLocalTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable',
[], [],
''' Pre IFIB (Internal Forwarding Information
Base) configuration table
''',
'ipolicer_local_table',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'ipolicer-local-tables',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows.Flow.Precedences' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows.Flow.Precedences',
False,
[
_MetaInfoClassMember('precedence', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Precedence values
''',
'precedence',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False, [
_MetaInfoClassMember('precedence', REFERENCE_LEAFLIST, 'LptsPreIFibPrecedenceNumberEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_pre_ifib_cfg', 'LptsPreIFibPrecedenceNumberEnum',
[], [],
''' Precedence values
''',
'precedence',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False, max_elements=8),
_MetaInfoClassMember('precedence', REFERENCE_LEAFLIST, 'int' , None, None,
[('0', '7')], [],
''' Precedence values
''',
'precedence',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False, max_elements=8),
], max_elements=8),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'precedences',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows.Flow' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows.Flow',
False,
[
_MetaInfoClassMember('flow-type', REFERENCE_ENUM_CLASS, 'LptsFlowEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_pre_ifib_cfg', 'LptsFlowEnum',
[], [],
''' LPTS Flow Type
''',
'flow_type',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', True),
_MetaInfoClassMember('precedences', REFERENCE_CLASS, 'Precedences' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows.Flow.Precedences',
[], [],
''' TOS Precedence value(s)
''',
'precedences',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
_MetaInfoClassMember('rate', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Configured rate value
''',
'rate',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'flow',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows',
False,
[
_MetaInfoClassMember('flow', REFERENCE_LIST, 'Flow' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows.Flow',
[], [],
''' selected flow type
''',
'flow',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'flows',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enabled
''',
'enable',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
_MetaInfoClassMember('flows', REFERENCE_CLASS, 'Flows' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows',
[], [],
''' Table for Flows
''',
'flows',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'ipolicer-local',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.LptsLocal' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.LptsLocal',
False,
[
_MetaInfoClassMember('ipolicer-local', REFERENCE_CLASS, 'IpolicerLocal' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal',
[], [],
''' Node specific Pre IFIB (Internal Forwarding
Information Base) Configuration
''',
'ipolicer_local',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
_MetaInfoClassMember('ipolicer-local-tables', REFERENCE_CLASS, 'IpolicerLocalTables' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables',
[], [],
''' Node specific Pre IFIB (Internal Forwarding
Information Base) Configuration
''',
'ipolicer_local_tables',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'lpts-local',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.SsrpGroup.Groups.Group' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.SsrpGroup.Groups.Group',
False,
[
_MetaInfoClassMember('group-id', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' The identifier for this group
''',
'group_id',
'Cisco-IOS-XR-ppp-ma-ssrp-cfg', True),
_MetaInfoClassMember('profile', ATTRIBUTE, 'str' , None, None,
[], [],
''' This specifies the SSRP profile to use for
this group
''',
'profile',
'Cisco-IOS-XR-ppp-ma-ssrp-cfg', False),
],
'Cisco-IOS-XR-ppp-ma-ssrp-cfg',
'group',
_yang_ns._namespaces['Cisco-IOS-XR-ppp-ma-ssrp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.SsrpGroup.Groups' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.SsrpGroup.Groups',
False,
[
_MetaInfoClassMember('group', REFERENCE_LIST, 'Group' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.SsrpGroup.Groups.Group',
[], [],
''' SSRP Group configuration
''',
'group',
'Cisco-IOS-XR-ppp-ma-ssrp-cfg', False),
],
'Cisco-IOS-XR-ppp-ma-ssrp-cfg',
'groups',
_yang_ns._namespaces['Cisco-IOS-XR-ppp-ma-ssrp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.SsrpGroup' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.SsrpGroup',
False,
[
_MetaInfoClassMember('groups', REFERENCE_CLASS, 'Groups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.SsrpGroup.Groups',
[], [],
''' Table of SSRP Group configuration
''',
'groups',
'Cisco-IOS-XR-ppp-ma-ssrp-cfg', False),
],
'Cisco-IOS-XR-ppp-ma-ssrp-cfg',
'ssrp-group',
_yang_ns._namespaces['Cisco-IOS-XR-ppp-ma-ssrp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold.MemoryThreshold' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold.MemoryThreshold',
False,
[
_MetaInfoClassMember('critical', ATTRIBUTE, 'int' , None, None,
[('3', '40')], [],
''' Threshold, Range(3, severe)
''',
'critical',
'Cisco-IOS-XR-watchd-cfg', False),
_MetaInfoClassMember('minor', ATTRIBUTE, 'int' , None, None,
[('5', '40')], [],
''' Threshold, Range(5, 40)
''',
'minor',
'Cisco-IOS-XR-watchd-cfg', False),
_MetaInfoClassMember('severe', ATTRIBUTE, 'int' , None, None,
[('4', '40')], [],
''' Threshold, Range(4, minor)
''',
'severe',
'Cisco-IOS-XR-watchd-cfg', False),
],
'Cisco-IOS-XR-watchd-cfg',
'memory-threshold',
_yang_ns._namespaces['Cisco-IOS-XR-watchd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold',
False,
[
_MetaInfoClassMember('memory-threshold', REFERENCE_CLASS, 'MemoryThreshold' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold.MemoryThreshold',
[], [],
''' Memory thresholds
''',
'memory_threshold',
'Cisco-IOS-XR-watchd-cfg', False),
],
'Cisco-IOS-XR-watchd-cfg',
'Cisco-IOS-XR-watchd-cfg_watchdog-node-threshold',
_yang_ns._namespaces['Cisco-IOS-XR-watchd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.CiscoIosXrWdCfg_WatchdogNodeThreshold.MemoryThreshold' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.CiscoIosXrWdCfg_WatchdogNodeThreshold.MemoryThreshold',
False,
[
_MetaInfoClassMember('critical', ATTRIBUTE, 'int' , None, None,
[('3', '40')], [],
''' Threshold, Range(3, severe)
''',
'critical',
'Cisco-IOS-XR-wd-cfg', False),
_MetaInfoClassMember('minor', ATTRIBUTE, 'int' , None, None,
[('5', '40')], [],
''' Threshold, Range(5, 40)
''',
'minor',
'Cisco-IOS-XR-wd-cfg', False),
_MetaInfoClassMember('severe', ATTRIBUTE, 'int' , None, None,
[('4', '40')], [],
''' Threshold, Range(4, minor)
''',
'severe',
'Cisco-IOS-XR-wd-cfg', False),
],
'Cisco-IOS-XR-wd-cfg',
'memory-threshold',
_yang_ns._namespaces['Cisco-IOS-XR-wd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode.CiscoIosXrWdCfg_WatchdogNodeThreshold' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode.CiscoIosXrWdCfg_WatchdogNodeThreshold',
False,
[
_MetaInfoClassMember('memory-threshold', REFERENCE_CLASS, 'MemoryThreshold' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.CiscoIosXrWdCfg_WatchdogNodeThreshold.MemoryThreshold',
[], [],
''' Memory thresholds
''',
'memory_threshold',
'Cisco-IOS-XR-wd-cfg', False),
],
'Cisco-IOS-XR-wd-cfg',
'Cisco-IOS-XR-wd-cfg_watchdog-node-threshold',
_yang_ns._namespaces['Cisco-IOS-XR-wd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes.ActiveNode' : {
'meta_info' : _MetaInfoClass('ActiveNodes.ActiveNode',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], [b'([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' The identifier for this node
''',
'node_name',
'Cisco-IOS-XR-config-mda-cfg', True),
_MetaInfoClassMember('Cisco-IOS-XR-watchd-cfg_watchdog-node-threshold', REFERENCE_CLASS, 'CiscoIosXrWatchdCfg_WatchdogNodeThreshold' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold',
[], [],
''' watchdog node threshold
''',
'cisco_ios_xr_watchd_cfg_watchdog_node_threshold',
'Cisco-IOS-XR-watchd-cfg', False),
_MetaInfoClassMember('Cisco-IOS-XR-wd-cfg_watchdog-node-threshold', REFERENCE_CLASS, 'CiscoIosXrWdCfg_WatchdogNodeThreshold' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.CiscoIosXrWdCfg_WatchdogNodeThreshold',
[], [],
''' Watchdog threshold configuration
''',
'cisco_ios_xr_wd_cfg_watchdog_node_threshold',
'Cisco-IOS-XR-wd-cfg', False),
_MetaInfoClassMember('lpts-local', REFERENCE_CLASS, 'LptsLocal' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.LptsLocal',
[], [],
''' lpts node specific configuration commands
''',
'lpts_local',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
_MetaInfoClassMember('ltrace', REFERENCE_CLASS, 'Ltrace' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.Ltrace',
[], [],
''' Ltrace Memory configuration
''',
'ltrace',
'Cisco-IOS-XR-infra-ltrace-cfg', False),
_MetaInfoClassMember('ssrp-group', REFERENCE_CLASS, 'SsrpGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode.SsrpGroup',
[], [],
''' Per-node SSRP configuration data
''',
'ssrp_group',
'Cisco-IOS-XR-ppp-ma-ssrp-cfg', False),
],
'Cisco-IOS-XR-config-mda-cfg',
'active-node',
_yang_ns._namespaces['Cisco-IOS-XR-config-mda-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'ActiveNodes' : {
'meta_info' : _MetaInfoClass('ActiveNodes',
False,
[
_MetaInfoClassMember('active-node', REFERENCE_LIST, 'ActiveNode' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'ActiveNodes.ActiveNode',
[], [],
''' The configuration for an active node
''',
'active_node',
'Cisco-IOS-XR-config-mda-cfg', False),
],
'Cisco-IOS-XR-config-mda-cfg',
'active-nodes',
_yang_ns._namespaces['Cisco-IOS-XR-config-mda-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.Ltrace.AllocationParams' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.Ltrace.AllocationParams',
False,
[
_MetaInfoClassMember('mode', REFERENCE_ENUM_CLASS, 'InfraLtraceModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_ltrace_cfg', 'InfraLtraceModeEnum',
[], [],
''' Select an allocation mode (static:1, dynamic
:2)
''',
'mode',
'Cisco-IOS-XR-infra-ltrace-cfg', False),
_MetaInfoClassMember('scale-factor', REFERENCE_ENUM_CLASS, 'InfraLtraceScaleEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_ltrace_cfg', 'InfraLtraceScaleEnum',
[], [],
''' Select a scaling down factor
''',
'scale_factor',
'Cisco-IOS-XR-infra-ltrace-cfg', False),
],
'Cisco-IOS-XR-infra-ltrace-cfg',
'allocation-params',
_yang_ns._namespaces['Cisco-IOS-XR-infra-ltrace-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.Ltrace' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.Ltrace',
False,
[
_MetaInfoClassMember('allocation-params', REFERENCE_CLASS, 'AllocationParams' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.Ltrace.AllocationParams',
[], [],
''' Select Ltrace mode and scale-factor
''',
'allocation_params',
'Cisco-IOS-XR-infra-ltrace-cfg', False),
],
'Cisco-IOS-XR-infra-ltrace-cfg',
'ltrace',
_yang_ns._namespaces['Cisco-IOS-XR-infra-ltrace-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps.Np' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps.Np',
False,
[
_MetaInfoClassMember('id1', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' none
''',
'id1',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', True),
_MetaInfoClassMember('rate', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Packets per second
''',
'rate',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'np',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps',
False,
[
_MetaInfoClassMember('np', REFERENCE_LIST, 'Np' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps.Np',
[], [],
''' Table of NP names
''',
'np',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'nps',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable',
False,
[
_MetaInfoClassMember('id1', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' none
''',
'id1',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', True),
_MetaInfoClassMember('nps', REFERENCE_CLASS, 'Nps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps',
[], [],
''' NP name
''',
'nps',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'ipolicer-local-table',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables',
False,
[
_MetaInfoClassMember('ipolicer-local-table', REFERENCE_LIST, 'IpolicerLocalTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable',
[], [],
''' Pre IFIB (Internal Forwarding Information
Base) configuration table
''',
'ipolicer_local_table',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'ipolicer-local-tables',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows.Flow.Precedences' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows.Flow.Precedences',
False,
[
_MetaInfoClassMember('precedence', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Precedence values
''',
'precedence',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False, [
_MetaInfoClassMember('precedence', REFERENCE_LEAFLIST, 'LptsPreIFibPrecedenceNumberEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_pre_ifib_cfg', 'LptsPreIFibPrecedenceNumberEnum',
[], [],
''' Precedence values
''',
'precedence',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False, max_elements=8),
_MetaInfoClassMember('precedence', REFERENCE_LEAFLIST, 'int' , None, None,
[('0', '7')], [],
''' Precedence values
''',
'precedence',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False, max_elements=8),
], max_elements=8),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'precedences',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows.Flow' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows.Flow',
False,
[
_MetaInfoClassMember('flow-type', REFERENCE_ENUM_CLASS, 'LptsFlowEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_pre_ifib_cfg', 'LptsFlowEnum',
[], [],
''' LPTS Flow Type
''',
'flow_type',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', True),
_MetaInfoClassMember('precedences', REFERENCE_CLASS, 'Precedences' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows.Flow.Precedences',
[], [],
''' TOS Precedence value(s)
''',
'precedences',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
_MetaInfoClassMember('rate', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Configured rate value
''',
'rate',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'flow',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows',
False,
[
_MetaInfoClassMember('flow', REFERENCE_LIST, 'Flow' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows.Flow',
[], [],
''' selected flow type
''',
'flow',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'flows',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enabled
''',
'enable',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
_MetaInfoClassMember('flows', REFERENCE_CLASS, 'Flows' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows',
[], [],
''' Table for Flows
''',
'flows',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'ipolicer-local',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.LptsLocal' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.LptsLocal',
False,
[
_MetaInfoClassMember('ipolicer-local', REFERENCE_CLASS, 'IpolicerLocal' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal',
[], [],
''' Node specific Pre IFIB (Internal Forwarding
Information Base) Configuration
''',
'ipolicer_local',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
_MetaInfoClassMember('ipolicer-local-tables', REFERENCE_CLASS, 'IpolicerLocalTables' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables',
[], [],
''' Node specific Pre IFIB (Internal Forwarding
Information Base) Configuration
''',
'ipolicer_local_tables',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
],
'Cisco-IOS-XR-lpts-pre-ifib-cfg',
'lpts-local',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-pre-ifib-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold.MemoryThreshold' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold.MemoryThreshold',
False,
[
_MetaInfoClassMember('critical', ATTRIBUTE, 'int' , None, None,
[('3', '40')], [],
''' Threshold, Range(3, severe)
''',
'critical',
'Cisco-IOS-XR-watchd-cfg', False),
_MetaInfoClassMember('minor', ATTRIBUTE, 'int' , None, None,
[('5', '40')], [],
''' Threshold, Range(5, 40)
''',
'minor',
'Cisco-IOS-XR-watchd-cfg', False),
_MetaInfoClassMember('severe', ATTRIBUTE, 'int' , None, None,
[('4', '40')], [],
''' Threshold, Range(4, minor)
''',
'severe',
'Cisco-IOS-XR-watchd-cfg', False),
],
'Cisco-IOS-XR-watchd-cfg',
'memory-threshold',
_yang_ns._namespaces['Cisco-IOS-XR-watchd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold',
False,
[
_MetaInfoClassMember('memory-threshold', REFERENCE_CLASS, 'MemoryThreshold' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold.MemoryThreshold',
[], [],
''' Memory thresholds
''',
'memory_threshold',
'Cisco-IOS-XR-watchd-cfg', False),
],
'Cisco-IOS-XR-watchd-cfg',
'Cisco-IOS-XR-watchd-cfg_watchdog-node-threshold',
_yang_ns._namespaces['Cisco-IOS-XR-watchd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWdCfg_WatchdogNodeThreshold.MemoryThreshold' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWdCfg_WatchdogNodeThreshold.MemoryThreshold',
False,
[
_MetaInfoClassMember('critical', ATTRIBUTE, 'int' , None, None,
[('3', '40')], [],
''' Threshold, Range(3, severe)
''',
'critical',
'Cisco-IOS-XR-wd-cfg', False),
_MetaInfoClassMember('minor', ATTRIBUTE, 'int' , None, None,
[('5', '40')], [],
''' Threshold, Range(5, 40)
''',
'minor',
'Cisco-IOS-XR-wd-cfg', False),
_MetaInfoClassMember('severe', ATTRIBUTE, 'int' , None, None,
[('4', '40')], [],
''' Threshold, Range(4, minor)
''',
'severe',
'Cisco-IOS-XR-wd-cfg', False),
],
'Cisco-IOS-XR-wd-cfg',
'memory-threshold',
_yang_ns._namespaces['Cisco-IOS-XR-wd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWdCfg_WatchdogNodeThreshold' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWdCfg_WatchdogNodeThreshold',
False,
[
_MetaInfoClassMember('memory-threshold', REFERENCE_CLASS, 'MemoryThreshold' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWdCfg_WatchdogNodeThreshold.MemoryThreshold',
[], [],
''' Memory thresholds
''',
'memory_threshold',
'Cisco-IOS-XR-wd-cfg', False),
],
'Cisco-IOS-XR-wd-cfg',
'Cisco-IOS-XR-wd-cfg_watchdog-node-threshold',
_yang_ns._namespaces['Cisco-IOS-XR-wd-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes.PreconfiguredNode' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes.PreconfiguredNode',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], [b'([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' The identifier for this node
''',
'node_name',
'Cisco-IOS-XR-config-mda-cfg', True),
_MetaInfoClassMember('Cisco-IOS-XR-watchd-cfg_watchdog-node-threshold', REFERENCE_CLASS, 'CiscoIosXrWatchdCfg_WatchdogNodeThreshold' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold',
[], [],
''' watchdog node threshold
''',
'cisco_ios_xr_watchd_cfg_watchdog_node_threshold',
'Cisco-IOS-XR-watchd-cfg', False),
_MetaInfoClassMember('Cisco-IOS-XR-wd-cfg_watchdog-node-threshold', REFERENCE_CLASS, 'CiscoIosXrWdCfg_WatchdogNodeThreshold' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWdCfg_WatchdogNodeThreshold',
[], [],
''' Watchdog threshold configuration
''',
'cisco_ios_xr_wd_cfg_watchdog_node_threshold',
'Cisco-IOS-XR-wd-cfg', False),
_MetaInfoClassMember('lpts-local', REFERENCE_CLASS, 'LptsLocal' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.LptsLocal',
[], [],
''' lpts node specific configuration commands
''',
'lpts_local',
'Cisco-IOS-XR-lpts-pre-ifib-cfg', False),
_MetaInfoClassMember('ltrace', REFERENCE_CLASS, 'Ltrace' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode.Ltrace',
[], [],
''' Ltrace Memory configuration
''',
'ltrace',
'Cisco-IOS-XR-infra-ltrace-cfg', False),
],
'Cisco-IOS-XR-config-mda-cfg',
'preconfigured-node',
_yang_ns._namespaces['Cisco-IOS-XR-config-mda-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
'PreconfiguredNodes' : {
'meta_info' : _MetaInfoClass('PreconfiguredNodes',
False,
[
_MetaInfoClassMember('preconfigured-node', REFERENCE_LIST, 'PreconfiguredNode' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg', 'PreconfiguredNodes.PreconfiguredNode',
[], [],
''' The configuration for a non-active node
''',
'preconfigured_node',
'Cisco-IOS-XR-config-mda-cfg', False),
],
'Cisco-IOS-XR-config-mda-cfg',
'preconfigured-nodes',
_yang_ns._namespaces['Cisco-IOS-XR-config-mda-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_mda_cfg'
),
},
}
_meta_table['ActiveNodes.ActiveNode.Ltrace.AllocationParams']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.Ltrace']['meta_info']
_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps.Np']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps']['meta_info']
_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable']['meta_info']
_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables']['meta_info']
_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows.Flow.Precedences']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows.Flow']['meta_info']
_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows.Flow']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows']['meta_info']
_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal.Flows']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal']['meta_info']
_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocalTables']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.LptsLocal']['meta_info']
_meta_table['ActiveNodes.ActiveNode.LptsLocal.IpolicerLocal']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.LptsLocal']['meta_info']
_meta_table['ActiveNodes.ActiveNode.SsrpGroup.Groups.Group']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.SsrpGroup.Groups']['meta_info']
_meta_table['ActiveNodes.ActiveNode.SsrpGroup.Groups']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.SsrpGroup']['meta_info']
_meta_table['ActiveNodes.ActiveNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold.MemoryThreshold']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold']['meta_info']
_meta_table['ActiveNodes.ActiveNode.CiscoIosXrWdCfg_WatchdogNodeThreshold.MemoryThreshold']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode.CiscoIosXrWdCfg_WatchdogNodeThreshold']['meta_info']
_meta_table['ActiveNodes.ActiveNode.Ltrace']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode']['meta_info']
_meta_table['ActiveNodes.ActiveNode.LptsLocal']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode']['meta_info']
_meta_table['ActiveNodes.ActiveNode.SsrpGroup']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode']['meta_info']
_meta_table['ActiveNodes.ActiveNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode']['meta_info']
_meta_table['ActiveNodes.ActiveNode.CiscoIosXrWdCfg_WatchdogNodeThreshold']['meta_info'].parent =_meta_table['ActiveNodes.ActiveNode']['meta_info']
_meta_table['ActiveNodes.ActiveNode']['meta_info'].parent =_meta_table['ActiveNodes']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.Ltrace.AllocationParams']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode.Ltrace']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps.Np']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable.Nps']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables.IpolicerLocalTable']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows.Flow.Precedences']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows.Flow']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows.Flow']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal.Flows']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocalTables']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal.IpolicerLocal']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold.MemoryThreshold']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWdCfg_WatchdogNodeThreshold.MemoryThreshold']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWdCfg_WatchdogNodeThreshold']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.Ltrace']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.LptsLocal']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWatchdCfg_WatchdogNodeThreshold']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode.CiscoIosXrWdCfg_WatchdogNodeThreshold']['meta_info'].parent =_meta_table['PreconfiguredNodes.PreconfiguredNode']['meta_info']
_meta_table['PreconfiguredNodes.PreconfiguredNode']['meta_info'].parent =_meta_table['PreconfiguredNodes']['meta_info']
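# Hedged illustration (assumes a working YDK install so _yang_ns resolves):
# _meta_table maps a YANG class path to its meta information, and the parent
# links wired up above can be inspected directly.
if __name__ == "__main__":  # pragma: no cover
    child = _meta_table['ActiveNodes.ActiveNode.Ltrace.AllocationParams']['meta_info']
    parent = _meta_table['ActiveNodes.ActiveNode.Ltrace']['meta_info']
    print(child.parent is parent)  # True: AllocationParams is nested under Ltrace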
|
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import abc
import functools
import six
from six.moves import urllib
from keystoneclient import auth
from keystoneclient import exceptions
from keystoneclient.i18n import _
from keystoneclient.openstack.common.apiclient import base
def getid(obj):
"""Return id if argument is a Resource.
Abstracts the common pattern of accepting either an object or an object's ID
(UUID) as a parameter when dealing with relationships.
"""
try:
if obj.uuid:
return obj.uuid
except AttributeError:
pass
try:
return obj.id
except AttributeError:
return obj
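# Illustrative behaviour of getid() (the objects below are hypothetical):
#   getid(user)   -> user.uuid if present, otherwise user.id
#   getid("42")   -> "42" (bare IDs pass through unchanged)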
def filter_none(**kwargs):
"""Remove any entries from a dictionary where the value is None."""
return dict((k, v) for k, v in six.iteritems(kwargs) if v is not None)
def filter_kwargs(f):
@functools.wraps(f)
def func(*args, **kwargs):
new_kwargs = {}
for key, ref in six.iteritems(kwargs):
if ref is None:
# drop null values
continue
id_value = getid(ref)
if id_value != ref:
# If an object with an id was passed, then use the id, e.g.:
# user: user(id=1) becomes user_id: 1
key = '%s_id' % key
new_kwargs[key] = id_value
return f(*args, **new_kwargs)
return func
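# Illustrative sketch of the decorator's effect (names are hypothetical):
#
#   @filter_kwargs
#   def grant_role(role_id, user_id=None, group_id=None): ...
#
#   grant_role(role.id, user=user)   # becomes grant_role(role.id, user_id=user.id)
#   grant_role(role.id, group=None)  # the None entry is dropped entirely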
class Manager(object):
"""Basic manager type providing common operations.
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
:param client: instance of BaseClient descendant for HTTP requests
"""
resource_class = None
def __init__(self, client):
super(Manager, self).__init__()
self.client = client
@property
def api(self):
"""Deprecated. Use `client` instead.
"""
return self.client
def _list(self, url, response_key, obj_class=None, body=None, **kwargs):
"""List the collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'
:param obj_class: class for constructing the returned objects
(self.resource_class will be used by default)
:param body: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param kwargs: Additional arguments will be passed to the request.
"""
if body:
resp, body = self.client.post(url, body=body, **kwargs)
else:
resp, body = self.client.get(url, **kwargs)
if obj_class is None:
obj_class = self.resource_class
data = body[response_key]
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
# unlike other services which just return the list...
try:
data = data['values']
except (KeyError, TypeError):
pass
return [obj_class(self, res, loaded=True) for res in data if res]
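# Illustrative call of _list() from a hypothetical subclass:
#   self._list('/servers', 'servers') performs a GET and returns a list of
#   self.resource_class instances built from body['servers'].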
def _get(self, url, response_key, **kwargs):
"""Get an object from collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'server'
:param kwargs: Additional arguments will be passed to the request.
"""
resp, body = self.client.get(url, **kwargs)
return self.resource_class(self, body[response_key], loaded=True)
def _head(self, url, **kwargs):
"""Retrieve request headers for an object.
:param url: a partial URL, e.g., '/servers'
:param kwargs: Additional arguments will be passed to the request.
"""
resp, body = self.client.head(url, **kwargs)
return resp.status_code == 204
def _create(self, url, body, response_key, return_raw=False, **kwargs):
"""Deprecated. Use `_post` instead.
"""
return self._post(url, body, response_key, return_raw, **kwargs)
def _post(self, url, body, response_key, return_raw=False, **kwargs):
"""Create an object.
:param url: a partial URL, e.g., '/servers'
:param body: data that will be encoded as JSON and sent as the
body of the POST request
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'
:param return_raw: flag to force returning raw JSON instead of
Python object of self.resource_class
:param kwargs: Additional arguments will be passed to the request.
"""
resp, body = self.client.post(url, body=body, **kwargs)
if return_raw:
return body[response_key]
return self.resource_class(self, body[response_key])
def _put(self, url, body=None, response_key=None, **kwargs):
"""Update an object with PUT method.
:param url: a partial URL, e.g., '/servers'
:param body: data that will be encoded as JSON and sent as the
body of the PUT request
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'
:param kwargs: Additional arguments will be passed to the request.
"""
resp, body = self.client.put(url, body=body, **kwargs)
# PUT requests may not return a body
if body is not None:
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _patch(self, url, body=None, response_key=None, **kwargs):
"""Update an object with PATCH method.
:param url: a partial URL, e.g., '/servers'
:param body: data that will be encoded as JSON and passed in the PATCH
request
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'
:param kwargs: Additional arguments will be passed to the request.
"""
resp, body = self.client.patch(url, body=body, **kwargs)
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _delete(self, url, **kwargs):
"""Delete an object.
:param url: a partial URL, e.g., '/servers/my-server'
:param kwargs: Additional arguments will be passed to the request.
"""
return self.client.delete(url, **kwargs)
def _update(self, url, body=None, response_key=None, method="PUT",
**kwargs):
methods = {"PUT": self.client.put,
"POST": self.client.post,
"PATCH": self.client.patch}
try:
resp, body = methods[method](url, body=body,
**kwargs)
except KeyError:
raise exceptions.ClientException(_("Invalid update method: %s")
% method)
# PUT requests may not return a body
if body:
return self.resource_class(self, body[response_key])
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(Manager):
"""Manager with additional `find()`/`findall()` methods."""
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
rl = self.findall(**kwargs)
num = len(rl)
if num == 0:
msg = _("No %(name)s matching %(kwargs)s.") % {
'name': self.resource_class.__name__, 'kwargs': kwargs}
raise exceptions.NotFound(404, msg)
elif num > 1:
raise exceptions.NoUniqueMatch
else:
return rl[0]
def findall(self, **kwargs):
"""Find all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
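# Illustrative sketch (not part of the original module): a concrete manager
# only needs to implement ``list()`` to inherit ``find()``/``findall()``.
# The class name, URL and response key below are assumptions for illustration;
# note that both helpers fetch the whole collection and filter client-side.
#
#     class FlavorManager(ManagerWithFind):
#         resource_class = Resource
#
#         def list(self):
#             return self._list('/flavors', 'flavors')
#
#     # FlavorManager(client).find(name='m1.small') returns the single match,
#     # raises NotFound if nothing matches, or NoUniqueMatch if several do.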
class CrudManager(Manager):
"""Base manager class for manipulating Keystone entities.
Children of this class are expected to define a `collection_key` and `key`.
- `collection_key`: Usually a plural noun by convention (e.g. `entities`);
used to refer collections in both URL's (e.g. `/v3/entities`) and JSON
objects containing a list of member resources (e.g. `{'entities': [{},
{}, {}]}`).
- `key`: Usually a singular noun by convention (e.g. `entity`); used to
refer to an individual member of the collection.
"""
collection_key = None
key = None
base_url = None
def build_url(self, dict_args_in_out=None):
"""Builds a resource URL for the given kwargs.
Given an example collection where `collection_key = 'entities'` and
`key = 'entity'`, the following URL's could be generated.
By default, the URL will represent a collection of entities, e.g.::
/entities
If kwargs contains an `entity_id`, then the URL will represent a
specific member, e.g.::
/entities/{entity_id}
If a `base_url` is provided, the generated URL will be appended to it.
"""
if dict_args_in_out is None:
dict_args_in_out = {}
url = dict_args_in_out.pop('base_url', None) or self.base_url or ''
url += '/%s' % self.collection_key
# do we have a specific entity?
entity_id = dict_args_in_out.pop('%s_id' % self.key, None)
if entity_id is not None:
url += '/%s' % entity_id
return url
@filter_kwargs
def create(self, **kwargs):
url = self.build_url(dict_args_in_out=kwargs)
return self._create(
url,
{self.key: kwargs},
self.key)
@filter_kwargs
def get(self, **kwargs):
return self._get(
self.build_url(dict_args_in_out=kwargs),
self.key)
@filter_kwargs
def head(self, **kwargs):
return self._head(self.build_url(dict_args_in_out=kwargs))
def _build_query(self, params):
return '?%s' % urllib.parse.urlencode(params) if params else ''
def build_key_only_query(self, params_list):
"""Builds a query that does not include values, just keys.
The Identity API has some calls that define queries without values;
this cannot be accomplished with urllib.parse.urlencode(). This
method builds a query string using only the keys.
"""
return '?%s' % '&'.join(params_list) if params_list else ''
@filter_kwargs
def list(self, fallback_to_auth=False, **kwargs):
url = self.build_url(dict_args_in_out=kwargs)
try:
query = self._build_query(kwargs)
url_query = '%(url)s%(query)s' % {'url': url, 'query': query}
return self._list(
url_query,
self.collection_key)
except exceptions.EmptyCatalog:
if fallback_to_auth:
return self._list(
url_query,
self.collection_key,
endpoint_filter={'interface': auth.AUTH_INTERFACE})
else:
raise
@filter_kwargs
def put(self, **kwargs):
return self._update(
self.build_url(dict_args_in_out=kwargs),
method='PUT')
@filter_kwargs
def update(self, **kwargs):
url = self.build_url(dict_args_in_out=kwargs)
return self._update(
url,
{self.key: kwargs},
self.key,
method='PATCH')
@filter_kwargs
def delete(self, **kwargs):
return self._delete(
self.build_url(dict_args_in_out=kwargs))
@filter_kwargs
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``."""
url = self.build_url(dict_args_in_out=kwargs)
query = self._build_query(kwargs)
rl = self._list(
'%(url)s%(query)s' % {
'url': url,
'query': query,
},
self.collection_key)
num = len(rl)
if num == 0:
msg = _("No %(name)s matching %(kwargs)s.") % {
'name': self.resource_class.__name__, 'kwargs': kwargs}
raise exceptions.NotFound(404, msg)
elif num > 1:
raise exceptions.NoUniqueMatch
else:
return rl[0]
class Resource(base.Resource):
"""Base class for OpenStack resources (tenant, user, etc.).
This is pretty much just a bag for attributes.
"""
def delete(self):
return self.manager.delete(self)
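# Illustrative sketch (not part of the original module): a minimal
# ``CrudManager`` subclass. The entity name is an assumption for illustration.
# Given the ``collection_key``/``key`` pair below, ``build_url()`` produces
# '/projects' for the collection and '/projects/<id>' when a ``project_id``
# kwarg is supplied, and create()/get()/update()/delete() wrap the JSON
# bodies accordingly.
#
#     class ProjectManager(CrudManager):
#         resource_class = Resource
#         collection_key = 'projects'
#         key = 'project'
#
#     projects = ProjectManager(client)   # given a ``client`` HTTP session
#     projects.build_url({'project_id': 'abc'})   # -> '/projects/abc'
#     projects.create(name='demo')   # POST /projects with {'project': {'name': 'demo'}}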
|
|
import os
from django.contrib import messages
import traceback
from django.contrib.auth.views import login
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.utils.importlib import import_module
from limbo import exceptions
from django.views.static import serve
from limbo.paths import request_full_url
from limbo.strings import unslugify
from django.utils.translation import ugettext as _
from django.core.urlresolvers import Resolver404, reverse
import logging
from django.conf import settings
urlconf = settings.ROOT_URLCONF
urls = import_module(settings.ROOT_URLCONF)
log = logging.getLogger(__file__)
class LoginMiddleware:
def process_view(self, request, view_func, view_args, view_kwargs):
if any([
view_kwargs.pop('public', False),
request.user.is_authenticated(),
view_func == serve,
view_func == login,
request.path.startswith(settings.MEDIA_URL),
os.path.splitext(request.path)[-1].lower() in ('.ico', '.png', '.jpg', '.gif')
]):
return
if request.method == 'POST':
response = login(request)
if request.user.is_authenticated():
return HttpResponseRedirect(request.path)
return response
else:
return login(request)
class ExceptionsMiddleware(object):
def process_exception(self, request, exception):
if isinstance(exception, exceptions.ReloadRequest):
exception.path = request_full_url(request)
return exception.response
elif isinstance(exception, exceptions.SecurityError):
messages.debug(request, traceback.format_exc())
exception.log(request)
new_e = exceptions.RedirectRequest('/')
return new_e.response
# TODO: Redirect to a 403 forbidden page with full content
elif isinstance(exception, exceptions.MiddlewareException) and \
'response' in dir(exception):
return exception.response
class PaginationMiddleware(object):
def process_request(self, request):
try:
request.paginate_by = min(int(request.GET.get('paginate_by', 100)), 100)
except ValueError:
request.paginate_by = 100
class RequestMiddleware:
def process_request(self, request):
"""
Puts is_post, is_get, post_data, get_data and file_data on the request object
"""
request.is_get = self.is_get(request)
request.is_post = self.is_post(request)
request.post_data = self.post_data(request)
request.get_data = self.get_data(request)
request.file_data = self.file_data(request)
request.urls = self.default_urls()
def post_data_prefix(prefix):
data = request.post_data
if not data:
return None
for key in data.keys():
if key.startswith(prefix):
return data
request.post_data_prefix = post_data_prefix
def is_post(self, request):
return request.method in ('POST', 'PUT')
def is_get(self, request):
return request.method == 'GET'
def post_data(self, request, key = None):
""" Returns the POST dictionary object if the request is of method POST, else None """
if self.is_post(request):
if not key:
return request.POST
else:
if request.POST.has_key(key):
return request.POST
return None
def file_data(self, request):
""" If request is of method POST, returns request.FILES """
if self.is_post(request):
return request.FILES
return None
def get_data(self, request):
if self.is_get(request) and len(request.GET.keys()) > 0:
return request.GET
return None
def default_urls(self):
urls = {
"random_string":reverse('limbo:random_string'),
"message_sync":reverse('limbo:message_sync'),
"js_errors":reverse('limbo:js_errors'),
}
return urls
class Pages:
def __init__(self, request, view_func, view_args, view_kwargs):
self.request = request
self.view_func = view_func
self.view_args = view_args
self.view_kwargs = view_kwargs
if self.is_static_media():
return
self.parse_page()
self.parse_breadcrumbs()
request.relative_path = self.relative_path
def relative_path(self, offset = 0):
offset = abs(offset)
path = self.request.path
ews = path.endswith('/')
if ews:
path = path[:-1]
parts = path.split('/')
if len(parts) < offset:
return '/'
rpath = parts[:-offset] if offset else parts
if ews:
rpath += ['']
return '/'.join(rpath)
def is_static_media(self):
media_root = settings.MEDIA_ROOT[1:]
path = self.request.path[1:]
return path.startswith(media_root)
def parse_breadcrumbs(self):
if not hasattr(self.request, 'breadcrumbs'):
return
self.parse_display()
history = []
for part in self.request.path.split('/'):
if not part:
continue
history.append(part)
url = '/'.join(history + [""])
for pattern in urls.urlpatterns:
try:
resolved = pattern.resolve(url)
if resolved:
view, args, kwargs = resolved
display = kwargs.get('display', self.get_url_display(url, kwargs))
self.request.breadcrumbs(_(display), '/' + url)
except Resolver404:
pass
except Exception:
log.error(traceback.format_exc())
def parse_page(self):
self.page = self.view_kwargs.pop('page', None)
def parse_display(self):
self.display = self.view_kwargs.pop('display', self.get_url_display())
def get_url_display(self, path = None, kwargs = None):
if path is None:
path = self.request.path
if kwargs is None:
kwargs = self.view_kwargs
parts = path.split('/')
try:
new_path = parts[-1]
if not new_path:
new_path = parts[-2]
return unslugify(new_path).title()
except IndexError:
return ""
class PageMiddleware:
def process_view(self, request, view_func, view_args, view_kwargs):
request.pages = Pages(request, view_func, view_args, view_kwargs)
try:
from debug_toolbar.middleware import DebugToolbarMiddleware
except ImportError:
DebugToolbarMiddleware = object
class AdminDebugToolbarMiddleware(DebugToolbarMiddleware):
""" All superusers see debug toolbar """
def _show_toolbar(self, request):
if request.user.is_superuser:
return True
else:
return super(AdminDebugToolbarMiddleware, self)._show_toolbar(request)
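# Illustrative settings sketch (not part of the original module). It assumes
# this file is importable as ``limbo.middleware``; adjust the dotted paths to
# the real module location. Order matters: the limbo middleware should run
# after Django's session/auth middleware so ``request.user`` is available to
# LoginMiddleware.
#
#     MIDDLEWARE_CLASSES = (
#         'django.middleware.common.CommonMiddleware',
#         'django.contrib.sessions.middleware.SessionMiddleware',
#         'django.contrib.auth.middleware.AuthenticationMiddleware',
#         'django.contrib.messages.middleware.MessageMiddleware',
#         'limbo.middleware.LoginMiddleware',
#         'limbo.middleware.ExceptionsMiddleware',
#         'limbo.middleware.PaginationMiddleware',
#         'limbo.middleware.RequestMiddleware',
#         'limbo.middleware.PageMiddleware',
#     )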
|
|
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
import os
import sys
from optparse import make_option, OptionParser
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style, no_style
from django.utils.encoding import force_str
from django.utils.six import StringIO
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class OutputWrapper(object):
"""
Wrapper around stdout/stderr
"""
def __init__(self, out, style_func=None, ending='\n'):
self._out = out
self.style_func = None
if hasattr(out, 'isatty') and out.isatty():
self.style_func = style_func
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def write(self, msg, style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = [f for f in (style_func, self.style_func, lambda x:x)
if f is not None][0]
self._out.write(force_str(style_func(msg)))
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin.py`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_model_validation``
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app)`` from ``handle()``, where ``app`` is the
application's Python module.
``leave_locale_alone``
A boolean indicating whether the locale set in settings should be
preserved during the execution of the command instead of being
forcibly set to 'en-us'.
Default value is ``False``.
Make sure you know what you are doing if you decide to change the value
of this option in your custom command if it creates database content
that is locale-sensitive and such content shouldn't contain any
translations (as happens, e.g., with django.contrib.auth
permissions) as making the locale differ from the de facto default
'en-us' might cause unintended effects.
This option can't be False when the can_import_settings option is set
to False too because attempting to set the locale needs access to
settings. This condition will generate a CommandError.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'),
make_option('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
make_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
make_option('--traceback', action='store_true',
help='Raise on exception'),
make_option('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output."),
)
help = ''
args = ''
# Configuration shortcuts that alter various logic.
can_import_settings = True
requires_model_validation = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
leave_locale_alone = False
def __init__(self):
self.style = color_style()
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
try:
self.execute(*args, **options.__dict__)
except Exception as e:
if options.traceback or not isinstance(e, CommandError):
raise
# self.stderr is not guaranteed to be set here
stderr = getattr(self, 'stderr', OutputWrapper(sys.stderr, self.style.ERROR))
stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``, except if force-skipped).
"""
self.stdout = OutputWrapper(options.get('stdout', sys.stdout))
if options.get('no_color'):
self.style = no_style()
self.stderr = OutputWrapper(options.get('stderr', sys.stderr))
else:
self.stderr = OutputWrapper(options.get('stderr', sys.stderr), self.style.ERROR)
if self.can_import_settings:
from django.conf import settings
saved_locale = None
if not self.leave_locale_alone:
# Only mess with locales if we can assume we have a working
# settings file, because django.utils.translation requires settings
# (The final saying about whether the i18n machinery is active will be
# found in the value of the USE_I18N setting)
if not self.can_import_settings:
raise CommandError("Incompatible values of 'leave_locale_alone' "
"(%s) and 'can_import_settings' (%s) command "
"options." % (self.leave_locale_alone,
self.can_import_settings))
# Switch to US English, because django-admin.py creates database
# content like permissions, and those shouldn't contain any
# translations.
from django.utils import translation
saved_locale = translation.get_language()
translation.activate('en-us')
try:
if self.requires_model_validation and not options.get('skip_validation'):
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()))
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;"))
finally:
if saved_locale is not None:
translation.activate(saved_locale)
def validate(self, app=None, display_num_errors=False):
"""
Validates the given app, raising CommandError for any errors.
If app is None, then this will validate all installed apps.
"""
from django.core.management.validation import get_validation_errors
s = StringIO()
num_errors = get_validation_errors(s, app)
if num_errors:
s.seek(0)
error_text = s.read()
raise CommandError("One or more models did not validate:\n%s" % error_text)
if display_num_errors:
self.stdout.write("%s error%s found" % (num_errors, '' if num_errors == 1 else 's'))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application
names as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app()``, which will be called once for each application.
"""
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django.db import models
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (ImproperlyConfigured, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app in app_list:
app_output = self.handle_app(app, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app(self, app, **options):
"""
Perform the command's actions for ``app``, which will be the
Python module corresponding to an application name given on
the command line.
"""
raise NotImplementedError('subclasses of AppCommand must provide a handle_app() method')
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
if not labels:
raise CommandError('Enter at least one %s.' % self.label)
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError('subclasses of NoArgsCommand must provide a handle_noargs() method')
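# Illustrative sketch (not part of the original module): a minimal custom
# command built on ``BaseCommand`` above. The command name, file location and
# behaviour are assumptions for illustration only; in a real project this
# would live in ``<app>/management/commands/greet.py``.
#
#     from django.core.management.base import BaseCommand, CommandError
#
#     class Command(BaseCommand):
#         help = 'Prints a greeting for each name given on the command line.'
#         args = '<name name ...>'
#
#         def handle(self, *args, **options):
#             if not args:
#                 raise CommandError('Enter at least one name.')
#             # Returned output is written to stdout by execute(), and would
#             # be wrapped in BEGIN/COMMIT if output_transaction were True.
#             return '\n'.join('Hello, %s!' % name for name in args)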
|
|
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import os
import re
import sys
from collections import namedtuple
from operator import attrgetter
import enum
import six
from docker.errors import APIError
from docker.utils import LogConfig
from docker.utils.ports import build_port_bindings
from docker.utils.ports import split_port
from . import __version__
from .config import DOCKER_CONFIG_KEYS
from .config import merge_environment
from .config.validation import VALID_NAME_CHARS
from .const import DEFAULT_TIMEOUT
from .const import IS_WINDOWS_PLATFORM
from .const import LABEL_CONFIG_HASH
from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .const import LABEL_VERSION
from .container import Container
from .legacy import check_for_legacy_containers
from .progress_stream import stream_output
from .progress_stream import StreamOutputError
from .utils import json_hash
from .utils import parallel_execute
log = logging.getLogger(__name__)
DOCKER_START_KEYS = [
'cap_add',
'cap_drop',
'cgroup_parent',
'devices',
'dns',
'dns_search',
'env_file',
'extra_hosts',
'ipc',
'read_only',
'net',
'log_driver',
'log_opt',
'mem_limit',
'memswap_limit',
'pid',
'privileged',
'restart',
'volumes_from',
'security_opt',
]
class BuildError(Exception):
def __init__(self, service, reason):
self.service = service
self.reason = reason
class ConfigError(ValueError):
pass
class NeedsBuildError(Exception):
def __init__(self, service):
self.service = service
class NoSuchImageError(Exception):
pass
VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')
VolumeFromSpec = namedtuple('VolumeFromSpec', 'source mode')
ServiceName = namedtuple('ServiceName', 'project service number')
ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
@enum.unique
class ConvergenceStrategy(enum.Enum):
"""Enumeration for all possible convergence strategies. Values refer to
when containers should be recreated.
"""
changed = 1
always = 2
never = 3
@property
def allows_recreate(self):
return self is not type(self).never
class Service(object):
def __init__(
self,
name,
client=None,
project='default',
use_networking=False,
links=None,
volumes_from=None,
net=None,
**options
):
if not re.match('^%s+$' % VALID_NAME_CHARS, project):
raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
self.name = name
self.client = client
self.project = project
self.use_networking = use_networking
self.links = links or []
self.volumes_from = volumes_from or []
self.net = net or Net(None)
self.options = options
def containers(self, stopped=False, one_off=False, filters=None):
filters = dict(filters or {})  # avoid mutating a shared default argument
filters.update({'label': self.labels(one_off=one_off)})
containers = list(filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters=filters)]))
if not containers:
check_for_legacy_containers(
self.client,
self.project,
[self.name],
)
return containers
def get_container(self, number=1):
"""Return a :class:`compose.container.Container` for this service. The
container must be active, and match `number`.
"""
labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
for container in self.client.containers(filters={'label': labels}):
return Container.from_ps(self.client, container)
raise ValueError("No container found for %s_%s" % (self.name, number))
def start(self, **options):
for c in self.containers(stopped=True):
self.start_container_if_stopped(c, **options)
# TODO: remove these functions, project takes care of starting/stopping,
def stop(self, **options):
for c in self.containers():
log.info("Stopping %s" % c.name)
c.stop(**options)
def pause(self, **options):
for c in self.containers(filters={'status': 'running'}):
log.info("Pausing %s" % c.name)
c.pause(**options)
def unpause(self, **options):
for c in self.containers(filters={'status': 'paused'}):
log.info("Unpausing %s" % c.name)
c.unpause()
def kill(self, **options):
for c in self.containers():
log.info("Killing %s" % c.name)
c.kill(**options)
def restart(self, **options):
for c in self.containers():
log.info("Restarting %s" % c.name)
c.restart(**options)
# end TODO
def scale(self, desired_num, timeout=DEFAULT_TIMEOUT):
"""
Adjusts the number of containers to the specified number and ensures
they are running.
- creates containers until there are at least `desired_num`
- stops containers until there are at most `desired_num` running
- starts containers until there are at least `desired_num` running
- removes all stopped containers
"""
if self.custom_container_name() and desired_num > 1:
log.warn('The "%s" service is using the custom container name "%s". '
'Docker requires each container to have a unique name. '
'Remove the custom name to scale the service.'
% (self.name, self.custom_container_name()))
if self.specifies_host_port():
log.warn('The "%s" service specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.'
% self.name)
def create_and_start(service, number):
container = service.create_container(number=number, quiet=True)
container.start()
return container
running_containers = self.containers(stopped=False)
num_running = len(running_containers)
if desired_num == num_running:
# do nothing as we already have the desired number
log.info('Desired container number already achieved')
return
if desired_num > num_running:
# we need to start/create until we have desired_num
all_containers = self.containers(stopped=True)
if num_running != len(all_containers):
# we have some stopped containers, let's start them up again
stopped_containers = sorted([c for c in all_containers if not c.is_running], key=attrgetter('number'))
num_stopped = len(stopped_containers)
if num_stopped + num_running > desired_num:
num_to_start = desired_num - num_running
containers_to_start = stopped_containers[:num_to_start]
else:
containers_to_start = stopped_containers
parallel_execute(
objects=containers_to_start,
obj_callable=lambda c: c.start(),
msg_index=lambda c: c.name,
msg="Starting"
)
num_running += len(containers_to_start)
num_to_create = desired_num - num_running
next_number = self._next_container_number()
container_numbers = [
number for number in range(
next_number, next_number + num_to_create
)
]
parallel_execute(
objects=container_numbers,
obj_callable=lambda n: create_and_start(service=self, number=n),
msg_index=lambda n: n,
msg="Creating and starting"
)
if desired_num < num_running:
num_to_stop = num_running - desired_num
sorted_running_containers = sorted(running_containers, key=attrgetter('number'))
containers_to_stop = sorted_running_containers[-num_to_stop:]
parallel_execute(
objects=containers_to_stop,
obj_callable=lambda c: c.stop(timeout=timeout),
msg_index=lambda c: c.name,
msg="Stopping"
)
self.remove_stopped()
def remove_stopped(self, **options):
containers = [c for c in self.containers(stopped=True) if not c.is_running]
parallel_execute(
objects=containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def create_container(self,
one_off=False,
do_build=True,
previous_container=None,
number=None,
quiet=False,
**override_options):
"""
Create a container for this service. If the image doesn't exist, attempt to pull
it.
"""
self.ensure_image_exists(
do_build=do_build,
)
container_options = self._get_container_create_options(
override_options,
number or self._next_container_number(one_off=one_off),
one_off=one_off,
previous_container=previous_container,
)
if 'name' in container_options and not quiet:
log.info("Creating %s" % container_options['name'])
return Container.create(self.client, **container_options)
def ensure_image_exists(self,
do_build=True):
try:
self.image()
return
except NoSuchImageError:
pass
if self.can_be_built():
if do_build:
self.build()
else:
raise NeedsBuildError(self)
else:
self.pull()
def image(self):
try:
return self.client.inspect_image(self.image_name)
except APIError as e:
if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
raise NoSuchImageError("Image '{}' not found".format(self.image_name))
else:
raise
@property
def image_name(self):
if self.can_be_built():
return self.full_name
else:
return self.options['image']
def convergence_plan(self, strategy=ConvergenceStrategy.changed):
containers = self.containers(stopped=True)
if not containers:
return ConvergencePlan('create', [])
if strategy is ConvergenceStrategy.never:
return ConvergencePlan('start', containers)
if (
strategy is ConvergenceStrategy.always or
self._containers_have_diverged(containers)
):
return ConvergencePlan('recreate', containers)
stopped = [c for c in containers if not c.is_running]
if stopped:
return ConvergencePlan('start', stopped)
return ConvergencePlan('noop', containers)
def _containers_have_diverged(self, containers):
config_hash = None
try:
config_hash = self.config_hash
except NoSuchImageError as e:
log.debug(
'Service %s has diverged: %s',
self.name, six.text_type(e),
)
return True
has_diverged = False
for c in containers:
container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
if container_config_hash != config_hash:
log.debug(
'%s has diverged: %s != %s',
c.name, container_config_hash, config_hash,
)
has_diverged = True
return has_diverged
def execute_convergence_plan(self,
plan,
do_build=True,
timeout=DEFAULT_TIMEOUT):
(action, containers) = plan
if action == 'create':
container = self.create_container(
do_build=do_build,
)
self.start_container(container)
return [container]
elif action == 'recreate':
return [
self.recreate_container(
c,
timeout=timeout
)
for c in containers
]
elif action == 'start':
for c in containers:
self.start_container_if_stopped(c)
return containers
elif action == 'noop':
for c in containers:
log.info("%s is up-to-date" % c.name)
return containers
else:
raise Exception("Invalid action: {}".format(action))
def recreate_container(self,
container,
timeout=DEFAULT_TIMEOUT):
"""Recreate a container.
The original container is renamed to a temporary name so that data
volumes can be copied to the new container, before the original
container is removed.
"""
log.info("Recreating %s" % container.name)
try:
container.stop(timeout=timeout)
except APIError as e:
if (e.response.status_code == 500
and e.explanation
and 'no such process' in str(e.explanation)):
pass
else:
raise
# Use a hopefully unique container name by prepending the short id
self.client.rename(
container.id,
'%s_%s' % (container.short_id, container.name))
new_container = self.create_container(
do_build=False,
previous_container=container,
number=container.labels.get(LABEL_CONTAINER_NUMBER),
quiet=True,
)
self.start_container(new_container)
container.remove()
return new_container
def start_container_if_stopped(self, container):
if container.is_running:
return container
else:
log.info("Starting %s" % container.name)
return self.start_container(container)
def start_container(self, container):
container.start()
return container
def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT):
for c in self.duplicate_containers():
log.info('Removing %s' % c.name)
c.stop(timeout=timeout)
c.remove()
def duplicate_containers(self):
containers = sorted(
self.containers(stopped=True),
key=lambda c: c.get('Created'),
)
numbers = set()
for c in containers:
if c.number in numbers:
yield c
else:
numbers.add(c.number)
@property
def config_hash(self):
return json_hash(self.config_dict())
def config_dict(self):
return {
'options': self.options,
'image_id': self.image()['Id'],
'links': self.get_link_names(),
'net': self.net.id,
'volumes_from': self.get_volumes_from_names(),
}
def get_dependency_names(self):
net_name = self.net.service_name
return (self.get_linked_service_names() +
self.get_volumes_from_names() +
([net_name] if net_name else []))
def get_linked_service_names(self):
return [service.name for (service, _) in self.links]
def get_link_names(self):
return [(service.name, alias) for service, alias in self.links]
def get_volumes_from_names(self):
return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)]
def get_container_name(self, number, one_off=False):
# TODO: Implement issue #652 here
return build_container_name(self.project, self.name, number, one_off)
# TODO: this would benefit from github.com/docker/docker/pull/11943
# to remove the need to inspect every container
def _next_container_number(self, one_off=False):
containers = filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=True,
filters={'label': self.labels(one_off=one_off)})
])
numbers = [c.number for c in containers]
return 1 if not numbers else max(numbers) + 1
def _get_links(self, link_to_self):
links = []
for service, link_name in self.links:
for container in service.containers():
links.append((container.name, link_name or service.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
if link_to_self:
for container in self.containers():
links.append((container.name, self.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
for external_link in self.options.get('external_links') or []:
if ':' not in external_link:
link_name = external_link
else:
external_link, link_name = external_link.split(':')
links.append((external_link, link_name))
return links
def _get_volumes_from(self):
volumes_from = []
for volume_from_spec in self.volumes_from:
volumes = build_volume_from(volume_from_spec)
volumes_from.extend(volumes)
return volumes_from
def _get_container_create_options(
self,
override_options,
number,
one_off=False,
previous_container=None):
add_config_hash = (not one_off and not override_options)
container_options = dict(
(k, self.options[k])
for k in DOCKER_CONFIG_KEYS if k in self.options)
container_options.update(override_options)
if self.custom_container_name() and not one_off:
container_options['name'] = self.custom_container_name()
elif not container_options.get('name'):
container_options['name'] = self.get_container_name(number, one_off)
if 'detach' not in container_options:
container_options['detach'] = True
# If a qualified hostname was given, split it into an
# unqualified hostname and a domainname unless domainname
# was also given explicitly. This matches the behavior of
# the official Docker CLI in that scenario.
if ('hostname' in container_options
and 'domainname' not in container_options
and '.' in container_options['hostname']):
parts = container_options['hostname'].partition('.')
container_options['hostname'] = parts[0]
container_options['domainname'] = parts[2]
if 'hostname' not in container_options and self.use_networking:
container_options['hostname'] = self.name
if 'ports' in container_options or 'expose' in self.options:
ports = []
all_ports = container_options.get('ports', []) + self.options.get('expose', [])
for port_range in all_ports:
internal_range, _ = split_port(port_range)
for port in internal_range:
port = str(port)
if '/' in port:
port = tuple(port.split('/'))
ports.append(port)
container_options['ports'] = ports
override_options['binds'] = merge_volume_bindings(
container_options.get('volumes') or [],
previous_container)
if 'volumes' in container_options:
container_options['volumes'] = dict(
(parse_volume_spec(v).internal, {})
for v in container_options['volumes'])
container_options['environment'] = merge_environment(
self.options.get('environment'),
override_options.get('environment'))
if previous_container:
container_options['environment']['affinity:container'] = ('=' + previous_container.id)
container_options['image'] = self.image_name
container_options['labels'] = build_container_labels(
container_options.get('labels', {}),
self.labels(one_off=one_off),
number,
self.config_hash if add_config_hash else None)
# Delete options which are only used when starting
for key in DOCKER_START_KEYS:
container_options.pop(key, None)
container_options['host_config'] = self._get_container_host_config(
override_options,
one_off=one_off)
return container_options
def _get_container_host_config(self, override_options, one_off=False):
options = dict(self.options, **override_options)
port_bindings = build_port_bindings(options.get('ports') or [])
privileged = options.get('privileged', False)
cap_add = options.get('cap_add', None)
cap_drop = options.get('cap_drop', None)
log_config = LogConfig(
type=options.get('log_driver', ""),
config=options.get('log_opt', None)
)
pid = options.get('pid', None)
security_opt = options.get('security_opt', None)
dns = options.get('dns', None)
if isinstance(dns, six.string_types):
dns = [dns]
dns_search = options.get('dns_search', None)
if isinstance(dns_search, six.string_types):
dns_search = [dns_search]
restart = parse_restart_spec(options.get('restart', None))
extra_hosts = build_extra_hosts(options.get('extra_hosts', None))
read_only = options.get('read_only', None)
devices = options.get('devices', None)
cgroup_parent = options.get('cgroup_parent', None)
return self.client.create_host_config(
links=self._get_links(link_to_self=one_off),
port_bindings=port_bindings,
binds=options.get('binds'),
volumes_from=self._get_volumes_from(),
privileged=privileged,
network_mode=self.net.mode,
devices=devices,
dns=dns,
dns_search=dns_search,
restart_policy=restart,
cap_add=cap_add,
cap_drop=cap_drop,
mem_limit=options.get('mem_limit'),
memswap_limit=options.get('memswap_limit'),
log_config=log_config,
extra_hosts=extra_hosts,
read_only=read_only,
pid_mode=pid,
security_opt=security_opt,
ipc_mode=options.get('ipc'),
cgroup_parent=cgroup_parent
)
def build(self, no_cache=False, pull=False):
log.info('Building %s' % self.name)
path = self.options['build']
# python2 os.path() doesn't support unicode, so we need to encode it to
# a byte string
if not six.PY3:
path = path.encode('utf8')
build_output = self.client.build(
path=path,
tag=self.image_name,
stream=True,
rm=True,
pull=pull,
nocache=no_cache,
dockerfile=self.options.get('dockerfile', None),
)
try:
all_events = stream_output(build_output, sys.stdout)
except StreamOutputError as e:
raise BuildError(self, six.text_type(e))
# Ensure the HTTP connection is not reused for another
# streaming command, as the Docker daemon can sometimes
# complain about it
self.client.close()
image_id = None
for event in all_events:
if 'stream' in event:
match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
if match:
image_id = match.group(1)
if image_id is None:
raise BuildError(self, event if all_events else 'Unknown')
return image_id
def can_be_built(self):
return 'build' in self.options
@property
def full_name(self):
"""
The tag to give to images built for this service.
"""
return '%s_%s' % (self.project, self.name)
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.project),
'{0}={1}'.format(LABEL_SERVICE, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
]
def custom_container_name(self):
return self.options.get('container_name')
def specifies_host_port(self):
for port in self.options.get('ports', []):
if ':' in str(port):
return True
return False
def pull(self, ignore_pull_failures=False):
if 'image' not in self.options:
return
repo, tag, separator = parse_repository_tag(self.options['image'])
tag = tag or 'latest'
log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
output = self.client.pull(
repo,
tag=tag,
stream=True,
)
try:
stream_output(output, sys.stdout)
except StreamOutputError as e:
if not ignore_pull_failures:
raise
else:
log.error(six.text_type(e))
class Net(object):
"""A `standard` network mode (ex: host, bridge)"""
service_name = None
def __init__(self, net):
self.net = net
@property
def id(self):
return self.net
mode = id
class ContainerNet(object):
"""A network mode that uses a container's network stack."""
service_name = None
def __init__(self, container):
self.container = container
@property
def id(self):
return self.container.id
@property
def mode(self):
return 'container:' + self.container.id
class ServiceNet(object):
"""A network mode that uses a service's network stack."""
def __init__(self, service):
self.service = service
@property
def id(self):
return self.service.name
service_name = id
@property
def mode(self):
containers = self.service.containers()
if containers:
return 'container:' + containers[0].id
log.warn("Warning: Service %s is trying to reuse the network stack "
"of another service that is not running." % (self.id))
return None
# Names
def build_container_name(project, service, number, one_off=False):
bits = [project, service]
if one_off:
bits.append('run')
return '_'.join(bits + [str(number)])
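# Illustrative examples (not part of the original module), derived from
# build_container_name() above; project/service names are placeholders:
#     build_container_name('myproject', 'web', 1)                ->  'myproject_web_1'
#     build_container_name('myproject', 'web', 2, one_off=True)  ->  'myproject_web_run_2'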
# Images
def parse_repository_tag(repo_path):
"""Splits image identification into base image path, tag/digest
and its separator.
Example:
>>> parse_repository_tag('user/repo@sha256:digest')
('user/repo', 'sha256:digest', '@')
>>> parse_repository_tag('user/repo:v1')
('user/repo', 'v1', ':')
"""
tag_separator = ":"
digest_separator = "@"
if digest_separator in repo_path:
repo, tag = repo_path.rsplit(digest_separator, 1)
return repo, tag, digest_separator
repo, tag = repo_path, ""
if tag_separator in repo_path:
repo, tag = repo_path.rsplit(tag_separator, 1)
if "/" in tag:
repo, tag = repo_path, ""
return repo, tag, tag_separator
# Volumes
def merge_volume_bindings(volumes_option, previous_container):
"""Return a list of volume bindings for a container. Container data volumes
are replaced by those from the previous container.
"""
volume_bindings = dict(
build_volume_binding(parse_volume_spec(volume))
for volume in volumes_option or []
if ':' in volume)
if previous_container:
volume_bindings.update(
get_container_data_volumes(previous_container, volumes_option))
return list(volume_bindings.values())
def get_container_data_volumes(container, volumes_option):
"""Find the container data volumes that are in `volumes_option`, and return
a mapping of volume bindings for those volumes.
"""
volumes = []
volumes_option = volumes_option or []
container_volumes = container.get('Volumes') or {}
image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {}
for volume in set(volumes_option + list(image_volumes)):
volume = parse_volume_spec(volume)
# No need to preserve host volumes
if volume.external:
continue
volume_path = container_volumes.get(volume.internal)
# New volume, doesn't exist in the old container
if not volume_path:
continue
# Copy existing volume from old container
volume = volume._replace(external=volume_path)
volumes.append(build_volume_binding(volume))
return dict(volumes)
def build_volume_binding(volume_spec):
return volume_spec.internal, "{}:{}:{}".format(*volume_spec)
def normalize_paths_for_engine(external_path, internal_path):
"""
Windows paths, c:\my\path\shiny, need to be changed to be compatible with
the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
"""
if IS_WINDOWS_PLATFORM:
if external_path:
drive, tail = os.path.splitdrive(external_path)
if drive:
reformatted_drive = "/{}".format(drive.replace(":", ""))
external_path = reformatted_drive + tail
external_path = "/".join(external_path.split("\\"))
return external_path, "/".join(internal_path.split("\\"))
else:
return external_path, internal_path
def parse_volume_spec(volume_config):
"""
Parse a volume_config path and split it into external:internal[:mode]
parts to be returned as a valid VolumeSpec.
"""
if IS_WINDOWS_PLATFORM:
# relative paths in windows expand to include the drive, eg C:\
# so we join the first 2 parts back together to count as one
drive, tail = os.path.splitdrive(volume_config)
parts = tail.split(":")
if drive:
parts[0] = drive + parts[0]
else:
parts = volume_config.split(':')
if len(parts) > 3:
raise ConfigError("Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config)
if len(parts) == 1:
external, internal = normalize_paths_for_engine(None, os.path.normpath(parts[0]))
else:
external, internal = normalize_paths_for_engine(os.path.normpath(parts[0]), os.path.normpath(parts[1]))
mode = 'rw'
if len(parts) == 3:
mode = parts[2]
return VolumeSpec(external, internal, mode)
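# Illustrative examples (not part of the original module), derived from
# parse_volume_spec() above on a non-Windows platform; paths are placeholders:
#     parse_volume_spec('/data')                ->  VolumeSpec(None, '/data', 'rw')
#     parse_volume_spec('/host:/container:ro')  ->  VolumeSpec('/host', '/container', 'ro')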
def build_volume_from(volume_from_spec):
"""
volume_from can be either a service or a container. We want to return the
container.id and format it into a string complete with the mode.
"""
if isinstance(volume_from_spec.source, Service):
containers = volume_from_spec.source.containers(stopped=True)
if not containers:
return ["{}:{}".format(volume_from_spec.source.create_container().id, volume_from_spec.mode)]
container = containers[0]
return ["{}:{}".format(container.id, volume_from_spec.mode)]
elif isinstance(volume_from_spec.source, Container):
return ["{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)]
def parse_volume_from_spec(volume_from_config):
parts = volume_from_config.split(':')
if len(parts) > 2:
raise ConfigError("Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_from_config)
if len(parts) == 1:
source = parts[0]
mode = 'rw'
else:
source, mode = parts
return VolumeFromSpec(source, mode)
# Labels
def build_container_labels(label_options, service_labels, number, config_hash):
labels = dict(label_options or {})
labels.update(label.split('=', 1) for label in service_labels)
labels[LABEL_CONTAINER_NUMBER] = str(number)
labels[LABEL_VERSION] = __version__
if config_hash:
log.debug("Added config hash: %s" % config_hash)
labels[LABEL_CONFIG_HASH] = config_hash
return labels
# Restart policy
def parse_restart_spec(restart_config):
if not restart_config:
return None
parts = restart_config.split(':')
if len(parts) > 2:
raise ConfigError("Restart %s has incorrect format, should be "
"mode[:max_retry]" % restart_config)
if len(parts) == 2:
name, max_retry_count = parts
else:
name, = parts
max_retry_count = 0
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
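# Illustrative examples (not part of the original module), derived from
# parse_restart_spec() above:
#     parse_restart_spec(None)            ->  None
#     parse_restart_spec('always')        ->  {'Name': 'always', 'MaximumRetryCount': 0}
#     parse_restart_spec('on-failure:5')  ->  {'Name': 'on-failure', 'MaximumRetryCount': 5}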
# Extra hosts
def build_extra_hosts(extra_hosts_config):
if not extra_hosts_config:
return {}
if isinstance(extra_hosts_config, list):
extra_hosts_dict = {}
for extra_hosts_line in extra_hosts_config:
if not isinstance(extra_hosts_line, six.string_types):
raise ConfigError(
"extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
extra_hosts_config
)
host, ip = extra_hosts_line.split(':')
extra_hosts_dict.update({host.strip(): ip.strip()})
extra_hosts_config = extra_hosts_dict
if isinstance(extra_hosts_config, dict):
return extra_hosts_config
raise ConfigError(
"extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
extra_hosts_config
)
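# Illustrative examples (not part of the original module), derived from
# build_extra_hosts() above; the host names and addresses are placeholders:
#     build_extra_hosts(['somehost:162.242.195.82'])     ->  {'somehost': '162.242.195.82'}
#     build_extra_hosts({'somehost': '162.242.195.82'})  ->  {'somehost': '162.242.195.82'}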
|
|
"""
For developing tabular api
"""
from os.path import join, isfile, dirname, realpath
import sys
import json
import requests
from gc_apps.geo_utils.msg_util import *
"""
Load up the server and username
"""
GEONODE_CREDS_FNAME = join(dirname(realpath(__file__)), 'server_creds.json')
assert isfile(GEONODE_CREDS_FNAME), 'Server credentials file not found: %s' % GEONODE_CREDS_FNAME
try:
GEONODE_CREDS_JSON = json.loads(open(GEONODE_CREDS_FNAME, 'r').read())
except:
raise Exception('Could not parse tabular credentials JSON file: %s' % GEONODE_CREDS_FNAME)
GEONODE_SERVER = GEONODE_CREDS_JSON['SERVER_URL']
GEONODE_USERNAME = GEONODE_CREDS_JSON['USERNAME']
GEONODE_PASSWORD = GEONODE_CREDS_JSON['PASSWORD']
INPUT_DIR = join('..', 'input')
class TabularTest:
def __init__(self, base_url=GEONODE_SERVER, username=GEONODE_USERNAME, pw=GEONODE_PASSWORD):
self.client = requests.session()
self.base_url = base_url
self.geonode_username = username
self.geonode_password = pw
#self.login_url = self.base_url + "/account/login/" # GeoNode
self.login_url = self.base_url + "/accounts/login/" # WorldMap
self.csv_upload_url = self.base_url + '/datatables/api/upload'
self.shp_layer_upload_url = self.base_url + '/layers/upload'  # used by add_shapefile_layer()
self.join_datatable_url = self.base_url + '/datatables/api/join'
self.upload_and_join_datatable_url = self.base_url + '/datatables/api/upload_and_join'
self.upload_lat_lng_url = self.base_url + '/datatables/api/upload_lat_lon'
def login_for_cookie(self):
msgt('login_for_cookie: %s' % self.login_url)
# Retrieve the CSRF token first
self.client.get(self.login_url) # sets the cookie
csrftoken = self.client.cookies['csrftoken']
login_data = dict(username=self.geonode_username\
, password=self.geonode_password\
, csrfmiddlewaretoken=csrftoken\
)
#headers=dict(Referer=URL)
r = self.client.post(self.login_url\
, data=login_data\
, headers={"Referer": self.login_url}\
#, headers={"Referer": "test-client"}\
)
#print r.text
print r.status_code
def upload_csv_file(self, title, fname_to_upload):
assert isfile(fname_to_upload), "File to upload not found: %s" % fname_to_upload
msgt('upload_csv_file: %s' % self.csv_upload_url)
files = {'uploaded_file': open(fname_to_upload,'rb')}
response = self.client.post(self.csv_upload_url\
, data={'title' : title }\
, files=files)
fname = 'res.html'
open(fname, 'w').write(response.text)
msg('file written: %s' % fname)
print response.text
print response.status_code
resp_dict = json.loads(response.content)
datatable_name = resp_dict['data']['datatable_name']
print datatable_name
return
def add_shapefile_layer(self, shp_dirname, shp_fname_prefix):
msgt('add_shapefile_layer: %s' % self.shp_layer_upload_url)
files = {
'base_file': open(join(shp_dirname, '%s.shp' % shp_fname_prefix), 'rb'),
'dbf_file': open(join(shp_dirname, '%s.dbf' % shp_fname_prefix), 'rb'),
'prj_file': open(join(shp_dirname, '%s.prj' % shp_fname_prefix), 'rb'),
'shx_file': open(join(shp_dirname, '%s.shx' % shp_fname_prefix), 'rb'),
}
# 'base_file': open('scratch/tl_2013_06_tract.shp','rb'),
# 'dbf_file': open('scratch/tl_2013_06_tract.dbf','rb'),
# 'prj_file': open('scratch/tl_2013_06_tract.prj','rb'),
# 'shx_file': open('scratch/tl_2013_06_tract.shx','rb'),
# 'xml_file': open('scratch/tl_2013_06_tract.shp.xml','rb')
# Retrieve the CSRF token first
#self.client.get() # sets the cookie
csrftoken = self.client.cookies['csrftoken']
perms = '{"users":{"AnonymousUser":["view_resourcebase","download_resourcebase"]},"groups":{}}'
response = self.client.post(self.shp_layer_upload_url\
, files=files\
, data={'csrfmiddlewaretoken':csrftoken\
, 'permissions':perms\
}\
)
print response.content
new_layer_name = json.loads(response.content)['url'].split('/')[2].replace('%3A', ':')
print new_layer_name
def join_datatable_to_layer(self, join_props):
"""
Join a layer to a csv data table. Example:
join_props = {
'table_name': 'ca_tracts_pop_002',
'table_attribute': 'GEO.id2',
'layer_typename': 'geonode:tl_2013_06_tract',
'layer_attribute': 'GEOID'
}
"""
msgt('join_datatable_to_layer: %s' % self.join_datatable_url)
assert isinstance(join_props, dict), "join_props must be a dict {}"
for k in ('table_name', 'table_attribute', 'layer_typename', 'layer_attribute'):
assert join_props.has_key(k), "join_props is missing key: %s" % k
msg(join_props)
response = self.client.post(self.join_datatable_url, data=join_props)
print response.content
def upload_table_with_lat_lng(self, params, fname_to_upload):
"""
Map a table using its lat/lng columns:
params = {
'title' : 'boston income',
'abstract' : 'longer description...',
'lng_attribute': 'GEO.id2',
'lat_attribute': 'geonode:tl_2013_06_tract',
}
"""
assert isfile(fname_to_upload), "File to upload not found: %s" % fname_to_upload
assert isinstance(params, dict), "params must be a dict {}"
for k in ('title', 'abstract', 'lat_attribute', 'lng_attribute'):
            assert k in params, "params is missing key: %s" % k
msg(params)
files = {'uploaded_file': open(fname_to_upload,'rb')}
msg('post url: %s' % self.upload_lat_lng_url)
response = self.client.post(self.upload_lat_lng_url\
, data=params\
, files=files\
)
print response.content
def upload_datatable_and_join_to_layer(self, params, fname_to_upload):
"""
Join a layer to a csv data table. Example:
params = {
                'title': 'boston income',
'table_attribute': 'GEO.id2',
'layer_typename': 'geonode:tl_2013_06_tract',
'layer_attribute': 'GEOID'
}
"""
assert isfile(fname_to_upload), "File to upload not found: %s" % fname_to_upload
assert isinstance(params, dict), "params must be a dict {}"
for k in ('title', 'table_attribute', 'layer_typename', 'layer_attribute'):
            assert k in params, "params is missing key: %s" % k
msg(params)
files = {'uploaded_file': open(fname_to_upload,'rb')}
r = self.client.post(self.upload_and_join_datatable_url\
, data=params\
, files=files\
)
return r
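# Illustrative usage sketch (not part of the original script): drive the
# upload-and-join endpoint end to end with the TestRun client above. The join
# columns and title mirror the docstring example; the CSV path is a
# hypothetical placeholder. Print statements follow the script's Python 2 style.
def example_upload_and_join():
    tr = TestRun()
    tr.login_for_cookie()
    params = {'title': 'boston income',
              'table_attribute': 'GEO.id2',
              'layer_typename': 'geonode:tl_2013_06_tract',
              'layer_attribute': 'GEOID'}
    r = tr.upload_datatable_and_join_to_layer(params, 'scratch/boston_income.csv')
    print r.status_code
    print r.text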
if __name__=='__main__':
tr = TestRun()
tr.login_for_cookie()
# Upload CSV
title = 'California Pop Test'
fname_to_upload = join(INPUT_DIR, 'ca_tracts_pop_002.csv')
#tr.upload_csv_file(title, fname_to_upload)
# {"datatable_id": 28, "datatable_name": "ca_tracts_pop_002"}
# Join CSV to existing layer
tr.upload_three('---', '----')
# {'layer_typename': 'geonode:tl_2013_06_tract', 'table_name': 'ca_tracts_pop_002', 'table_attribute': 'GEO.id2', 'layer_attribute': 'GEOID'}
#{"join_layer": "geonode:view_join_tl_2013_06_tract_ca_tracts_pop_002", "source_layer": "geonode:tl_2013_06_tract", "view_name": "view_join_tl_2013_06_tract_ca_tracts_pop_002", "table_attribute": "GEO.id2", "layer_attribute": "GEOID", "layer_url": "/layers/geonode%3Aview_join_tl_2013_06_tract_ca_tracts_pop_002", "datatable": "ca_tracts_pop_002", "join_id": 8}
#tr.add_shapefile_layer('social_disorder_in_boston_yqh_zip_411')
#tr.upload_three('social_disorder_in_boston_yqh_zip_411', 'geonode:c_bra_bl')
"""
National zip codes:
- tl_2014_us_zcta510.zip
"""
|
|
# -*- coding: utf-8 -*-
"""
ulmo.cdec.historical.core
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides access to data provided by the `California Department
of Water Resources`_ `California Data Exchange Center`_ web site.
.. _California Department of Water Resources: http://www.water.ca.gov/
.. _California Data Exchange Center: http://cdec.water.ca.gov
SELECTED CDEC SENSOR NUMBERS (these may not be available for all sites):
1 river stage [ft]
2 precipitation, accumulated [in]
3 SWE [in]
4 air temperature [F]
5 EC [ms/cm]
6 reservoir elevation [ft]
7 reservoir scheduled release [cfs]
8 full natural flow [cfs]
15 reservoir storage [af]
20 flow -- river discharge [cfs]
22 reservoir storage change [af]
23 reservoir outflow [cfs]
24 Evapotranspiration [in]
25 water temperature [F]
27 water turbidity [ntu]
28 chlorophyll [ug/l]
41 flow -- mean daily [cfs]
45 precipitation, incremental [in]
46 runoff volume [af]
61 water dissolved oxygen [mg/l]
62 water pH value [pH]
64 pan evaporation (incremental) [in]
65 full natural flow [af]
66 flow -- monthly volume [af]
67 accretions (estimated) [af]
71 spillway discharge [cfs]
74 lake evaporation (computed) [cfs]
76 reservoir inflow [cfs]
85 control regulating discharge [cfs]
94 top conservation storage (reservoir) [af]
100 water EC [us/cm]
CDEC DURATION CODES:
E event
H hourly
D daily
M monthly
"""
import pandas as pd
from tsgettoolbox.ulmo import util
DEFAULT_START_DATE = "01/01/1901"
DEFAULT_END_DATE = "Now"
def get_stations():
"""Fetches information on all CDEC sites.
Returns
-------
df : pandas DataFrame
a pandas DataFrame (indexed on site id) with station information.
"""
# I haven't found a better list of stations, seems pretty janky
# to just have them in a file, and not sure if/when it is updated.
url = "http://cdec.water.ca.gov/misc/all_stations.csv"
# the csv is malformed, so some rows think there are 7 fields
col_names = ["id", "meta_url", "name", "num", "lat", "lon", "junk"]
df = pd.read_csv(url, names=col_names, header=None, quotechar="'", index_col=0)
return df
def get_sensors(sensor_id=None):
"""
Gets a list of sensor ids as a DataFrame indexed on sensor
number. Can be limited by a list of numbers.
Usage example::
from tsgettoolbox.ulmo import cdec
# to get all available sensor info
sensors = cdec.historical.get_sensors()
# or to get just one sensor
sensor = cdec.historical.get_sensors([1])
Parameters
----------
    sensor_id : iterable of integers or ``None``
        If given, only these sensor numbers are returned.
    Returns
    -------
    df : pandas DataFrame
        a pandas DataFrame of sensor descriptions, indexed on sensor number
"""
url = "http://cdec.water.ca.gov/misc/senslist.html"
df = pd.read_html(url, header=0)[0]
df.set_index("Sensor No")
if sensor_id is None:
return df
return df.loc[sensor_id]
def get_station_sensors(station_ids=None, sensor_ids=None, resolutions=None):
"""
Gets available sensors for the given stations, sensor ids and time
resolutions. If no station ids are provided, all available stations will
be used (this is not recommended, and will probably take a really long
time).
The list can be limited by a list of sensor numbers, or time resolutions
if you already know what you want. If none of the provided sensors or
resolutions are available, an empty DataFrame will be returned for that
station.
Usage example::
from tsgettoolbox.ulmo import cdec
# to get all available sensors
available_sensors = cdec.historical.get_station_sensors(['NEW'])
Parameters
----------
station_ids : iterable of strings or ``None``
sensor_ids : iterable of integers or ``None``
check out or use the ``get_sensors()`` function to see a list of
available sensor numbers
resolutions : iterable of strings or ``None``
Possible values are 'event', 'hourly', 'daily', and 'monthly' but not
all of these time resolutions are available at every station.
Returns
-------
dict : a python dict
a python dict with site codes as keys with values containing pandas
DataFrames of available sensor numbers and metadata.
"""
# PRA&SensorNums=76&dur_code=H&Start=2019-02-02&End=2019-02-04
station_sensors = {}
if station_ids is None:
station_ids = get_stations().index
for station_id in station_ids:
url = "http://cdec.water.ca.gov/dynamicapp/staMeta?station_id=%s" % (station_id)
try:
sensor_list = pd.read_html(url, match="Sensor Description")[0]
        except Exception:
sensor_list = pd.read_html(url)[0]
try:
sensor_list.columns = ["sensor_id", "variable", "resolution", "timerange"]
        except Exception:
sensor_list.columns = [
"variable",
"sensor_id",
"resolution",
"varcode",
"method",
"timerange",
]
sensor_list[["variable", "units"]] = sensor_list.variable.str.split(
",", 1, expand=True
)
sensor_list.resolution = sensor_list.resolution.str.strip("()")
station_sensors[station_id] = _limit_sensor_list(
sensor_list, sensor_ids, resolutions
)
return station_sensors
def get_data(station_ids=None, sensor_ids=None, resolutions=None, start=None, end=None):
"""
Downloads data for a set of CDEC station and sensor ids. If either is not
provided, all available data will be downloaded. Be really careful with
choosing hourly resolution as the data sets are big, and CDEC's servers
are slow as molasses in winter.
Usage example::
from tsgettoolbox.ulmo import cdec
dat = cdec.historical.get_data(['PRA'],resolutions=['daily'])
Parameters
----------
station_ids : iterable of strings or ``None``
sensor_ids : iterable of integers or ``None``
check out or use the ``get_sensors()`` function to see a list of
available sensor numbers
resolutions : iterable of strings or ``None``
Possible values are 'event', 'hourly', 'daily', and 'monthly' but not
all of these time resolutions are available at every station.
Returns
-------
dict : a python dict
a python dict with site codes as keys. Values will be nested dicts
containing all of the sensor/resolution combinations.
"""
if start is None:
start_date = util.convert_date(DEFAULT_START_DATE)
else:
start_date = util.convert_date(start)
if end is None:
end_date = util.convert_date(DEFAULT_END_DATE)
else:
end_date = util.convert_date(end)
start_date_str = _format_date(start_date)
end_date_str = _format_date(end_date)
if station_ids is None:
station_ids = get_stations().index
sensors = get_station_sensors(station_ids, sensor_ids, resolutions)
d = {}
for station_id, sensor_list in list(sensors.items()):
station_data = {}
for index, row in sensor_list.iterrows():
res = row.loc["resolution"]
var = row.loc["variable"]
sensor_id = row.loc["sensor_id"]
station_data[var] = _download_raw(
station_id,
sensor_id,
_res_to_dur_code(res),
start_date_str,
end_date_str,
)
d[station_id] = station_data
return d
def _limit_sensor_list(sensor_list, sensor_ids, resolution):
if sensor_ids is not None:
sensor_list = sensor_list[[x in sensor_ids for x in sensor_list.sensor_id]]
if resolution is not None:
sensor_list = sensor_list[[x in resolution for x in sensor_list.resolution]]
return sensor_list
def _download_raw(station_id, sensor_num, dur_code, start_date, end_date):
url = (
"http://cdec.water.ca.gov/dynamicapp/req/CSVDataServlet"
+ "?Stations="
+ station_id
+ "&dur_code="
+ dur_code
+ "&SensorNums="
+ str(sensor_num)
+ "&Start="
+ start_date
+ "&End="
+ end_date
)
df = pd.read_csv(url, parse_dates=[4, 5], index_col="DATE TIME", na_values="---")
df.columns = [
"station_id",
"duration",
"sensor_number",
"sensor_type",
"obs_date",
"value",
"data_flag",
"units",
]
return df
def _res_to_dur_code(res):
map = {"hourly": "H", "daily": "D", "monthly": "M", "event": "E"}
return map[res]
def _format_date(date):
return "{}/{}/{}".format(date.month, date.day, date.year)
|
|
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import atexit
import os
import ssl
import time
try:
# requests is required for exception handling of the ConnectionError
import requests
from pyVim import connect
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six import integer_types, iteritems, string_types
class TaskError(Exception):
pass
def wait_for_task(task):
while True:
if task.info.state == vim.TaskInfo.State.success:
return True, task.info.result
if task.info.state == vim.TaskInfo.State.error:
try:
raise TaskError(task.info.error)
except AttributeError:
raise TaskError("An unknown error has occurred")
if task.info.state == vim.TaskInfo.State.running:
time.sleep(15)
if task.info.state == vim.TaskInfo.State.queued:
time.sleep(15)
def find_obj(content, vimtype, name, first=True):
container = content.viewManager.CreateContainerView(container=content.rootFolder, recursive=True, type=vimtype)
obj_list = container.view
container.Destroy()
# Backward compatible with former get_obj() function
if name is None:
if obj_list:
return obj_list[0]
return None
# Select the first match
if first is True:
for obj in obj_list:
if obj.name == name:
return obj
# If no object found, return None
return None
# Return all matching objects if needed
return [obj for obj in obj_list if obj.name == name]
def find_dvspg_by_name(dv_switch, portgroup_name):
portgroups = dv_switch.portgroup
for pg in portgroups:
if pg.name == portgroup_name:
return pg
return None
def find_entity_child_by_path(content, entityRootFolder, path):
entity = entityRootFolder
searchIndex = content.searchIndex
paths = path.split("/")
try:
for path in paths:
entity = searchIndex.FindChild(entity, path)
if entity.name == paths[-1]:
return entity
    except Exception:
pass
return None
# Maintain for legacy, or remove with 2.1 ?
# Should be replaced with find_cluster_by_name
def find_cluster_by_name_datacenter(datacenter, cluster_name):
host_folder = datacenter.hostFolder
for folder in host_folder.childEntity:
if folder.name == cluster_name:
return folder
return None
def find_cluster_by_name(content, cluster_name, datacenter=None):
if datacenter:
folder = datacenter.hostFolder
else:
folder = content.rootFolder
clusters = get_all_objs(content, [vim.ClusterComputeResource], folder)
for cluster in clusters:
if cluster.name == cluster_name:
return cluster
return None
def find_datacenter_by_name(content, datacenter_name):
datacenters = get_all_objs(content, [vim.Datacenter])
for dc in datacenters:
if dc.name == datacenter_name:
return dc
return None
def find_datastore_by_name(content, datastore_name):
datastores = get_all_objs(content, [vim.Datastore])
for ds in datastores:
if ds.name == datastore_name:
return ds
return None
def find_dvs_by_name(content, switch_name):
vmware_distributed_switches = get_all_objs(content, [vim.dvs.VmwareDistributedVirtualSwitch])
for dvs in vmware_distributed_switches:
if dvs.name == switch_name:
return dvs
return None
def find_hostsystem_by_name(content, hostname):
host_system = get_all_objs(content, [vim.HostSystem])
for host in host_system:
if host.name == hostname:
return host
return None
def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None, cluster=None, folder=None, match_first=False):
""" UUID is unique to a VM, every other id returns the first match. """
si = content.searchIndex
vm = None
if vm_id_type == 'dns_name':
vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True)
elif vm_id_type == 'uuid':
# Search By BIOS UUID rather than instance UUID
vm = si.FindByUuid(datacenter=datacenter, instanceUuid=False, uuid=vm_id, vmSearch=True)
elif vm_id_type == 'ip':
vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True)
elif vm_id_type == 'vm_name':
folder = None
if cluster:
folder = cluster
elif datacenter:
folder = datacenter.hostFolder
vm = find_vm_by_name(content, vm_id, folder)
elif vm_id_type == 'inventory_path':
searchpath = folder
# get all objects for this path
f_obj = si.FindByInventoryPath(searchpath)
if f_obj:
if isinstance(f_obj, vim.Datacenter):
f_obj = f_obj.vmFolder
for c_obj in f_obj.childEntity:
if not isinstance(c_obj, vim.VirtualMachine):
continue
if c_obj.name == vm_id:
vm = c_obj
if match_first:
break
return vm
def find_vm_by_name(content, vm_name, folder=None, recurse=True):
vms = get_all_objs(content, [vim.VirtualMachine], folder, recurse=recurse)
for vm in vms:
if vm.name == vm_name:
return vm
return None
def find_host_portgroup_by_name(host, portgroup_name):
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return None
def compile_folder_path_for_object(vobj):
""" make a /vm/foo/bar/baz like folder path for an object """
paths = []
if isinstance(vobj, vim.Folder):
paths.append(vobj.name)
thisobj = vobj
while hasattr(thisobj, 'parent'):
thisobj = thisobj.parent
if thisobj.name == 'Datacenters':
break
if isinstance(thisobj, vim.Folder):
paths.append(thisobj.name)
paths.reverse()
return '/' + '/'.join(paths)
def _get_vm_prop(vm, attributes):
"""Safely get a property or return None"""
result = vm
for attribute in attributes:
try:
result = getattr(result, attribute)
except (AttributeError, IndexError):
return None
return result
def gather_vm_facts(content, vm):
""" Gather facts from vim.VirtualMachine object. """
facts = {
'module_hw': True,
'hw_name': vm.config.name,
'hw_power_status': vm.summary.runtime.powerState,
'hw_guest_full_name': vm.summary.guest.guestFullName,
'hw_guest_id': vm.summary.guest.guestId,
'hw_product_uuid': vm.config.uuid,
'hw_processor_count': vm.config.hardware.numCPU,
'hw_memtotal_mb': vm.config.hardware.memoryMB,
'hw_interfaces': [],
'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')),
'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')),
'ipv4': None,
'ipv6': None,
'annotation': vm.config.annotation,
'customvalues': {},
'snapshots': [],
'current_snapshot': None,
}
cfm = content.customFieldsManager
# Resolve custom values
for value_obj in vm.summary.customValue:
kn = value_obj.key
if cfm is not None and cfm.field:
for f in cfm.field:
if f.key == value_obj.key:
kn = f.name
# Exit the loop immediately, we found it
break
facts['customvalues'][kn] = value_obj.value
net_dict = {}
vmnet = _get_vm_prop(vm, ('guest', 'net'))
if vmnet:
for device in vmnet:
net_dict[device.macAddress] = list(device.ipAddress)
for k, v in iteritems(net_dict):
for ipaddress in v:
if ipaddress:
if '::' in ipaddress:
facts['ipv6'] = ipaddress
else:
facts['ipv4'] = ipaddress
ethernet_idx = 0
for idx, entry in enumerate(vm.config.hardware.device):
if not hasattr(entry, 'macAddress'):
continue
if entry.macAddress:
mac_addr = entry.macAddress
mac_addr_dash = mac_addr.replace(':', '-')
else:
mac_addr = mac_addr_dash = None
factname = 'hw_eth' + str(ethernet_idx)
facts[factname] = {
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': mac_addr,
'ipaddresses': net_dict.get(entry.macAddress, None),
'macaddress_dash': mac_addr_dash,
'summary': entry.deviceInfo.summary,
}
facts['hw_interfaces'].append('eth' + str(ethernet_idx))
ethernet_idx += 1
snapshot_facts = list_snapshots(vm)
if 'snapshots' in snapshot_facts:
facts['snapshots'] = snapshot_facts['snapshots']
facts['current_snapshot'] = snapshot_facts['current_snapshot']
return facts
def deserialize_snapshot_obj(obj):
return {'id': obj.id,
'name': obj.name,
'description': obj.description,
'creation_time': obj.createTime,
'state': obj.state}
def list_snapshots_recursively(snapshots):
snapshot_data = []
for snapshot in snapshots:
snapshot_data.append(deserialize_snapshot_obj(snapshot))
snapshot_data = snapshot_data + list_snapshots_recursively(snapshot.childSnapshotList)
return snapshot_data
def get_current_snap_obj(snapshots, snapob):
snap_obj = []
for snapshot in snapshots:
if snapshot.snapshot == snapob:
snap_obj.append(snapshot)
snap_obj = snap_obj + get_current_snap_obj(snapshot.childSnapshotList, snapob)
return snap_obj
def list_snapshots(vm):
result = {}
    snapshot = _get_vm_prop(vm, ('snapshot',))
if not snapshot:
return result
if vm.snapshot is None:
return result
result['snapshots'] = list_snapshots_recursively(vm.snapshot.rootSnapshotList)
current_snapref = vm.snapshot.currentSnapshot
current_snap_obj = get_current_snap_obj(vm.snapshot.rootSnapshotList, current_snapref)
result['current_snapshot'] = deserialize_snapshot_obj(current_snap_obj[0])
return result
def vmware_argument_spec():
return dict(
hostname=dict(type='str', required=True),
username=dict(type='str', aliases=['user', 'admin'], required=True),
password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
)
def connect_to_api(module, disconnect_atexit=True):
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
validate_certs = module.params['validate_certs']
if validate_certs and not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update '
                             'python or use validate_certs=false')
ssl_context = None
if not validate_certs:
ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_context.verify_mode = ssl.CERT_NONE
service_instance = None
try:
service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=ssl_context)
except vim.fault.InvalidLogin as e:
module.fail_json(msg="Unable to log on to vCenter or ESXi API at %s as %s: %s" % (hostname, username, e.msg))
except (requests.ConnectionError, ssl.SSLError) as e:
module.fail_json(msg="Unable to connect to vCenter or ESXi API at %s on TCP/443: %s" % (hostname, e))
except Exception as e:
module.fail_json(msg="Unknown error connecting to vCenter or ESXi API at %s: %s" % (hostname, e))
if service_instance is None:
module.fail_json(msg="Unknown error connecting to vCenter or ESXi API at %s" % hostname)
# Disabling atexit should be used in special cases only.
# Such as IP change of the ESXi host which removes the connection anyway.
# Also removal significantly speeds up the return of the module
if disconnect_atexit:
atexit.register(connect.Disconnect, service_instance)
return service_instance.RetrieveContent()
def get_all_objs(content, vimtype, folder=None, recurse=True):
if not folder:
folder = content.rootFolder
obj = {}
container = content.viewManager.CreateContainerView(folder, vimtype, recurse)
for managed_object_ref in container.view:
obj.update({managed_object_ref: managed_object_ref.name})
return obj
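# Illustrative sketch (not part of this utils file): how a minimal Ansible
# module might combine vmware_argument_spec(), connect_to_api() and the
# find_* helpers above. The 'vm_name' parameter is a hypothetical placeholder,
# not something this file defines.
def _example_module_main():
    from ansible.module_utils.basic import AnsibleModule
    argument_spec = vmware_argument_spec()
    argument_spec.update(vm_name=dict(type='str', required=True))
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    content = connect_to_api(module)
    vm = find_vm_by_name(content, module.params['vm_name'])
    module.exit_json(changed=False, found=bool(vm))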
def fetch_file_from_guest(module, content, vm, username, password, src, dest):
""" Use VMWare's filemanager api to fetch a file over http """
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if tools_status == 'toolsNotInstalled' or tools_status == 'toolsNotRunning':
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/FileManager/FileTransferInformation.rst
fti = content.guestOperationsManager.fileManager. \
InitiateFileTransferFromGuest(vm, creds, src)
result['size'] = fti.size
result['url'] = fti.url
# Use module_utils to fetch the remote url returned from the api
rsp, info = fetch_url(module, fti.url, use_proxy=False,
force=True, last_mod_time=None,
timeout=10, headers=None)
# save all of the transfer data
for k, v in iteritems(info):
result[k] = v
# exit early if xfer failed
if info['status'] != 200:
result['failed'] = True
return result
# attempt to read the content and write it
try:
with open(dest, 'wb') as f:
f.write(rsp.read())
except Exception as e:
result['failed'] = True
result['msg'] = str(e)
return result
def push_file_to_guest(module, content, vm, username, password, src, dest, overwrite=True):
""" Use VMWare's filemanager api to fetch a file over http """
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if tools_status == 'toolsNotInstalled' or tools_status == 'toolsNotRunning':
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
# the api requires a filesize in bytes
fdata = None
try:
# filesize = os.path.getsize(src)
filesize = os.stat(src).st_size
with open(src, 'rb') as f:
fdata = f.read()
result['local_filesize'] = filesize
except Exception as e:
result['failed'] = True
result['msg'] = "Unable to read src file: %s" % str(e)
return result
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.vm.guest.FileManager.html#initiateFileTransferToGuest
file_attribute = vim.vm.guest.FileManager.FileAttributes()
url = content.guestOperationsManager.fileManager. \
InitiateFileTransferToGuest(vm, creds, dest, file_attribute,
filesize, overwrite)
# PUT the filedata to the url ...
rsp, info = fetch_url(module, url, method="put", data=fdata,
use_proxy=False, force=True, last_mod_time=None,
timeout=10, headers=None)
result['msg'] = str(rsp.read())
# save all of the transfer data
for k, v in iteritems(info):
result[k] = v
return result
def run_command_in_guest(content, vm, username, password, program_path, program_args, program_cwd, program_env):
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if (tools_status == 'toolsNotInstalled' or
tools_status == 'toolsNotRunning'):
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
try:
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
pm = content.guestOperationsManager.processManager
# https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
ps = vim.vm.guest.ProcessManager.ProgramSpec(
# programPath=program,
# arguments=args
programPath=program_path,
arguments=program_args,
workingDirectory=program_cwd,
)
res = pm.StartProgramInGuest(vm, creds, ps)
result['pid'] = res
pdata = pm.ListProcessesInGuest(vm, creds, [res])
# wait for pid to finish
while not pdata[0].endTime:
time.sleep(1)
pdata = pm.ListProcessesInGuest(vm, creds, [res])
result['owner'] = pdata[0].owner
result['startTime'] = pdata[0].startTime.isoformat()
result['endTime'] = pdata[0].endTime.isoformat()
result['exitCode'] = pdata[0].exitCode
if result['exitCode'] != 0:
result['failed'] = True
result['msg'] = "program exited non-zero"
else:
result['msg'] = "program completed successfully"
except Exception as e:
result['msg'] = str(e)
result['failed'] = True
return result
def serialize_spec(clonespec):
"""Serialize a clonespec or a relocation spec"""
data = {}
attrs = dir(clonespec)
attrs = [x for x in attrs if not x.startswith('_')]
for x in attrs:
xo = getattr(clonespec, x)
if callable(xo):
continue
xt = type(xo)
if xo is None:
data[x] = None
elif issubclass(xt, list):
data[x] = []
for xe in xo:
data[x].append(serialize_spec(xe))
elif issubclass(xt, string_types + integer_types + (float, bool)):
data[x] = xo
elif issubclass(xt, dict):
data[x] = {}
for k, v in xo.items():
data[x][k] = serialize_spec(v)
elif isinstance(xo, vim.vm.ConfigSpec):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.RelocateSpec):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.device.VirtualDisk):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.Description):
data[x] = {
'dynamicProperty': serialize_spec(xo.dynamicProperty),
'dynamicType': serialize_spec(xo.dynamicType),
'label': serialize_spec(xo.label),
'summary': serialize_spec(xo.summary),
}
elif hasattr(xo, 'name'):
data[x] = str(xo) + ':' + xo.name
elif isinstance(xo, vim.vm.ProfileSpec):
pass
else:
data[x] = str(xt)
return data
def find_host_by_cluster_datacenter(module, content, datacenter_name, cluster_name, host_name):
dc = find_datacenter_by_name(content, datacenter_name)
if dc is None:
module.fail_json(msg="Unable to find datacenter with name %s" % datacenter_name)
cluster = find_cluster_by_name(content, cluster_name, datacenter=dc)
if cluster is None:
module.fail_json(msg="Unable to find cluster with name %s" % cluster_name)
for host in cluster.host:
if host.name == host_name:
return host, cluster
return None, cluster
|
|
from __future__ import absolute_import
from __future__ import print_function
from functools import wraps
from django.core.cache import cache as djcache
from django.core.cache import caches
from django.conf import settings
from django.db.models import Q
from django.core.cache.backends.base import BaseCache
from typing import Any, Callable, Dict, Iterable, List, Optional, Union, TypeVar, Text
from zerver.lib.utils import statsd, statsd_key, make_safe_digest
import subprocess
import time
import base64
import random
import sys
import os
import os.path
import hashlib
import six
if False:
from zerver.models import UserProfile, Realm, Message
# These modules have to be imported for type annotations but
# they cannot be imported at runtime due to cyclic dependency.
FuncT = TypeVar('FuncT', bound=Callable[..., Any])
class NotFoundInCache(Exception):
pass
remote_cache_time_start = 0.0
remote_cache_total_time = 0.0
remote_cache_total_requests = 0
def get_remote_cache_time():
# type: () -> float
return remote_cache_total_time
def get_remote_cache_requests():
# type: () -> int
return remote_cache_total_requests
def remote_cache_stats_start():
# type: () -> None
global remote_cache_time_start
remote_cache_time_start = time.time()
def remote_cache_stats_finish():
# type: () -> None
global remote_cache_total_time
global remote_cache_total_requests
global remote_cache_time_start
remote_cache_total_requests += 1
remote_cache_total_time += (time.time() - remote_cache_time_start)
def get_or_create_key_prefix():
# type: () -> Text
if settings.CASPER_TESTS:
# This sets the prefix for the benefit of the Casper tests.
#
# Having a fixed key is OK since we don't support running
# multiple copies of the casper tests at the same time anyway.
return u'casper_tests:'
elif settings.TEST_SUITE:
# The Python tests overwrite KEY_PREFIX on each test, but use
# this codepath as well, just to save running the more complex
# code below for reading the normal key prefix.
return u'django_tests_unused:'
# directory `var` should exist in production
subprocess.check_call(["mkdir", "-p", os.path.join(settings.DEPLOY_ROOT, "var")])
filename = os.path.join(settings.DEPLOY_ROOT, "var", "remote_cache_prefix")
try:
fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o444)
random_hash = hashlib.sha256(Text(random.getrandbits(256)).encode('utf-8')).digest()
prefix = base64.b16encode(random_hash)[:32].decode('utf-8').lower() + ':'
# This does close the underlying file
with os.fdopen(fd, 'w') as f:
f.write(prefix + "\n")
except OSError:
# The file already exists
tries = 1
while tries < 10:
with open(filename, 'r') as f:
prefix = f.readline()[:-1]
if len(prefix) == 33:
break
tries += 1
prefix = ''
time.sleep(0.5)
if not prefix:
print("Could not read remote cache key prefix file")
sys.exit(1)
return prefix
KEY_PREFIX = get_or_create_key_prefix() # type: Text
def bounce_key_prefix_for_testing(test_name):
# type: (Text) -> None
global KEY_PREFIX
KEY_PREFIX = test_name + u':' + Text(os.getpid()) + u':'
def get_cache_backend(cache_name):
# type: (Optional[str]) -> BaseCache
if cache_name is None:
return djcache
return caches[cache_name]
def get_cache_with_key(keyfunc, cache_name=None):
# type: (Any, Optional[str]) -> Any
"""
The main goal of this function getting value from the cache like in the "cache_with_key".
A cache value can contain any data including the "None", so
here used exception for case if value isn't found in the cache.
"""
def decorator(func):
# type: (Callable[..., Any]) -> (Callable[..., Any])
@wraps(func)
def func_with_caching(*args, **kwargs):
# type: (*Any, **Any) -> Callable[..., Any]
key = keyfunc(*args, **kwargs)
val = cache_get(key, cache_name=cache_name)
if val is not None:
return val[0]
raise NotFoundInCache()
return func_with_caching
return decorator
def cache_with_key(keyfunc, cache_name=None, timeout=None, with_statsd_key=None):
# type: (Any, Optional[str], Optional[int], Optional[str]) -> Any
# This function can't be typed perfectly because returning a generic function
# isn't supported in mypy - https://github.com/python/mypy/issues/1551.
"""Decorator which applies Django caching to a function.
Decorator argument is a function which computes a cache key
from the original function's arguments. You are responsible
for avoiding collisions with other uses of this decorator or
other uses of caching."""
def decorator(func):
# type: (Callable[..., Any]) -> (Callable[..., Any])
@wraps(func)
def func_with_caching(*args, **kwargs):
# type: (*Any, **Any) -> Callable[..., Any]
key = keyfunc(*args, **kwargs)
val = cache_get(key, cache_name=cache_name)
extra = ""
if cache_name == 'database':
extra = ".dbcache"
if with_statsd_key is not None:
metric_key = with_statsd_key
else:
metric_key = statsd_key(key)
status = "hit" if val is not None else "miss"
statsd.incr("cache%s.%s.%s" % (extra, metric_key, status))
# Values are singleton tuples so that we can distinguish
# a result of None from a missing key.
if val is not None:
return val[0]
val = func(*args, **kwargs)
cache_set(key, val, cache_name=cache_name, timeout=timeout)
return val
return func_with_caching
return decorator
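# Illustrative usage sketch (hypothetical key format and function, not from
# the original file): cache an expensive per-user computation under a key
# derived from the user id, in the default cache, for one hour.
def _example_settings_cache_key(user_id):
    # type: (int) -> Text
    return u"example_user_settings:%s" % (user_id,)

@cache_with_key(_example_settings_cache_key, timeout=3600)
def _get_example_user_settings(user_id):
    # type: (int) -> Dict[Text, Any]
    # Stand-in for an expensive lookup (e.g. a database query).
    return {u'user_id': user_id, u'theme': u'default'}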
def cache_set(key, val, cache_name=None, timeout=None):
# type: (Text, Any, Optional[str], Optional[int]) -> None
remote_cache_stats_start()
cache_backend = get_cache_backend(cache_name)
cache_backend.set(KEY_PREFIX + key, (val,), timeout=timeout)
remote_cache_stats_finish()
def cache_get(key, cache_name=None):
# type: (Text, Optional[str]) -> Any
remote_cache_stats_start()
cache_backend = get_cache_backend(cache_name)
ret = cache_backend.get(KEY_PREFIX + key)
remote_cache_stats_finish()
return ret
def cache_get_many(keys, cache_name=None):
# type: (List[Text], Optional[str]) -> Dict[Text, Any]
keys = [KEY_PREFIX + key for key in keys]
remote_cache_stats_start()
ret = get_cache_backend(cache_name).get_many(keys)
remote_cache_stats_finish()
return dict([(key[len(KEY_PREFIX):], value) for key, value in ret.items()])
def cache_set_many(items, cache_name=None, timeout=None):
# type: (Dict[Text, Any], Optional[str], Optional[int]) -> None
new_items = {}
for key in items:
new_items[KEY_PREFIX + key] = items[key]
items = new_items
remote_cache_stats_start()
get_cache_backend(cache_name).set_many(items, timeout=timeout)
remote_cache_stats_finish()
def cache_delete(key, cache_name=None):
# type: (Text, Optional[str]) -> None
remote_cache_stats_start()
get_cache_backend(cache_name).delete(KEY_PREFIX + key)
remote_cache_stats_finish()
def cache_delete_many(items, cache_name=None):
# type: (Iterable[Text], Optional[str]) -> None
remote_cache_stats_start()
get_cache_backend(cache_name).delete_many(
KEY_PREFIX + item for item in items)
remote_cache_stats_finish()
# Required Arguments are as follows:
# * object_ids: The list of object ids to look up
# * cache_key_function: object_id => cache key
# * query_function: [object_ids] => [objects from database]
# Optional keyword arguments:
# * setter: Function to call before storing items to cache (e.g. compression)
# * extractor: Function to call on items returned from cache
# (e.g. decompression). Should be the inverse of the setter
# function.
# * id_fetcher: Function mapping an object from database => object_id
# (in case we're using a key more complex than obj.id)
# * cache_transformer: Function mapping an object from database =>
# value for cache (in case the values that we're caching are some
# function of the objects, not the objects themselves)
ObjKT = TypeVar('ObjKT', int, Text)
ItemT = Any # https://github.com/python/mypy/issues/1721
CompressedItemT = Any # https://github.com/python/mypy/issues/1721
def generic_bulk_cached_fetch(cache_key_function, # type: Callable[[ObjKT], Text]
query_function, # type: Callable[[List[ObjKT]], Iterable[Any]]
object_ids, # type: Iterable[ObjKT]
extractor=lambda obj: obj, # type: Callable[[CompressedItemT], ItemT]
setter=lambda obj: obj, # type: Callable[[ItemT], CompressedItemT]
id_fetcher=lambda obj: obj.id, # type: Callable[[Any], ObjKT]
cache_transformer=lambda obj: obj # type: Callable[[Any], ItemT]
):
# type: (...) -> Dict[ObjKT, Any]
cache_keys = {} # type: Dict[ObjKT, Text]
for object_id in object_ids:
cache_keys[object_id] = cache_key_function(object_id)
cached_objects = cache_get_many([cache_keys[object_id]
for object_id in object_ids])
for (key, val) in cached_objects.items():
cached_objects[key] = extractor(cached_objects[key][0])
needed_ids = [object_id for object_id in object_ids if
cache_keys[object_id] not in cached_objects]
db_objects = query_function(needed_ids)
items_for_remote_cache = {} # type: Dict[Text, Any]
for obj in db_objects:
key = cache_keys[id_fetcher(obj)]
item = cache_transformer(obj)
items_for_remote_cache[key] = (setter(item),)
cached_objects[key] = item
if len(items_for_remote_cache) > 0:
cache_set_many(items_for_remote_cache)
return dict((object_id, cached_objects[cache_keys[object_id]]) for object_id in object_ids
if cache_keys[object_id] in cached_objects)
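# Illustrative usage sketch (not part of the original file): bulk-fetch
# UserProfile rows by id with generic_bulk_cached_fetch, reusing the
# user_profile_by_id cache key defined below. The helper name is a
# hypothetical placeholder.
def _example_bulk_get_user_profiles(user_ids):
    # type: (List[int]) -> Dict[int, Any]
    from zerver.models import UserProfile  # deferred to avoid a cyclic import
    return generic_bulk_cached_fetch(
        cache_key_function=user_profile_by_id_cache_key,
        query_function=lambda ids: UserProfile.objects.filter(id__in=ids),
        object_ids=user_ids)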
def cache(func):
# type: (FuncT) -> FuncT
"""Decorator which applies Django caching to a function.
Uses a key based on the function's name, filename, and
the repr() of its arguments."""
func_uniqifier = '%s-%s' % (func.__code__.co_filename, func.__name__) # type: ignore # https://github.com/python/mypy/issues/1923
@wraps(func)
def keyfunc(*args, **kwargs):
# type: (*Any, **Any) -> str
# Django complains about spaces because memcached rejects them
key = func_uniqifier + repr((args, kwargs))
return key.replace('-', '--').replace(' ', '-s')
return cache_with_key(keyfunc)(func)
def display_recipient_cache_key(recipient_id):
# type: (int) -> Text
return u"display_recipient_dict:%d" % (recipient_id,)
def user_profile_by_email_cache_key(email):
# type: (Text) -> Text
# See the comment in zerver/lib/avatar_hash.py:gravatar_hash for why we
# are proactively encoding email addresses even though they will
# with high likelihood be ASCII-only for the foreseeable future.
return u'user_profile_by_email:%s' % (make_safe_digest(email.strip()),)
def user_profile_by_id_cache_key(user_profile_id):
# type: (int) -> Text
return u"user_profile_by_id:%s" % (user_profile_id,)
# TODO: Refactor these cache helpers into another file that can import
# models.py so that python v3 style type annotations can also work.
active_user_dict_fields = [
'id', 'full_name', 'short_name', 'email',
'avatar_source', 'avatar_version',
'is_realm_admin', 'is_bot', 'timezone'] # type: List[str]
def active_user_dicts_in_realm_cache_key(realm):
# type: (Realm) -> Text
return u"active_user_dicts_in_realm:%s" % (realm.id,)
bot_dict_fields = ['id', 'full_name', 'short_name', 'email',
'is_active', 'default_sending_stream__name',
'default_events_register_stream__name',
'default_all_public_streams', 'api_key',
'bot_owner__email', 'avatar_source',
'avatar_version'] # type: List[str]
def bot_dicts_in_realm_cache_key(realm):
# type: (Realm) -> Text
return u"bot_dicts_in_realm:%s" % (realm.id,)
def get_stream_cache_key(stream_name, realm):
# type: (Text, Union[Realm, int]) -> Text
from zerver.models import Realm
if isinstance(realm, Realm):
realm_id = realm.id
else:
realm_id = realm
return u"stream_by_realm_and_name:%s:%s" % (
realm_id, make_safe_digest(stream_name.strip().lower()))
def delete_user_profile_caches(user_profiles):
# type: (Iterable[UserProfile]) -> None
keys = []
for user_profile in user_profiles:
keys.append(user_profile_by_email_cache_key(user_profile.email))
keys.append(user_profile_by_id_cache_key(user_profile.id))
cache_delete_many(keys)
def delete_display_recipient_cache(user_profile):
# type: (UserProfile) -> None
from zerver.models import Subscription # We need to import here to avoid cyclic dependency.
recipient_ids = Subscription.objects.filter(user_profile=user_profile)
recipient_ids = recipient_ids.values_list('recipient_id', flat=True)
keys = [display_recipient_cache_key(rid) for rid in recipient_ids]
cache_delete_many(keys)
# Called by models.py to flush the user_profile cache whenever we save
# a user_profile object
def flush_user_profile(sender, **kwargs):
# type: (Any, **Any) -> None
user_profile = kwargs['instance']
delete_user_profile_caches([user_profile])
# Invalidate our active_users_in_realm info dict if any user has changed
# the fields in the dict or become (in)active
if kwargs.get('update_fields') is None or \
len(set(active_user_dict_fields + ['is_active', 'email']) &
set(kwargs['update_fields'])) > 0:
cache_delete(active_user_dicts_in_realm_cache_key(user_profile.realm))
    if kwargs.get('update_fields') is None or \
'email' in kwargs['update_fields']:
delete_display_recipient_cache(user_profile)
# Invalidate our bots_in_realm info dict if any bot has
# changed the fields in the dict or become (in)active
if user_profile.is_bot and (kwargs['update_fields'] is None or
(set(bot_dict_fields) & set(kwargs['update_fields']))):
cache_delete(bot_dicts_in_realm_cache_key(user_profile.realm))
# Invalidate realm-wide alert words cache if any user in the realm has changed
# alert words
if kwargs.get('update_fields') is None or "alert_words" in kwargs['update_fields']:
cache_delete(realm_alert_words_cache_key(user_profile.realm))
# Called by models.py to flush various caches whenever we save
# a Realm object. The main tricky thing here is that Realm info is
# generally cached indirectly through user_profile objects.
def flush_realm(sender, **kwargs):
# type: (Any, **Any) -> None
realm = kwargs['instance']
users = realm.get_active_users()
delete_user_profile_caches(users)
if realm.deactivated:
cache_delete(active_user_dicts_in_realm_cache_key(realm))
cache_delete(bot_dicts_in_realm_cache_key(realm))
cache_delete(realm_alert_words_cache_key(realm))
def realm_alert_words_cache_key(realm):
# type: (Realm) -> Text
return u"realm_alert_words:%s" % (realm.string_id,)
# Called by models.py to flush the stream cache whenever we save a stream
# object.
def flush_stream(sender, **kwargs):
# type: (Any, **Any) -> None
from zerver.models import UserProfile
stream = kwargs['instance']
items_for_remote_cache = {}
items_for_remote_cache[get_stream_cache_key(stream.name, stream.realm)] = (stream,)
cache_set_many(items_for_remote_cache)
if kwargs.get('update_fields') is None or 'name' in kwargs['update_fields'] and \
UserProfile.objects.filter(
Q(default_sending_stream=stream) |
Q(default_events_register_stream=stream)).exists():
cache_delete(bot_dicts_in_realm_cache_key(stream.realm))
# TODO: Rename to_dict_cache_key_id and to_dict_cache_key
def to_dict_cache_key_id(message_id, apply_markdown):
# type: (int, bool) -> Text
return u'message_dict:%d:%d' % (message_id, apply_markdown)
def to_dict_cache_key(message, apply_markdown):
# type: (Message, bool) -> Text
return to_dict_cache_key_id(message.id, apply_markdown)
def flush_message(sender, **kwargs):
# type: (Any, **Any) -> None
message = kwargs['instance']
cache_delete(to_dict_cache_key(message, False))
cache_delete(to_dict_cache_key(message, True))
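# Illustrative sketch (not part of this file): the flush_* handlers above are
# meant to be wired up from models.py via Django's post_save signals, roughly
# as below. The exact wiring in models.py may differ.
def _example_connect_flush_signals():
    from django.db.models.signals import post_save
    from zerver.models import UserProfile, Realm, Stream, Message
    post_save.connect(flush_user_profile, sender=UserProfile)
    post_save.connect(flush_realm, sender=Realm)
    post_save.connect(flush_stream, sender=Stream)
    post_save.connect(flush_message, sender=Message)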
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8 tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""
#============================================================================#
# #
# FILE: make_sheet (python 3) #
# AUTHOR: Michael D Dacre, [email protected] #
# ORGANIZATION: Stanford University #
# LICENSE: MIT License, property of Stanford, use as you wish #
# CREATED: 2015-12-13 10:07 #
# Last modified: 2015-12-14 16:59 #
# #
# DESCRIPTION: Format the output of the alleleseq pipeline into gene and #
# snp level tab-delimited outputs sorted by tissue #
# #
# USAGE: Provide a list of count files, in the same directory, there #
# should exist the files <count_file>.FDR.txt and #
# <count_file>.interestingHets.txt #
# #
#============================================================================#
"""
import os
import sys
import operator
from re import sub
from bed_lookup import BedFile # github.com:MikeDacre/python_bed_lookup.git
# Defaults should be altered for your usage
tissue_lookup_file = 'tissue_lookup.txt' # Format: dir\tcross\ttissue\tfailed
master_lookup_file = 'master.lookup.v2' # Format: gene_name\tmodel_name
bed_snp_file = 'refseq.ucsc.ensembl.mRNA.mm9.nr.bed'
def create_tissue_lookup(tissue_file=tissue_lookup_file):
""" Return a dictionary of sample data from the tissue
lookup file. This should be altered for the sample
data you want to extract. The index here is the name
of the alleleseq output file up to the first period.
In my case the name 123.cnt.counts.txt turns into 123 """
tissue_lookup = {}
with open(tissue_file) as infile:
for line in infile:
fields = line.rstrip().split('\t')
assert(len(fields) == 4)
tissue_lookup[fields[0]] = {'cross': fields[1],
'tissue': fields[2],
'failed': fields[3]}
return tissue_lookup
def create_master_lookup(master_file=master_lookup_file):
""" Return a dictionary from a two column file. Used for
replacing gene names extracted from the bed with other
names. Used later for the 'model' parameter, which is
what the gene-level data is indexed by """
master_lookup = {}
with open(master_file) as infile:
for line in infile:
fields = line.rstrip().split('\t')
assert(len(fields) == 2)
master_lookup[fields[0]] = fields[1]
return master_lookup
def get_snp_data(count_files, tissue_file=tissue_lookup_file,
bed_file=bed_snp_file, master_file=master_lookup_file):
""" Extract SNP-level data from the count files and add
sample level information from the tissue_file (via
create_tissue_lookup and gene names from bed_lookup
and master_lookup (using a bed file and master gene
name dictionary) """
tissues = {} # Dictionary to hold all data
tissue_lookup = create_tissue_lookup(tissue_file)
master_lookup = create_master_lookup(master_file)
bed_lookup = BedFile(bed_file)
for i in count_files:
# Create entry for this count file
t = i.split('.')[0]
tissues[t] = {}
# Lookup tissue-level data
if t in tissue_lookup:
tissues[t]['tissue'] = tissue_lookup[t]['tissue']
tissues[t]['cross'] = tissue_lookup[t]['cross']
tissues[t]['failed'] = tissue_lookup[t]['failed']
else:
tissues[t]['tissue'] = 'unknown'
tissues[t]['cross'] = 'unknown'
tissues[t]['failed'] = 'unknown'
# Get list of hets that beat an FDR of 0.1
hets = []
with open(i + '.interestingHets.txt') as infile:
for line in infile:
if line.startswith('chrm'):
continue
f = line.rstrip().split('\t')
chr = f[0] if f[0].startswith('c') else 'chr' + f[0]
snp = f[1]
hets.append(chr + '_' + snp)
# Extract SNPs from count file
tissues[t]['snps'] = {}
with open(i) as infile:
for line in infile:
if line.startswith('chrm'):
continue
f = line.rstrip().split('\t')
chr = f[0] if f[0].startswith('c') else 'chr' + str(f[0])
snp = f[1]
gene = bed_lookup.lookup(chr, int(snp))
gene = gene if gene else ''
model = master_lookup[gene] if gene else ''
sig = 'Y' if chr + '_' + snp in hets else 'N'
id = chr + '_' + snp
tissues[t]['snps'][id] = {
'chr': chr,
'snp': snp,
'gene': gene,
'model': model,
'mat_gtyp': f[7],
'pat_gtyp': f[8],
'counts': {'A': int(f[9]),
'C': int(f[10]),
'G': int(f[11]),
'T': int(f[12]),
'unknown': 'unknown'},
'win': f[13],
'p': f[15],
'beats_FDR': sig}
return tissues
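# Illustrative sketch (not produced by the original script): the shape of the
# dictionary returned by get_snp_data(). The directory id, gene names and
# counts below are hypothetical placeholders.
_EXAMPLE_SNP_DICT = {
    '123': {
        'tissue': 'liver',
        'cross': 'CxB',
        'failed': 'N',
        'snps': {
            'chr1_3000123': {
                'chr': 'chr1',
                'snp': '3000123',
                'gene': 'Xkr4',      # gene symbol from the bed lookup
                'model': 'Xkr4',     # model name from the master lookup
                'mat_gtyp': 'A',
                'pat_gtyp': 'G',
                'counts': {'A': 10, 'C': 0, 'G': 7, 'T': 0,
                           'unknown': 'unknown'},
                'win': 'M',          # winning parent as reported in the count file
                'p': '0.01',
                'beats_FDR': 'Y',
            },
        },
    },
}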
def snps_to_genes(snp_dict):
""" Take a dictionary of snps by tissue from get_snp_data()
and return a dictionary inventoried by tissue->gene
instead of tissue->snp.
'mat' and 'pat' data are converted to data named by
parent and counts are indexed by parent rather than
by base.
In addition, a 'snp_count' entry is added to track the
number of snps contributing to a count. """
gene_dict = {}
for t in snp_dict.keys():
assert(t not in gene_dict)
gene_dict[t] = {}
gene_dict[t]['genes'] = {}
gene_dict[t]['tissue'] = snp_dict[t]['tissue']
gene_dict[t]['cross'] = snp_dict[t]['cross']
gene_dict[t]['failed'] = snp_dict[t]['failed']
for location, data in snp_dict[t]['snps'].items():
# Assign genotype to single parent
if snp_dict[t]['cross'] == 'CxB' or snp_dict[t]['cross'] == 'cxb':
ca = data['pat_gtyp']
b6 = data['mat_gtyp']
ca_counts = data['counts'][ca]
b6_counts = data['counts'][b6]
elif snp_dict[t]['cross'] == 'BxC' or snp_dict[t]['cross'] == 'bxc':
ca = data['mat_gtyp']
b6 = data['pat_gtyp']
ca_counts = data['counts'][ca]
b6_counts = data['counts'][b6]
elif snp_dict[t]['cross'] == 'unknown':
ca = 'unknown'
b6 = 'unknown'
ca_counts = 'unknown'
b6_counts = 'unknown'
else:
                raise Exception("Unrecognized cross type: %r" % snp_dict[t]['cross'])
m = data['model'] # Index by gene model name
if m in gene_dict[t]['genes']:
# This is a new SNP in an existing transcript
gene_dict[t]['genes'][m]['snp_count'] += 1
gene_dict[t]['genes'][m]['ca_counts'] = \
_combine_counts(gene_dict[t]['genes'][m]['ca_counts'], ca_counts)
gene_dict[t]['genes'][m]['b6_counts'] = \
_combine_counts(gene_dict[t]['genes'][m]['b6_counts'], b6_counts)
gene_dict[t]['genes'][m]['p_vals'].append(data['p'])
gene_dict[t]['genes'][m]['beats_FDR'].append(data['beats_FDR'])
else:
gene_dict[t]['genes'][m] = {}
gene_dict[t]['genes'][m]['snp_count'] = 1
gene_dict[t]['genes'][m]['ca_counts'] = ca_counts
gene_dict[t]['genes'][m]['b6_counts'] = b6_counts
gene_dict[t]['genes'][m]['p_vals'] = [data['p']]
gene_dict[t]['genes'][m]['beats_FDR'] = [data['beats_FDR']]
gene_dict[t]['genes'][m]['gene'] = data['gene']
return gene_dict
def print_snp_data(snp_dict, outfile=''):
""" Print tab delimited data from output of make_data() """
o = open(outfile, 'w') if outfile else sys.stdout
o.write('DIR ID\tChr\tlocation\tGene\tTX\ttissue\tcross\tB6 Counts\tCAST Counts\t' +
'p-value\tbeats FDR 0.1\tB6 gtyp\tCAST gtyp\tMat gTyp\tPat gTyp\t' +
            'A counts\tC counts\tG Counts\tT Counts\tWinning Parent\tFailed\n')
for t in snp_dict.keys():
temp_dict = {} # A dictionary to allow sorting for printing
for k, d in snp_dict[t]['snps'].items():
if snp_dict[t]['cross'] == 'CxB' or snp_dict[t]['cross'] == 'cxb':
ca = d['pat_gtyp']
b6 = d['mat_gtyp']
elif snp_dict[t]['cross'] == 'BxC' or snp_dict[t]['cross'] == 'bxc':
ca = d['mat_gtyp']
b6 = d['pat_gtyp']
elif snp_dict[t]['cross'] == 'unknown':
ca = 'unknown'
b6 = 'unknown'
else:
                raise Exception("Unrecognized cross type: %r" % snp_dict[t]['cross'])
print_string = [t, d['chr'], d['snp'], d['model'], d['gene'],
snp_dict[t]['tissue'], snp_dict[t]['cross'],
d['counts'][b6], d['counts'][ca], d['p'],
d['beats_FDR'], b6, ca, d['mat_gtyp'], d['pat_gtyp'],
d['counts']['A'], d['counts']['C'], d['counts']['G'],
d['counts']['T'], d['win'], snp_dict[t]['failed']]
print_string = tuple([str(i) for i in print_string])
if d['chr'] not in temp_dict:
temp_dict[d['chr']] = {}
temp_dict[d['chr']][int(d['snp'])] = print_string
temp_dict = _make_sorted(temp_dict)
for k, v in temp_dict:
for i, print_string in v:
o.write('\t'.join(print_string) + '\n')
o.close()
def print_gene_data(gene_dict, outfile=''):
""" Print tab delimited data from output of make_data() """
o = open(outfile, 'w') if outfile else sys.stdout
o.write('Gene\tTX\tTissue\tDIR ID\tCross\tB6 Counts\tCAST Counts\t' +
'p-value\tbeats FDR 0.1\tSNP Count\tFailed\n')
for t in gene_dict.keys():
for k, d in gene_dict[t]['genes'].items():
p_vals = ','.join(d['p_vals'])
fdrs = ','.join(d['beats_FDR'])
print_string = [k, d['gene'], gene_dict[t]['tissue'], t,
gene_dict[t]['cross'], d['b6_counts'], d['ca_counts'],
p_vals, fdrs, d['snp_count'], gene_dict[t]['failed']]
print_string = tuple([str(i) for i in print_string])
o.write('\t'.join(print_string) + '\n')
o.close()
def main(files, tissue_file=tissue_lookup_file, bed_file=bed_snp_file,
master_file=master_lookup_file, outfile='', snp_outfile=''):
""" Run everything """
snp_dict = get_snp_data(files, tissue_file, bed_file, master_file)
gene_dict = snps_to_genes(snp_dict)
if snp_outfile:
print_snp_data(snp_dict, snp_outfile)
print_gene_data(gene_dict, outfile)
#####################
# Don't alter these #
#####################
def _combine_counts(count1, count2):
""" Sum two counts, but check that if one is 'unknown',
both are 'unknown'. In those cases, return a single
value of 'unknown'. """
if count1 == 'unknown' or count2 == 'unknown':
assert(count1 == 'unknown' and count2 == 'unknown')
return 'unknown'
assert(type(count1) == int and type(count2) == int)
return count1 + count2
def _make_sorted(dict):
""" Sort a dictionary for printing """
print_dict = {}
for k, v in dict.items():
index = sub(r'chr', '', k)
if index == 'X':
index = '99'
elif index == 'Y':
index = '100'
elif index == 'M' or index == 'MT':
index = '101'
print_dict[int(index)] = sorted(v.items(), key=operator.itemgetter(1))
return sorted(print_dict.items(), key=operator.itemgetter(0))
###########################
# For running as a script #
###########################
if __name__ == '__main__' and '__file__' in globals():
""" Command Line Argument Parsing """
import argparse
f_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=f_class)
# Input files
parser.add_argument('count_files', nargs='+',
help="Files created by AlleleSeq_pipeline_v1.2a/CombineSnpCounts.py")
# Optional Files
parser.add_argument('-t', '--tissue_file', default=tissue_lookup_file,
help="Tissue lookup file")
parser.add_argument('-b', '--bed_snp_file', default=bed_snp_file,
help="Bed format gene lookup file")
parser.add_argument('-m', '--master_lookup', default=master_lookup_file,
help="Model from gene lookup file")
parser.add_argument('-o', '--gene_outfile', default='',
help="Output file, Default STDOUT")
parser.add_argument('-s', '--snp_outfile', default='',
help="Also print SNP level data to this file. " +
"By default, SNP level data are not output.")
args = parser.parse_args()
# Run the script
main(args.count_files, args.tissue_file, args.bed_snp_file, args.master_lookup,
args.gene_outfile, args.snp_outfile)
##
# The End #
|
|
from __future__ import (absolute_import, division, print_function)
from logging import getLogger
from PySide import QtCore, QtGui
from ret.elf import RetkitElfDocument
from .ConsoleWindow import ConsoleWindow
from .Ui_RetWindow import Ui_RetWindow
log = getLogger("ret.ui.qt")
class FunctionTableModel(QtCore.QAbstractTableModel):
headerText = ['Start address', 'End address', 'Name', 'Return type',
'Arguments', 'Convention']
def __init__(self, document, parent=None):
super(FunctionTableModel, self).__init__(parent)
self.document = document
return
def columnCount(self, parent):
return 6
def rowCount(self, parent):
return len(self.document.functions)
def headerData(self, pos, orientation, role):
if (role == QtCore.Qt.ItemDataRole.DisplayRole and
orientation == QtCore.Qt.Orientation.Horizontal and
pos < len(self.headerText)):
return self.headerText[pos]
else:
return None
def data(self, index, role):
row = index.row()
col = index.column()
if (role != QtCore.Qt.ItemDataRole.DisplayRole or
row >= len(self.document.functions) or
col >= len(self.headerText)):
return None
fn = self.document.functions[row]
return ["0x%08x" % fn.start_address, "0x%08x" % fn.end_address,
fn.name, str(fn.return_type),
str(fn.arguments) if fn.arguments else "void",
fn.calling_convention][col]
def parent(self, index):
return self.createIndex(-1, -1)
sort_functions = [
lambda fn: fn.start_address,
lambda fn: fn.end_address,
lambda fn: fn.name,
lambda fn: str(fn.return_type),
lambda fn: str(fn.arguments) if fn.arguments else "void",
lambda fn: fn.calling_convention,
]
def sort(self, column, order):
log.debug("sort: column=%r order=%r", column, order)
reverse = (order == QtCore.Qt.SortOrder.DescendingOrder)
self.document.functions.sort(
key=self.sort_functions[column],
reverse=reverse)
self.dataChanged.emit(
self.createIndex(0, 0),
self.createIndex(len(self.document.functions) - 1, 5))
return
class FunctionDisassemblyModel(QtCore.QAbstractTableModel):
headerText = ['Address', 'Instruction']
def __init__(self, function):
"""\
FunctionDisassemblyModel(function) -> model
Create a new FunctionDisassemblyModel object for the given function
(normally a ret.state.Function object or subclass thereof). This allows the
function disassembly to be viewed in a QTableView object.
"""
super(FunctionDisassemblyModel, self).__init__()
self.function = function
self.instructions = list(function.instructions.values())
self.instructions.sort(key=lambda instr: instr.addr)
return
def columnCount(self, parent):
return 1
def rowCount(self, parent):
return len(self.instructions)
def headerData(self, pos, orientation, role):
if role == QtCore.Qt.ItemDataRole.DisplayRole:
try:
if orientation == QtCore.Qt.Orientation.Horizontal:
return self.headerText[pos]
else:
return hex(self.instructions[pos].addr)
except IndexError:
pass
return None
def data(self, index, role):
row = index.row()
col = index.column()
if (role != QtCore.Qt.ItemDataRole.DisplayRole or
row >= len(self.instructions) or
col >= 2):
return None
return str(self.instructions[row])
def parent(self, index):
return self.createIndex(-1, -1)
class RetWindow(QtGui.QMainWindow, Ui_RetWindow):
def __init__(self, application, parent=None):
super(RetWindow, self).__init__(parent)
self.setupUi(self)
self.application = application
self.document = None
self.functionsTableView.horizontalHeader().setClickable(True)
QtCore.QObject.connect(
self.functionsTableView.horizontalHeader(),
QtCore.SIGNAL("sortIndicatorChanged(int, Qt::SortOrder)"),
self.functionsTableView.sortByColumn)
self.functionsTableView.sortByColumn(
0, QtCore.Qt.SortOrder.AscendingOrder)
self.functionDisassemblyViews = {}
self.consoleWindow = ConsoleWindow(self)
return
def open(self):
filename, filter = QtGui.QFileDialog.getOpenFileName(
self, "Open object file", "",
"Retkit documents (*.retkit);;Shared libraries (*.so);;"
"All files (*)")
if filename is None or len(filename) == 0:
return
if self.document is None:
target = self
else:
            target = RetWindow(self.application, self.parent())
target.load(filename)
return
def load(self, filename):
## FIXME: Don't hardcode the document here.
self.document = RetkitElfDocument(
filename=None, object_filename=filename)
model = FunctionTableModel(self.document)
self.functionsTableView.setModel(model)
return
def save(self):
pass
def saveAs(self):
pass
def close(self):
super(RetWindow, self).close()
return
def undo(self):
return
def redo(self):
return
def cut(self):
return
def copy(self):
return
def paste(self):
return
def delete(self):
return
def selectAll(self):
return
def about(self):
return
def functionDoubleClicked(self, index):
model = self.functionsTableView.model()
if model is None:
log.error("function double clicked but no model is present")
return None
fn = model.document.functions[index.row()]
view = self.functionDisassemblyViews.get(id(fn))
if view is not None:
view.raise_()
else:
view = QtGui.QTableView(self.contents)
self.functionDisassemblyViews[id(fn)] = view
view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
view.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerItem)
view.setSortingEnabled(False)
view.setCornerButtonEnabled(False)
view.setObjectName("functionDisassembly%x" % id(fn))
view.horizontalHeader().setVisible(True)
view.verticalHeader().setVisible(True)
view.setModel(FunctionDisassemblyModel(fn))
view.show()
return
def showConsole(self):
if self.isMaximized() and not self.consoleWindow.isVisible():
# Show the console window below this window.
desktop = QtGui.QApplication.desktop()
screenSize = desktop.availableGeometry(self)
# Compute the size of the window decorations
frameGeometry = self.frameGeometry()
clientGeometry = self.geometry()
decorWidth = frameGeometry.width() - clientGeometry.width()
decorHeight = frameGeometry.height() - clientGeometry.height()
# This is the top of the console window's frame.
consoleTop = (screenSize.bottom() - self.consoleWindow.height())
# De-maximize ourself and set the geometry accordingly.
self.setWindowState(
self.windowState() & ~QtCore.Qt.WindowMaximized)
self.setGeometry(
screenSize.left(), screenSize.top(),
screenSize.width() - decorWidth,
consoleTop - screenSize.top() - 2 * decorHeight)
# Position the console window and show it.
self.consoleWindow.setGeometry(
screenSize.left(), consoleTop,
screenSize.width(),
self.consoleWindow.height())
self.consoleWindow.show()
# Local variables:
# mode: Python
# tab-width: 8
# indent-tabs-mode: nil
# End:
# vi: set expandtab tabstop=8
|
|
""" Model creation / weight loading / state_dict helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import os
import math
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Callable, Optional, Tuple
import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
from .features import FeatureListNet, FeatureDictNet, FeatureHookNet
from .fx_features import FeatureGraphNet
from .hub import has_hf_hub, download_cached_file, load_state_dict_from_hf
from .layers import Conv2dSame, Linear
_logger = logging.getLogger(__name__)
def load_state_dict(checkpoint_path, use_ema=False):
if checkpoint_path and os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
state_dict_key = ''
if isinstance(checkpoint, dict):
if use_ema and checkpoint.get('state_dict_ema', None) is not None:
state_dict_key = 'state_dict_ema'
elif use_ema and checkpoint.get('model_ema', None) is not None:
state_dict_key = 'model_ema'
elif 'state_dict' in checkpoint:
state_dict_key = 'state_dict'
elif 'model' in checkpoint:
state_dict_key = 'model'
if state_dict_key:
state_dict = checkpoint[state_dict_key]
new_state_dict = OrderedDict()
for k, v in state_dict.items():
# strip `module.` prefix
name = k[7:] if k.startswith('module') else k
new_state_dict[name] = v
state_dict = new_state_dict
else:
state_dict = checkpoint
_logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
return state_dict
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True):
if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'):
# numpy checkpoint, try to load via model specific load_pretrained fn
if hasattr(model, 'load_pretrained'):
model.load_pretrained(checkpoint_path)
else:
raise NotImplementedError('Model cannot load numpy checkpoint')
return
state_dict = load_state_dict(checkpoint_path, use_ema)
model.load_state_dict(state_dict, strict=strict)
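# Hedged usage sketch, not part of the original module: round-trip a checkpoint
# through load_checkpoint with a toy model. The file name is illustrative.
def _example_load_checkpoint(checkpoint_path='checkpoint.pth.tar'):
    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 10))
    torch.save({'state_dict': model.state_dict()}, checkpoint_path)
    # load_state_dict() finds the 'state_dict' key and strips any 'module.' prefix
    load_checkpoint(model, checkpoint_path, use_ema=False, strict=True)
    return model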
def resume_checkpoint(model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True):
resume_epoch = None
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
if log_info:
_logger.info('Restoring model state from checkpoint...')
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
name = k[7:] if k.startswith('module') else k
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
if optimizer is not None and 'optimizer' in checkpoint:
if log_info:
_logger.info('Restoring optimizer state from checkpoint...')
optimizer.load_state_dict(checkpoint['optimizer'])
if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint:
if log_info:
_logger.info('Restoring AMP loss scaler state from checkpoint...')
loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key])
if 'epoch' in checkpoint:
resume_epoch = checkpoint['epoch']
if 'version' in checkpoint and checkpoint['version'] > 1:
resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save
if log_info:
_logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
else:
model.load_state_dict(checkpoint)
if log_info:
_logger.info("Loaded checkpoint '{}'".format(checkpoint_path))
return resume_epoch
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_custom_pretrained(model, default_cfg=None, load_fn=None, progress=False, check_hash=False):
r"""Loads a custom (read non .pth) weight file
Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls
a passed-in custom load fn, or the model's `load_pretrained` member fn.
If the object is already present in `model_dir`, it's deserialized and returned.
The default value of `model_dir` is ``<hub_dir>/checkpoints`` where
`hub_dir` is the directory returned by :func:`~torch.hub.get_dir`.
Args:
model: The instantiated model to load weights into
default_cfg (dict): Default pretrained model cfg
load_fn: An external stand-alone fn that loads weights into the provided model, otherwise a fn named
'load_pretrained' on the model will be called if it exists
progress (bool, optional): whether or not to display a progress bar to stderr. Default: False
check_hash(bool, optional): If True, the filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file. Default: False
"""
default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {}
pretrained_url = default_cfg.get('url', None)
if not pretrained_url:
_logger.warning("No pretrained weights exist for this model. Using random initialization.")
return
cached_file = download_cached_file(default_cfg['url'], check_hash=check_hash, progress=progress)
if load_fn is not None:
load_fn(model, cached_file)
elif hasattr(model, 'load_pretrained'):
model.load_pretrained(cached_file)
else:
_logger.warning("Valid function to load pretrained weights is not available, using random initialization.")
def adapt_input_conv(in_chans, conv_weight):
conv_type = conv_weight.dtype
conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU
O, I, J, K = conv_weight.shape
if in_chans == 1:
if I > 3:
assert conv_weight.shape[1] % 3 == 0
# For models with space2depth stems
conv_weight = conv_weight.reshape(O, I // 3, 3, J, K)
conv_weight = conv_weight.sum(dim=2, keepdim=False)
else:
conv_weight = conv_weight.sum(dim=1, keepdim=True)
elif in_chans != 3:
if I != 3:
raise NotImplementedError('Weight format not supported by conversion.')
else:
# NOTE this strategy should be better than random init, but there could be other combinations of
# the original RGB input layer weights that'd work better for specific cases.
repeat = int(math.ceil(in_chans / 3))
conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
conv_weight *= (3 / float(in_chans))
conv_weight = conv_weight.to(conv_type)
return conv_weight
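# Hedged sketch, not part of the original module: adapt a 3-channel conv weight
# to 1- and 6-channel inputs; the weight shape is illustrative.
def _example_adapt_input_conv():
    weight = torch.randn(64, 3, 7, 7)      # (out_ch, in_ch, kH, kW)
    gray = adapt_input_conv(1, weight)     # RGB summed -> (64, 1, 7, 7)
    six = adapt_input_conv(6, weight)      # RGB repeated + rescaled -> (64, 6, 7, 7)
    return gray.shape, six.shape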
def load_pretrained(model, default_cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True, progress=False):
""" Load pretrained checkpoint
Args:
model (nn.Module) : PyTorch model module
default_cfg (Optional[Dict]): default configuration for pretrained weights / target dataset
num_classes (int): num_classes for model
in_chans (int): in_chans for model
filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args)
strict (bool): strict load of checkpoint
progress (bool): enable progress bar for weight download
"""
default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {}
pretrained_url = default_cfg.get('url', None)
hf_hub_id = default_cfg.get('hf_hub', None)
if not pretrained_url and not hf_hub_id:
_logger.warning("No pretrained weights exist for this model. Using random initialization.")
return
if pretrained_url:
_logger.info(f'Loading pretrained weights from url ({pretrained_url})')
state_dict = load_state_dict_from_url(pretrained_url, progress=progress, map_location='cpu')
elif hf_hub_id and has_hf_hub(necessary=True):
_logger.info(f'Loading pretrained weights from Hugging Face hub ({hf_hub_id})')
state_dict = load_state_dict_from_hf(hf_hub_id)
if filter_fn is not None:
# for backwards compat with filter fns that take one arg, try one arg first, then two
try:
state_dict = filter_fn(state_dict)
except TypeError:
state_dict = filter_fn(state_dict, model)
input_convs = default_cfg.get('first_conv', None)
if input_convs is not None and in_chans != 3:
if isinstance(input_convs, str):
input_convs = (input_convs,)
for input_conv_name in input_convs:
weight_name = input_conv_name + '.weight'
try:
state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name])
_logger.info(
f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)')
except NotImplementedError as e:
del state_dict[weight_name]
strict = False
_logger.warning(
f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.')
classifiers = default_cfg.get('classifier', None)
label_offset = default_cfg.get('label_offset', 0)
if classifiers is not None:
if isinstance(classifiers, str):
classifiers = (classifiers,)
if num_classes != default_cfg['num_classes']:
for classifier_name in classifiers:
# completely discard fully connected if model num_classes doesn't match pretrained weights
state_dict.pop(classifier_name + '.weight', None)
state_dict.pop(classifier_name + '.bias', None)
strict = False
elif label_offset > 0:
for classifier_name in classifiers:
# special case for pretrained weights with an extra background class in pretrained weights
classifier_weight = state_dict[classifier_name + '.weight']
state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:]
classifier_bias = state_dict[classifier_name + '.bias']
state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:]
model.load_state_dict(state_dict, strict=strict)
def extract_layer(model, layer):
layer = layer.split('.')
module = model
if hasattr(model, 'module') and layer[0] != 'module':
module = model.module
if not hasattr(model, 'module') and layer[0] == 'module':
layer = layer[1:]
for l in layer:
if hasattr(module, l):
if not l.isdigit():
module = getattr(module, l)
else:
module = module[int(l)]
else:
return module
return module
def set_layer(model, layer, val):
layer = layer.split('.')
module = model
if hasattr(model, 'module') and layer[0] != 'module':
module = model.module
lst_index = 0
module2 = module
for l in layer:
if hasattr(module2, l):
if not l.isdigit():
module2 = getattr(module2, l)
else:
module2 = module2[int(l)]
lst_index += 1
lst_index -= 1
for l in layer[:lst_index]:
if not l.isdigit():
module = getattr(module, l)
else:
module = module[int(l)]
l = layer[lst_index]
setattr(module, l, val)
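# Hedged sketch, not part of the original module: fetch and replace a sub-module
# by dotted path using extract_layer / set_layer. Model and path are illustrative.
def _example_swap_layer():
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    old_bn = extract_layer(model, '1')     # -> the BatchNorm2d instance
    set_layer(model, '1', nn.Identity())   # replace it in place
    return old_bn, model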
def adapt_model_from_string(parent_module, model_string):
separator = '***'
state_dict = {}
lst_shape = model_string.split(separator)
for k in lst_shape:
k = k.split(':')
key = k[0]
shape = k[1][1:-1].split(',')
if shape[0] != '':
state_dict[key] = [int(i) for i in shape]
new_module = deepcopy(parent_module)
for n, m in parent_module.named_modules():
old_module = extract_layer(parent_module, n)
if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame):
if isinstance(old_module, Conv2dSame):
conv = Conv2dSame
else:
conv = nn.Conv2d
s = state_dict[n + '.weight']
in_channels = s[1]
out_channels = s[0]
g = 1
if old_module.groups > 1:
in_channels = out_channels
g = in_channels
new_conv = conv(
in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size,
bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation,
groups=g, stride=old_module.stride)
set_layer(new_module, n, new_conv)
if isinstance(old_module, nn.BatchNorm2d):
new_bn = nn.BatchNorm2d(
num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum,
affine=old_module.affine, track_running_stats=True)
set_layer(new_module, n, new_bn)
if isinstance(old_module, nn.Linear):
# FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer?
num_features = state_dict[n + '.weight'][1]
new_fc = Linear(
in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None)
set_layer(new_module, n, new_fc)
if hasattr(new_module, 'num_features'):
new_module.num_features = num_features
new_module.eval()
parent_module.eval()
return new_module
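# Hedged sketch, not part of the original module: the model_string format used
# above is '<param name>:[<shape>]' entries joined by '***'. This example shrinks
# a conv/bn pair from 8 to 4 channels; the spec string is illustrative.
def _example_adapt_from_string():
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    pruned_spec = '0.weight:[4, 3, 3, 3]***1.weight:[4]'
    return adapt_model_from_string(model, pruned_spec)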
def adapt_model_from_file(parent_module, model_variant):
adapt_file = os.path.join(os.path.dirname(__file__), 'pruned', model_variant + '.txt')
with open(adapt_file, 'r') as f:
return adapt_model_from_string(parent_module, f.read().strip())
def default_cfg_for_features(default_cfg):
default_cfg = deepcopy(default_cfg)
# remove default pretrained cfg fields that don't have much relevance for feature backbone
to_remove = ('num_classes', 'crop_pct', 'classifier', 'global_pool') # add default final pool size?
for tr in to_remove:
default_cfg.pop(tr, None)
return default_cfg
def overlay_external_default_cfg(default_cfg, kwargs):
""" Overlay 'external_default_cfg' in kwargs on top of default_cfg arg.
"""
external_default_cfg = kwargs.pop('external_default_cfg', None)
if external_default_cfg:
default_cfg.pop('url', None) # url should come from external cfg
default_cfg.pop('hf_hub', None) # hf hub id should come from external cfg
default_cfg.update(external_default_cfg)
def set_default_kwargs(kwargs, names, default_cfg):
for n in names:
# for legacy reasons, model __init__ args use img_size + in_chans as separate args while
# default_cfg has one input_size=(C, H, W) entry
if n == 'img_size':
input_size = default_cfg.get('input_size', None)
if input_size is not None:
assert len(input_size) == 3
kwargs.setdefault(n, input_size[-2:])
elif n == 'in_chans':
input_size = default_cfg.get('input_size', None)
if input_size is not None:
assert len(input_size) == 3
kwargs.setdefault(n, input_size[0])
else:
default_val = default_cfg.get(n, None)
if default_val is not None:
kwargs.setdefault(n, default_cfg[n])
def filter_kwargs(kwargs, names):
if not kwargs or not names:
return
for n in names:
kwargs.pop(n, None)
def update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter):
""" Update the default_cfg and kwargs before passing to model
FIXME this sequence of overlay default_cfg, set default kwargs, filter kwargs
could/should be replaced by an improved configuration mechanism
Args:
default_cfg: input default_cfg (updated in-place)
kwargs: keyword args passed to model build fn (updated in-place)
kwargs_filter: keyword arg keys that must be removed before model __init__
"""
# Overlay default cfg values from `external_default_cfg` if it exists in kwargs
overlay_external_default_cfg(default_cfg, kwargs)
# Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs)
default_kwarg_names = ('num_classes', 'global_pool', 'in_chans')
if default_cfg.get('fixed_input_size', False):
# if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size
default_kwarg_names += ('img_size',)
set_default_kwargs(kwargs, names=default_kwarg_names, default_cfg=default_cfg)
# Filter keyword args for task specific model variants (some 'features only' models, etc.)
filter_kwargs(kwargs, names=kwargs_filter)
def build_model_with_cfg(
model_cls: Callable,
variant: str,
pretrained: bool,
default_cfg: dict,
model_cfg: Optional[Any] = None,
feature_cfg: Optional[dict] = None,
pretrained_strict: bool = True,
pretrained_filter_fn: Optional[Callable] = None,
pretrained_custom_load: bool = False,
kwargs_filter: Optional[Tuple[str]] = None,
**kwargs):
""" Build model with specified default_cfg and optional model_cfg
This helper fn aids in the construction of a model including:
* handling default_cfg and associated pretrained weight loading
* passing through optional model_cfg for models with config based arch spec
* features_only model adaptation
* pruning config / model adaptation
Args:
model_cls (nn.Module): model class
variant (str): model variant name
pretrained (bool): load pretrained weights
default_cfg (dict): model's default pretrained/task config
model_cfg (Optional[Dict]): model's architecture config
feature_cfg (Optional[Dict]): feature extraction adapter config
pretrained_strict (bool): load pretrained weights strictly
pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights
pretrained_custom_load (bool): use custom load fn, to load numpy or other non PyTorch weights
kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model
**kwargs: model args passed through to model __init__
"""
pruned = kwargs.pop('pruned', False)
features = False
feature_cfg = feature_cfg or {}
default_cfg = deepcopy(default_cfg) if default_cfg else {}
update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter)
default_cfg.setdefault('architecture', variant)
# Setup for feature extraction wrapper done at end of this fn
if kwargs.pop('features_only', False):
features = True
feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))
if 'out_indices' in kwargs:
feature_cfg['out_indices'] = kwargs.pop('out_indices')
# Build the model
model = model_cls(**kwargs) if model_cfg is None else model_cls(cfg=model_cfg, **kwargs)
model.default_cfg = default_cfg
if pruned:
model = adapt_model_from_file(model, variant)
# For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats
num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))
if pretrained:
if pretrained_custom_load:
load_custom_pretrained(model)
else:
load_pretrained(
model,
num_classes=num_classes_pretrained,
in_chans=kwargs.get('in_chans', 3),
filter_fn=pretrained_filter_fn,
strict=pretrained_strict)
# Wrap the model in a feature extraction module if enabled
if features:
feature_cls = FeatureListNet
if 'feature_cls' in feature_cfg:
feature_cls = feature_cfg.pop('feature_cls')
if isinstance(feature_cls, str):
feature_cls = feature_cls.lower()
if 'hook' in feature_cls:
feature_cls = FeatureHookNet
elif feature_cls == 'fx':
feature_cls = FeatureGraphNet
else:
assert False, f'Unknown feature class {feature_cls}'
model = feature_cls(model, **feature_cfg)
model.default_cfg = default_cfg_for_features(default_cfg) # add back default_cfg
return model
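# Hedged sketch, not part of the original module: a toy entrypoint built on
# build_model_with_cfg. The class, variant name and cfg values are illustrative;
# the empty url means no pretrained weights are ever downloaded.
class _TinyNet(nn.Module):
    def __init__(self, num_classes=1000, in_chans=3, **kwargs):
        super(_TinyNet, self).__init__()
        self.stem = nn.Conv2d(in_chans, 8, 3)
        self.fc = nn.Linear(8, num_classes)

def _example_build_tinynet(pretrained=False, **kwargs):
    cfg = dict(url='', num_classes=1000, input_size=(3, 32, 32),
               first_conv='stem', classifier='fc')
    return build_model_with_cfg(_TinyNet, 'tinynet', pretrained, default_cfg=cfg, **kwargs)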
def model_parameters(model, exclude_head=False):
if exclude_head:
# FIXME this a bit of a quick and dirty hack to skip classifier head params based on ordering
return [p for p in model.parameters()][:-2]
else:
return model.parameters()
def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> nn.Module:
if not depth_first and include_root:
fn(module=module, name=name)
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
fn(module=module, name=name)
return module
def named_modules(module: nn.Module, name='', depth_first=True, include_root=False):
if not depth_first and include_root:
yield name, module
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
yield from named_modules(
module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
yield name, module
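# Hedged sketch, not part of the original module: named_apply visits every
# submodule with its dotted name, which is handy for selective weight init.
def _example_named_apply():
    model = nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.Linear(4, 2)))
    def _zero_bias(module, name=''):
        if isinstance(module, nn.Linear):
            nn.init.zeros_(module.bias)
    return named_apply(_zero_bias, model)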
|
|
from __future__ import unicode_literals
from frappe import _
from frappe.desk.moduleview import add_setup_section
def get_data():
data = [
{
"label": _("Users"),
"icon": "fa fa-group",
"items": [
{
"type": "doctype",
"name": "User",
"description": _("System and Website Users")
},
{
"type": "doctype",
"name": "Role",
"description": _("User Roles")
}
]
},
{
"label": _("Permissions"),
"icon": "fa fa-lock",
"items": [
{
"type": "page",
"name": "permission-manager",
"label": _("Role Permissions Manager"),
"icon": "fa fa-lock",
"description": _("Set Permissions on Document Types and Roles")
},
{
"type": "page",
"name": "user-permissions",
"label": _("User Permissions Manager"),
"icon": "fa fa-shield",
"description": _("Set Permissions per User")
},
{
"type": "page",
"name": "modules_setup",
"label": _("Show / Hide Modules"),
"icon": "fa fa-upload",
"description": _("Show or hide modules globally.")
},
{
"type": "report",
"is_query_report": True,
"doctype": "User",
"icon": "fa fa-eye-open",
"name": "Permitted Documents For User",
"description": _("Check which Documents are readable by a User")
},
{
"type": "report",
"doctype": "DocShare",
"icon": "fa fa-share",
"name": "Document Share Report",
"description": _("Report of all document shares")
}
]
},
{
"label": _("Settings"),
"icon": "fa fa-wrench",
"items": [
{
"type": "doctype",
"name": "System Settings",
"label": _("System Settings"),
"description": _("Language, Date and Time settings"),
"hide_count": True
},
{
"type": "doctype",
"name": "Error Log",
"description": _("Log of error on automated events (scheduler).")
},
{
"type": "doctype",
"name": "Error Snapshot",
"description": _("Log of error during requests.")
},
]
},
{
"label": _("Data"),
"icon": "fa fa-th",
"items": [
{
"type": "page",
"name": "data-import-tool",
"label": _("Import / Export Data"),
"icon": "fa fa-upload",
"description": _("Import / Export Data from .csv files.")
},
{
"type": "doctype",
"name": "Naming Series",
"description": _("Set numbering series for transactions."),
"hide_count": True
},
{
"type": "doctype",
"name": "Rename Tool",
"label": _("Bulk Rename"),
"description": _("Rename many items by uploading a .csv file."),
"hide_count": True
},
{
"type": "doctype",
"name": "Bulk Update",
"label": _("Bulk Update"),
"description": _("Update many values at one time."),
"hide_count": True
},
{
"type": "page",
"name": "backups",
"label": _("Download Backups"),
"description": _("List of backups available for download"),
"icon": "fa fa-download"
},
]
},
{
"label": _("Email"),
"icon": "fa fa-envelope",
"items": [
{
"type": "doctype",
"name": "Email Account",
"description": _("Add / Manage Email Accounts.")
},
{
"type": "doctype",
"name": "Email Domain",
"description": _("Add / Manage Email Domains.")
},
{
"type": "doctype",
"name": "Email Alert",
"description": _("Setup Email Alert based on various criteria.")
},
{
"type": "doctype",
"name": "Standard Reply",
"description": _("Standard replies to common queries.")
},
{
"type": "doctype",
"name": "Auto Email Report",
"description": _("Setup Reports to be emailed at regular intervals"),
},
]
},
{
"label": _("Printing"),
"icon": "fa fa-print",
"items": [
{
"type": "page",
"label": "Print Format Builder",
"name": "print-format-builder",
"description": _("Drag and Drop tool to build and customize Print Formats.")
},
{
"type": "doctype",
"name": "Print Settings",
"description": _("Set default format, page size, print style etc.")
},
{
"type": "doctype",
"name": "Print Format",
"description": _("Customized HTML Templates for printing transactions.")
},
]
},
{
"label": _("Workflow"),
"icon": "fa fa-random",
"items": [
{
"type": "doctype",
"name": "Workflow",
"description": _("Define workflows for forms.")
},
{
"type": "doctype",
"name": "Workflow State",
"description": _("States for workflow (e.g. Draft, Approved, Cancelled).")
},
{
"type": "doctype",
"name": "Workflow Action",
"description": _("Actions for workflow (e.g. Approve, Cancel).")
},
]
},
{
"label": _("Integrations"),
"icon": "fa fa-star",
"items": [
{
"type": "page",
"name": "applications",
"label": _("Application Installer"),
"description": _("Install Applications."),
"icon": "fa fa-download"
},
{
"type": "doctype",
"name": "Social Login Keys",
"description": _("Enter keys to enable login via Facebook, Google, GitHub."),
},
{
"type": "doctype",
"name": "Integration Service",
"description": _("Centralize access to Integrations"),
},
{
"type": "doctype",
"name": "OAuth Client",
"description": _("Register OAuth Client App"),
},
{
"type": "doctype",
"name": "OAuth Provider Settings",
"description": _("Settings for OAuth Provider"),
},
]
},
{
"label": _("Customize"),
"icon": "fa fa-glass",
"items": [
{
"type": "doctype",
"name": "Customize Form",
"description": _("Change field properties (hide, readonly, permission etc.)"),
"hide_count": True
},
{
"type": "doctype",
"name": "Custom Field",
"description": _("Add fields to forms.")
},
{
"type": "doctype",
"label": _("Custom Translations"),
"name": "Translation",
"description": _("Add your own translations")
},
{
"type": "doctype",
"name": "Custom Script",
"description": _("Add custom javascript to forms.")
},
{
"type": "doctype",
"name": "DocType",
"description": _("Add custom forms.")
},
{
"type": "doctype",
"label": _("Custom Tags"),
"name": "Tag Category",
"description": _("Add your own Tag Categories")
}
]
},
]
add_setup_section(data, "frappe", "website", _("Website"), "fa fa-globe")
return data
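# Hedged sketch, not part of the original module: appending an extra card to the
# config returned by get_data(); the label, icon and item below are illustrative.
def _example_add_custom_card():
    data = get_data()
    data.append({
        "label": _("Custom Tools"),
        "icon": "fa fa-cog",
        "items": [
            {
                "type": "doctype",
                "name": "ToDo",
                "description": _("Track personal tasks.")
            }
        ]
    })
    return data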
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tf.compat.v1.nn.dynamic_rnn variant, built on the Recurrent class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.contrib.recurrent.python.ops import recurrent
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
def _GetDTypesFromStructure(struct):
dtypes_list = []
for x in nest.flatten(struct):
x = ops.convert_to_tensor(x)
dtypes_list.append(x.dtype)
return dtypes_list
def _SetShapeFromTemplate(struct, struct_template):
as_list = nest.flatten(struct)
template_as_list = nest.flatten(struct_template)
for element, template in zip(as_list, template_as_list):
element.set_shape(template.shape)
class _FunctionalRnnCell(object):
"""Wrapper around RNNCell which separates state from computation.
This class accomplishes the following:
* Turn the cell's `__call__` function into a pure function. The global
side effects are separated as `theta`. They are the variables created
for the weights of the computation.
* Unless the output is aliased as part of the state, extend the state to
contain the output so that we store the history in `Recurrent`.
* Set static shapes as required.
"""
def __init__(self, rnn_cell, seq_inputs, initial_state):
assert initial_state is not None
# TODO(drpng): Dtype needs to be configurable.
input_dtypes = [seq_inputs.dtype] + _GetDTypesFromStructure(initial_state)
# See _index.
like_inputs_t = nest.map_structure(
lambda x: array_ops.stop_gradient(array_ops.gather(x, 0)), seq_inputs)
input_structure = (like_inputs_t, initial_state)
@function.Defun(*input_dtypes)
def FlatCellStep(*flat_inputs):
"""The flattened version of `rnn_cell`."""
inputs_t, state0 = nest.pack_sequence_as(input_structure, flat_inputs)
_SetShapeFromTemplate(state0, initial_state)
_SetShapeFromTemplate(inputs_t, like_inputs_t)
outputs_t, state1 = rnn_cell(inputs_t, state0)
state_list = nest.flatten(state1)
self._output_shape = outputs_t.shape
if outputs_t in state_list:
output_index_in_state = state_list.index(outputs_t)
else:
output_index_in_state = None
if output_index_in_state is None:
self._prepend_output = True
self._output_state_idx = 0
return [outputs_t] + state_list
else:
self._output_state_idx = output_index_in_state
self._prepend_output = False
# To save memory, we don't return the output separately from the
# state list, since we know it's the same tensor.
return state_list
def _ToPureFunction(func):
# NOTE: This forces the creation of the function.
if func.captured_inputs:
pure_func = copy.copy(func)
# pylint: disable=protected-access
pure_func._extra_inputs = []
return pure_func
return func
pure_flat_cell_step = _ToPureFunction(FlatCellStep)
def CellStep(theta, extended_state0, inputs_t):
"""Performs one time step on structured inputs.
The purpose of this function is to turn the parameters into flattened
versions, and to resolve the parameter order difference between
`Recurrent` and `RNNCell`.
In the event the cell returns a transformed output that is not aliased
within its state, the `extended_state0` also contains the output as its
first element.
Args:
theta: Weights required for the computation. A structure of tensors.
extended_state0: the state0, and possibly the output at the previous
time step. A structure of tensors.
inputs_t: the inputs at time t.
Returns:
A pair of the next state (inclusive of the output), and an empty list
(unused `extras`).
The next state is congruent to state0.
"""
extended_state0_flat = nest.flatten(extended_state0)
state0_flat = self.MaybeRemoveOutputFromState(extended_state0_flat)
full_inputs = [inputs_t] + state0_flat + theta
# Note that the thetas are additional inputs appended as extra
# parameters.
cell_out = pure_flat_cell_step(*full_inputs)
return cell_out, []
self._cell_step = CellStep
self._theta = FlatCellStep.captured_inputs
self._zero_state = rnn_cell.zero_state
self._state_template = initial_state
self._output_size = rnn_cell.output_size
@property
def extended_initial_state(self):
if self._prepend_output:
return [
array_ops.zeros(
self._output_shape,
dtype=_GetDTypesFromStructure(self._state_template)[0]),
self._state_template
]
else:
# The base case, where the output is just the hidden state.
return self._state_template
@property
def cell_step(self):
return self._cell_step
@property
def theta(self):
return self._theta
@property
def state_template(self):
return self._state_template
@property
def output_shape(self):
return self._output_shape
def GetOutputFromState(self, state):
return nest.flatten(state)[self._output_state_idx]
def MaybeRemoveOutputFromState(self, flat_state):
if self._prepend_output:
return flat_state[1:]
return flat_state
def _ApplyLengthsToBatch(sequence_lengths, tf_output):
# TODO(drpng): just use Update so that we don't carry over the gradients?
"""Sets the output to be zero at the end of the sequence."""
# output is batch major.
shape = array_ops.shape(tf_output)
batch_size, max_time, vector_size = shape[0], shape[1], shape[2]
output_time = array_ops.tile(math_ops.range(0, max_time), [batch_size])
output_time = array_ops.reshape(output_time, [batch_size, max_time])
lengths = array_ops.tile(
array_ops.reshape(sequence_lengths, [-1, 1]), [1, max_time])
is_less = math_ops.cast(
math_ops.less(output_time, lengths), dtype=tf_output.dtype)
keep_mask = array_ops.tile(
array_ops.expand_dims(is_less, -1), [1, 1, vector_size])
final_output = keep_mask * tf_output
return final_output
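# Hedged NumPy mirror of the masking above, not part of the original module:
# positions at or beyond each sequence length are zeroed out. Shapes illustrative.
def _example_apply_lengths_numpy():
    import numpy as np
    output = np.ones((2, 4, 3))                       # [batch, max_time, depth]
    lengths = np.array([2, 4])
    keep = (np.arange(4)[None, :] < lengths[:, None]).astype(output.dtype)
    return output * keep[:, :, None]                  # steps past `lengths` become 0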
def _PickFinalStateFromHistory(acc_state, sequence_length):
"""Implements acc_state[sequence_length - 1]."""
# This will work on all platforms, unlike the regular slice.
last_value = []
for state_var in nest.flatten(acc_state):
# We compute the following with matrix operations:
# last_var = state_var[sequence_length - 1]
shape = array_ops.shape(state_var)
max_time, batch_size = shape[0], shape[1]
output_time = array_ops.tile(math_ops.range(0, max_time), [batch_size])
output_time = array_ops.reshape(output_time, [batch_size, max_time])
lengths = array_ops.tile(
array_ops.reshape(sequence_length, [-1, 1]), [1, max_time])
last_idx = math_ops.cast(
math_ops.equal(output_time, lengths - 1), dtype=state_var.dtype)
last_idx = array_ops.transpose(last_idx)
last_idx_for_bcast = array_ops.expand_dims(last_idx, -1)
sliced = math_ops.multiply(last_idx_for_bcast, state_var)
last_var = math_ops.reduce_sum(sliced, 0)
last_value += [last_var]
return nest.pack_sequence_as(acc_state, last_value)
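# Hedged NumPy mirror of the selection above, not part of the original module:
# pick acc_state[length - 1] per batch element via a one-hot mask and a sum,
# avoiding a gather/slice. Shapes are illustrative.
def _example_pick_final_state_numpy():
    import numpy as np
    acc_state = np.arange(4 * 3 * 2, dtype=float).reshape(4, 3, 2)  # [time, batch, depth]
    lengths = np.array([2, 1, 4])
    one_hot = (np.arange(4)[:, None] == (lengths - 1)[None, :]).astype(float)
    return (one_hot[:, :, None] * acc_state).sum(axis=0)            # [batch, depth]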
def _PostProcessOutput(extended_acc_state, extended_final_state, func_cell,
total_time, inputs_lengths, is_reversed):
"""Post-process output of recurrent.
This function takes the accumulated extended state and extracts the requested
state and output.
When `inputs_lengths` has been set, it extracts the output from the
accumulated state. It also zeroes out outputs past each sequence length.
When `is_reversed` is true, the output will be reversed in this function.
It also sets the static shape information.
Args:
extended_acc_state: A structure containing the accumulated state at each
time. It may contain the output at each time as well.
extended_final_state: A structure containing the final state. It may contain
the output at the final time.
func_cell: The functional wrapper around the cell.
total_time: A scalar integer tensor.
inputs_lengths: An integer tensor with one entry per input.
is_reversed: A boolean to indicate if the sequence is reversed.
Returns:
A tuple with the outputs at each time, and the final state.
"""
if inputs_lengths is None or is_reversed:
flat_final_state = func_cell.MaybeRemoveOutputFromState(
nest.flatten(extended_final_state))
tf_state = nest.pack_sequence_as(func_cell.state_template, flat_final_state)
else:
# The accumulated state is over the entire sequence, so we pick it
# out from the acc_state sequence.
flat_acc_state = func_cell.MaybeRemoveOutputFromState(
nest.flatten(extended_acc_state))
acc_state = nest.pack_sequence_as(func_cell.state_template, flat_acc_state)
tf_state = _PickFinalStateFromHistory(acc_state, inputs_lengths)
output_from_state = func_cell.GetOutputFromState(extended_acc_state)
if is_reversed:
output_from_state = array_ops.reverse(output_from_state, [0])
tf_output = array_ops.transpose(output_from_state, [1, 0, 2])
tf_output.set_shape(
[func_cell.output_shape[0], total_time, func_cell.output_shape[1]])
if inputs_lengths is not None:
# Need to set the outputs past the sequence lengths to zero.
tf_output = _ApplyLengthsToBatch(inputs_lengths, tf_output)
_SetShapeFromTemplate(tf_state, func_cell.state_template)
return tf_output, tf_state
# pylint: disable=invalid-name
def functional_rnn(cell,
inputs,
sequence_length=None,
initial_state=None,
dtype=None,
time_major=False,
scope=None,
use_tpu=False,
reverse=False):
"""Same interface as `tf.compat.v1.nn.dynamic_rnn`."""
with variable_scope.variable_scope(scope or 'rnn'):
if not time_major:
inputs = nest.map_structure(lambda t: array_ops.transpose(t, [1, 0, 2]),
inputs)
inputs_flat = nest.flatten(inputs)
batch_size = array_ops.shape(inputs_flat[0])[1]
if initial_state is None:
initial_state = cell.zero_state(batch_size, dtype)
func_cell = _FunctionalRnnCell(cell, inputs, initial_state)
if sequence_length is not None:
max_length = math_ops.reduce_max(sequence_length)
else:
max_length = None
if reverse:
inputs = array_ops.reverse(inputs, [0])
extended_acc_state, extended_final_state = recurrent.Recurrent(
theta=func_cell.theta,
state0=func_cell.extended_initial_state,
inputs=inputs,
cell_fn=func_cell.cell_step,
max_input_length=max_length,
use_tpu=use_tpu,
aligned_end=reverse)
tf_output, tf_state = _PostProcessOutput(
extended_acc_state,
extended_final_state,
func_cell,
inputs_flat[0].shape[0],
sequence_length,
is_reversed=reverse)
if time_major:
tf_output = array_ops.transpose(tf_output, [1, 0, 2])
return tf_output, tf_state
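# Hedged usage sketch, not part of the original module: graph-mode (TF 1.x) use
# of functional_rnn with a stock LSTM cell; shapes and the cell choice are
# illustrative only.
def _example_functional_rnn():
    import tensorflow as tf  # this contrib module targets TF 1.x graph mode
    cell = tf.nn.rnn_cell.BasicLSTMCell(16)
    inputs = tf.zeros([8, 10, 4])          # [batch, time, depth], time_major=False
    lengths = tf.fill([8], 10)
    return functional_rnn(cell, inputs, sequence_length=lengths, dtype=tf.float32)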
def bidirectional_functional_rnn(cell_fw,
cell_bw,
inputs,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
sequence_length=None,
time_major=False,
use_tpu=False,
fast_reverse=False,
scope=None):
"""Creates a bidirectional recurrent neural network.
Performs fully dynamic unrolling of inputs in both directions. Built to be API
compatible with `tf.compat.v1.nn.bidirectional_dynamic_rnn`, but implemented with
functional control flow for TPU compatibility.
Args:
cell_fw: An instance of `tf.compat.v1.nn.rnn_cell.RNNCell`.
cell_bw: An instance of `tf.compat.v1.nn.rnn_cell.RNNCell`.
inputs: The RNN inputs. If time_major == False (default), this must be a
Tensor (or hierarchical structure of Tensors) of shape [batch_size,
max_time, ...]. If time_major == True, this must be a Tensor
(or hierarchical structure of Tensors) of shape: [max_time, batch_size,
...]. The first two dimensions must match across all the inputs, but
otherwise the ranks and other shape components may differ.
initial_state_fw: An optional initial state for `cell_fw`. Should match
`cell_fw.zero_state` in structure and type.
initial_state_bw: An optional initial state for `cell_bw`. Should match
`cell_bw.zero_state` in structure and type.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_states are not provided or RNN state has a
heterogeneous dtype.
sequence_length: An optional int32/int64 vector sized [batch_size]. Used to
copy-through state and zero-out outputs when past a batch element's
sequence length. So it's more for correctness than performance.
time_major: Whether the `inputs` tensor is in "time major" format.
use_tpu: Whether to enable TPU-compatible operation. If True, does not truly
reverse `inputs` in the backwards RNN. Once b/69305369 is fixed, we can
remove this flag.
fast_reverse: Whether to use fast tf.reverse to replace tf.reverse_sequence.
This is only possible when either all sequence lengths are the same inside
the batch, or when the cell function does not change the state on padded
input.
scope: An optional scope name for the dynamic RNN.
Returns:
outputs: A tuple of `(output_fw, output_bw)`. The output of the forward and
backward RNN. If time_major == False (default), these will
be Tensors shaped: [batch_size, max_time, cell.output_size]. If
time_major == True, these will be Tensors shaped:
[max_time, batch_size, cell.output_size]. Note, if cell.output_size is a
(possibly nested) tuple of integers or TensorShape objects, then the
output for that direction will be a tuple having the same structure as
cell.output_size, containing Tensors having shapes corresponding to the
shape data in cell.output_size.
final_states: A tuple of `(final_state_fw, final_state_bw)`. A Tensor or
hierarchical structure of Tensors indicating the final cell state in each
direction. Must have the same structure and shape as cell.zero_state.
Raises:
ValueError: If `initial_state_fw` is None or `initial_state_bw` is None and
`dtype` is not provided.
"""
# Keep this code in sync with tf.compat.v1.nn.dynamic_rnn for compatibility.
with variable_scope.variable_scope(scope or 'bidirectional_rnn'):
# Forward direction
with variable_scope.variable_scope('fw') as fw_scope:
output_fw, output_state_fw = functional_rnn(
cell=cell_fw,
inputs=inputs,
sequence_length=sequence_length,
initial_state=initial_state_fw,
dtype=dtype,
time_major=time_major,
scope=fw_scope,
use_tpu=use_tpu)
# Backward direction
if not time_major:
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
def _reverse(input_, seq_lengths, seq_dim, batch_dim):
if seq_lengths is not None:
return array_ops.reverse_sequence(
input=input_,
seq_lengths=seq_lengths,
seq_dim=seq_dim,
batch_dim=batch_dim)
else:
# See b/69305369.
assert not use_tpu, (
'Bidirectional with variable sequence lengths unsupported on TPU')
return array_ops.reverse(input_, axis=[seq_dim])
with variable_scope.variable_scope('bw') as bw_scope:
if not fast_reverse:
inputs = _reverse(
inputs,
seq_lengths=sequence_length,
seq_dim=time_dim,
batch_dim=batch_dim)
output_bw, output_state_bw = functional_rnn(
cell=cell_bw,
inputs=inputs,
sequence_length=sequence_length,
initial_state=initial_state_bw,
dtype=dtype,
time_major=time_major,
scope=bw_scope,
use_tpu=use_tpu,
reverse=fast_reverse)
if not fast_reverse:
output_bw = _reverse(
output_bw,
seq_lengths=sequence_length,
seq_dim=time_dim,
batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
# pylint: enable=invalid-name
|
|
"""
Functionality to read and write the Newick serialization format for trees.
.. seealso:: https://en.wikipedia.org/wiki/Newick_format
"""
import re
import pathlib
__version__ = "1.0.1.dev0"
RESERVED_PUNCTUATION = ':;,()'
COMMENT = re.compile(r'\[[^\]]*\]')
def length_parser(x):
return float(x or 0.0)
def length_formatter(x):
return '%s' % x
class Node(object):
"""
A Node may be a tree, a subtree or a leaf.
A Node has optional name and length (from parent) and a (possibly empty) list of
descendants. It further has an ancestor, which is *None* if the node is the
root node of a tree.
"""
def __init__(self, name=None, length=None, **kw):
"""
:param name: Node label.
:param length: Branch length from the new node to its parent.
:param kw: Recognized keyword arguments:\
`length_parser`: Custom parser for the `length` attribute of a Node.\
`length_formatter`: Custom formatter for the branch length when formatting a\
Node as Newick string.
"""
for char in RESERVED_PUNCTUATION:
if (name and char in name) or (length and char in length):
raise ValueError(
'Node names or branch lengths must not contain "%s"' % char)
self.name = name
self._length = length
self.descendants = []
self.ancestor = None
self._length_parser = kw.pop('length_parser', length_parser)
self._length_formatter = kw.pop('length_formatter', length_formatter)
def __repr__(self):
return 'Node("%s")' % self.name
@property
def length(self):
return self._length_parser(self._length)
@length.setter
def length(self, l):
if l is None:
self._length = l
else:
self._length = self._length_formatter(l)
@classmethod
def create(cls, name=None, length=None, descendants=None, **kw):
"""
Create a new `Node` object.
:param name: Node label.
:param length: Branch length from the new node to its parent.
:param descendants: list of descendants or `None`.
:param kw: Additional keyword arguments are passed through to `Node.__init__`.
:return: `Node` instance.
"""
node = cls(name=name, length=length, **kw)
for descendant in descendants or []:
node.add_descendant(descendant)
return node
def add_descendant(self, node):
node.ancestor = self
self.descendants.append(node)
@property
def newick(self):
"""The representation of the Node in Newick format."""
label = self.name or ''
if self._length:
label += ':' + self._length
descendants = ','.join([n.newick for n in self.descendants])
if descendants:
descendants = '(' + descendants + ')'
return descendants + label
def _ascii_art(self, char1='\u2500', show_internal=True, maxlen=None):
if maxlen is None:
maxlen = max(
len(n.name) for n in self.walk()
if n.name and (show_internal or n.is_leaf)) + 4
pad = ' ' * (maxlen - 1)
namestr = '\u2500' + (self.name or '')
if self.descendants:
mids = []
result = []
for i, c in enumerate(self.descendants):
if len(self.descendants) == 1:
char2 = '\u2500'
elif i == 0:
char2 = '\u250c'
elif i == len(self.descendants) - 1:
char2 = '\u2514'
else:
char2 = '\u2500'
clines, mid = c._ascii_art(
char1=char2, show_internal=show_internal, maxlen=maxlen)
mids.append(mid + len(result))
result.extend(clines)
result.append('')
result.pop()
lo, hi, end = mids[0], mids[-1], len(result)
prefixes = [pad] * (lo + 1) +\
[pad + '\u2502'] * (hi - lo - 1) + \
[pad] * (end - hi)
mid = (lo + hi) // 2
prefixes[mid] = char1 + '\u2500' * (len(prefixes[mid]) - 2) + prefixes[mid][-1]
result = [p + l for p, l in zip(prefixes, result)]
if show_internal:
stem = result[mid]
result[mid] = stem[0] + namestr + stem[len(namestr) + 1:]
return result, mid
return [char1 + namestr], 0
def ascii_art(self, strict=False, show_internal=True):
"""
Return a unicode string representing a tree in ASCII art fashion.
:param strict: Use ASCII characters strictly (for the tree symbols).
:param show_internal: Show labels of internal nodes.
:return: unicode string
>>> node = loads('((A,B)C,((D,E)F,G,H)I)J;')[0]
>>> print(node.ascii_art(show_internal=False, strict=True))
/-A
/---|
| \-B
----| /-D
| /---|
| | \-E
\---|
|-G
\-H
"""
cmap = {
'\u2500': '-',
'\u2502': '|',
'\u250c': '/',
'\u2514': '\\',
'\u251c': '|',
'\u2524': '|',
'\u253c': '+',
}
def normalize(line):
m = re.compile('(?<=\u2502)(?P<s>\s+)(?=[\u250c\u2514\u2502])')
line = m.sub(lambda m: m.group('s')[1:], line)
line = re.sub('\u2500\u2502', '\u2500\u2524', line) # -|
line = re.sub('\u2502\u2500', '\u251c', line) # |-
line = re.sub('\u2524\u2500', '\u253c', line) # -|-
if strict:
for u, a in cmap.items():
line = line.replace(u, a)
return line
return '\n'.join(
normalize(l) for l in self._ascii_art(show_internal=show_internal)[0]
if set(l) != {' ', '\u2502'}) # remove lines of only spaces and pipes
@property
def is_leaf(self):
return not bool(self.descendants)
@property
def is_binary(self):
return all([len(n.descendants) in (0, 2) for n in self.walk()])
def walk(self, mode=None):
"""
Traverses the (sub)tree rooted at self, yielding each visited Node.
.. seealso:: https://en.wikipedia.org/wiki/Tree_traversal
:param mode: Specifies the algorithm to use when traversing the subtree rooted \
at self. `None` for breadth-first, `'postorder'` for post-order depth-first \
search.
:return: Generator of the visited Nodes.
"""
if mode == 'postorder':
for n in self._postorder():
yield n
else: # default to a breadth-first search
yield self
for node in self.descendants:
for n in node.walk():
yield n
def visit(self, visitor, predicate=None, **kw):
"""
Apply a function to matching nodes in the (sub)tree rooted at self.
:param visitor: A callable accepting a Node object as single argument.
:param predicate: A callable accepting a Node object as single argument and \
returning a boolean signaling whether Node matches; if `None` all nodes match.
:param kw: Additional keyword arguments are passed through to self.walk.
"""
predicate = predicate or bool
for n in self.walk(**kw):
if predicate(n):
visitor(n)
def _postorder(self):
stack = [self]
descendant_map = {id(node): [n for n in node.descendants] for node in self.walk()}
while stack:
node = stack[-1]
descendants = descendant_map[id(node)]
# if we are at a leaf node, we remove the item from the stack
if not descendants:
stack.pop()
yield node
if stack:
descendant_map[id(stack[-1])].pop(0)
else:
stack.append(descendants[0])
def get_leaves(self):
"""
Get all the leaf nodes of the subtree descending from this node.
:return: List of Nodes with no descendants.
"""
return [n for n in self.walk() if n.is_leaf]
def get_node(self, label):
"""
Gets the specified node by name.
:return: Node or None if name does not exist in tree
"""
for n in self.walk():
if n.name == label:
return n
def get_leaf_names(self):
"""
Get the names of all the leaf nodes of the subtree descending from
this node.
:return: List of names of Nodes with no descendants.
"""
return [n.name for n in self.get_leaves()]
def prune(self, leaves, inverse=False):
"""
Remove all those nodes in the specified list, or if inverse=True,
remove all those nodes not in the specified list. The specified nodes
must be leaves and distinct from the root node.
:param leaves: A list of leaf Node objects
:param inverse: Specifies whether to remove nodes in the list or not\
in the list.
"""
self.visit(
lambda n: n.ancestor.descendants.remove(n),
# We won't prune the root node, even if it is a leaf and requested to
# be pruned!
lambda n: ((not inverse and n in leaves) or
(inverse and n.is_leaf and n not in leaves)) and n.ancestor,
mode="postorder")
def prune_by_names(self, leaf_names, inverse=False):
"""
Perform an (inverse) prune, with leaves specified by name.
:param leaf_names: A list of leaf Node names (strings)
:param inverse: Specifies whether to remove nodes in the list or not\
in the list.
"""
self.prune([l for l in self.walk() if l.name in leaf_names], inverse)
def remove_redundant_nodes(self, preserve_lengths=True):
"""
Remove all nodes which have only a single child, and attach their
grandchildren to their parent. The resulting tree has the minimum
number of internal nodes required for the number of leaves.
:param preserve_lengths: If true, branch lengths of removed nodes are \
added to those of their children.
"""
for n in self.walk(mode='postorder'):
while n.ancestor and len(n.ancestor.descendants) == 1:
grandfather = n.ancestor.ancestor
father = n.ancestor
if preserve_lengths:
n.length += father.length
if grandfather:
for i, child in enumerate(grandfather.descendants):
if child is father:
del grandfather.descendants[i]
grandfather.add_descendant(n)
father.ancestor = None
else:
self.descendants = n.descendants
if preserve_lengths:
self.length = n.length
def resolve_polytomies(self):
"""
Insert additional nodes with length=0 into the subtree in such a way
that all non-leaf nodes have only 2 descendants, i.e. the tree becomes
a fully resolved binary tree.
"""
def _resolve_polytomies(n):
new = Node(length=self._length_formatter(self._length_parser('0')))
while len(n.descendants) > 1:
new.add_descendant(n.descendants.pop())
n.descendants.append(new)
self.visit(_resolve_polytomies, lambda n: len(n.descendants) > 2)
def remove_names(self):
"""
Set the name of all nodes in the subtree to None.
"""
self.visit(lambda n: setattr(n, 'name', None))
def remove_internal_names(self):
"""
Set the name of all non-leaf nodes in the subtree to None.
"""
self.visit(lambda n: setattr(n, 'name', None), lambda n: not n.is_leaf)
def remove_leaf_names(self):
"""
Set the name of all leaf nodes in the subtree to None.
"""
self.visit(lambda n: setattr(n, 'name', None), lambda n: n.is_leaf)
def remove_lengths(self):
"""
Set the length of all nodes in the subtree to None.
"""
self.visit(lambda n: setattr(n, 'length', None))
def loads(s, strip_comments=False, **kw):
"""
Load a list of trees from a Newick formatted string.
:param s: Newick formatted string.
:param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
:param kw: Keyword arguments are passed through to `Node.create`.
:return: List of Node objects.
"""
kw['strip_comments'] = strip_comments
return [parse_node(ss.strip(), **kw) for ss in s.split(';') if ss.strip()]
def dumps(trees):
"""
Serialize a list of trees in Newick format.
:param trees: List of Node objects or a single Node object.
:return: Newick formatted string.
"""
if isinstance(trees, Node):
trees = [trees]
return ';\n'.join([tree.newick for tree in trees]) + ';'
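# Hedged sketch, not part of the original module: parse a Newick string and
# serialize it back; the tree literal is illustrative.
def _example_newick_roundtrip():
    trees = loads('(A:1,(B:1,C:1)D:2)E;')
    root = trees[0]
    return root.name, root.get_leaf_names(), dumps(trees)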
def load(fp, strip_comments=False, **kw):
"""
Load a list of trees from an open Newick formatted file.
:param fp: open file handle.
:param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
:param kw: Keyword arguments are passed through to `Node.create`.
:return: List of Node objects.
"""
kw['strip_comments'] = strip_comments
return loads(fp.read(), **kw)
def dump(tree, fp):
fp.write(dumps(tree))
def read(fname, encoding='utf8', strip_comments=False, **kw):
"""
Load a list of trees from a Newick formatted file.
:param fname: file path.
:param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
:param kw: Keyword arguments are passed through to `Node.create`.
:return: List of Node objects.
"""
kw['strip_comments'] = strip_comments
with pathlib.Path(fname).open(encoding=encoding) as fp:
return load(fp, **kw)
def write(tree, fname, encoding='utf8'):
with pathlib.Path(fname).open(encoding=encoding, mode='w') as fp:
dump(tree, fp)
def _parse_name_and_length(s):
length = None
if ':' in s:
s, length = s.split(':', 1)
return s or None, length or None
def _parse_siblings(s, **kw):
"""
http://stackoverflow.com/a/26809037
"""
bracket_level = 0
current = []
# trick to remove special-case of trailing chars
for c in (s + ","):
if c == "," and bracket_level == 0:
yield parse_node("".join(current), **kw)
current = []
else:
if c == "(":
bracket_level += 1
elif c == ")":
bracket_level -= 1
current.append(c)
def parse_node(s, strip_comments=False, **kw):
"""
Parse a Newick formatted string into a `Node` object.
:param s: Newick formatted string to parse.
:param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
:param kw: Keyword arguments are passed through to `Node.create`.
:return: `Node` instance.
"""
if strip_comments:
s = COMMENT.sub('', s)
s = s.strip()
parts = s.split(')')
if len(parts) == 1:
descendants, label = [], s
else:
if not parts[0].startswith('('):
raise ValueError('unmatched braces %s' % parts[0][:100])
descendants = list(_parse_siblings(')'.join(parts[:-1])[1:], **kw))
label = parts[-1]
name, length = _parse_name_and_length(label)
return Node.create(name=name, length=length, descendants=descendants, **kw)
|
|
import contextlib
import re
import urllib
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import BACKEND_SESSION_KEY, logout
from django.core.exceptions import MiddlewareNotUsed
from django.core.urlresolvers import is_valid_path
from django.core.validators import validate_ipv4_address, ValidationError
from django.db.utils import DatabaseError
from django.http import (HttpResponse, HttpResponseRedirect,
HttpResponsePermanentRedirect, HttpResponseForbidden)
from django.http.request import split_domain_port
from django.shortcuts import render
from django.utils import translation
from django.utils.cache import add_never_cache_headers, patch_response_headers, patch_vary_headers
from django.utils.encoding import iri_to_uri, smart_str, smart_unicode
import mobility
from mozilla_django_oidc.middleware import SessionRefresh
from enforce_host import EnforceHostMiddleware
from kitsune.sumo.templatetags.jinja_helpers import urlparams
from kitsune.sumo.urlresolvers import Prefixer, set_url_prefixer, split_path
from kitsune.sumo.views import handle403
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
class EnforceHostIPMiddleware(EnforceHostMiddleware):
"""Modify the `EnforceHostMiddleware` to allow IP addresses"""
def process_request(self, request):
host = request.get_host()
domain, port = split_domain_port(host)
try:
validate_ipv4_address(domain)
except ValidationError:
# not an IP address. Call the superclass
return super(EnforceHostIPMiddleware, self).process_request(request)
# it is an IP address
return
class HttpResponseRateLimited(HttpResponse):
status_code = 429
class SUMORefreshIDTokenAdminMiddleware(SessionRefresh):
def __init__(self, *args, **kwargs):
if not settings.OIDC_ENABLE or settings.DEV:
raise MiddlewareNotUsed
def process_request(self, request):
"""Only allow refresh and enforce OIDC auth on admin URLs"""
# If the admin is targeted let's check the backend used, if any
if request.path.startswith('/admin/') and request.path != '/admin/login/':
backend = request.session.get(BACKEND_SESSION_KEY)
if backend and backend.split('.')[-1] != 'SumoOIDCAuthBackend':
logout(request)
messages.error(request, 'OIDC login required for admin access')
return HttpResponseRedirect('/admin/login/')
return super(SUMORefreshIDTokenAdminMiddleware, self).process_request(request)
class LocaleURLMiddleware(object):
"""
Based on zamboni.amo.middleware.
Tried to use localeurl but it choked on 'en-US' with capital letters.
1. Search for the locale.
2. Save it in the request.
3. Strip it from the URL.
"""
def process_request(self, request):
prefixer = Prefixer(request)
set_url_prefixer(prefixer)
full_path = prefixer.fix(prefixer.shortened_path)
if request.GET.get('lang', '') in settings.SUMO_LANGUAGES:
# Blank out the locale so that we can set a new one. Remove lang
# from the query params so we don't have an infinite loop.
prefixer.locale = ''
new_path = prefixer.fix(prefixer.shortened_path)
query = dict((smart_str(k), v) for
k, v in request.GET.iteritems() if k != 'lang')
# 'lang' is only used on the language selection page. If this is
# present it is safe to set language preference for the current
# user.
if request.user.is_anonymous():
cookie = settings.LANGUAGE_COOKIE_NAME
request.session[cookie] = request.GET['lang']
return HttpResponseRedirect(urlparams(new_path, **query))
if full_path != request.path:
query_string = request.META.get('QUERY_STRING', '')
full_path = urllib.quote(full_path.encode('utf-8'))
if query_string:
full_path = '%s?%s' % (full_path, query_string)
response = HttpResponseRedirect(full_path)
# Vary on Accept-Language if we changed the locale
old_locale = prefixer.locale
new_locale, _ = split_path(full_path)
if old_locale != new_locale:
response['Vary'] = 'Accept-Language'
return response
request.path_info = '/' + prefixer.shortened_path
request.LANGUAGE_CODE = prefixer.locale
translation.activate(prefixer.locale)
def process_response(self, request, response):
"""Unset the thread-local var we set during `process_request`."""
# This makes mistaken tests (that should use LocalizingClient but
# use Client instead) fail loudly and reliably. Otherwise, the set
# prefixer bleeds from one test to the next, making tests
# order-dependent and causing hard-to-track failures.
set_url_prefixer(None)
return response
def process_exception(self, request, exception):
set_url_prefixer(None)
class Forbidden403Middleware(object):
"""
Renders a 403.html page if response.status_code == 403.
"""
def process_response(self, request, response):
if isinstance(response, HttpResponseForbidden):
return handle403(request)
# If not 403, return response unmodified
return response
class VaryNoCacheMiddleware(MiddlewareMixin):
"""
    If enabled, this sets headers to prevent the CDN (or other caches) from
    caching the response if the response was set to vary on Accept-Language.
    This should be near the top of the list of middlewares so it will be able
    to inspect the near-final response, since response middleware is processed
    in reverse order.
"""
def __init__(self):
if not settings.ENABLE_VARY_NOCACHE_MIDDLEWARE:
raise MiddlewareNotUsed
@staticmethod
def process_response(request, response):
if 'vary' in response and 'accept-language' in response['vary'].lower():
add_never_cache_headers(response)
return response
class CacheHeadersMiddleware(MiddlewareMixin):
"""
Sets no-cache headers normally, and cache for some time in READ_ONLY mode.
"""
def process_response(self, request, response):
if 'cache-control' in response or response.status_code >= 400:
return response
if (request.method in ('GET', 'HEAD') and
settings.CACHE_MIDDLEWARE_SECONDS):
# uses CACHE_MIDDLEWARE_SECONDS by default
patch_response_headers(response)
else:
add_never_cache_headers(response)
return response
class PlusToSpaceMiddleware(object):
"""Replace old-style + with %20 in URLs."""
def process_request(self, request):
p = re.compile(r'\+')
if p.search(request.path_info):
new = p.sub(' ', request.path_info)
if request.META.get('QUERY_STRING'):
new = u'%s?%s' % (new,
smart_unicode(request.META['QUERY_STRING']))
if hasattr(request, 'LANGUAGE_CODE'):
new = u'/%s%s' % (request.LANGUAGE_CODE, new)
return HttpResponsePermanentRedirect(new)
class ReadOnlyMiddleware(object):
def __init__(self):
if not settings.READ_ONLY:
raise MiddlewareNotUsed
def process_request(self, request):
if request.method == 'POST':
return render(request, 'sumo/read-only.html', status=503)
def process_exception(self, request, exception):
if isinstance(exception, DatabaseError):
return render(request, 'sumo/read-only.html', status=503)
class RemoveSlashMiddleware(object):
"""
Middleware that tries to remove a trailing slash if there was a 404.
If the response is a 404 because url resolution failed, we'll look for a
better url without a trailing slash.
"""
def process_response(self, request, response):
if (response.status_code == 404 and
request.path_info.endswith('/') and
not is_valid_path(request.path_info) and
is_valid_path(request.path_info[:-1])):
# Use request.path because we munged app/locale in path_info.
newurl = request.path[:-1]
if request.GET:
with safe_query_string(request):
newurl += '?' + request.META['QUERY_STRING']
return HttpResponsePermanentRedirect(newurl)
return response
@contextlib.contextmanager
def safe_query_string(request):
"""
Turn the QUERY_STRING into a unicode- and ascii-safe string.
We need unicode so it can be combined with a reversed URL, but it has to be
ascii to go in a Location header. iri_to_uri seems like a good compromise.
"""
qs = request.META['QUERY_STRING']
try:
request.META['QUERY_STRING'] = iri_to_uri(qs)
yield
finally:
request.META['QUERY_STRING'] = qs
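# Minimal sketch of safe_query_string() in isolation, using a stand-in object
# rather than a real HttpRequest; the query string is made-up data. Inside the
# block QUERY_STRING should be ASCII-safe (roughly 'q=caf%C3%A9'), and it is
# restored to the original value afterwards.
def _example_safe_query_string():
    class FakeRequest(object):
        META = {'QUERY_STRING': u'q=caf\u00e9'}
    request = FakeRequest()
    with safe_query_string(request):
        ascii_qs = request.META['QUERY_STRING']
    return ascii_qs, request.META['QUERY_STRING']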
# Mobile user agents.
MOBILE_UAS = re.compile('android|fennec|mobile|iphone|opera (?:mini|mobi)')
# Tablet user agents. User agents matching tablets will not be considered
# to be mobile (for tablets, request.MOBILE = False).
TABLET_UAS = re.compile('tablet|ipad')
# This is a modified version of 'mobility.middleware.DetectMobileMiddleware'.
# We want to exclude tablets from being detected as MOBILE and there is
# no way to do that by just overriding the detection regex.
class DetectMobileMiddleware(MiddlewareMixin):
"""Looks at user agent and decides whether the device is mobile."""
def __init__(self, *args, **kwargs):
if settings.SKIP_MOBILE_DETECTION:
raise MiddlewareNotUsed()
def process_request(self, request):
ua = request.META.get('HTTP_USER_AGENT', '').lower()
mc = request.COOKIES.get(settings.MOBILE_COOKIE)
is_tablet = TABLET_UAS.search(ua)
is_mobile = not is_tablet and MOBILE_UAS.search(ua)
if (is_mobile and mc != 'off') or mc == 'on':
request.META['HTTP_X_MOBILE'] = '1'
def process_response(self, request, response):
patch_vary_headers(response, ['User-Agent'])
return response
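# Rough sketch of the user-agent classification above, with a made-up iPad UA:
# it matches TABLET_UAS, so it is not treated as mobile even though the string
# also contains 'mobile'. Not part of the original middleware.
def _example_ua_classification():
    ua = 'mozilla/5.0 (ipad; cpu os 12_0 like mac os x) mobile/15e148'
    is_tablet = TABLET_UAS.search(ua)
    is_mobile = not is_tablet and MOBILE_UAS.search(ua)
    return bool(is_tablet), bool(is_mobile)  # expected (True, False)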
class MobileSwitchMiddleware(object):
"""Looks for query string parameters to switch to the mobile site."""
def process_request(self, request):
mobile = request.GET.get('mobile')
if mobile == '0':
request.MOBILE = False
elif mobile == '1':
request.MOBILE = True
def process_response(self, request, response):
mobile = request.GET.get('mobile')
if mobile == '0':
response.set_cookie(mobility.middleware.COOKIE, 'off')
elif mobile == '1':
response.set_cookie(mobility.middleware.COOKIE, 'on')
return response
class HostnameMiddleware(MiddlewareMixin):
def __init__(self):
if getattr(settings, 'DISABLE_HOSTNAME_MIDDLEWARE', False):
raise MiddlewareNotUsed()
values = [getattr(settings, x) for x in ['PLATFORM_NAME', 'K8S_DOMAIN']]
self.backend_server = '.'.join(x for x in values if x)
def process_response(self, request, response):
response['X-Backend-Server'] = self.backend_server
return response
class FilterByUserAgentMiddleware(MiddlewareMixin):
"""Looks at user agent and decides whether the device is allowed on the site."""
def __init__(self, *args, **kwargs):
if not settings.USER_AGENT_FILTERS:
raise MiddlewareNotUsed()
def process_request(self, request):
client_ua = request.META.get('HTTP_USER_AGENT', '').lower()
# get only ascii chars
ua = ''.join(i for i in client_ua if ord(i) < 128)
if any(x in ua for x in settings.USER_AGENT_FILTERS):
response = HttpResponseRateLimited()
patch_vary_headers(response, ['User-Agent'])
return response
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logging mechanisms for use in OpenHTF.
Below is an illustration of the tree of OpenHTF loggers:
+---------+
| openhtf |
+---------+
|
| +--------------------------------+
|--| Framework logs: |
| | - openhtf.core.test_executor |
| | - openhtf.util.threads |
| | - etc. etc. |
| +--------------------------------+
|
| +----------------------------------+
+--| Test record logs: |
| - openhtf.test_record.<test_uid> |
+----------------------------------+
|
| +-----------------------------------------------------+
+--| Test subsystem logs: |
| - openhtf.test_record.<test_uid>.phase.<phase_name> |
| - openhtf.test_record.<test_uid>.plug.<plug_name> |
+-----------------------------------------------------+
All of our logging handlers are attached to the top-level `openhtf` logger. The
other loggers in the tree have their logs propagate up to the top level.
------------------------------ Test record output ------------------------------
The test record loggers are loggers specific to a running test, with names
prefixed by `openhtf.test_record.<test_uid>`. These logs are saved only to the
record of the test with a matching test_uid. We call the other loggers in the
tree the "framework loggers". Their logs are saved to the records of all
currently running tests.
Test record logs are typically generated by test authors. Often this is done
with the logger attached to the TestApi object passed into test phases, e.g.:
def MyLoggingPhase(test):
test.logger.info('My log line.')
To facilitate emitting test record logs outside of test phases (without forcing
the author to pass the logger around), we provide the get_record_logger_for()
function which takes a Test UID and returns a logger, e.g.:
from openhtf.util import logs
class MyHelperClass(object):
    def __init__(self):
self.test_uid = ''
def MyRandomMethod(self):
logs.get_record_logger_for(self.test_uid).info(
'Log this to currently running test.')
def MyPhase(test, helper):
helper.MyRandomMethod()
if __name__ == '__main__':
helper = MyHelperClass()
my_test = openhtf.Test(MyPhase.with_args(helper=helper))
helper.test_uid = my_test.uid
my_test.execute()
------------------------------ Command-line output -----------------------------
By default, logs are not sent to stdout. This is done to allow test authors to
provide a more streamlined and predictable console interface for test operators.
See the util.console_output module for tools for printing to the CLI.
During development you will probably want to run OpenHTF at a higher verbosity
level in order to view logs and full tracebacks of errors. The verbosity flag
can be used as follows:
- Default: Logs are not printed.
- `-v`: Logs are printed at the INFO level and up.
- `-vv`: Logs are printed at the DEBUG level and up.
Additionally, the --quiet flag and CLI_QUIET variable from the console_output
module will override the verbosity setting and suppress all CLI output.
"""
import collections
import datetime
import logging
import os
import re
import sys
import textwrap
from openhtf.util import argv
from openhtf.util import console_output
from openhtf.util import functions
from openhtf.util import threads
import six
# The number of v's provided as command line arguments to control verbosity.
# Will be overridden if the ARG_PARSER below parses the -v argument.
CLI_LOGGING_VERBOSITY = 0
ARG_PARSER = argv.ModuleParser()
ARG_PARSER.add_argument(
'-v', action=argv.StoreRepsInModule,
target='%s.CLI_LOGGING_VERBOSITY' % __name__,
help=textwrap.dedent('''\
CLI logging verbosity. Can be repeated to increase verbosity (i.e. -v,
-vv, -vvv).'''))
LOGGER_PREFIX = 'openhtf'
RECORD_LOGGER_PREFIX = '.'.join((LOGGER_PREFIX, 'test_record'))
RECORD_LOGGER_RE = re.compile(
r'%s\.(?P<test_uid>[^.]*)\.?' % RECORD_LOGGER_PREFIX)
SUBSYSTEM_LOGGER_RE = re.compile(
r'%s\.[^.]*\.(?P<subsys>plug|phase)\.(?P<id>[^.]*)' % RECORD_LOGGER_PREFIX)
_LOG_ONCE_SEEN = set()
LogRecord = collections.namedtuple(
'LogRecord', 'level logger_name source lineno timestamp_millis message')
def get_record_logger_for(test_uid):
"""Return the child logger associated with the specified test UID."""
return logging.getLogger(RECORD_LOGGER_PREFIX).getChild(test_uid)
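# Illustrative sketch only: the returned logger's name embeds the test UID,
# which is what RECORD_LOGGER_RE and TestUidFilter key on. 'abcde12345' is a
# made-up UID.
def _example_record_logger_name():
  logger = get_record_logger_for('abcde12345')
  return logger.name  # expected 'openhtf.test_record.abcde12345'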
def initialize_record_handler(test_uid, test_record, notify_update):
"""Initialize the record handler for a test.
For each running test, we attach a record handler to the top-level OpenHTF
logger. The handler will append OpenHTF logs to the test record, while
filtering out logs that are specific to any other test run.
"""
htf_logger = logging.getLogger(LOGGER_PREFIX)
htf_logger.addHandler(RecordHandler(test_uid, test_record, notify_update))
def remove_record_handler(test_uid):
handlers = logging.getLogger(LOGGER_PREFIX).handlers
for handler in handlers:
    if isinstance(handler, RecordHandler) and handler.test_uid == test_uid:
handlers.remove(handler)
break
def log_once(log_func, msg, *args, **kwargs):
""""Logs a message only once."""
if msg not in _LOG_ONCE_SEEN:
log_func(msg, *args, **kwargs)
# Key on the message, ignoring args. This should fit most use cases.
_LOG_ONCE_SEEN.add(msg)
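# Small usage sketch (not part of the original module): the second call is a
# no-op because log_once keys on the message text via _LOG_ONCE_SEEN.
def _example_log_once():
  logger = logging.getLogger(LOGGER_PREFIX)
  log_once(logger.warning, 'Example deprecation warning.')
  log_once(logger.warning, 'Example deprecation warning.')  # suppressed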
class MacAddressLogFilter(logging.Filter):
"""A filter which redacts MAC addresses."""
MAC_REPLACE_RE = re.compile(r"""
((?:[\dA-F]{2}:){3}) # 3-part prefix, f8:8f:ca means google
(?:[\dA-F]{2}(:|\b)){3} # the remaining octets
""", re.IGNORECASE | re.VERBOSE)
MAC_REPLACEMENT = r'\1<REDACTED>'
def filter(self, record):
if self.MAC_REPLACE_RE.search(record.getMessage()):
# Update all the things to have no mac address in them
if isinstance(record.msg, six.string_types):
record.msg = self.MAC_REPLACE_RE.sub(self.MAC_REPLACEMENT, record.msg)
record.args = tuple([
self.MAC_REPLACE_RE.sub(self.MAC_REPLACEMENT, str(arg))
if isinstance(arg, six.string_types)
else arg for arg in record.args])
else:
record.msg = self.MAC_REPLACE_RE.sub(
self.MAC_REPLACEMENT, record.getMessage())
return True
# We use one shared instance of this, it has no internal state.
MAC_FILTER = MacAddressLogFilter()
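# Illustrative sketch of the redaction performed by MAC_FILTER, applied to a
# made-up MAC address: the 3-octet vendor prefix is kept and the remaining
# octets are replaced.
def _example_mac_redaction():
  scrubbed = MacAddressLogFilter.MAC_REPLACE_RE.sub(
      MacAddressLogFilter.MAC_REPLACEMENT, 'device at f8:8f:ca:12:34:56')
  return scrubbed  # expected 'device at f8:8f:ca:<REDACTED>'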
class TestUidFilter(logging.Filter):
"""Exclude logs emitted by the record loggers of other tests."""
def __init__(self, test_uid):
super(TestUidFilter, self).__init__()
self.test_uid = test_uid
def filter(self, record):
match = RECORD_LOGGER_RE.match(record.name)
# Keep framework logs.
if not match:
return True
# Exclude logs emitted by the record loggers of other tests.
return match.group('test_uid') == self.test_uid
class KillableThreadSafeStreamHandler(logging.StreamHandler):
def handle(self, record):
# logging.Handler objects have an internal lock attribute that is a
# threading.RLock instance; it can cause deadlocks in Python 2.7 when a
# KillableThread is killed while its release method is running.
with threads.safe_lock_release_context(self.lock):
return super(KillableThreadSafeStreamHandler, self).handle(record)
class RecordHandler(logging.Handler):
"""A handler to save logs to an HTF TestRecord."""
def __init__(self, test_uid, test_record, notify_update):
super(RecordHandler, self).__init__()
self.test_uid = test_uid
self._test_record = test_record
self._notify_update = notify_update
self.addFilter(MAC_FILTER)
self.addFilter(TestUidFilter(test_uid))
def handle(self, record):
# logging.Handler objects have an internal lock attribute that is a
# threading.RLock instance; it can cause deadlocks in Python 2.7 when a
# KillableThread is killed while its release method is running.
with threads.safe_lock_release_context(self.lock):
return super(RecordHandler, self).handle(record)
def emit(self, record):
"""Save a logging.LogRecord to our test record.
Logs carry useful metadata such as the logger name and level information.
We capture this in a structured format in the test record to enable
filtering by client applications.
Args:
record: A logging.LogRecord to record.
"""
try:
message = self.format(record)
log_record = LogRecord(
record.levelno, record.name, os.path.basename(record.pathname),
record.lineno, int(record.created * 1000), message,
)
self._test_record.log_records.append(log_record)
self._notify_update()
except Exception: # pylint: disable=broad-except
self.handleError(record)
class CliFormatter(logging.Formatter):
"""Formats log messages for printing to the CLI."""
def format(self, record):
"""Format the record as tersely as possible but preserve info."""
super(CliFormatter, self).format(record)
localized_time = datetime.datetime.fromtimestamp(record.created)
terse_time = localized_time.strftime(u'%H:%M:%S')
terse_level = record.levelname[0]
terse_name = record.name.split('.')[-1]
match = RECORD_LOGGER_RE.match(record.name)
if match:
# Figure out which OpenHTF subsystem the record came from.
subsys_match = SUBSYSTEM_LOGGER_RE.match(record.name)
if subsys_match:
terse_name = '<{subsys}: {id}>'.format(
subsys=subsys_match.group('subsys'),
id=subsys_match.group('id'))
else:
# Fall back to using the last five characters of the test UUID.
terse_name = '<test %s>' % match.group('test_uid')[-5:]
return '{lvl} {time} {logger} - {msg}'.format(lvl=terse_level,
time=terse_time,
logger=terse_name,
msg=record.message)
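# Illustrative sketch only: format a hand-built LogRecord from a made-up phase
# logger name. The timestamp in the output depends on when the record is
# created, so only the general shape of the result is indicated.
def _example_cli_format():
  record = logging.LogRecord(
      name='openhtf.test_record.abcde12345.phase.main', level=logging.INFO,
      pathname=__file__, lineno=1, msg='hello %s', args=('world',),
      exc_info=None)
  return CliFormatter().format(record)  # e.g. 'I 12:34:56 <phase: main> - hello world'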
@functions.call_once
def configure_logging():
"""One-time initialization of loggers. See module docstring for more info."""
# Define the top-level logger.
htf_logger = logging.getLogger(LOGGER_PREFIX)
htf_logger.propagate = False
htf_logger.setLevel(logging.DEBUG)
# By default, don't print any logs to the CLI.
if CLI_LOGGING_VERBOSITY == 0:
htf_logger.addHandler(logging.NullHandler())
return
if CLI_LOGGING_VERBOSITY == 1:
logging_level = logging.INFO
else:
logging_level = logging.DEBUG
# Configure a handler to print to the CLI.
cli_handler = KillableThreadSafeStreamHandler(stream=sys.stdout)
cli_handler.setFormatter(CliFormatter())
cli_handler.setLevel(logging_level)
cli_handler.addFilter(MAC_FILTER)
htf_logger.addHandler(cli_handler)
# Suppress CLI logging if the --quiet flag is used, or while CLI_QUIET is set
# in the console_output module.
cli_handler.addFilter(console_output.CliQuietFilter())
|
|
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from opencensus.ext.httplib import trace
from opencensus.trace import span as span_module
from opencensus.trace.propagation import trace_context_http_header_format
class Test_httplib_trace(unittest.TestCase):
def tearDown(self):
from opencensus.trace import execution_context
execution_context.clear()
def test_trace_integration(self):
mock_wrap_request = mock.Mock()
mock_wrap_response = mock.Mock()
mock_httplib = mock.Mock()
wrap_request_result = 'wrap request result'
wrap_response_result = 'wrap response result'
mock_wrap_request.return_value = wrap_request_result
mock_wrap_response.return_value = wrap_response_result
mock_request_func = mock.Mock()
mock_response_func = mock.Mock()
mock_request_func.__name__ = 'request'
mock_response_func.__name__ = 'getresponse'
setattr(mock_httplib.HTTPConnection, 'request', mock_request_func)
setattr(mock_httplib.HTTPConnection, 'getresponse', mock_response_func)
patch_wrap_request = mock.patch(
'opencensus.ext.httplib.trace.wrap_httplib_request',
mock_wrap_request)
patch_wrap_response = mock.patch(
'opencensus.ext.httplib.trace.wrap_httplib_response',
mock_wrap_response)
patch_httplib = mock.patch(
'opencensus.ext.httplib.trace.httplib', mock_httplib)
with patch_wrap_request, patch_wrap_response, patch_httplib:
trace.trace_integration()
self.assertEqual(
getattr(mock_httplib.HTTPConnection, 'request'),
wrap_request_result)
self.assertEqual(
getattr(mock_httplib.HTTPConnection, 'getresponse'),
wrap_response_result)
def test_wrap_httplib_request(self):
mock_span = mock.Mock()
span_id = '1234'
mock_span.span_id = span_id
mock_tracer = MockTracer(mock_span)
mock_request_func = mock.Mock()
mock_request_func.__name__ = 'request'
patch = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'get_opencensus_tracer',
return_value=mock_tracer)
patch_thread = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'is_exporter',
return_value=False)
wrapped = trace.wrap_httplib_request(mock_request_func)
mock_self = mock.Mock()
method = 'GET'
url = 'http://localhost:8080'
body = None
headers = {}
with patch, patch_thread:
wrapped(mock_self, method, url, body, headers)
expected_attributes = {'component': 'HTTP',
'http.url': url, 'http.method': method}
expected_name = '[httplib]request'
mock_request_func.assert_called_with(mock_self, method, url, body, {
'traceparent': '00-123-456-01',
})
self.assertEqual(expected_attributes, mock_tracer.span.attributes)
self.assertEqual(expected_name, mock_tracer.span.name)
self.assertEqual(span_module.SpanKind.CLIENT,
mock_tracer.span.span_kind)
def test_wrap_httplib_request_excludelist_ok(self):
mock_span = mock.Mock()
span_id = '1234'
mock_span.span_id = span_id
mock_tracer = MockTracer(mock_span)
mock_request_func = mock.Mock()
mock_request_func.__name__ = 'request'
patch_tracer = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'get_opencensus_tracer',
return_value=mock_tracer)
patch_attr = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'get_opencensus_attr',
return_value=None)
patch_thread = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'is_exporter',
return_value=False)
wrapped = trace.wrap_httplib_request(mock_request_func)
mock_self = mock.Mock()
method = 'GET'
url = 'http://localhost:8080'
body = None
headers = {}
with patch_tracer, patch_attr, patch_thread:
wrapped(mock_self, method, url, body, headers)
mock_request_func.assert_called_with(mock_self, method, url, body, {
'traceparent': '00-123-456-01',
})
def test_wrap_httplib_request_excludelist_nok(self):
mock_span = mock.Mock()
span_id = '1234'
mock_span.span_id = span_id
mock_tracer = MockTracer(mock_span)
mock_request_func = mock.Mock()
mock_request_func.__name__ = 'request'
patch_tracer = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'get_opencensus_tracer',
return_value=mock_tracer)
patch_attr = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'get_opencensus_attr',
return_value=['localhost:8080'])
patch_thread = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'is_exporter',
return_value=False)
wrapped = trace.wrap_httplib_request(mock_request_func)
mock_self = mock.Mock()
mock_self.host = 'localhost'
mock_self.port = '8080'
method = 'GET'
url = 'http://{}:{}'.format(mock_self.host, mock_self.port)
body = None
headers = {}
with patch_tracer, patch_attr, patch_thread:
wrapped(mock_self, method, url, body, headers)
mock_request_func.assert_called_with(mock_self, method, url, body, {})
def test_wrap_httplib_request_exporter_thread(self):
mock_request_func = mock.Mock()
mock_request_func.__name__ = 'request'
patch_thread = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'is_exporter',
return_value=True)
mock_self = mock.Mock()
mock_self.host = 'localhost'
mock_self.port = '8080'
method = 'GET'
url = 'http://{}:{}'.format(mock_self.host, mock_self.port)
body = None
headers = {}
wrapped = trace.wrap_httplib_request(mock_request_func)
with patch_thread:
wrapped(mock_self, method, url, body, headers)
mock_request_func.assert_called_with(mock_self, method, url, body, {})
def test_wrap_httplib_response(self):
mock_span = mock.Mock()
span_id = '1234'
mock_span.span_id = span_id
mock_span.attributes = {}
mock_tracer = MockTracer(mock_span)
mock_response_func = mock.Mock()
mock_result = mock.Mock()
mock_result.status = '200'
mock_response_func.return_value = mock_result
patch_tracer = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'get_opencensus_tracer',
return_value=mock_tracer)
patch_attr = mock.patch(
'opencensus.ext.httplib.trace.'
'execution_context.get_opencensus_attr',
return_value=span_id)
patch_thread = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'is_exporter',
return_value=False)
wrapped = trace.wrap_httplib_response(mock_response_func)
with patch_tracer, patch_attr, patch_thread:
wrapped(mock.Mock())
expected_attributes = {'http.status_code': '200'}
self.assertEqual(expected_attributes, mock_tracer.span.attributes)
def test_wrap_httplib_response_no_open_span(self):
mock_span = mock.Mock()
span_id = '1234'
mock_span.span_id = span_id
mock_span.attributes = {}
mock_tracer = MockTracer(mock_span)
mock_response_func = mock.Mock()
mock_result = mock.Mock()
mock_result.status = '200'
mock_response_func.return_value = mock_result
patch_tracer = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'get_opencensus_tracer',
return_value=mock_tracer)
patch_attr = mock.patch(
'opencensus.ext.httplib.trace.'
'execution_context.get_opencensus_attr',
return_value='1111')
patch_thread = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'is_exporter',
return_value=False)
wrapped = trace.wrap_httplib_response(mock_response_func)
with patch_tracer, patch_attr, patch_thread:
wrapped(mock.Mock())
# Attribute should be empty as there is no matching span
expected_attributes = {}
self.assertEqual(expected_attributes, mock_tracer.span.attributes)
def test_wrap_httplib_response_exporter_thread(self):
mock_span = mock.Mock()
span_id = '1234'
mock_span.span_id = span_id
mock_span.attributes = {}
mock_tracer = MockTracer(mock_span)
mock_response_func = mock.Mock()
mock_result = mock.Mock()
mock_result.status = '200'
mock_response_func.return_value = mock_result
patch_tracer = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'get_opencensus_tracer',
return_value=mock_tracer)
patch_attr = mock.patch(
'opencensus.ext.httplib.trace.'
'execution_context.get_opencensus_attr',
return_value='1111')
patch_thread = mock.patch(
'opencensus.ext.requests.trace.execution_context.'
'is_exporter',
return_value=True)
wrapped = trace.wrap_httplib_response(mock_response_func)
with patch_tracer, patch_attr, patch_thread:
wrapped(mock.Mock())
expected_attributes = {}
self.assertEqual(expected_attributes, mock_tracer.span.attributes)
class MockTracer(object):
def __init__(self, span=None):
self.span = span
self.propagator = (
trace_context_http_header_format.TraceContextPropagator())
def current_span(self):
return self.span
def start_span(self):
span = mock.Mock()
span.attributes = {}
span.context_tracer = mock.Mock()
span.context_tracer.span_context = mock.Mock()
span.context_tracer.span_context.trace_id = '123'
span.context_tracer.span_context.span_id = '456'
span.context_tracer.span_context.tracestate = None
self.span = span
return span
def end_span(self):
pass
def add_attribute_to_current_span(self, key, value):
self.span.attributes[key] = value
|
|
# -*- coding: utf-8 -*-
"""
Dependencies: flask, tornado
SeeAlso:
routes.turk_identification
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from ibeis.control import controller_inject
from flask import url_for, request, current_app # NOQA
import numpy as np # NOQA
import utool as ut
import uuid
import requests
import six
ut.noinject('[apis_sync]')
CLASS_INJECT_KEY, register_ibs_method = (
controller_inject.make_ibs_register_decorator(__name__))
register_api = controller_inject.get_ibeis_flask_api(__name__)
REMOTE_TESTING = False
if REMOTE_TESTING:
REMOTE_DOMAIN = '127.0.0.1'
REMOTE_PORT = '5555'
REMOTE_UUID = None
else:
REMOTE_DOMAIN = '35.161.135.191'
REMOTE_PORT = '5555'
REMOTE_UUID = 'e468d14b-3a39-4165-8f62-16f9e3deea39'
remote_args = ut.get_arg_dict()
REMOTE_DOMAIN = remote_args.get('sync-domain', REMOTE_DOMAIN)
REMOTE_PORT = remote_args.get('sync-port', REMOTE_PORT)
REMOTE_UUID = remote_args.get('sync-uuid', REMOTE_UUID)
if REMOTE_UUID in [True, '', 'none', 'None']:
REMOTE_UUID = None
REMOTE_URL = 'http://%s:%s' % (REMOTE_DOMAIN, REMOTE_PORT, )
REMOTE_UUID = None if REMOTE_UUID is None else uuid.UUID(REMOTE_UUID)
def _construct_route_url(route_rule):
if not route_rule.startswith('/'):
route_rule = '/' + route_rule
if not route_rule.endswith('/'):
route_rule = route_rule + '/'
route_url = '%s%s' % (REMOTE_URL, route_rule, )
return route_url
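# Small sketch (not part of the original module): leading and trailing slashes
# are normalized before the REMOTE_URL prefix is attached. With the default
# REMOTE_URL this is expected to give
# 'http://35.161.135.191:5555/api/test/helloworld/'.
def _example_construct_route_url():
    return _construct_route_url('api/test/helloworld')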
def _verify_response(response):
try:
response_dict = ut.from_json(response.text)
except ValueError:
raise AssertionError('Could not get valid JSON response from server')
status = response_dict.get('status', {})
assert status.get('success', False)
response = response_dict.get('response', None)
return response
def _get(route_rule, **kwargs):
route_url = _construct_route_url(route_rule)
response = requests.get(route_url, **kwargs)
return _verify_response(response)
def _assert_remote_online(ibs):
try:
version = _get('/api/core/db/version/')
uuid = _get('/api/core/db/uuid/init/')
assert version == ibs.get_database_version()
if REMOTE_UUID is not None:
assert uuid == REMOTE_UUID
except:
raise IOError('Remote IBEIS DETECT database offline at %s' % (REMOTE_URL, ))
@register_ibs_method
def _detect_remote_push_images(ibs, gid_list):
route_url = _construct_route_url('/api/upload/image/')
num_images = len(gid_list)
image_path_list = ibs.get_image_paths(gid_list)
for index, image_path in enumerate(image_path_list):
print('\tSending %d / %d: %r' % (index, num_images, image_path, ))
file_dict = {
'image': open(image_path, 'rb'),
}
response = requests.post(route_url, files=file_dict)
_verify_response(response)
print('\t...sent')
@register_ibs_method
def _detect_remote_push_imageset(ibs, image_uuid_list):
route_url = _construct_route_url('/api/image/imageset/text/json/')
db_name = ibs.get_dbname()
db_uuid = ibs.get_db_init_uuid()
time_str = ut.get_timestamp()
imageset_text = 'Sync from %s (%s) at %s' % (db_name, db_uuid, time_str)
imageset_text_list = [imageset_text] * len(image_uuid_list)
data_dict = {
'image_uuid_list': image_uuid_list,
'imageset_text_list': imageset_text_list,
}
for key in data_dict:
data_dict[key] = ut.to_json(data_dict[key])
response = requests.put(route_url, data=data_dict)
_verify_response(response)
@register_ibs_method
def _detect_remote_push_annots(ibs, aid_list):
route_url = _construct_route_url('/api/annot/json/')
print('\tSending...')
data_dict = {
'image_uuid_list': ibs.get_annot_image_uuids(aid_list),
'annot_uuid_list': ibs.get_annot_uuids(aid_list),
'annot_bbox_list': ibs.get_annot_bboxes(aid_list),
}
for key in data_dict:
data_dict[key] = ut.to_json(data_dict[key])
response = requests.post(route_url, data=data_dict)
_verify_response(response)
print('\t...sent')
@register_ibs_method
def _detect_remote_push_metadata(ibs, route_rule, uuid_str, value_str,
uuid_list, value_list):
route_url = _construct_route_url(route_rule)
print('\tSetting %s metadata for %s' % (route_rule, uuid_str, ))
data_dict = {
uuid_str: uuid_list,
value_str: value_list,
}
for key in data_dict:
data_dict[key] = ut.to_json(data_dict[key])
response = requests.put(route_url, data=data_dict)
_verify_response(response)
print('\t...set')
@register_ibs_method
def _detect_remote_push_annot_metadata(ibs, annot_uuid_list):
aid_list = ibs.get_annot_aids_from_uuid(annot_uuid_list)
ibs._detect_remote_push_metadata('/api/annot/bbox/json/',
'annot_uuid_list',
'bbox_list',
annot_uuid_list,
ibs.get_annot_bboxes(aid_list))
ibs._detect_remote_push_metadata('/api/annot/theta/json/',
'annot_uuid_list',
'theta_list',
annot_uuid_list,
ibs.get_annot_thetas(aid_list))
ibs._detect_remote_push_metadata('/api/annot/viewpoint/json/',
'annot_uuid_list',
'viewpoint_list',
annot_uuid_list,
ibs.get_annot_viewpoints(aid_list))
ibs._detect_remote_push_metadata('/api/annot/quality/text/json/',
'annot_uuid_list',
'quality_text_list',
annot_uuid_list,
ibs.get_annot_quality_texts(aid_list))
ibs._detect_remote_push_metadata('/api/annot/species/json/',
'annot_uuid_list',
'species_text_list',
annot_uuid_list,
ibs.get_annot_species_texts(aid_list))
ibs._detect_remote_push_metadata('/api/annot/multiple/json/',
'annot_uuid_list',
'flag_list',
annot_uuid_list,
ibs.get_annot_multiple(aid_list))
ibs._detect_remote_push_metadata('/api/annot/interest/json/',
'annot_uuid_list',
'flag_list',
annot_uuid_list,
ibs.get_annot_interest(aid_list))
ibs._detect_remote_push_metadata('/api/annot/tags/json/',
'annot_uuid_list',
'annot_tags_list',
annot_uuid_list,
ibs.get_annot_tag_text(aid_list))
ibs._detect_remote_push_metadata('/api/annot/name/text/json/',
'annot_uuid_list',
'name_text_list',
annot_uuid_list,
ibs.get_annot_name_texts(aid_list))
@register_ibs_method
def _detect_remote_push_parts(ibs, part_rowid_list):
route_url = _construct_route_url('/api/part/json/')
print('\tSending...')
data_dict = {
'annot_uuid_list': ibs.get_part_annot_uuids(part_rowid_list),
'part_uuid_list': ibs.get_part_uuids(part_rowid_list),
'part_bbox_list': ibs.get_part_bboxes(part_rowid_list),
}
for key in data_dict:
data_dict[key] = ut.to_json(data_dict[key])
response = requests.post(route_url, data=data_dict)
_verify_response(response)
print('\t...sent')
@register_ibs_method
def _detect_remote_push_part_metadata(ibs, part_uuid_list):
part_rowid_list = ibs.get_part_rowids_from_uuid(part_uuid_list)
ibs._detect_remote_push_metadata('/api/part/bbox/json/',
'part_uuid_list',
'bbox_list',
part_uuid_list,
ibs.get_part_bboxes(part_rowid_list))
ibs._detect_remote_push_metadata('/api/part/theta/json/',
'part_uuid_list',
'theta_list',
part_uuid_list,
ibs.get_part_thetas(part_rowid_list))
ibs._detect_remote_push_metadata('/api/part/viewpoint/json/',
'part_uuid_list',
'viewpoint_list',
part_uuid_list,
ibs.get_part_viewpoints(part_rowid_list))
ibs._detect_remote_push_metadata('/api/part/quality/text/json/',
'part_uuid_list',
'quality_text_list',
part_uuid_list,
ibs.get_part_quality_texts(part_rowid_list)) # NOQA
ibs._detect_remote_push_metadata('/api/part/type/json/',
'part_uuid_list',
'type_text_list',
part_uuid_list,
ibs.get_part_types(part_rowid_list))
ibs._detect_remote_push_metadata('/api/part/tags/json/',
'part_uuid_list',
'part_tags_list',
part_uuid_list,
ibs.get_part_tag_text(part_rowid_list))
@register_ibs_method
@register_api('/api/sync/', methods=['GET'])
def detect_remote_sync_images(ibs, gid_list=None,
only_sync_missing_images=True):
_assert_remote_online(ibs)
if gid_list is None:
gid_list = ibs.get_valid_gids()
confirm_list = [
ut.random_nonce()[:5]
for _ in range(3)
]
confirm_str = '-'.join(confirm_list)
print('You are about to submit %d images to a remote DETECT database at %r with UUID=%r.' % (len(gid_list), REMOTE_URL, REMOTE_UUID, ))
print('Only do this action if you are confident in the detection accuracy of the images, annotations, annotation metadata, parts and part metadata.')
print('In order to continue, please type exactly the confirmation string %r' % (confirm_str, ))
if six.PY2:
input_func = raw_input
else:
input_func = input
response_str = input_func('Confirmation string [Empty to abort]: ')
response_str = response_str.lower()
assert confirm_str == response_str, 'Confirmation string mismatch, aborting...'
############################################################################
# Sync images
image_uuid_list = ibs.get_image_uuids(gid_list)
image_uuid_list_ = _get('/api/image/json/')
missing_gid_list = [
gid
for gid, image_uuid in list(zip(gid_list, image_uuid_list))
if image_uuid not in image_uuid_list_
]
num_missing = len(missing_gid_list)
if num_missing > 0:
print('Need to push %d images...' % (num_missing, ))
ibs._detect_remote_push_images(missing_gid_list)
print('...pushed')
# Filter only missing
gid_list_ = missing_gid_list if only_sync_missing_images else gid_list
image_uuid_list_ = ibs.get_image_uuids(gid_list_)
############################################################################
# Sync imageset
print('Setting imageset...')
ibs._detect_remote_push_imageset(image_uuid_list_)
print('...set')
############################################################################
# Sync annots
aid_list = ut.flatten(ibs.get_image_aids(gid_list_))
annot_uuid_list = ibs.get_annot_uuids(aid_list)
annot_uuid_list_ = _get('/api/annot/json/')
missing_aid_list = [
aid
for aid, annot_uuid in list(zip(aid_list, annot_uuid_list))
if annot_uuid not in annot_uuid_list_
]
num_missing = len(missing_aid_list)
if num_missing > 0:
print('Need to push %d annots...' % (num_missing, ))
ibs._detect_remote_push_annots(missing_aid_list)
print('...pushed')
############################################################################
# Sync annotation metadata
print('Synching annotation metadata...')
if len(annot_uuid_list) > 0:
ibs._detect_remote_push_annot_metadata(annot_uuid_list)
print('...synched')
############################################################################
# Sync parts
part_rowid_list = ut.flatten(ibs.get_annot_part_rowids(aid_list))
part_uuid_list = ibs.get_part_uuids(part_rowid_list)
part_uuid_list_ = _get('/api/part/json/')
missing_part_rowid_list = [
part_rowid
for part_rowid, part_uuid in list(zip(part_rowid_list, part_uuid_list))
if part_uuid not in part_uuid_list_
]
num_missing = len(missing_part_rowid_list)
if num_missing > 0:
print('Need to push %d parts...' % (num_missing, ))
ibs._detect_remote_push_parts(missing_part_rowid_list)
print('...pushed')
############################################################################
# Sync part metadata
print('Synching part metadata...')
if len(part_uuid_list) > 0:
ibs._detect_remote_push_part_metadata(part_uuid_list)
print('...synched')
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.web.app
python -m ibeis.web.app --allexamples
python -m ibeis.web.app --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
|
import json
from django.http.response import HttpResponseServerError
from corehq.apps.commtrack.exceptions import DuplicateProductCodeException
from corehq.util.files import file_extention_from_filename
from couchexport.writers import Excel2007ExportWriter
from couchexport.models import Format
from couchdbkit import ResourceNotFound
from corehq.apps.commtrack.util import get_or_create_default_program
from django.views.decorators.http import require_POST
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.utils.translation import ugettext as _, ugettext_noop
from django.contrib import messages
from soil.exceptions import TaskFailedError
from soil.util import expose_cached_download, get_download_context
from StringIO import StringIO
from dimagi.utils.web import json_response
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.products.tasks import import_products_async
from corehq.apps.products.models import Product, SQLProduct
from corehq.apps.products.forms import ProductForm
from corehq.apps.commtrack.views import BaseCommTrackManageView
from corehq.apps.commtrack.util import encode_if_needed
from corehq.apps.programs.models import Program
from corehq.apps.custom_data_fields import (CustomDataFieldsDefinition,
CustomDataEditor,
CustomDataModelMixin)
from corehq.apps.hqwebapp.utils import get_bulk_upload_form
from corehq.apps.domain.decorators import (
domain_admin_required,
login_and_domain_required,
)
@require_POST
@domain_admin_required
def archive_product(request, domain, prod_id, archive=True):
"""
Archive product
"""
product = Product.get(prod_id)
product.archive()
return json_response({
'success': True,
'message': _("Product '{product_name}' has successfully been {action}.").format(
product_name=product.name,
action="archived",
)
})
@require_POST
@domain_admin_required
def unarchive_product(request, domain, prod_id, archive=True):
"""
Unarchive product
"""
product = Product.get(prod_id)
try:
product.unarchive()
except DuplicateProductCodeException:
success = False
message = _("Another product is already using the Product ID '{product_id}'").format(
product_id=product.code
)
else:
success = True
message = _("Product '{product_name}' has successfully been {action}.").format(
product_name=product.name,
action="unarchived",
)
return json_response({
'success': success,
'message': message,
'product_id': prod_id
})
class ProductListView(BaseCommTrackManageView):
    # TODO: mobile workers share this type of view too; maybe there should be a shared base class for this?
urlname = 'commtrack_product_list'
template_name = 'products/manage/products.html'
page_title = ugettext_noop("Products")
DEFAULT_LIMIT = 10
@property
def page(self):
return int(self.request.GET.get('page', 1))
@property
def limit(self):
return int(self.request.GET.get('limit', self.DEFAULT_LIMIT))
@property
def show_only_inactive(self):
return bool(json.loads(self.request.GET.get('show_inactive', 'false')))
@property
def product_queryset(self):
return (SQLProduct.objects
.filter(domain=self.domain,
is_archived=self.show_only_inactive)
.order_by('name'))
@property
@memoized
def total(self):
return self.product_queryset.count()
@property
def page_context(self):
return {
'data_list': {
'page': self.page,
'limit': self.limit,
'total': self.total
},
            'archive_help_text': _(
                "Archive a product to stop showing data for it in "
                "reports and on mobile applications. Archiving is "
                "completely reversible, so you can always reactivate "
                "it later."
            ),
'show_inactive': self.show_only_inactive,
'pagination_limit_options': range(self.DEFAULT_LIMIT, 51, self.DEFAULT_LIMIT)
}
class FetchProductListView(ProductListView):
urlname = 'commtrack_product_fetch'
@property
def product_data(self):
start = (self.page - 1) * self.limit
end = start + self.limit
return map(self.make_product_dict, self.product_queryset[start:end])
def make_product_dict(self, product):
archive_config = self.get_archive_config()
return {
'name': product.name,
'product_id': product.product_id,
'code': product.code,
'unit': product.units,
'description': product.description,
'program': self.program_name(product),
'edit_url': reverse(
'commtrack_product_edit',
kwargs={'domain': self.domain, 'prod_id': product.product_id}
),
'archive_action_desc': archive_config['archive_text'],
'archive_action_text': archive_config['archive_action'],
'archive_url': reverse(
archive_config['archive_url'],
kwargs={'domain': self.domain, 'prod_id': product.product_id}
),
}
@property
@memoized
def programs_by_id(self):
return {p._id: p.name for p in Program.by_domain(self.domain)}
def program_name(self, product):
if product.program_id:
return self.programs_by_id[product.program_id]
else:
program = get_or_create_default_program(self.domain)
product.program_id = program.get_id
product.save()
return program.name
def get_archive_config(self):
if self.show_only_inactive:
return {
'archive_action': _("Un-Archive"),
'archive_url': 'unarchive_product',
'archive_text': _(
"This will re-activate the product, and the product will "
"show up in reports again."
),
}
else:
return {
'archive_action': _("Archive"),
'archive_url': 'archive_product',
'archive_text': _(
"As a result of archiving, this product will no longer "
"appear in reports. This action is reversable; you can "
"reactivate this product by viewing Show Archived "
"Products and clicking 'Unarchive'."
),
}
def get(self, request, *args, **kwargs):
return HttpResponse(json.dumps({
'success': True,
'current_page': int(self.page),
'data_list': self.product_data,
}), 'text/json')
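# Worked example of the pagination arithmetic used in
# FetchProductListView.product_data above (illustration only, not used by the
# views): page=3 with limit=10 selects queryset rows [20:30].
def _example_page_slice(page, limit):
    start = (page - 1) * limit
    end = start + limit
    return start, end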
class NewProductView(BaseCommTrackManageView):
urlname = 'commtrack_product_new'
page_title = ugettext_noop("New Product")
template_name = 'products/manage/product.html'
@property
@memoized
def product(self):
return Product(domain=self.domain)
@property
def parent_pages(self):
return [{
'title': ProductListView.page_title,
'url': reverse(ProductListView.urlname, args=[self.domain]),
}]
@property
@memoized
def new_product_form(self):
if self.request.method == 'POST':
return ProductForm(self.product, self.request.POST)
return ProductForm(self.product)
@property
def page_context(self):
return {
'product': self.product,
'form': self.new_product_form,
'data_fields_form': self.custom_data.form,
}
@property
@memoized
def custom_data(self):
return CustomDataEditor(
field_view=ProductFieldsView,
domain=self.domain,
required_only=True,
post_dict=self.request.POST if self.request.method == "POST" else None,
)
def post(self, request, *args, **kwargs):
if all([self.new_product_form.is_valid(),
self.custom_data.is_valid()]):
self.product.product_data = self.custom_data.get_data_to_save()
self.new_product_form.save(self.product)
messages.success(request, _("Product saved!"))
return HttpResponseRedirect(reverse(ProductListView.urlname, args=[self.domain]))
return self.get(request, *args, **kwargs)
class UploadProductView(BaseCommTrackManageView):
urlname = 'commtrack_upload_products'
page_title = ugettext_noop("Import Products")
template_name = 'products/manage/upload_products.html'
@property
def page_context(self):
context = {
'bulk_upload': {
"download_url": reverse("product_export", args=(self.domain,)),
"adjective": _("product"),
"plural_noun": _("products"),
},
}
context.update({
'bulk_upload_form': get_bulk_upload_form(context),
})
return context
@property
def parent_pages(self):
return [{
'title': ProductListView.page_title,
'url': reverse(ProductListView.urlname, args=[self.domain]),
}]
def post(self, request, *args, **kwargs):
upload = request.FILES.get('bulk_upload_file')
if not upload:
messages.error(request, _('no file uploaded'))
return self.get(request, *args, **kwargs)
elif not upload.name.endswith('.xlsx'):
messages.error(request, _('please use xlsx format only'))
return self.get(request, *args, **kwargs)
domain = args[0]
# stash this in soil to make it easier to pass to celery
file_ref = expose_cached_download(
upload.read(),
expiry=1*60*60,
file_extension=file_extention_from_filename(upload.name)
)
task = import_products_async.delay(
domain,
file_ref.download_id,
)
file_ref.set_task(task)
return HttpResponseRedirect(
reverse(
ProductImportStatusView.urlname,
args=[domain, file_ref.download_id]
)
)
class ProductImportStatusView(BaseCommTrackManageView):
urlname = 'product_import_status'
page_title = ugettext_noop('Product Import Status')
def get(self, request, *args, **kwargs):
context = super(ProductImportStatusView, self).main_context
context.update({
'domain': self.domain,
'download_id': kwargs['download_id'],
'poll_url': reverse('product_importer_job_poll', args=[self.domain, kwargs['download_id']]),
'title': _("Product Import Status"),
'progress_text': _("Importing your data. This may take some time..."),
'error_text': _("Problem importing data! Please try again or report an issue."),
})
return render(request, 'style/soil_status_full.html', context)
def page_url(self):
return reverse(self.urlname, args=self.args, kwargs=self.kwargs)
@login_and_domain_required
def product_importer_job_poll(request, domain, download_id,
template="products/manage/partials/product_upload_status.html"):
try:
context = get_download_context(download_id, check_state=True)
except TaskFailedError:
return HttpResponseServerError()
context.update({
'on_complete_short': _('Import complete.'),
'on_complete_long': _('Product importing has finished'),
})
return render(request, template, context)
def download_products(request, domain):
def _parse_custom_properties(product):
product_data_model = CustomDataFieldsDefinition.get_or_create(
domain,
ProductFieldsView.field_type
)
product_data_fields = [f.slug for f in product_data_model.fields]
model_data = {}
uncategorized_data = {}
for prop, val in product.product_data.iteritems():
if prop in product_data_fields:
model_data['data: ' + prop] = encode_if_needed(val)
else:
uncategorized_data['uncategorized_data: ' + prop] = encode_if_needed(val)
return model_data, uncategorized_data
def _get_products(domain):
for p_doc in iter_docs(Product.get_db(), Product.ids_by_domain(domain)):
# filter out archived products from export
if not ('is_archived' in p_doc and p_doc['is_archived']):
yield Product.wrap(p_doc)
def _build_row(keys, product):
row = []
for key in keys:
row.append(product.get(key, '') or '')
return row
file = StringIO()
writer = Excel2007ExportWriter()
product_keys = [
'id',
'name',
'unit',
'product_id',
'description',
'category',
'program_id',
'cost',
]
model_data = set()
uncategorized_data = set()
products = []
for product in _get_products(domain):
product_dict = product.to_dict()
product_model, product_uncategorized = _parse_custom_properties(product)
model_data.update(product_model.keys())
uncategorized_data.update(product_uncategorized.keys())
product_dict.update(product_model)
product_dict.update(product_uncategorized)
products.append(product_dict)
keys = product_keys + list(model_data) + list(uncategorized_data)
writer.open(
header_table=[
('products', [keys])
],
file=file,
)
for product in products:
writer.write([('products', [_build_row(keys, product)])])
writer.close()
response = HttpResponse(content_type=Format.from_format('xlsx').mimetype)
response['Content-Disposition'] = 'attachment; filename="products.xlsx"'
response.write(file.getvalue())
return response
class EditProductView(NewProductView):
urlname = 'commtrack_product_edit'
page_title = ugettext_noop("Edit Product")
@property
def product_id(self):
try:
return self.kwargs['prod_id']
except KeyError:
raise Http404()
@property
@memoized
def product(self):
try:
return Product.get(self.product_id)
except ResourceNotFound:
raise Http404()
@property
def page_name(self):
return _("Edit %s") % self.product.name
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain, self.product_id])
@property
@memoized
def custom_data(self):
return CustomDataEditor(
field_view=ProductFieldsView,
domain=self.domain,
existing_custom_data=self.product.product_data,
post_dict=self.request.POST if self.request.method == "POST" else None,
)
class ProductFieldsView(CustomDataModelMixin, BaseCommTrackManageView):
urlname = 'product_fields_view'
field_type = 'ProductFields'
entity_string = _("Product")
template_name = "custom_data_fields/custom_data_fields.html"
|
|
"""
Utilities for the manager cli's db operations
"""
import copy
import importlib
import json
import time
import anchore_engine.db
from anchore_engine.db.entities.common import normalize_db_params
from anchore_engine.subsys import logger
ENGINE_UPGRADE_MODULE_NAME = "anchore_engine.db.entities.upgrade"
_db_context = {"params": {}, "retries": 3}
def init_db_context(db_connect, db_use_ssl, db_timeout, db_connect_timeout, db_retries):
"""
Initialize the db context
:param config:
:param db_connect:
:param db_use_ssl:
:param db_timeout:
:param db_connect_timeout:
:param db_retries:
:return:
"""
global _db_context
# do some DB connection/pre-checks here
_db_context["params"].update(
make_db_params(
db_connect=db_connect,
db_use_ssl=db_use_ssl,
db_timeout=db_timeout,
db_connect_timeout=db_connect_timeout,
)
)
_db_context["retries"] = db_retries
return _db_context
def db_context():
return _db_context
def make_db_params(
db_connect=None,
db_use_ssl=False,
db_timeout=30,
db_connect_timeout=120,
db_pool_size=30,
db_pool_max_overflow=100,
):
db_connect_args = {
"timeout": db_timeout,
"ssl": db_use_ssl,
}
db_params = {
"db_connect": db_connect,
"db_connect_args": db_connect_args,
"db_pool_size": db_pool_size,
"db_pool_max_overflow": db_pool_max_overflow,
}
return normalize_db_params(db_params)
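# Illustrative sketch only: shows how the connection settings are grouped
# before normalize_db_params() is applied. The connection string is a made-up
# placeholder; what normalize_db_params() does with it is defined in
# anchore_engine.db.entities.common.
def _example_make_db_params():
    return make_db_params(
        db_connect="postgresql://anchore:anchore@localhost:5432/anchore",
        db_use_ssl=False,
        db_timeout=30,
        db_connect_timeout=120,
    )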
def connect_database(db_params, db_retries=1):
# db_connect can have secrets - remove them before logging
loggable_db_params = copy.deepcopy(db_params)
del loggable_db_params["db_connect"]
logger.info("DB params: %s", json.dumps(loggable_db_params))
rc = anchore_engine.db.entities.common.do_connect(db_params)
logger.info("DB connection configured: %s", str(rc))
db_connected = False
last_db_connect_err = ""
for i in range(0, int(db_retries)):
logger.info("DB attempting to connect...")
try:
rc = anchore_engine.db.entities.common.test_connection()
logger.info("DB connected: %s", str(rc))
db_connected = True
break
except Exception as err:
last_db_connect_err = str(err)
if db_retries > 1:
logger.warn(
"DB connection failed, retrying - exception: %s",
str(last_db_connect_err),
)
time.sleep(5)
    if not db_connected:
        raise Exception(
            "DB connection failed - exception: " + str(last_db_connect_err)
        )
def init_database(
upgrade_module=None, localconfig=None, do_db_compatibility_check=False
):
code_versions = db_versions = None
if upgrade_module:
if do_db_compatibility_check and "do_db_compatibility_check" in dir(
upgrade_module
):
logger.info("DB compatibility check: running...")
upgrade_module.do_db_compatibility_check()
logger.info("DB compatibility check success")
else:
logger.info("DB compatibility check: skipping...")
code_versions, db_versions = upgrade_module.get_versions()
if code_versions and not db_versions:
logger.info("DB not initialized: initializing tables...")
upgrade_module.do_create_tables()
upgrade_module.do_db_bootstrap(
localconfig=localconfig,
db_versions=db_versions,
code_versions=code_versions,
)
# upgrade_module.do_version_update(db_versions, code_versions)
code_versions, db_versions = upgrade_module.get_versions()
if localconfig and "do_db_post_actions" in dir(upgrade_module):
logger.info("DB post actions: running...")
upgrade_module.do_db_post_actions(localconfig=localconfig)
return code_versions, db_versions
def db_preflight(db_params: dict = None, db_retries=3):
"""
Check the configuration and verify the db is running
:param config:
:param db_connect:
:param db_use_ssl:
:param db_timeout:
:param db_connect_timeout:
:param db_retries:
:return:
"""
# do some DB connection/pre-checks here
connected_db_params = connect_database(db_params, db_retries=db_retries)
return connected_db_params
def needs_upgrade(code_versions, db_versions):
"""
Check if an upgrade is needed
:param code_versions:
:param db_versions:
:return: None if no db upgrade needed, or tuple of (code db version (str), running db version (str)) if an upgrade is needed
"""
code_db_version = code_versions.get("db_version", None)
running_db_version = db_versions.get("db_version", None)
    if not code_db_version or not running_db_version:
        raise Exception(
            "cannot get version information (code_db_version=%s running_db_version=%s)"
            % (code_db_version, running_db_version)
        )
if code_db_version == running_db_version:
return None
else:
return code_db_version, running_db_version
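# Pure-function sketch of needs_upgrade() with made-up version dicts: equal
# versions give None, differing versions give the (code, running) tuple.
def _example_needs_upgrade():
    same = needs_upgrade({"db_version": "0.0.13"}, {"db_version": "0.0.13"})
    behind = needs_upgrade({"db_version": "0.0.13"}, {"db_version": "0.0.12"})
    return same, behind  # expected (None, ('0.0.13', '0.0.12'))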
def load_upgrade_module(module_name: str):
"""
Load the named module, verifying it, and return it loaded
:param module_name:
:return:
"""
try:
logger.info("Loading DB upgrade routines from module %s", module_name)
return importlib.import_module(module_name)
except Exception as err:
raise Exception(
"Input module ("
+ str(module_name)
+ ") cannot be found/imported - exception: "
+ str(err)
)
def upgrade_db(code_versions: dict, db_versions: dict, upgrade_module):
"""
    Run the upgrade process for the given module. Raises an exception on errors; the caller must handle it and exit cleanly.
Expects that the db has been initialized already via call to init_database() or similar
:param code_versions: dict with versions for the code found installed
    :param db_versions: dict with the versions found stored in the db (typically returned from an init_database() call)
:param upgrade_module:
:return: running db_version after upgrade
"""
    # Load the module for upgrade (provides the upgrade routines, etc.)
module = upgrade_module
versions_tuple = needs_upgrade(code_versions, db_versions)
if versions_tuple:
code_db_version = versions_tuple[0]
running_db_version = versions_tuple[1]
logger.info(
"Detected anchore-engine version %s, running DB version %s.",
code_db_version,
running_db_version,
)
logger.info("Performing upgrade.")
# perform the upgrade logic here
rc = module.run_upgrade()
if rc:
logger.info("Upgrade completed")
else:
logger.info("No upgrade necessary. Completed.")
else:
logger.info("Code and DB versions are in sync. No upgrade required")
return True
def do_upgrade(skip_db_compat_check: bool = False, no_auto_upgrade=False):
    """
    :param skip_db_compat_check: bool to indicate if the preflight check for the db engine type and version (e.g. postgres v9.6+) should be skipped
    :param no_auto_upgrade: bool to skip the automatic upgrade step after the db is initialized
    :return:
    """
upgrade_module = load_upgrade_module(ENGINE_UPGRADE_MODULE_NAME)
code_versions, db_versions = init_database(
upgrade_module=upgrade_module,
do_db_compatibility_check=(not skip_db_compat_check),
)
if not no_auto_upgrade:
upgrade_db(code_versions, db_versions, upgrade_module)
return True
|
|
# Test case for the select.poll() function
import os
import random
import select
import _testcapi
try:
import threading
except ImportError:
threading = None
import time
import unittest
from test.support import TESTFN, run_unittest, reap_threads
try:
select.poll
except AttributeError:
raise unittest.SkipTest("select.poll not defined -- skipping test_poll")
def find_ready_matching(ready, flag):
match = []
for fd, mode in ready:
if mode & flag:
match.append(fd)
return match
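# For example, find_ready_matching([(3, select.POLLIN), (4, select.POLLOUT)], select.POLLIN)
# returns [3]: only descriptors whose reported event mask includes the flag are kept.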
class PollTests(unittest.TestCase):
def test_poll1(self):
# Basic functional test of poll object
# Create a bunch of pipe and test that poll works with them.
p = select.poll()
NUM_PIPES = 12
MSG = b" This is a test."
MSG_LEN = len(MSG)
readers = []
writers = []
r2w = {}
w2r = {}
for i in range(NUM_PIPES):
rd, wr = os.pipe()
p.register(rd)
p.modify(rd, select.POLLIN)
p.register(wr, select.POLLOUT)
readers.append(rd)
writers.append(wr)
r2w[rd] = wr
w2r[wr] = rd
bufs = []
while writers:
ready = p.poll()
ready_writers = find_ready_matching(ready, select.POLLOUT)
if not ready_writers:
raise RuntimeError("no pipes ready for writing")
wr = random.choice(ready_writers)
os.write(wr, MSG)
ready = p.poll()
ready_readers = find_ready_matching(ready, select.POLLIN)
if not ready_readers:
raise RuntimeError("no pipes ready for reading")
rd = random.choice(ready_readers)
buf = os.read(rd, MSG_LEN)
self.assertEqual(len(buf), MSG_LEN)
bufs.append(buf)
            os.close(r2w[rd]); os.close(rd)
            p.unregister(r2w[rd])
            p.unregister(rd)
writers.remove(r2w[rd])
self.assertEqual(bufs, [MSG] * NUM_PIPES)
    def poll_unit_tests(self):
        # Note: not named test_*, so unittest does not collect this method automatically.
        # poll() reports POLLNVAL for an invalid file descriptor
FD = 42
try:
os.close(FD)
except OSError:
pass
p = select.poll()
p.register(FD)
r = p.poll()
self.assertEqual(r[0], (FD, select.POLLNVAL))
f = open(TESTFN, 'w')
fd = f.fileno()
p = select.poll()
p.register(f)
r = p.poll()
self.assertEqual(r[0][0], fd)
f.close()
r = p.poll()
self.assertEqual(r[0], (fd, select.POLLNVAL))
os.unlink(TESTFN)
# type error for invalid arguments
p = select.poll()
self.assertRaises(TypeError, p.register, p)
self.assertRaises(TypeError, p.unregister, p)
# can't unregister non-existent object
p = select.poll()
self.assertRaises(KeyError, p.unregister, 3)
# Test error cases
pollster = select.poll()
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
self.assertRaises(TypeError, pollster.register, Nope(), 0)
self.assertRaises(TypeError, pollster.register, Almost(), 0)
# Another test case for poll(). This is copied from the test case for
# select(), modified to use poll() instead.
def test_poll2(self):
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
p = os.popen(cmd, 'r')
pollster = select.poll()
        pollster.register(p, select.POLLIN)
for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
fdlist = pollster.poll(tout)
if (fdlist == []):
continue
fd, flags = fdlist[0]
if flags & select.POLLHUP:
line = p.readline()
if line != "":
self.fail('error: pipe seems to be closed, but still returns data')
continue
elif flags & select.POLLIN:
line = p.readline()
if not line:
break
continue
else:
self.fail('Unexpected return value from select.poll: %s' % fdlist)
p.close()
def test_poll3(self):
# test int overflow
pollster = select.poll()
pollster.register(1)
self.assertRaises(OverflowError, pollster.poll, 1 << 64)
x = 2 + 3
if x != 5:
self.fail('Overflow must have occurred')
pollster = select.poll()
# Issue 15989
self.assertRaises(OverflowError, pollster.register, 0,
_testcapi.SHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.register, 0,
_testcapi.USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, _testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, _testcapi.UINT_MAX + 1)
@unittest.skipUnless(threading, 'Threading required for this test.')
@reap_threads
def test_threaded_poll(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
rfds = []
for i in range(10):
fd = os.dup(r)
self.addCleanup(os.close, fd)
rfds.append(fd)
pollster = select.poll()
for fd in rfds:
pollster.register(fd, select.POLLIN)
t = threading.Thread(target=pollster.poll)
t.start()
try:
time.sleep(0.5)
# trigger ufds array reallocation
for fd in rfds:
pollster.unregister(fd)
pollster.register(w, select.POLLOUT)
self.assertRaises(RuntimeError, pollster.poll)
finally:
# and make the call to poll() from the thread return
os.write(w, b'spam')
t.join()
def test_main():
run_unittest(PollTests)
if __name__ == '__main__':
test_main()
|
|
import sys
import pytest
import pprint
import pylibefp
from qcelemental.testing import compare, compare_recursive, compare_values
from systems import *
def blank_ene():
fields = [
'charge_penetration', 'disp', 'dispersion', 'elec', 'electrostatic', 'electrostatic_point_charges',
'exchange_repulsion', 'pol', 'polarization', 'xr'
]
ene = {f: 0.0 for f in fields}
return ene
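# blank_ene() gives every EFP energy component a 0.0 baseline; each test below then
# overwrites only the terms it expects to be non-zero (plus a 'total' key) before
# comparing against get_energy() with compare_recursive.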
def test_elec_1a():
asdf = system_1()
asdf.set_opts({'elec': True, 'elec_damp': 'screen'})
asdf.compute()
ene = asdf.get_energy()
expected_ene = blank_ene()
expected_ene['elec'] = expected_ene['electrostatic'] = expected_ene['total'] = 0.0002900482
assert compare_recursive(expected_ene, ene, atol=1.e-6)
def test_elec_1b():
asdf = system_1()
asdf.set_opts({'elec': True, 'elec_damp': 'overlap'})
asdf.compute()
ene = asdf.get_energy()
elst = 0.0002910961
cp = -8.066354689359154e-07
expected_ene = blank_ene()
expected_ene['elec'] = expected_ene['total'] = elst
expected_ene['charge_penetration'] = cp
expected_ene['electrostatic'] = elst - cp
assert compare_recursive(expected_ene, ene, atol=1.e-6)
def test_pol_1a():
asdf = system_1()
opts = {'elec': True, 'pol': True, 'elec_damp': 'screen'}
asdf.set_opts(opts)
asdf.compute()
ene = asdf.get_energy()
elec = 0.0002900482
pol = 0.0002777238 - elec
expected_ene = blank_ene()
expected_ene['elec'] = expected_ene['electrostatic'] = elec
expected_ene['pol'] = expected_ene['polarization'] = pol
expected_ene['total'] = elec + pol
pprint.pprint(opts)
assert compare_recursive(expected_ene, ene, atol=1.e-6)
def test_pol_1b():
asdf = system_1()
asdf.set_opts({'pol': True, 'elec_damp': 'screen', 'elec': True, 'pol_driver': 'direct'})
asdf.compute()
ene = asdf.get_energy()
elec = 0.0002900478
pol = 0.0002777238 - elec
expected_ene = blank_ene()
expected_ene['elec'] = expected_ene['electrostatic'] = elec
expected_ene['pol'] = expected_ene['polarization'] = pol
expected_ene['total'] = elec + pol
assert compare_recursive(expected_ene, ene, atol=1.e-6)
def test_disp_1a():
asdf = system_1()
asdf.set_opts({'disp': True, 'disp_damp': 'tt'})
asdf.compute()
ene = asdf.get_energy()
expected_ene = blank_ene()
expected_ene['disp'] = expected_ene['dispersion'] = expected_ene['total'] = -0.0000989033
assert compare_recursive(expected_ene, ene, atol=1.e-6)
def test_disp_1b():
asdf = system_1()
asdf.set_opts({'disp': True, 'disp_damp': 'overlap'})
asdf.compute()
ene = asdf.get_energy()
expected_ene = blank_ene()
expected_ene['disp'] = expected_ene['dispersion'] = expected_ene['total'] = -0.0001007275
assert compare_recursive(expected_ene, ene, atol=1.e-6)
def test_xr_1():
asdf = system_1()
asdf.set_opts({'xr': True})
asdf.compute()
ene = asdf.get_energy()
expected_ene = blank_ene()
expected_ene['xr'] = expected_ene['exchange_repulsion'] = expected_ene['total'] = 0.0000134716
assert compare_recursive(expected_ene, ene, atol=1.e-6)
def test_total_1a():
asdf = system_1()
asdf.set_opts({
'elec': True,
'elec_damp': 'screen',
'xr': True,
'pol': True, # 'pol_damp': 'tt',
'disp': True,
'disp_damp': 'tt'
})
asdf.compute()
ene = asdf.get_energy()
pprint.pprint(ene)
print('<<< get_opts(): ', asdf.get_opts(), '>>>')
#print('<<< summary(): ', asdf.summary(), '>>>')
print('<<< get_energy():', ene, '>>>')
print('<<< get_atoms(): ', asdf.get_atoms(), '>>>')
print(asdf.energy_summary())
print(asdf.geometry_summary(units_to_bohr=b2a))
print(asdf.geometry_summary(units_to_bohr=1.0))
expected_ene = blank_ene()
expected_ene['elec'] = expected_ene['electrostatic'] = 0.0002900482
expected_ene['xr'] = expected_ene['exchange_repulsion'] = 0.0000134716
expected_ene['pol'] = expected_ene['polarization'] = 0.0002777238 - expected_ene['electrostatic']
expected_ene['disp'] = expected_ene['dispersion'] = -0.0000989033
expected_ene['total'] = 0.0001922903
assert compare(2, asdf.get_frag_count(), sys._getframe().f_code.co_name + ': nfrag')
assert compare_values(0.0, asdf.get_frag_charge(1), sys._getframe().f_code.co_name + ': f_chg', atol=1.e-6)
assert compare(1, asdf.get_frag_multiplicity(1), sys._getframe().f_code.co_name + ': f_mult')
assert compare('NH3', asdf.get_frag_name(1), sys._getframe().f_code.co_name + ': f_name')
assert compare_recursive(expected_ene, ene, sys._getframe().f_code.co_name + ': ene', atol=1.e-6)
def test_elec_2a():
asdf = system_2()
asdf.set_opts({'elec': True, 'elec_damp': 'screen'})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(0.0015865516, ene['elec'], atol=1.e-6)
def test_elec_2b():
asdf = system_2()
asdf.set_opts({'elec': True, 'elec_damp': 'overlap'})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(0.0017049246, ene['elec'], atol=1.e-6)
def test_pol_2a():
asdf = system_2()
asdf.set_opts({'elec': True, 'pol': True, 'elec_damp': 'screen'})
asdf.compute()
ene = asdf.get_energy()
pprint.pprint(ene)
assert compare_values(0.0013685212, ene['total'], atol=1.e-6)
def test_pol_2b():
asdf = system_2()
asdf.set_opts({'elec': True, 'pol': True, 'elec_damp': 'screen', 'pol_driver': 'direct'})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(0.0013685212, ene['total'], atol=1.e-6)
def test_disp_2a():
asdf = system_2()
asdf.set_opts({'disp': True, 'disp_damp': 'tt'})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(-0.0014688094, ene['disp'], atol=1.e-6)
def test_disp_2b():
asdf = system_2()
asdf.set_opts({'disp': True, 'disp_damp': 'overlap'})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(-0.0015801770, ene['disp'], atol=1.e-6)
def test_xr_2():
asdf = system_2()
asdf.set_opts({'xr': True})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(0.0008443933, ene['xr'], atol=1.e-6)
def test_total_2a():
asdf = system_2()
asdf.set_opts({'elec': True, 'pol': True, 'disp': True, 'xr': True, 'elec_damp': 'screen', 'disp_damp': 'tt'})
asdf.compute()
ene = asdf.get_energy()
assert compare(5, asdf.get_frag_count(), sys._getframe().f_code.co_name + ': nfrag')
assert compare_values(0.0007440865, ene['total'], atol=1.e-6)
def test_elec_3a():
asdf = system_3()
asdf.set_opts({'elec': True, 'elec_damp': 'screen'})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(-0.0039531505, ene['elec'], atol=1.e-6)
def test_elec_3b():
asdf = system_3()
asdf.set_opts({'elec': True, 'elec_damp': 'overlap'})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(0.0023592829, ene['elec'], atol=1.e-6)
def test_pol_3a():
asdf = system_3()
asdf.set_opts({'elec': True, 'pol': True, 'elec_damp': 'screen', 'pol_damp': 'off'})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(-0.0066095992, ene['total'], atol=1.e-6)
def test_pol_3b():
asdf = system_3()
asdf.set_opts({'elec': True, 'pol': True, 'elec_damp': 'screen', 'pol_damp': 'off', 'pol_driver': 'direct'})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(-0.0066095992, ene['total'], atol=1.e-6)
def test_disp_3a():
asdf = system_3()
asdf.set_opts({'disp': True, 'disp_damp': 'tt'})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(-0.0173897265, ene['disp'], atol=1.e-6)
def test_disp_3b():
asdf = system_3()
asdf.set_opts({'disp': True, 'disp_damp': 'overlap'})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(-0.0220107872, ene['disp'], atol=1.e-6)
def test_xr_3():
asdf = system_3()
asdf.set_opts({'xr': True})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(0.0301402098, ene['xr'], atol=1.e-5)
def test_total_3a():
asdf = system_3()
asdf.set_opts({
'elec': True,
'pol': True,
'disp': True,
'xr': True,
'elec_damp': 'screen',
'disp_damp': 'tt',
'pol_damp': 'tt'
})
asdf.compute()
ene = asdf.get_energy()
assert compare(9, asdf.get_frag_count(), sys._getframe().f_code.co_name + ': nfrag')
assert compare_values(0.0061408841, ene['total'], sys._getframe().f_code.co_name, atol=1.e-5)
def test_total_4a():
asdf = system_4()
asdf.set_opts({
'elec': True,
'pol': True,
'disp': True,
'xr': True,
'elec_damp': 'screen',
'disp_damp': 'tt',
'pol_damp': 'tt'
})
asdf.compute()
ene = asdf.get_energy()
nfrags = ['ACETONE', 'C2H5OH', 'C6H6', 'CCL4', 'CH3OH', 'CH4', 'CL2', 'DCM', 'DMSO', 'H2', 'H2O', 'NH3']
mfrags = [1 for fr in range(12)]
cfrags = [0.0 for fr in range(12)]
tnm = sys._getframe().f_code.co_name
assert compare(12, asdf.get_frag_count(), sys._getframe().f_code.co_name + ': nfrag')
assert compare_recursive({'dummy': cfrags}, {'dummy': asdf.get_frag_charge()}, tnm + ': f_chg', atol=1.e-2)
assert compare_recursive({'dummy': mfrags}, {'dummy': asdf.get_frag_multiplicity()}, tnm + ': f_mult', atol=1.e-2)
assert compare_recursive({'dummy': nfrags}, {'dummy': asdf.get_frag_name()}, tnm + ': f_names', atol=1.e-2)
assert compare_values(-0.0095597483, ene['total'], sys._getframe().f_code.co_name, atol=1.e-5)
def test_total_4b():
asdf = system_4()
asdf.set_opts({
'elec': True,
'pol': True,
'disp': True,
'xr': True,
'elec_damp': 'overlap',
'disp_damp': 'overlap',
'pol_damp': 'tt'
})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(-0.0092400662, ene['total'], atol=1.e-5)
def test_total_4c():
asdf = system_4()
asdf.set_opts({
'elec': True,
'pol': True,
'disp': True,
'xr': True,
'elec_damp': 'off',
'disp_damp': 'off',
'pol_damp': 'tt'
})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(-0.0091278725, ene['total'], atol=1.e-5)
def test_total_4d():
asdf = system_4()
asdf.set_opts({
'elec': True,
'pol': True,
'disp': True,
'xr': True,
'elec_damp': 'screen',
'disp_damp': 'tt',
'pol_damp': 'tt',
'pol_driver': 'direct'
})
asdf.compute()
ene = asdf.get_energy()
assert compare_values(-0.0095597483, ene['total'], atol=1.e-5)
if __name__ == '__main__':
test_total_4d()
|
|
# coding: utf-8
from __future__ import unicode_literals
from admitad.items.base import Item
__all__ = (
'Websites',
'WebsitesManage'
)
class Websites(Item):
"""
List of websites
"""
SCOPE = 'websites'
URL = Item.prepare_url('websites')
SINGLE_URL = Item.prepare_url('websites/%(website_id)s')
STATUS_NEW = 'new'
STATUS_PENDING = 'pending'
STATUS_ACTIVE = 'active'
STATUS_SUSPENDED = 'suspended'
STATUS_DECLINED = 'declined'
CAMPAIGN_STATUS_PENDING = 'pending'
CAMPAIGN_STATUS_ACTIVE = 'active'
CAMPAIGN_STATUS_DECLINED = 'declined'
CAMPAIGN_STATUS_DISABLED = 'disabled'
STATUS_LIST = [
STATUS_NEW, STATUS_PENDING, STATUS_ACTIVE,
STATUS_SUSPENDED, STATUS_DECLINED
]
CAMPAIGN_STATUS_LIST = [
CAMPAIGN_STATUS_PENDING, CAMPAIGN_STATUS_ACTIVE,
CAMPAIGN_STATUS_DECLINED, CAMPAIGN_STATUS_DISABLED
]
def get(self, **kwargs):
"""
Args:
status (str)
campaign_status (str)
limit (int)
offset (int)
"""
filtering = {
'filter_by': kwargs,
'available': {
'status': lambda x: x if x in self.STATUS_LIST else None,
'campaign_status': lambda x: x if x in self.CAMPAIGN_STATUS_LIST else None
}
}
return self.transport.get() \
.set_pagination(**kwargs) \
.set_filtering(filtering) \
.request(url=self.URL)
def getOne(self, _id, **kwargs):
"""
Args:
_id (int)
"""
requests_data = {
'url': self.SINGLE_URL,
'website_id': Item.sanitize_id(_id)
}
return self.transport.get().request(**requests_data)
class WebsitesManage(Item):
"""
Manage websites
"""
SCOPE = 'manage_websites'
CREATE_URL = Item.prepare_url('website/create')
UPDATE_URL = Item.prepare_url('website/update/%(website_id)s')
VERIFY_URL = Item.prepare_url('website/verify/%(website_id)s')
DELETE_URL = Item.prepare_url('website/delete/%(website_id)s')
CREATE_FIELDS = {
'name': lambda x: Item.sanitize_string_value(
x, 'name', max_length=200),
'kind': lambda x: Item.sanitize_string_value(
x, 'kind', max_length=20),
'language': lambda x: Item.sanitize_string_value(
x, 'language', max_length=2),
'adservice': lambda x: Item.sanitize_integer_value(
x, 'adservice', blank=True),
'site_url': lambda x: Item.sanitize_string_value(
x, 'site_url', max_length=255),
'description': lambda x: Item.sanitize_string_value(
x, 'description', max_length=20000, min_length=100),
'categories': lambda x: Item.sanitize_integer_array(
x, 'categories'),
'regions': lambda x: Item.sanitize_string_array(
x, 'regions', max_length=2),
'mailing_targeting': lambda x: Item.sanitize_bool_integer_value(
x, 'mailing_targeting', blank=True)
}
UPDATE_FIELDS = {
'name': lambda x: Item.sanitize_string_value(
x, 'name', max_length=200, blank=True),
'kind': lambda x: Item.sanitize_string_value(
x, 'kind', max_length=20, blank=True),
'language': lambda x: Item.sanitize_string_value(
x, 'language', max_length=2, blank=True),
'adservice': lambda x: Item.sanitize_integer_value(
x, 'adservice', blank=True),
'site_url': lambda x: Item.sanitize_string_value(
x, 'site_url', max_length=255, blank=True),
'description': lambda x: Item.sanitize_string_value(
x, 'description', max_length=20000, min_length=100, blank=True),
'categories': lambda x: Item.sanitize_integer_array(
x, 'categories', blank=True),
'regions': lambda x: Item.sanitize_string_array(
x, 'regions', max_length=2, blank=True),
'mailing_targeting': lambda x: Item.sanitize_bool_integer_value(
x, 'mailing_targeting', blank=True)
}
def create(self, **kwargs):
"""
Args:
name (str)
kind (str)
language (str)
adservice (int)
site_url (str)
description (str)
categories (list of int)
regions (list of str)
mailing_targeting (bool)
"""
data = Item.sanitize_fields(self.CREATE_FIELDS, **kwargs)
return self.transport.post().set_data(data).request(url=self.CREATE_URL)
def update(self, _id, **kwargs):
"""
Args:
_id (int)
name (str)
kind (str)
language (str)
adservice (int)
site_url (str)
description (str)
categories (list of int)
regions (list of str)
mailing_targeting (bool)
"""
data = Item.sanitize_fields(self.UPDATE_FIELDS, **kwargs)
request_data = {
'url': self.UPDATE_URL,
'website_id': Item.sanitize_id(_id)
}
return self.transport.post().set_data(data).request(**request_data)
def verify(self, _id):
"""
Args:
_id (int)
"""
request_data = {
'url': self.VERIFY_URL,
'website_id': Item.sanitize_id(_id)
}
return self.transport.post().request(**request_data)
def delete(self, _id):
"""
Args:
_id (int)
"""
request_data = {
'url': self.DELETE_URL,
'website_id': Item.sanitize_id(_id)
}
return self.transport.post().request(**request_data)
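# Hedged usage sketch (illustrative only): these Item subclasses are normally reached
# through an authenticated admitad client rather than instantiated directly, roughly:
#
#   client = ...  # an authorized admitad API client created elsewhere
#   active_sites = client.Websites.get(status=Websites.STATUS_ACTIVE, limit=10)
#   client.WebsitesManage.verify(website_id)  # website_id is a placeholder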
|
|
#!/usr/bin/env python
#
# xferfcn_input_test.py - test inputs to TransferFunction class
# jed-frey, 18 Feb 2017 (based on xferfcn_test.py)
import unittest
import numpy as np
from numpy import int, int8, int16, int32, int64
from numpy import float, float16, float32, float64, longdouble
from numpy import all, ndarray, array
from control.xferfcn import _clean_part
class TestXferFcnInput(unittest.TestCase):
"""These are tests for functionality of cleaning and validating XferFcnInput."""
# Tests for raising exceptions.
def test_clean_part_bad_input_type(self):
"""Give the part cleaner invalid input type."""
self.assertRaises(TypeError, _clean_part, [[0., 1.], [2., 3.]])
def test_clean_part_bad_input_type2(self):
"""Give the part cleaner another invalid input type."""
self.assertRaises(TypeError, _clean_part, [1, "a"])
def test_clean_part_scalar(self):
"""Test single scalar value."""
num = 1
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0], dtype=float))
def test_clean_part_list_scalar(self):
"""Test single scalar value in list."""
num = [1]
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0], dtype=float))
def test_clean_part_tuple_scalar(self):
"""Test single scalar value in tuple."""
        num = (1,)
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0], dtype=float))
def test_clean_part_list(self):
"""Test multiple values in a list."""
num = [1, 2]
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 2.0], dtype=float))
def test_clean_part_tuple(self):
"""Test multiple values in tuple."""
num = (1, 2)
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 2.0], dtype=float))
def test_clean_part_all_scalar_types(self):
"""Test single scalar value for all valid data types."""
for dtype in [int, int8, int16, int32, int64, float, float16, float32, float64, longdouble]:
num = dtype(1)
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0], dtype=float))
def test_clean_part_np_array(self):
"""Test multiple values in numpy array."""
num = np.array([1, 2])
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 2.0], dtype=float))
def test_clean_part_all_np_array_types(self):
"""Test scalar value in numpy array of ndim=0 for all data types."""
for dtype in [int, int8, int16, int32, int64, float, float16, float32, float64, longdouble]:
num = np.array(1, dtype=dtype)
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0], dtype=float))
def test_clean_part_all_np_array_types2(self):
"""Test numpy array for all types."""
for dtype in [int, int8, int16, int32, int64, float, float16, float32, float64, longdouble]:
num = np.array([1, 2], dtype=dtype)
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 2.0], dtype=float))
def test_clean_part_list_all_types(self):
"""Test list of a single value for all data types."""
for dtype in [int, int8, int16, int32, int64, float, float16, float32, float64, longdouble]:
num = [dtype(1)]
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0], dtype=float))
def test_clean_part_list_all_types2(self):
"""List of list of numbers of all data types."""
for dtype in [int, int8, int16, int32, int64, float, float16, float32, float64, longdouble]:
num = [dtype(1), dtype(2)]
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 2.0], dtype=float))
def test_clean_part_tuple_all_types(self):
"""Test tuple of a single value for all data types."""
for dtype in [int, int8, int16, int32, int64, float, float16, float32, float64, longdouble]:
num = (dtype(1),)
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0], dtype=float))
def test_clean_part_tuple_all_types2(self):
"""Test tuple of a single value for all data types."""
for dtype in [int, int8, int16, int32, int64, float, float16, float32, float64, longdouble]:
num = (dtype(1), dtype(2))
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1, 2], dtype=float))
def test_clean_part_list_list_list_int(self):
""" Test an int in a list of a list of a list."""
num = [[[1]]]
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0], dtype=float))
def test_clean_part_list_list_list_float(self):
""" Test a float in a list of a list of a list."""
num = [[[1.0]]]
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0], dtype=float))
def test_clean_part_list_list_list_ints(self):
"""Test 2 lists of ints in a list in a list."""
num = [[[1, 1], [2, 2]]]
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 1.0], dtype=float))
np.testing.assert_array_equal(num_[0][1], array([2.0, 2.0], dtype=float))
def test_clean_part_list_list_list_floats(self):
"""Test 2 lists of ints in a list in a list."""
num = [[[1.0, 1.0], [2.0, 2.0]]]
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 1.0], dtype=float))
np.testing.assert_array_equal(num_[0][1], array([2.0, 2.0], dtype=float))
def test_clean_part_list_list_array(self):
"""List of list of numpy arrays for all valid types."""
for dtype in int, int8, int16, int32, int64, float, float16, float32, float64, longdouble:
num = [[array([1, 1], dtype=dtype), array([2, 2], dtype=dtype)]]
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 1.0], dtype=float))
np.testing.assert_array_equal(num_[0][1], array([2.0, 2.0], dtype=float))
def test_clean_part_tuple_list_array(self):
"""Tuple of list of numpy arrays for all valid types."""
for dtype in int, int8, int16, int32, int64, float, float16, float32, float64, longdouble:
num = ([array([1, 1], dtype=dtype), array([2, 2], dtype=dtype)],)
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 1.0], dtype=float))
np.testing.assert_array_equal(num_[0][1], array([2.0, 2.0], dtype=float))
def test_clean_part_list_tuple_array(self):
"""List of tuple of numpy array for all valid types."""
for dtype in int, int8, int16, int32, int64, float, float16, float32, float64, longdouble:
num = [(array([1, 1], dtype=dtype), array([2, 2], dtype=dtype))]
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 1.0], dtype=float))
np.testing.assert_array_equal(num_[0][1], array([2.0, 2.0], dtype=float))
def test_clean_part_tuple_tuples_arrays(self):
"""Tuple of tuples of numpy arrays for all valid types."""
for dtype in int, int8, int16, int32, int64, float, float16, float32, float64, longdouble:
num = ((array([1, 1], dtype=dtype), array([2, 2], dtype=dtype)),
(array([3, 4], dtype=dtype), array([4, 4], dtype=dtype)))
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 1.0], dtype=float))
np.testing.assert_array_equal(num_[0][1], array([2.0, 2.0], dtype=float))
def test_clean_part_list_tuples_arrays(self):
"""List of tuples of numpy arrays for all valid types."""
for dtype in int, int8, int16, int32, int64, float, float16, float32, float64, longdouble:
num = [(array([1, 1], dtype=dtype), array([2, 2], dtype=dtype)),
(array([3, 4], dtype=dtype), array([4, 4], dtype=dtype))]
num_ = _clean_part(num)
assert isinstance(num_, list)
assert np.all([isinstance(part, list) for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 1.0], dtype=float))
np.testing.assert_array_equal(num_[0][1], array([2.0, 2.0], dtype=float))
def test_clean_part_list_list_arrays(self):
"""List of list of numpy arrays for all valid types."""
for dtype in int, int8, int16, int32, int64, float, float16, float32, float64, longdouble:
num = [[array([1, 1], dtype=dtype), array([2, 2], dtype=dtype)],
[array([3, 3], dtype=dtype), array([4, 4], dtype=dtype)]]
num_ = _clean_part(num)
assert len(num_) == 2
assert np.all([isinstance(part, list) for part in num_])
assert np.all([len(part) == 2 for part in num_])
np.testing.assert_array_equal(num_[0][0], array([1.0, 1.0], dtype=float))
np.testing.assert_array_equal(num_[0][1], array([2.0, 2.0], dtype=float))
np.testing.assert_array_equal(num_[1][0], array([3.0, 3.0], dtype=float))
np.testing.assert_array_equal(num_[1][1], array([4.0, 4.0], dtype=float))
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
#from tensorflow.models.image.cifar10 import cifar10_input
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 32,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
#NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
NUM_EPOCHS_PER_DECAY = 50.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.001 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.01 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
  Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.histogram_summary(tensor_name + '/activations', x)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
var = _variable_on_cpu(name, shape,
tf.truncated_normal_initializer(stddev=stddev))
if wd:
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
return cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
return cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir,
batch_size=FLAGS.batch_size)
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 1, 64],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv1)
# conv2
"""
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 64, 64],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv2)
"""
# pool2
pool2 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
with tf.variable_scope('conv3') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 64, 128],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv3)
"""
with tf.variable_scope('conv4') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 128, 128],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv4 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv4)
"""
pool3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool3')
"""
with tf.variable_scope('conv5') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 128, 256],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(pool3, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv5 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv5)
"""
with tf.variable_scope('conv6') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 128, 256],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(pool3, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv6 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv6)
"""
with tf.variable_scope('conv7') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 256, 256],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(conv6, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv7 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv7)
"""
pool4 = tf.nn.max_pool(conv6, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool4')
with tf.variable_scope('conv8') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 256, 512],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(pool4, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [512], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv8 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv8)
"""
with tf.variable_scope('conv9') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 512, 512],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(conv8, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [512], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv9 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv9)
with tf.variable_scope('conv10') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 512, 512],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(conv9, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [512], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv10 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv10)
"""
pool5 = tf.nn.max_pool(conv8, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool5')
"""
with tf.variable_scope('conv11') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 512, 512],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(pool5, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [512], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv11 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv11)
with tf.variable_scope('conv12') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 512, 512],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(conv11, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [512], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv12 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv12)
with tf.variable_scope('conv13') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 512, 512],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(conv12, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [512], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv13 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv13)
pool6 = tf.nn.max_pool(conv13, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool6')
"""
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
dim = 1
for d in pool5.get_shape()[1:].as_list():
dim *= d
reshape = tf.reshape(pool5, [FLAGS.batch_size, dim])
weights = _variable_with_weight_decay('weights', shape=[dim, 1024],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [1024], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
"""
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[4096, 4096],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [4096], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
"""
with tf.variable_scope('local5') as scope:
weights = _variable_with_weight_decay('weights', shape=[1024, NUM_CLASSES],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.1))
local5 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local5)
# softmax, i.e. softmax(WX + b)
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [NUM_CLASSES, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local5, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
  Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, labels, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(l.op.name +' (raw)', l)
tf.scalar_summary(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.scalar_summary('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad:
tf.histogram_summary(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,
reporthook=_progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
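# Hedged usage sketch: a training driver would typically wire these pieces together as
# described in the module docstring (session creation and the training loop are omitted):
#
#   images, labels = distorted_inputs()
#   logits = inference(images)
#   total_loss = loss(logits, labels)
#   global_step = tf.Variable(0, trainable=False)
#   train_op = train(total_loss, global_step)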
|
|
"""Data template classes for discovery used to generate additional data for setup."""
from __future__ import annotations
from collections.abc import Iterable
from dataclasses import dataclass
from typing import Any
from zwave_js_server.const import CommandClass
from zwave_js_server.const.command_class.meter import (
CURRENT_METER_TYPES,
ENERGY_TOTAL_INCREASING_METER_TYPES,
POWER_FACTOR_METER_TYPES,
POWER_METER_TYPES,
VOLTAGE_METER_TYPES,
ElectricScale,
MeterScaleType,
)
from zwave_js_server.const.command_class.multilevel_sensor import (
CO2_SENSORS,
CO_SENSORS,
CURRENT_SENSORS,
ENERGY_MEASUREMENT_SENSORS,
HUMIDITY_SENSORS,
ILLUMINANCE_SENSORS,
POWER_SENSORS,
PRESSURE_SENSORS,
SIGNAL_STRENGTH_SENSORS,
TEMPERATURE_SENSORS,
TIMESTAMP_SENSORS,
VOLTAGE_SENSORS,
MultilevelSensorType,
)
from zwave_js_server.model.node import Node as ZwaveNode
from zwave_js_server.model.value import Value as ZwaveValue, get_value_id
from zwave_js_server.util.command_class import (
get_meter_scale_type,
get_multilevel_sensor_type,
)
from .const import (
ENTITY_DESC_KEY_BATTERY,
ENTITY_DESC_KEY_CO,
ENTITY_DESC_KEY_CO2,
ENTITY_DESC_KEY_CURRENT,
ENTITY_DESC_KEY_ENERGY_MEASUREMENT,
ENTITY_DESC_KEY_ENERGY_TOTAL_INCREASING,
ENTITY_DESC_KEY_HUMIDITY,
ENTITY_DESC_KEY_ILLUMINANCE,
ENTITY_DESC_KEY_MEASUREMENT,
ENTITY_DESC_KEY_POWER,
ENTITY_DESC_KEY_POWER_FACTOR,
ENTITY_DESC_KEY_PRESSURE,
ENTITY_DESC_KEY_SIGNAL_STRENGTH,
ENTITY_DESC_KEY_TARGET_TEMPERATURE,
ENTITY_DESC_KEY_TEMPERATURE,
ENTITY_DESC_KEY_TIMESTAMP,
ENTITY_DESC_KEY_TOTAL_INCREASING,
ENTITY_DESC_KEY_VOLTAGE,
)
METER_DEVICE_CLASS_MAP: dict[str, set[MeterScaleType]] = {
ENTITY_DESC_KEY_CURRENT: CURRENT_METER_TYPES,
ENTITY_DESC_KEY_VOLTAGE: VOLTAGE_METER_TYPES,
ENTITY_DESC_KEY_ENERGY_TOTAL_INCREASING: ENERGY_TOTAL_INCREASING_METER_TYPES,
ENTITY_DESC_KEY_POWER: POWER_METER_TYPES,
ENTITY_DESC_KEY_POWER_FACTOR: POWER_FACTOR_METER_TYPES,
}
MULTILEVEL_SENSOR_DEVICE_CLASS_MAP: dict[str, set[MultilevelSensorType]] = {
ENTITY_DESC_KEY_CO: CO_SENSORS,
ENTITY_DESC_KEY_CO2: CO2_SENSORS,
ENTITY_DESC_KEY_CURRENT: CURRENT_SENSORS,
ENTITY_DESC_KEY_ENERGY_MEASUREMENT: ENERGY_MEASUREMENT_SENSORS,
ENTITY_DESC_KEY_HUMIDITY: HUMIDITY_SENSORS,
ENTITY_DESC_KEY_ILLUMINANCE: ILLUMINANCE_SENSORS,
ENTITY_DESC_KEY_POWER: POWER_SENSORS,
ENTITY_DESC_KEY_PRESSURE: PRESSURE_SENSORS,
ENTITY_DESC_KEY_SIGNAL_STRENGTH: SIGNAL_STRENGTH_SENSORS,
ENTITY_DESC_KEY_TEMPERATURE: TEMPERATURE_SENSORS,
ENTITY_DESC_KEY_TIMESTAMP: TIMESTAMP_SENSORS,
ENTITY_DESC_KEY_VOLTAGE: VOLTAGE_SENSORS,
}
@dataclass
class ZwaveValueID:
"""Class to represent a value ID."""
property_: str | int
command_class: int
endpoint: int | None = None
property_key: str | int | None = None
class BaseDiscoverySchemaDataTemplate:
"""Base class for discovery schema data templates."""
def resolve_data(self, value: ZwaveValue) -> Any:
"""
Resolve helper class data for a discovered value.
        Can optionally be implemented by subclasses if input data needs to be
        transformed once the discovered Value is available.
"""
# pylint: disable=no-self-use
return {}
def values_to_watch(self, resolved_data: Any) -> Iterable[ZwaveValue]:
"""
Return list of all ZwaveValues resolved by helper that should be watched.
Should be implemented by subclasses only if there are values to watch.
"""
# pylint: disable=no-self-use
return []
def value_ids_to_watch(self, resolved_data: Any) -> set[str]:
"""
        Return the set of all Value IDs resolved by helper that should be watched.
Not to be overwritten by subclasses.
"""
return {val.value_id for val in self.values_to_watch(resolved_data) if val}
@staticmethod
def _get_value_from_id(
node: ZwaveNode, value_id_obj: ZwaveValueID
) -> ZwaveValue | None:
"""Get a ZwaveValue from a node using a ZwaveValueDict."""
value_id = get_value_id(
node,
value_id_obj.command_class,
value_id_obj.property_,
endpoint=value_id_obj.endpoint,
property_key=value_id_obj.property_key,
)
return node.values.get(value_id)
@dataclass
class DynamicCurrentTempClimateDataTemplate(BaseDiscoverySchemaDataTemplate):
"""Data template class for Z-Wave JS Climate entities with dynamic current temps."""
lookup_table: dict[str | int, ZwaveValueID]
dependent_value: ZwaveValueID
def resolve_data(self, value: ZwaveValue) -> dict[str, Any]:
"""Resolve helper class data for a discovered value."""
data: dict[str, Any] = {
"lookup_table": {},
"dependent_value": self._get_value_from_id(
value.node, self.dependent_value
),
}
for key in self.lookup_table:
data["lookup_table"][key] = self._get_value_from_id(
value.node, self.lookup_table[key]
)
return data
def values_to_watch(self, resolved_data: dict[str, Any]) -> Iterable[ZwaveValue]:
"""Return list of all ZwaveValues resolved by helper that should be watched."""
return [
*resolved_data["lookup_table"].values(),
resolved_data["dependent_value"],
]
@staticmethod
def current_temperature_value(resolved_data: dict[str, Any]) -> ZwaveValue | None:
"""Get current temperature ZwaveValue from resolved data."""
lookup_table: dict[str | int, ZwaveValue | None] = resolved_data["lookup_table"]
dependent_value: ZwaveValue | None = resolved_data["dependent_value"]
if dependent_value and dependent_value.value is not None:
lookup_key = dependent_value.metadata.states[
str(dependent_value.value)
].split("-")[0]
return lookup_table.get(lookup_key)
return None
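    # Illustrative (label format assumed): if the dependent value's state label is e.g.
    # "2-Heat" and the lookup table has an entry under key "2", that entry's ZwaveValue is
    # returned as the source of the current temperature (the label is split on "-").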
class NumericSensorDataTemplate(BaseDiscoverySchemaDataTemplate):
"""Data template class for Z-Wave Sensor entities."""
def resolve_data(self, value: ZwaveValue) -> str | None:
"""Resolve helper class data for a discovered value."""
if value.command_class == CommandClass.BATTERY:
return ENTITY_DESC_KEY_BATTERY
if value.command_class == CommandClass.METER:
scale_type = get_meter_scale_type(value)
# We do this because even though these are energy scales, they don't meet
# the unit requirements for the energy device class.
if scale_type in (
ElectricScale.PULSE_COUNT,
ElectricScale.KILOVOLT_AMPERE_HOUR,
ElectricScale.KILOVOLT_AMPERE_REACTIVE_HOUR,
):
return ENTITY_DESC_KEY_TOTAL_INCREASING
# We do this because even though these are power scales, they don't meet
# the unit requirements for the power device class.
if scale_type == ElectricScale.KILOVOLT_AMPERE_REACTIVE:
return ENTITY_DESC_KEY_MEASUREMENT
for key, scale_type_set in METER_DEVICE_CLASS_MAP.items():
if scale_type in scale_type_set:
return key
if value.command_class == CommandClass.SENSOR_MULTILEVEL:
sensor_type = get_multilevel_sensor_type(value)
if sensor_type == MultilevelSensorType.TARGET_TEMPERATURE:
return ENTITY_DESC_KEY_TARGET_TEMPERATURE
for (
key,
sensor_type_set,
) in MULTILEVEL_SENSOR_DEVICE_CLASS_MAP.items():
if sensor_type in sensor_type_set:
return key
return None
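    # Illustrative resolution order (from the branches above): battery values map to
    # ENTITY_DESC_KEY_BATTERY, meter values are routed by scale type through
    # METER_DEVICE_CLASS_MAP, multilevel sensor values by sensor type through
    # MULTILEVEL_SENSOR_DEVICE_CLASS_MAP, and anything else resolves to None.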
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# pylint: disable=g-import-not-at-top
# TODO(jart): #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.training.python.training import training
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_lib2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
# pylint: enable=g-import-not-at-top
def logistic_classifier(inputs):
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
def batchnorm_classifier(inputs):
inputs = layers.batch_norm(inputs, decay=0.1)
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
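# Both helpers map a [batch, 4] float input to a single sigmoid output; the batch-norm
# variant additionally creates moving_mean/moving_variance update ops, which the
# update_ops-related tests below depend on.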
class CreateTrainOpTest(test.TestCase):
def setUp(self):
np.random.seed(0)
# Create an easy training set:
self._inputs = np.random.rand(16, 4).astype(np.float32)
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
def testTrainOpInCollection(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
# Make sure the training op was recorded in the proper collection
self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
def testUseUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
expected_mean = np.mean(self._inputs, axis=(0))
expected_var = np.var(self._inputs, axis=(0))
tf_predictions = batchnorm_classifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib.get_variables_by_name('moving_variance')[
0]
with session_lib.Session() as sess:
# Initialize all variables
sess.run(variables_lib2.global_variables_initializer())
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEmptyUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer, update_ops=[])
moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib.get_variables_by_name('moving_variance')[
0]
with session_lib.Session() as sess:
# Initialize all variables
sess.run(variables_lib2.global_variables_initializer())
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# Since we skip update_ops the moving_vars are not updated.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
def testUseGlobalStep(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
global_step = variables_lib.get_or_create_global_step()
with session_lib.Session() as sess:
# Initialize all variables
sess.run(variables_lib2.global_variables_initializer())
for _ in range(10):
sess.run([train_op])
global_step = global_step.eval()
# After 10 updates global_step should be 10.
self.assertAllClose(global_step, 10)
def testNoneGlobalStep(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(
total_loss, optimizer, global_step=None)
global_step = variables_lib.get_or_create_global_step()
with session_lib.Session() as sess:
# Initialize all variables
sess.run(variables_lib2.global_variables_initializer())
for _ in range(10):
sess.run([train_op])
global_step = global_step.eval()
        # Since train_op doesn't use global_step, it shouldn't change.
self.assertAllClose(global_step, 0)
class TrainBNClassifierTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = os.path.join(self.get_temp_dir(), 'tmp_bnlogs/')
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
g = ops.Graph()
with g.as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
self._logdir,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)])
self.assertLess(loss, .1)
class TrainTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCanAchieveZeroLoss(self):
logdir = os.path.join(self.get_temp_dir(), 'can_achieve_zero_loss')
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
logdir,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)])
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithLocalVariable(self):
logdir = os.path.join(self.get_temp_dir(), 'train_with_local_variable')
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
local_multiplier = variables_lib.local_variable(1.0)
tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
logdir,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)])
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testResumeTrainAchievesRoughlyTheSameLoss(self):
number_of_steps = [300, 1, 5]
logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')
for i in range(len(number_of_steps)):
with ops.Graph().as_default():
random_seed.set_random_seed(i)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.StopAtStepHook(
num_steps=number_of_steps[i]),
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=50, saver=saver),
])
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=learning_rate)
def transform_grads_fn(grads):
if gradient_multiplier != 1.0:
variables = variables_lib2.trainable_variables()
gradient_multipliers = {var: gradient_multiplier for var in variables}
with ops.name_scope('multiply_grads'):
return training.multiply_gradients(grads, gradient_multipliers)
else:
return grads
return training.create_train_op(
total_loss, optimizer, transform_grads_fn=transform_grads_fn)
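  # (Note: training.multiply_gradients scales each gradient by its per-variable
  #  multiplier before the optimizer applies it, so the effective step size becomes
  #  learning_rate * gradient_multiplier. testTrainWithAlteredGradients below relies
  #  on this to compare two effective learning rates.)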
def testTrainWithInitFromCheckpoint(self):
logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs1/')
logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
if gfile.Exists(logdir1): # For running on jenkins.
gfile.DeleteRecursively(logdir1)
if gfile.Exists(logdir2): # For running on jenkins.
gfile.DeleteRecursively(logdir2)
# First, train the model one step (make sure the error is high).
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op()
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir1,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir1, save_steps=1, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=1),
],
save_checkpoint_secs=None)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
train_op = self.create_train_op()
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir1,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir1, save_steps=1, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=300),
],
save_checkpoint_secs=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
# Finally, advance the model a single step and validate that the loss is
# still low.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
train_op = self.create_train_op()
model_variables = variables_lib2.global_variables()
model_path = os.path.join(logdir1, 'model.ckpt-300')
assign_fn = variables_lib.assign_from_checkpoint_fn(model_path,
model_variables)
def init_fn(_, session):
assign_fn(session)
loss = training.train(
train_op,
logdir2,
scaffold=monitored_session.Scaffold(init_fn=init_fn),
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=1)])
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
def ModelLoss(self):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
return loss_ops.get_total_loss()
def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
logdir = os.path.join(self.get_temp_dir(), 'tmp_logs3/')
if gfile.Exists(logdir): # For running on jenkins.
gfile.DeleteRecursively(logdir)
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights = variables_lib.get_variables_by_name('weights')
train_op = training.create_train_op(
total_loss, optimizer, variables_to_train=weights)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=200),
])
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Next, train the biases of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
biases = variables_lib.get_variables_by_name('biases')
train_op = training.create_train_op(
total_loss, optimizer, variables_to_train=biases)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=300),
])
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Finally, train both weights and bias to get lower loss.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=400),
])
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights, biases = variables_lib.get_variables()
train_op = training.create_train_op(total_loss, optimizer)
train_weights = training.create_train_op(
total_loss, optimizer, variables_to_train=[weights])
train_biases = training.create_train_op(
total_loss, optimizer, variables_to_train=[biases])
with session_lib.Session() as sess:
# Initialize the variables.
sess.run(variables_lib2.global_variables_initializer())
        # Get the initial weights and biases values.
weights_values, biases_values = sess.run([weights, biases])
self.assertGreater(np.linalg.norm(weights_values), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
# Update weights and biases.
loss = sess.run(train_op)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the weights and biases have been updated.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
weights_values, biases_values = new_weights, new_biases
# Update only weights.
loss = sess.run(train_weights)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the weights have been updated, but biases have not.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
weights_values = new_weights
# Update only biases.
loss = sess.run(train_biases)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the biases have been updated, but weights have not.
self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
def testTrainWithAlteredGradients(self):
# Use the same learning rate but different gradient multipliers
# to train two models. Model with equivalently larger learning
# rate (i.e., learning_rate * gradient_multiplier) has smaller
# training loss.
logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs6/')
logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs7/')
if gfile.Exists(logdir1):
gfile.DeleteRecursively(logdir1)
if gfile.Exists(logdir2):
gfile.DeleteRecursively(logdir2)
multipliers = [1., 1000.]
number_of_steps = 10
losses = []
learning_rate = 0.001
# First, train the model with equivalently smaller learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[0])
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir1,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
basic_session_run_hooks.CheckpointSaverHook(
logdir1, save_steps=50, saver=saver),
])
losses.append(loss)
self.assertGreater(loss, .5)
# Second, train the model with equivalently larger learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[1])
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir2,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
basic_session_run_hooks.CheckpointSaverHook(
logdir2, save_steps=50, saver=saver),
])
losses.append(loss)
self.assertIsNotNone(loss)
self.assertLess(loss, .5)
# The loss of the model trained with larger learning rate should
# be smaller.
self.assertGreater(losses[0], losses[1])
if __name__ == '__main__':
test.main()
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
vispy backend for wxPython.
"""
from __future__ import division
from time import sleep
import gc
import warnings
from ..base import (BaseApplicationBackend, BaseCanvasBackend,
BaseTimerBackend)
from ...util import keys, logger
from ...util.ptime import time
from ... import config
USE_EGL = config['gl_backend'].lower().startswith('es')
# -------------------------------------------------------------------- init ---
try:
# avoid silly locale warning on OSX
with warnings.catch_warnings(record=True):
import wx
from wx import glcanvas
from wx.glcanvas import GLCanvas
# Map native keys to vispy keys
KEYMAP = {
wx.WXK_SHIFT: keys.SHIFT,
wx.WXK_CONTROL: keys.CONTROL,
wx.WXK_ALT: keys.ALT,
wx.WXK_WINDOWS_MENU: keys.META,
wx.WXK_LEFT: keys.LEFT,
wx.WXK_UP: keys.UP,
wx.WXK_RIGHT: keys.RIGHT,
wx.WXK_DOWN: keys.DOWN,
wx.WXK_PAGEUP: keys.PAGEUP,
wx.WXK_PAGEDOWN: keys.PAGEDOWN,
wx.WXK_INSERT: keys.INSERT,
wx.WXK_DELETE: keys.DELETE,
wx.WXK_HOME: keys.HOME,
wx.WXK_END: keys.END,
wx.WXK_ESCAPE: keys.ESCAPE,
wx.WXK_BACK: keys.BACKSPACE,
wx.WXK_F1: keys.F1,
wx.WXK_F2: keys.F2,
wx.WXK_F3: keys.F3,
wx.WXK_F4: keys.F4,
wx.WXK_F5: keys.F5,
wx.WXK_F6: keys.F6,
wx.WXK_F7: keys.F7,
wx.WXK_F8: keys.F8,
wx.WXK_F9: keys.F9,
wx.WXK_F10: keys.F10,
wx.WXK_F11: keys.F11,
wx.WXK_F12: keys.F12,
wx.WXK_SPACE: keys.SPACE,
wx.WXK_RETURN: keys.ENTER, # == pyglet.window.key.RETURN
wx.WXK_NUMPAD_ENTER: keys.ENTER,
wx.WXK_TAB: keys.TAB,
}
except Exception as exp:
available, testable, why_not, which = False, False, str(exp), None
class GLCanvas(object):
pass
else:
if USE_EGL:
available, testable, why_not = False, False, 'EGL not supported'
else:
available, testable, why_not = True, True, None
which = 'wxPython ' + str(wx.__version__)
# -------------------------------------------------------------- capability ---
capability = dict( # things that can be set by the backend
title=True,
size=True,
position=True,
show=True,
vsync=True,
resizable=True,
decorate=True,
fullscreen=True,
context=True,
multi_window=True,
scroll=True,
parent=True,
always_on_top=True,
)
# ------------------------------------------------------- set_configuration ---
def _set_config(c):
"""Set gl configuration"""
gl_attribs = [glcanvas.WX_GL_RGBA,
glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'],
glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'],
glcanvas.WX_GL_MIN_RED, c['red_size'],
glcanvas.WX_GL_MIN_GREEN, c['green_size'],
glcanvas.WX_GL_MIN_BLUE, c['blue_size'],
glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']]
gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else []
gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else []
return gl_attribs
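# A minimal sketch, not part of the backend: given an assumed config dict of the kind
# vispy passes in (the values below are hypothetical, for illustration only),
# _set_config returns a flat wx attribute list such as
# [WX_GL_RGBA, WX_GL_DEPTH_SIZE, 24, ..., WX_GL_DOUBLEBUFFER].
def _example_set_config():
    example_config = dict(depth_size=24, stencil_size=8,
                          red_size=8, green_size=8, blue_size=8, alpha_size=8,
                          double_buffer=True, stereo=False)
    return _set_config(example_config)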
# ------------------------------------------------------------- application ---
_wx_app = None
_timers = []
class ApplicationBackend(BaseApplicationBackend):
def __init__(self):
BaseApplicationBackend.__init__(self)
self._event_loop = wx.EventLoop()
wx.EventLoop.SetActive(self._event_loop)
def _vispy_get_backend_name(self):
return 'wx'
def _vispy_process_events(self):
        # inspired by https://github.com/wxWidgets/wxPython/blob/master/
# samples/mainloop/mainloop.py
for _ in range(3): # trial-and-error found this to work (!)
while self._event_loop.Pending():
self._event_loop.Dispatch()
_wx_app.ProcessIdle()
sleep(0.01)
def _vispy_run(self):
return _wx_app.MainLoop()
def _vispy_quit(self):
global _wx_app
_wx_app.ExitMainLoop()
def _vispy_get_native_app(self):
        # Get the native app in a safe way. Taken from guisupport.py
global _wx_app
_wx_app = wx.GetApp() # in case the user already has one
if _wx_app is None:
_wx_app = wx.PySimpleApp()
_wx_app.SetExitOnFrameDelete(True)
return _wx_app
# ------------------------------------------------------------------ canvas ---
def _get_mods(evt):
"""Helper to extract list of mods from event"""
mods = []
mods += [keys.CONTROL] if evt.ControlDown() else []
mods += [keys.ALT] if evt.AltDown() else []
mods += [keys.SHIFT] if evt.ShiftDown() else []
mods += [keys.META] if evt.MetaDown() else []
return mods
def _process_key(evt):
"""Helper to convert from wx keycode to vispy keycode"""
key = evt.GetKeyCode()
if key in KEYMAP:
return KEYMAP[key], ''
if 97 <= key <= 122:
key -= 32
if key >= 32 and key <= 127:
return keys.Key(chr(key)), chr(key)
else:
return None, None
class DummySize(object):
def __init__(self, size):
self.size = size
def GetSize(self):
return self.size
def Skip(self):
pass
class CanvasBackend(GLCanvas, BaseCanvasBackend):
""" wxPython backend for Canvas abstract class."""
# args are for BaseCanvasBackend, kwargs are for us.
def __init__(self, *args, **kwargs):
BaseCanvasBackend.__init__(self, *args)
p = self._process_backend_kwargs(kwargs)
# WX supports OS double-click events, so we set this here to
# avoid double events
self._double_click_supported = True
# Set config
self._gl_attribs = _set_config(p.context.config)
# Deal with context
p.context.shared.add_ref('wx', self)
if p.context.shared.ref is self:
self._gl_context = None # set for real once we init the GLCanvas
else:
self._gl_context = p.context.shared.ref._gl_context
if p.parent is None:
style = (wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.CLOSE_BOX |
wx.SYSTEM_MENU | wx.CAPTION | wx.CLIP_CHILDREN)
style |= wx.NO_BORDER if not p.decorate else wx.RESIZE_BORDER
style |= wx.STAY_ON_TOP if p.always_on_top else 0
self._frame = wx.Frame(None, wx.ID_ANY, p.title, p.position,
p.size, style)
if not p.resizable:
self._frame.SetSizeHints(p.size[0], p.size[1],
p.size[0], p.size[1])
if p.fullscreen is not False:
if p.fullscreen is not True:
logger.warning('Cannot specify monitor number for wx '
'fullscreen, using default')
self._fullscreen = True
else:
self._fullscreen = False
_wx_app.SetTopWindow(self._frame)
parent = self._frame
self._frame.Raise()
self._frame.Bind(wx.EVT_CLOSE, self.on_close)
else:
parent = p.parent
self._frame = None
self._fullscreen = False
self._init = False
GLCanvas.__init__(self, parent, wx.ID_ANY, pos=p.position,
size=p.size, style=0, name='GLCanvas',
attribList=self._gl_attribs)
if self._gl_context is None:
self._gl_context = glcanvas.GLContext(self)
self.SetFocus()
self._vispy_set_title(p.title)
self._size = None
self.Bind(wx.EVT_SIZE, self.on_resize)
self.Bind(wx.EVT_PAINT, self.on_draw)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.Bind(wx.EVT_KEY_UP, self.on_key_up)
self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event)
self._size_init = p.size
self._vispy_set_visible(p.show)
def on_resize(self, event):
if self._vispy_canvas is None or not self._init:
event.Skip()
return
size = event.GetSize()
self._vispy_canvas.events.resize(size=size)
self.Refresh()
event.Skip()
def on_draw(self, event):
if self._vispy_canvas is None:
return
dc = wx.PaintDC(self) # needed for wx
if not self._init:
self._initialize()
self._vispy_canvas.set_current()
self._vispy_canvas.events.draw(region=None)
del dc
event.Skip()
def _initialize(self):
if self._vispy_canvas is None:
return
self._init = True
self._vispy_canvas.set_current()
self._vispy_canvas.events.initialize()
self.on_resize(DummySize(self._size_init))
def _vispy_set_current(self):
self.SetCurrent(self._gl_context)
def _vispy_warmup(self):
etime = time() + 0.3
while time() < etime:
sleep(0.01)
self._vispy_canvas.set_current()
self._vispy_canvas.app.process_events()
def _vispy_swap_buffers(self):
# Swap front and back buffer
self._vispy_canvas.set_current()
self.SwapBuffers()
def _vispy_set_title(self, title):
# Set the window title. Has no effect for widgets
if self._frame is not None:
self._frame.SetLabel(title)
def _vispy_set_size(self, w, h):
# Set size of the widget or window
if not self._init:
self._size_init = (w, h)
self.SetSizeWH(w, h)
def _vispy_set_position(self, x, y):
        # Set position of the widget or window. May have no effect for widgets
if self._frame is not None:
self._frame.SetPosition((x, y))
def _vispy_get_fullscreen(self):
return self._fullscreen
def _vispy_set_fullscreen(self, fullscreen):
if self._frame is not None:
self._fullscreen = bool(fullscreen)
self._vispy_set_visible(True)
def _vispy_set_visible(self, visible):
# Show or hide the window or widget
self.Show(visible)
if visible:
if self._frame is not None:
self._frame.ShowFullScreen(self._fullscreen)
def _vispy_update(self):
# Invoke a redraw
self.Refresh()
def _vispy_close(self):
if self._vispy_canvas is None:
return
# Force the window or widget to shut down
canvas = self
frame = self._frame
self._gl_context = None # let RC destroy this in case it's shared
canvas.Close()
canvas.Destroy()
if frame:
frame.Close()
frame.Destroy()
gc.collect() # ensure context gets destroyed if it should be
def _vispy_get_size(self):
if self._vispy_canvas is None:
return
w, h = self.GetClientSize()
return w, h
def _vispy_get_position(self):
if self._vispy_canvas is None:
return
x, y = self.GetPosition()
return x, y
def on_close(self, evt):
if not self: # wx control evaluates to false if C++ part deleted
return
if self._vispy_canvas is None:
return
self._vispy_canvas.close()
def on_mouse_event(self, evt):
if self._vispy_canvas is None:
return
pos = (evt.GetX(), evt.GetY())
mods = _get_mods(evt)
if evt.GetWheelRotation() != 0:
delta = (0., float(evt.GetWheelRotation()))
self._vispy_canvas.events.mouse_wheel(delta=delta, pos=pos,
modifiers=mods)
elif evt.Moving() or evt.Dragging(): # mouse move event
self._vispy_mouse_move(pos=pos, modifiers=mods)
elif evt.ButtonDown():
if evt.LeftDown():
button = 0
elif evt.MiddleDown():
button = 1
elif evt.RightDown():
button = 2
else:
evt.Skip()
self._vispy_mouse_press(pos=pos, button=button, modifiers=mods)
elif evt.ButtonUp():
if evt.LeftUp():
button = 0
elif evt.MiddleUp():
button = 1
elif evt.RightUp():
button = 2
else:
evt.Skip()
self._vispy_mouse_release(pos=pos, button=button, modifiers=mods)
elif evt.ButtonDClick():
if evt.LeftDClick():
button = 0
elif evt.MiddleDClick():
button = 1
elif evt.RightDClick():
button = 2
else:
evt.Skip()
self._vispy_mouse_press(pos=pos, button=button, modifiers=mods)
self._vispy_mouse_double_click(pos=pos, button=button,
modifiers=mods)
evt.Skip()
def on_key_down(self, evt):
if self._vispy_canvas is None:
return
key, text = _process_key(evt)
self._vispy_canvas.events.key_press(key=key, text=text,
modifiers=_get_mods(evt))
def on_key_up(self, evt):
if self._vispy_canvas is None:
return
key, text = _process_key(evt)
self._vispy_canvas.events.key_release(key=key, text=text,
modifiers=_get_mods(evt))
# ------------------------------------------------------------------- timer ---
class TimerBackend(BaseTimerBackend):
def __init__(self, vispy_timer):
BaseTimerBackend.__init__(self, vispy_timer)
assert _wx_app is not None
parent = _wx_app.GetTopWindow() # assume it's the parent window
self._timer = wx.Timer(parent, -1)
parent.Bind(wx.EVT_TIMER, self._vispy_timeout, self._timer)
def _vispy_start(self, interval):
self._timer.Start(interval * 1000., False)
def _vispy_stop(self):
self._timer.Stop()
def _vispy_timeout(self, evt):
self._vispy_timer._timeout()
evt.Skip()
|
|
import sys # for exception info
DEFAULT_CONN_ENDPOINT = 'https://outlook.office365.com/EWS/Exchange.asmx'
param_connector = Parameter({'title': 'Connector', 'schema': {'type': 'object', 'properties': {
'ewsEndPoint': {'title': 'EWS end-point', 'type': 'string', 'hint': DEFAULT_CONN_ENDPOINT, 'order': 1},
'username': {'title': 'Username', 'type': 'string', 'order': 2},
'password': {'title': 'Password', 'type': 'string', 'format': 'password', 'order': 3},
'address': {'title': 'Address', 'type': 'string', 'hint': 'e.g. [email protected]', 'order': 4}
}}})
param_calendars = Parameter({'title': 'Calendars', 'schema': {'type': 'array', 'items': { 'type': 'object', 'properties': {
'name': {'type': 'string', 'order': 1},
'folderName': {'title': 'Folder name (if not default calendar)', 'type': 'string', 'desc': 'If default calendar is not being used, the exact name of the folder holding the group of calendars.', 'order': 2}
}}}})
local_event_RawItems = LocalEvent({'title': 'Raw Items', 'group': 'Raw', 'schema': {'type': 'array', 'items': {
'type': 'object', 'properties': {
'subject': {'type': 'string', 'order': 1},
'location': {'type': 'string', 'order': 2},
'start': {'type': 'string', 'order': 3},
'end': {'type': 'string', 'order': 4}
}}}})
local_event_RawFolders = LocalEvent({'title': 'Raw Folders', 'group': 'Raw', 'schema': {'type': 'array', 'items': {
'type': 'object', 'properties': {
'displayName': {'type': 'string', 'order': 1}
}}}})
# Booking / raw mappings
# 'title' taken from 'subject'
# 'member' taken from 'location'
# 'signal' extracted from 'subject' (e.g. '... {signal}')
# 'state' extracted from 'subject' (e.g. '... {signal: state}')
ITEM_SCHEMA = { 'type': 'object', 'title': '...', 'properties': {
'title': {'type': 'string', 'order': 1},
'start': {'type': 'string', 'order': 2},
'end': {'type': 'string', 'order': 3},
'member': {'type': 'string', 'order': 4},
'signal': {'type': 'string', 'order': 5},
'state': {'type': 'string', 'order': 6}
}}
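# Illustrative example only (values are made up): a raw item with
#   subject  = 'Board meeting {Projector: On}'
#   location = 'Meeting Room 7'
# would map to a booking of the form
#   {'title': 'Board meeting {Projector: On}', 'member': 'Meeting Room 7',
#    'signal': 'Projector', 'state': 'On', 'start': ..., 'end': ...}
# (see extractField further below for how the trailing {signal: state} tag is parsed).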
import xml.etree.ElementTree as ET
import base64
# (uses a safe default)
connector = { 'ewsEndPoint': DEFAULT_CONN_ENDPOINT,
'username': None,
'password': None,
'address': None }
# some pre-constructed XML elements:
# - FolderId elements (by folder name)
resolvedFolderElements = {}
# DistinguishedFolderId element (including mailbox info if relevant)
distinguishedFolderIdElement = None
def main():
username, password = None, None
try:
if param_connector == None:
raise Exception('No connector config set!')
username = tryGet(param_connector, 'username')
if isBlank(username):
raise Exception('No username set!')
password = tryGet(param_connector, 'password')
if isBlank(password):
raise Exception('No password set!')
# go through calendars...
# ... ensure at least one exists
if isEmpty(param_calendars):
raise Exception('At least one calendar must be configured.')
# ... and no more than one default calendar is configured
# ... and the folder name is not configured twice
calendarMap = set()
for calendarParam in param_calendars:
if isEmpty(calendarParam.get('name')):
raise Exception('A calendar must have a unique name given to it')
folderName = calendarParam.get('folderName')
if isEmpty(folderName):
folderName = '<DEFAULT>'
# raise an error if the calendar is already in the set
if folderName in calendarMap:
raise Exception('The same calendar has been referred to more than once in the calendars config - "%s"' % folderName)
# add to the set
calendarMap.add(folderName)
except Exception, exc:
console.warn(str(exc))
return
ewsEndPoint = tryGet(param_connector, 'ewsEndPoint')
if isBlank(ewsEndPoint):
ewsEndPoint = DEFAULT_CONN_ENDPOINT
connector['ewsEndPoint'] = ewsEndPoint
connector['username'] = username
connector['password'] = password
connector['address'] = param_connector.get('address')
# pre-construct some of the re-useable XML elements
global distinguishedFolderIdElement
distinguishedFolderIdElement = ET.fromstring('<DistinguishedFolderId Id="calendar" xmlns="http://schemas.microsoft.com/exchange/services/2006/types"></DistinguishedFolderId>')
# update mailbox and inject if present
if not isBlank(connector['address']):
mailboxElement = ET.fromstring('<Mailbox xmlns="http://schemas.microsoft.com/exchange/services/2006/types"><EmailAddress>SMTP_ADDRESS_HERE</EmailAddress></Mailbox>')
searchElement(mailboxElement, 'type:EmailAddress').text = connector['address']
distinguishedFolderIdElement.append(mailboxElement)
# create signals for each calendar
for calendarParam in param_calendars:
name = calendarParam['name']
Event('Calendar %s Items' % name, {'title': '"%s"' % (name), 'group': 'Calendars', 'order': next_seq(), 'schema': {'type': 'array', 'title': '...', 'items': ITEM_SCHEMA}})
console.info('Started! Will poll folders and items now (then items every minute)')
# folder resolution might not even be necessary, but no harm in doing it once anyway
call(lambda: lookup_local_action('PollFolders').call())
# timer responsible for continually polling items
# (every min, first after 10s)
timer_poller = Timer(lambda: lookup_local_action('PollItems').call(), 60, 10)
def local_action_PollItems(arg=None):
try:
now = date_now()
rawBookings = query_ews(now, now.plusDays(7))
trace('Raw:')
for raw in rawBookings:
trace(raw)
# emit raw bookings
local_event_RawItems.emitIfDifferent(rawBookings)
# go through the raw list
bookingsByCalendar = {}
for raw in rawBookings:
calendarIndex = raw['calendar']
subject = raw['subject']
booking = { 'title': subject,
'start': str(raw['start']),
'end': str(raw['end']),
'member': raw['location'],
'signal': None,
'state': None }
# extract optional fields in the subject line
# e.g. subject': 'Peace and quiet! {Power: Off}
fieldName, fieldValue = extractField(subject)
# override the signal name if it's present
if not isBlank(fieldName):
booking['signal'] = fieldName
# override the value if it's present
if not isBlank(fieldValue):
booking['state'] = fieldValue
bookings = bookingsByCalendar.get(calendarIndex)
if bookings == None:
bookings = list()
bookingsByCalendar[calendarIndex] = bookings
bookings.append(booking)
# emit clean bookings
for index, info in enumerate(param_calendars):
trace('index:%s, info:%s' % (index, info))
lookup_local_event('Calendar %s Items' % info['name']).emitIfDifferent(bookingsByCalendar.get(index) or [])
# indicate a successful poll cycle
lastSuccess[0] = system_clock()
except:
eType, eValue, eTraceback = sys.exc_info()
console.warn('Failed to poll items; exception was [%s]' % eValue)
def query_ews(start, end):
'''Date-range query of calendar items. Returns false if calendar resolution has not been completed yet.'''
# prepare named folder elements if in use
folderElements = list()
for calendar in param_calendars or '':
folderName = calendar['folderName']
if not isEmpty(folderName):
# lookup the folder by display name
folderElement = resolvedFolderElements.get(folderName)
if folderElement == None:
raise Exception('At least one named-calendar has not been located yet; (folder name is "%s")' % folderName)
folderElements.append(folderElement)
else:
# use distinguished folder
folderElements.append(distinguishedFolderIdElement)
request = prepareQueryRequest(start, end, resolvedFolders=folderElements)
xmlRequest = ET.tostring(request)
trace('Requesting... request:%s' % xmlRequest)
response = get_url(connector['ewsEndPoint'],
username=connector['username'],
password=connector['password'],
contentType='text/xml',
post=xmlRequest)
trace('Got response. data:%s' % response)
warnings = list()
items = parse_query_response(response, warnings)
return items
def parse_query_response(responseXML, warnHandler):
'''Parses a response, given the full envelope (as XML string)'''
# no way to specify string encoding using this version of Python APIs
# so need to pre-encode UTF8. Inner parser only deals with plain ASCII.
root = ET.fromstring(responseXML.encode('utf-8'))
# ensure header exists
header = getElement(root, 'env:Header')
# ensure body exists
body = getElement(root, 'env:Body')
# get major response part
if len(body) <= 0:
raise ParseException('Expected a major response with the Body')
majorResponseTag = body[0].tag
calendarItems = list()
# (tag can be {m:FindItemResponse}, etc.)
if majorResponseTag == expandPath('message:FindItemResponse'):
findItemResponse = body[0]
for responseMessage in findItemResponse:
for responseIndex, findItemResponseMessage in enumerate(responseMessage):
if getAttrib(findItemResponseMessage, "ResponseClass") != "Success":
raise DataException("FindItemResponseMessage response class was not 'Success' (was %s)" % ET.tostring(findItemResponseMessage))
responseCode = getElementText(findItemResponseMessage, 'message:ResponseCode')
if responseCode != 'NoError':
raise DataException("Response code was not 'NoError'")
rootFolder = getElement(findItemResponseMessage, 'message:RootFolder')
# warning...
includesLastItemInRange = rootFolder.get('IncludesLastItemInRange')
if includesLastItemInRange == False and warnings:
warnings.append('IncludesLastItemInRange=false but this parser does not support paged responses')
rootFolderItems = getElement(rootFolder, 'type:Items')
for item in rootFolderItems:
itemTag = item.tag
# interpret calendar items only
if itemTag == expandPath('type:CalendarItem'):
itemIDElement = getElement(item, 'type:ItemId')
itemID = {'id': getAttrib(itemIDElement, 'Id'),
'changeKey': getAttrib(itemIDElement, 'ChangeKey')}
subject = tryGetElementText(item, 'type:Subject', default='')
sensitivity = tryGetElementText(item, 'type:Sensitivity', default='') # TODO: interpret 'Sensitivity'
start = getElementText(item, 'type:Start')
end = getElementText(item, 'type:End')
location = tryGetElementText(item, 'type:Location', default='')
organiserElement = tryGetElement(item, 'type:Organizer')
if organiserElement != None:
organiserMailboxElement = getElement(organiserElement, 'type:Mailbox')
organiserName = tryGetElementText(organiserMailboxElement, 'type:Name', default='')
else:
organiserName = ''
calendarItems.append({ # 'id': itemID,
'calendar': responseIndex,
'subject': subject,
'sensitivity': sensitivity,
'start': date_instant(date_parse(start).getMillis()), # trick to convert into local timezone for display convenience (instead of GMT)
'end': date_instant(date_parse(end).getMillis()), # trick to convert into local timezone for display convenience (instead of GMT)
'location': location,
'organiser': organiserName })
else:
raise DataException('Unexpected major response element - got %s' % majorResponseTag)
return calendarItems
def local_action_PollFolders(arg=None):
try:
updateFolderMap()
except:
eType, eValue, eTraceback = sys.exc_info()
console.warn('Failed to poll folders (will self retry in 5 mins); exception was [%s]' % eValue)
call(lambda: lookup_local_action('PollFolders').call(), 5*60)
def updateFolderMap():
folderItems = find_folders()
local_event_RawFolders.emit(folderItems)
# set up the lookup map
for item in folderItems:
resolvedFolderElements[item['displayName']] = item['folderIDElement']
def find_folders():
'''Find all general calendar folders, returns array of {folderIDElement: ___, displayName: '___'}'''
request = prepareGetFoldersRequest(smtpAddress=connector['address'])
xmlRequest = ET.tostring(request)
trace('find_folders requesting... data:%s' % xmlRequest)
response = get_url(connector['ewsEndPoint'],
username=connector['username'],
password=connector['password'],
contentType='text/xml',
post=xmlRequest)
trace('find_folders. got response. data:%s' % response)
warnings = list()
items = parse_find_folders_response(response, warnings)
return items
def parse_find_folders_response(responseXML, warnHandler):
'''Parses a response, given the full envelope (as XML string)'''
# see previous comment RE UTF-8 encoding
root = ET.fromstring(responseXML.encode('utf-8'))
# ensure header exists
header = getElement(root, 'env:Header')
# ensure body exists
body = getElement(root, 'env:Body')
# get major response part
if len(body) <= 0:
raise ParseException('Expected a major response with the Body')
majorResponseTag = body[0].tag
calendarFolders = list()
# (tag can be {m:FindItemResponse}, etc.)
if majorResponseTag == expandPath('message:FindFolderResponse'):
findItemResponse = body[0]
for responseMessage in findItemResponse:
for findItemResponseMessage in responseMessage:
if getAttrib(findItemResponseMessage, "ResponseClass") != "Success":
raise DataException("FindFolderResponseMessage response class was not 'Success' (was %s)" % ET.tostring(findItemResponseMessage))
responseCode = getElementText(findItemResponseMessage, 'message:ResponseCode')
if responseCode != 'NoError':
raise DataException("Response code was not 'NoError'")
rootFolder = getElement(findItemResponseMessage, 'message:RootFolder')
# warning...
includesLastItemInRange = rootFolder.get('IncludesLastItemInRange')
if includesLastItemInRange == False and warnings:
warnings.append('IncludesLastItemInRange=false but this parser does not support paged responses')
rootFolderFolders = getElement(rootFolder, 'type:Folders')
for folder in rootFolderFolders:
folderTag = folder.tag
# interpret calendar folders only
if folderTag == expandPath('type:CalendarFolder'):
folderIDElement = getElement(folder, 'type:FolderId')
folderID = {'id': getAttrib(folderIDElement, 'Id'),
'changeKey': getAttrib(folderIDElement, 'ChangeKey')}
displayName = getElementText(folder, 'type:DisplayName')
calendarFolders.append({ 'folderIDElement': folderIDElement,
'displayName': displayName })
else:
raise DataException('Unexpected major response element - got %s' % majorResponseTag)
return calendarFolders
# <SOAP/XML operations ---
# XML namespace lookups
NS = { 'env': 'http://schemas.xmlsoap.org/soap/envelope/',
'message': 'http://schemas.microsoft.com/exchange/services/2006/messages',
'type': 'http://schemas.microsoft.com/exchange/services/2006/types' }
# %STARTDATE% example: 2017-02-02T17:09:08.967+11:00
# %ENDDATE% example: 2017-02-03T17:09:09.099+11:00
REQ_QUERY_TEMPLATE_XML = '''<?xml version="1.0" encoding="utf-8"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
<s:Header>
<h:DateTimePrecisionType xmlns:h="http://schemas.microsoft.com/exchange/services/2006/types">Seconds</h:DateTimePrecisionType>
<h:RequestServerVersion Version="Exchange2010_SP2" xmlns:h="http://schemas.microsoft.com/exchange/services/2006/types"/>
</s:Header>
<s:Body>
<FindItem Traversal="Shallow" xmlns="http://schemas.microsoft.com/exchange/services/2006/messages">
<ItemShape>
<BaseShape xmlns="http://schemas.microsoft.com/exchange/services/2006/types">Default</BaseShape>
<AdditionalProperties xmlns="http://schemas.microsoft.com/exchange/services/2006/types">
<FieldURI FieldURI="item:Sensitivity"/>
</AdditionalProperties>
</ItemShape>
<CalendarView StartDate="START_DATE_HERE" EndDate="END_DATE_HERE"/>
<ParentFolderIds><!-- important folder options end up here --></ParentFolderIds>
</FindItem>
</s:Body>
</s:Envelope>
'''
# NOTE:
# For normal calendar folder add:
# <FindItem ...>
# <ParentFolderIds>
# <DistinguishedFolderId Id="calendar" xmlns="http://schemas.microsoft.com/exchange/services/2006/types"/>
# </ParentFolderIds>
# ...
#
# And with different mailbox:
# <FindItem ...>
# <ParentFolderIds>
# <DistinguishedFolderId Id="calendar" xmlns="http://schemas.microsoft.com/exchange/services/2006/types">
# <Mailbox><EmailAddress>%SMTPADDRESS%</EmailAddress></Mailbox>
#       </DistinguishedFolderId>
# </ParentFolderIds>
# ...
#
# And with specific folders
# <FindItem ...>
# <ParentFolderIds>
#       <FolderId xmlns="http://schemas.microsoft.com/exchange/services/2006/types"
# Id="AAMkAGVkOTNmM2I5LTkzM2EtNGE2NC05N2JjLTFhOTU2ZmJkOTIzOQAuAAAAAAB6Kun2T1UaS7SeML/WWukdAQCKlTYVK0L1S4NbyOQ4sSbQAALZU7ffAAA="
# ChangeKey="AgAAABQAAAD8vWH6ONfhT7eqjuZ+hFA+AAAEQA==" />
# <FolderId Id=... />
# </ParentFolderIds>
# ...
def prepareQueryRequest(start, end, resolvedFolders=None):
'''(folders contain XML objects)'''
# construct a new FindItem request
request = ET.fromstring(REQ_QUERY_TEMPLATE_XML)
# specify date range
calendarView = searchElement(request, 'message:CalendarView')
calendarView.set('StartDate', str(start))
calendarView.set('EndDate', str(end))
# specify folder options
parentFolderIds = searchElement(request, 'message:ParentFolderIds')
# use pre-constructed (resolved) folder elements
for element in resolvedFolders:
parentFolderIds.append(element)
return request
REQ_GETFOLDERS_TEMPLATE_XML = '''<?xml version="1.0" encoding="utf-8"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
<s:Header>
<h:DateTimePrecisionType xmlns:h="http://schemas.microsoft.com/exchange/services/2006/types">Seconds</h:DateTimePrecisionType>
<h:RequestServerVersion Version="Exchange2010_SP2" xmlns:h="http://schemas.microsoft.com/exchange/services/2006/types"/>
</s:Header>
<s:Body>
<FindFolder Traversal="Deep" xmlns="http://schemas.microsoft.com/exchange/services/2006/messages">
<FolderShape>
<BaseShape xmlns="http://schemas.microsoft.com/exchange/services/2006/types">Default</BaseShape>
</FolderShape>
<ParentFolderIds><!-- important folder options end up here --></ParentFolderIds>
</FindFolder>
</s:Body>
</s:Envelope>'''
def prepareGetFoldersRequest(smtpAddress=None):
# construct a new request type
request = ET.fromstring(REQ_GETFOLDERS_TEMPLATE_XML)
# specify folder options
parentFolderIds = searchElement(request, 'message:ParentFolderIds')
# use pre-constructed distinguished folder element
parentFolderIds.append(distinguishedFolderIdElement)
return request
# SOAP/XML operations --->
# <XML parsing convenience functions ---
# NOTE: unfortunately in this version of Jython/Python, ElementTree.find(...) does not
# support any namespace assistance so some of the missing functionality is covered
# by the convenience functions below.
class DataException(Exception):
'''A specialized exception related to data parsing this XML'''
pass
def getElement(root, path):
'''Strictly gets an element'''
result = root.find(expandPath(path))
if result == None:
raise DataException('Missing element %s' % path)
return result
def searchElement(root, path):
'''Recursively searches for the first matching element'''
def _searchElement(root, fullPath):
result = root.find(fullPath)
if result == None:
for sub in root:
result = _searchElement(sub, fullPath)
if result != None:
break
return result
return _searchElement(root, expandPath(path))
def tryGetElement(root, path):
'''Tries to get an optional element'''
result = root.find(expandPath(path))
return result
def getElementText(root, path):
'''Strictly gets the text part of an element e.g. <e>Text</e>'''
result = root.find(expandPath(path))
if result == None:
raise DataException('Missing element %s' % path)
return result.text
def tryGetElementText(root, path, default=None):
'''Gets the text part of an element (optionally)'''
result = root.find(expandPath(path))
if result != None:
result = result.text
if result == None:
result = default
return result
def getElements(root, path):
results = root.findall(expandPath(path))
if results == None:
raise DataException('Missing elements %s' % path)
return results
def tryGetAttrib(root, name):
value = root.get(name)
if value == None:
return
return value
def getAttrib(root, name):
value = root.get(name)
if value == None:
raise DataException('Missing attribute %s' % name)
return value
def expandPath(path):
if ':' not in path:
return path
parts = path.split(':')
return '{%s}%s' % (NS[parts[0]], parts[1])
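# For illustration: expandPath('type:Subject') returns
# '{http://schemas.microsoft.com/exchange/services/2006/types}Subject', the
# fully-qualified tag form that ElementTree's find()/findall() expect, while a
# path without a namespace prefix (e.g. 'Subject') is returned unchanged.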
# XML parsing convenience functions --->
# <--- simple parsing
def extractField(s):
    '''e.g. "Peace and quiet! {Power: Off}" returns ('Power', 'Off'); always returns a 2-tuple'''
    if not s.endswith('}'):
        return None, None
    lastIndex = s.rfind('{')
    if lastIndex < 0:
        return None, None
    inner = s[lastIndex+1:-1]
    # e.g. Power: Off
    parts = inner.split(':')
    if len(parts) == 1:
        return parts[0].strip(), None
    if len(parts) == 2:
        return parts[0].strip(), parts[1].strip()
    # anything else (e.g. more than one ':') is treated as no usable tag
    return None, None
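# Expected behaviour (for illustration):
#   extractField('Peace and quiet! {Power: Off}')  -> ('Power', 'Off')
#   extractField('Morning stand-up {Power}')       -> ('Power', None)
#   extractField('No tag in this subject')         -> (None, None)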
# simple parsing --->
# <--- status, errors and debug
local_event_Trace = LocalEvent({'group': 'Status, Errors & Debug', 'order': 9999+next_seq(), 'schema': {'type': 'boolean'}})
def trace(msg):
if local_event_Trace.getArg():
console.info(msg)
def traceWarn(msg):
if local_event_Trace.getArg():
console.warn(msg)
local_event_Status = LocalEvent({'group': 'Status, Errors & Debug', 'order': 9999+next_seq(), 'schema': {'type': 'object', 'properties': {
'level': {'type': 'integer', 'order': 1},
'message': {'type': 'string', 'order': 2}
}}})
# for status checks
lastSuccess = [0]
# roughly, the last contact
local_event_LastContactDetect = LocalEvent({'group': 'Status, Errors & Debug', 'order': 9999+next_seq(), 'schema': {'type': 'string'}})
def statusCheck():
diff = (system_clock() - lastSuccess[0])/1000.0 # (in secs)
now = date_now()
if diff > status_check_interval+15:
previousContactValue = local_event_LastContactDetect.getArg()
if previousContactValue == None:
message = 'A successful poll has never taken place.'
else:
previousContact = date_parse(previousContactValue)
roughDiff = (now.getMillis() - previousContact.getMillis())/1000/60
if roughDiff < 60:
message = 'Continual failures for approx. %s mins' % roughDiff
elif roughDiff < (60*24):
message = 'Continual failures since %s' % previousContact.toString('h:mm:ss a')
else:
message = 'Continual failures since %s' % previousContact.toString('h:mm:ss a, E d-MMM')
local_event_Status.emit({'level': 2, 'message': message})
else:
local_event_LastContactDetect.emit(str(now))
local_event_Status.emit({'level': 0, 'message': 'OK'})
status_check_interval = 75
status_timer = Timer(statusCheck, status_check_interval)
# status, errors and debug --->
# <--- convenience functions
def isBlank(s):
    'Safely checks whether a string is blank (None, empty or whitespace-only). Never raises.'
    if s == None:
        return True
    if len(s) == 0:
        return True
    if len(s.strip()) == 0:
        return True
    return False
def isEmpty(o):
    'Safely checks whether a sequence is None or empty. Never raises.'
    if o == None or len(o) == 0:
        return True
    return False
def tryGet(d, key, default=None):
'Safely get a value from a dictionary, otherwise returning a default (if specified) or None (no exceptions).'
if d == None:
return default
result = d.get(key)
if result == None:
return default
return result
# convenience functions --->
# <--- examples
# (see examples.py for XML snippets)
# examples --->
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import List, Optional
import cx_Oracle
import numpy
from airflow.hooks.dbapi import DbApiHook
class OracleHook(DbApiHook):
"""
Interact with Oracle SQL.
:param oracle_conn_id: The :ref:`Oracle connection id <howto/connection:oracle>`
used for Oracle credentials.
:type oracle_conn_id: str
"""
conn_name_attr = 'oracle_conn_id'
default_conn_name = 'oracle_default'
conn_type = 'oracle'
hook_name = 'Oracle'
supports_autocommit = False
def get_conn(self) -> 'OracleHook':
"""
        Returns an Oracle connection object.
Optional parameters for using a custom DSN connection
(instead of using a server alias from tnsnames.ora)
The dsn (data source name) is the TNS entry
(from the Oracle names server or tnsnames.ora file)
or is a string like the one returned from makedsn().
:param dsn: the data source name for the Oracle server
:param service_name: the db_unique_name of the database
that you are connecting to (CONNECT_DATA part of TNS)
:param sid: Oracle System ID that identifies a particular
database on a system
You can set these parameters in the extra fields of your connection
as in
.. code-block:: python
{
"dsn": (
"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)"
"(HOST=host)(PORT=1521))(CONNECT_DATA=(SID=sid)))"
)
}
see more param detail in
`cx_Oracle.connect <https://cx-oracle.readthedocs.io/en/latest/module.html#cx_Oracle.connect>`_
"""
conn = self.get_connection(self.oracle_conn_id) # type: ignore[attr-defined]
conn_config = {'user': conn.login, 'password': conn.password}
sid = conn.extra_dejson.get('sid')
mod = conn.extra_dejson.get('module')
service_name = conn.extra_dejson.get('service_name')
port = conn.port if conn.port else 1521
if conn.host and sid and not service_name:
conn_config['dsn'] = cx_Oracle.makedsn(conn.host, port, sid)
elif conn.host and service_name and not sid:
conn_config['dsn'] = cx_Oracle.makedsn(conn.host, port, service_name=service_name)
else:
dsn = conn.extra_dejson.get('dsn')
if dsn is None:
dsn = conn.host
if conn.port is not None:
dsn += ":" + str(conn.port)
if service_name or conn.schema:
dsn += "/" + (service_name or conn.schema)
conn_config['dsn'] = dsn
if 'encoding' in conn.extra_dejson:
conn_config['encoding'] = conn.extra_dejson.get('encoding')
# if `encoding` is specific but `nencoding` is not
# `nencoding` should use same values as `encoding` to set encoding, inspired by
# https://github.com/oracle/python-cx_Oracle/issues/157#issuecomment-371877993
if 'nencoding' not in conn.extra_dejson:
conn_config['nencoding'] = conn.extra_dejson.get('encoding')
if 'nencoding' in conn.extra_dejson:
conn_config['nencoding'] = conn.extra_dejson.get('nencoding')
if 'threaded' in conn.extra_dejson:
conn_config['threaded'] = conn.extra_dejson.get('threaded')
if 'events' in conn.extra_dejson:
conn_config['events'] = conn.extra_dejson.get('events')
mode = conn.extra_dejson.get('mode', '').lower()
if mode == 'sysdba':
conn_config['mode'] = cx_Oracle.SYSDBA
elif mode == 'sysasm':
conn_config['mode'] = cx_Oracle.SYSASM
elif mode == 'sysoper':
conn_config['mode'] = cx_Oracle.SYSOPER
elif mode == 'sysbkp':
conn_config['mode'] = cx_Oracle.SYSBKP
elif mode == 'sysdgd':
conn_config['mode'] = cx_Oracle.SYSDGD
elif mode == 'syskmt':
conn_config['mode'] = cx_Oracle.SYSKMT
elif mode == 'sysrac':
conn_config['mode'] = cx_Oracle.SYSRAC
purity = conn.extra_dejson.get('purity', '').lower()
if purity == 'new':
conn_config['purity'] = cx_Oracle.ATTR_PURITY_NEW
elif purity == 'self':
conn_config['purity'] = cx_Oracle.ATTR_PURITY_SELF
elif purity == 'default':
conn_config['purity'] = cx_Oracle.ATTR_PURITY_DEFAULT
conn = cx_Oracle.connect(**conn_config)
if mod is not None:
conn.module = mod
return conn
def insert_rows(
self,
table: str,
rows: List[tuple],
target_fields=None,
commit_every: int = 1000,
replace: Optional[bool] = False,
**kwargs,
) -> None:
"""
A generic way to insert a set of tuples into a table,
the whole set of inserts is treated as one transaction
Changes from standard DbApiHook implementation:
- Oracle SQL queries in cx_Oracle can not be terminated with a semicolon (`;`)
- Replace NaN values with NULL using `numpy.nan_to_num` (not using
`is_nan()` because of input types error for strings)
- Coerce datetime cells to Oracle DATETIME format during insert
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table
:type target_fields: iterable of str
:param commit_every: the maximum number of rows to insert in one transaction
            Default 1000. Set greater than 0.
            Set to 1 to insert each row in its own transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if target_fields:
target_fields = ', '.join(target_fields)
target_fields = f'({target_fields})'
else:
target_fields = ''
conn = self.get_conn()
cur = conn.cursor() # type: ignore[attr-defined]
if self.supports_autocommit:
cur.execute('SET autocommit = 0')
conn.commit() # type: ignore[attr-defined]
i = 0
for row in rows:
i += 1
lst = []
for cell in row:
if isinstance(cell, str):
lst.append("'" + str(cell).replace("'", "''") + "'")
elif cell is None:
lst.append('NULL')
elif isinstance(cell, float) and numpy.isnan(cell): # coerce numpy NaN to NULL
lst.append('NULL')
elif isinstance(cell, numpy.datetime64):
lst.append("'" + str(cell) + "'")
elif isinstance(cell, datetime):
lst.append(
"to_date('" + cell.strftime('%Y-%m-%d %H:%M:%S') + "','YYYY-MM-DD HH24:MI:SS')"
)
else:
lst.append(str(cell))
values = tuple(lst)
sql = f"INSERT /*+ APPEND */ INTO {table} {target_fields} VALUES ({','.join(values)})"
cur.execute(sql)
if i % commit_every == 0:
conn.commit() # type: ignore[attr-defined]
self.log.info('Loaded %s into %s rows so far', i, table)
conn.commit() # type: ignore[attr-defined]
cur.close()
conn.close() # type: ignore[attr-defined]
self.log.info('Done loading. Loaded a total of %s rows', i)
def bulk_insert_rows(
self,
table: str,
rows: List[tuple],
target_fields: Optional[List[str]] = None,
commit_every: int = 5000,
):
"""
A performant bulk insert for cx_Oracle
that uses prepared statements via `executemany()`.
For best performance, pass in `rows` as an iterator.
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table, default None.
            If None, each row should have the same order as the table's columns.
        :type target_fields: iterable of str or None
:param commit_every: the maximum number of rows to insert in one transaction
            Default 5000. Set greater than 0. Set to 1 to insert each row in its own transaction.
:type commit_every: int
"""
if not rows:
raise ValueError("parameter rows could not be None or empty iterable")
conn = self.get_conn()
cursor = conn.cursor() # type: ignore[attr-defined]
values_base = target_fields if target_fields else rows[0]
prepared_stm = 'insert into {tablename} {columns} values ({values})'.format(
tablename=table,
columns='({})'.format(', '.join(target_fields)) if target_fields else '',
values=', '.join(':%s' % i for i in range(1, len(values_base) + 1)),
)
row_count = 0
# Chunk the rows
row_chunk = []
for row in rows:
row_chunk.append(row)
row_count += 1
if row_count % commit_every == 0:
cursor.prepare(prepared_stm)
cursor.executemany(None, row_chunk)
conn.commit() # type: ignore[attr-defined]
self.log.info('[%s] inserted %s rows', table, row_count)
# Empty chunk
row_chunk = []
# Commit the leftover chunk
cursor.prepare(prepared_stm)
cursor.executemany(None, row_chunk)
conn.commit() # type: ignore[attr-defined]
self.log.info('[%s] inserted %s rows', table, row_count)
cursor.close()
conn.close() # type: ignore[attr-defined]
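    # A minimal usage sketch for `bulk_insert_rows` (hook class name, connection id
    # and table are assumptions). With three columns and no target_fields, the
    # prepared statement becomes `insert into scott.events  values (:1, :2, :3)`,
    # and each chunk of up to `commit_every` rows is sent via cursor.executemany():
    #
    #   hook = OracleHook(oracle_conn_id='oracle_default')   # hypothetical names
    #   hook.bulk_insert_rows(
    #       table='scott.events',
    #       rows=[(1, 'a', 0.1), (2, 'b', 0.2), (3, 'c', 0.3)],
    #       commit_every=5000,
    #   )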
|
|
import math
import os
import unittest
import sys
import _ast
import tempfile
import types
from test import support
from test.support import script_helper
class TestSpecifics(unittest.TestCase):
def compile_single(self, source):
compile(source, "<single>", "single")
def assertInvalidSingle(self, source):
self.assertRaises(SyntaxError, self.compile_single, source)
def test_no_ending_newline(self):
compile("hi", "<test>", "exec")
compile("hi\r", "<test>", "exec")
def test_empty(self):
compile("", "<test>", "exec")
def test_other_newlines(self):
compile("\r\n", "<test>", "exec")
compile("\r", "<test>", "exec")
compile("hi\r\nstuff\r\ndef f():\n pass\r", "<test>", "exec")
compile("this_is\rreally_old_mac\rdef f():\n pass", "<test>", "exec")
def test_debug_assignment(self):
# catch assignments to __debug__
self.assertRaises(SyntaxError, compile, '__debug__ = 1', '?', 'single')
import builtins
prev = builtins.__debug__
setattr(builtins, '__debug__', 'sure')
setattr(builtins, '__debug__', prev)
def test_argument_handling(self):
# detect duplicate positional and keyword arguments
self.assertRaises(SyntaxError, eval, 'lambda a,a:0')
self.assertRaises(SyntaxError, eval, 'lambda a,a=1:0')
self.assertRaises(SyntaxError, eval, 'lambda a=1,a=1:0')
self.assertRaises(SyntaxError, exec, 'def f(a, a): pass')
self.assertRaises(SyntaxError, exec, 'def f(a = 0, a = 1): pass')
self.assertRaises(SyntaxError, exec, 'def f(a): global a; a = 1')
def test_syntax_error(self):
self.assertRaises(SyntaxError, compile, "1+*3", "filename", "exec")
def test_none_keyword_arg(self):
self.assertRaises(SyntaxError, compile, "f(None=1)", "<string>", "exec")
def test_duplicate_global_local(self):
self.assertRaises(SyntaxError, exec, 'def f(a): global a; a = 1')
def test_exec_with_general_mapping_for_locals(self):
class M:
"Test mapping interface versus possible calls from eval()."
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def __setitem__(self, key, value):
self.results = (key, value)
def keys(self):
return list('xyz')
m = M()
g = globals()
exec('z = a', g, m)
self.assertEqual(m.results, ('z', 12))
try:
exec('z = b', g, m)
except NameError:
pass
else:
self.fail('Did not detect a KeyError')
exec('z = dir()', g, m)
self.assertEqual(m.results, ('z', list('xyz')))
exec('z = globals()', g, m)
self.assertEqual(m.results, ('z', g))
exec('z = locals()', g, m)
self.assertEqual(m.results, ('z', m))
self.assertRaises(TypeError, exec, 'z = b', m)
class A:
"Non-mapping"
pass
m = A()
self.assertRaises(TypeError, exec, 'z = a', g, m)
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
d = D()
exec('z = a', g, d)
self.assertEqual(d['z'], 12)
@unittest.skipIf(sys.getrecursionlimit() <= 500, "requires recursion limit > 500")
def test_extended_arg(self):
longexpr = 'x = x or ' + '-x' * 2500
g = {}
code = '''
def f(x):
%s
%s
%s
%s
%s
%s
%s
%s
%s
%s
# the expressions above have no effect, x == argument
while x:
x -= 1
# EXTENDED_ARG/JUMP_ABSOLUTE here
return x
''' % ((longexpr,)*10)
exec(code, g)
self.assertEqual(g['f'](5), 0)
def test_argument_order(self):
self.assertRaises(SyntaxError, exec, 'def f(a=1, b): pass')
def test_float_literals(self):
# testing bad float literals
self.assertRaises(SyntaxError, eval, "2e")
self.assertRaises(SyntaxError, eval, "2.0e+")
self.assertRaises(SyntaxError, eval, "1e-")
self.assertRaises(SyntaxError, eval, "3-4e/21")
def test_indentation(self):
# testing compile() of indented block w/o trailing newline"
s = """
if 1:
if 2:
pass"""
compile(s, "<string>", "exec")
# This test is probably specific to CPython and may not generalize
# to other implementations. We are trying to ensure that when
    # the first line of code starts after line 256, correct line numbers
# in tracebacks are still produced.
def test_leading_newlines(self):
s256 = "".join(["\n"] * 256 + ["spam"])
co = compile(s256, 'fn', 'exec')
self.assertEqual(co.co_firstlineno, 257)
self.assertEqual(co.co_lnotab, bytes())
def test_literals_with_leading_zeroes(self):
for arg in ["077787", "0xj", "0x.", "0e", "090000000000000",
"080000000000000", "000000000000009", "000000000000008",
"0b42", "0BADCAFE", "0o123456789", "0b1.1", "0o4.2",
"0b101j2", "0o153j2", "0b100e1", "0o777e1", "0777",
"000777", "000000000000007"]:
self.assertRaises(SyntaxError, eval, arg)
self.assertEqual(eval("0xff"), 255)
self.assertEqual(eval("0777."), 777)
self.assertEqual(eval("0777.0"), 777)
self.assertEqual(eval("000000000000000000000000000000000000000000000000000777e0"), 777)
self.assertEqual(eval("0777e1"), 7770)
self.assertEqual(eval("0e0"), 0)
self.assertEqual(eval("0000e-012"), 0)
self.assertEqual(eval("09.5"), 9.5)
self.assertEqual(eval("0777j"), 777j)
self.assertEqual(eval("000"), 0)
self.assertEqual(eval("00j"), 0j)
self.assertEqual(eval("00.0"), 0)
self.assertEqual(eval("0e3"), 0)
self.assertEqual(eval("090000000000000."), 90000000000000.)
self.assertEqual(eval("090000000000000.0000000000000000000000"), 90000000000000.)
self.assertEqual(eval("090000000000000e0"), 90000000000000.)
self.assertEqual(eval("090000000000000e-0"), 90000000000000.)
self.assertEqual(eval("090000000000000j"), 90000000000000j)
self.assertEqual(eval("000000000000008."), 8.)
self.assertEqual(eval("000000000000009."), 9.)
self.assertEqual(eval("0b101010"), 42)
self.assertEqual(eval("-0b000000000010"), -2)
self.assertEqual(eval("0o777"), 511)
self.assertEqual(eval("-0o0000010"), -8)
def test_unary_minus(self):
# Verify treatment of unary minus on negative numbers SF bug #660455
if sys.maxsize == 2147483647:
# 32-bit machine
all_one_bits = '0xffffffff'
self.assertEqual(eval(all_one_bits), 4294967295)
self.assertEqual(eval("-" + all_one_bits), -4294967295)
elif sys.maxsize == 9223372036854775807:
# 64-bit machine
all_one_bits = '0xffffffffffffffff'
self.assertEqual(eval(all_one_bits), 18446744073709551615)
self.assertEqual(eval("-" + all_one_bits), -18446744073709551615)
else:
self.fail("How many bits *does* this machine have???")
# Verify treatment of constant folding on -(sys.maxsize+1)
# i.e. -2147483648 on 32 bit platforms. Should return int.
self.assertIsInstance(eval("%s" % (-sys.maxsize - 1)), int)
self.assertIsInstance(eval("%s" % (-sys.maxsize - 2)), int)
if sys.maxsize == 9223372036854775807:
def test_32_63_bit_values(self):
a = +4294967296 # 1 << 32
b = -4294967296 # 1 << 32
c = +281474976710656 # 1 << 48
d = -281474976710656 # 1 << 48
e = +4611686018427387904 # 1 << 62
f = -4611686018427387904 # 1 << 62
g = +9223372036854775807 # 1 << 63 - 1
h = -9223372036854775807 # 1 << 63 - 1
for variable in self.test_32_63_bit_values.__code__.co_consts:
if variable is not None:
self.assertIsInstance(variable, int)
def test_sequence_unpacking_error(self):
# Verify sequence packing/unpacking with "or". SF bug #757818
i,j = (1, -1) or (-1, 1)
self.assertEqual(i, 1)
self.assertEqual(j, -1)
def test_none_assignment(self):
stmts = [
'None = 0',
'None += 0',
'__builtins__.None = 0',
'def None(): pass',
'class None: pass',
'(a, None) = 0, 0',
'for None in range(10): pass',
'def f(None): pass',
'import None',
'import x as None',
'from x import None',
'from x import y as None'
]
for stmt in stmts:
stmt += "\n"
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'single')
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec')
def test_import(self):
succeed = [
'import sys',
'import os, sys',
'import os as bar',
'import os.path as bar',
'from __future__ import nested_scopes, generators',
'from __future__ import (nested_scopes,\ngenerators)',
'from __future__ import (nested_scopes,\ngenerators,)',
'from sys import stdin, stderr, stdout',
'from sys import (stdin, stderr,\nstdout)',
'from sys import (stdin, stderr,\nstdout,)',
'from sys import (stdin\n, stderr, stdout)',
'from sys import (stdin\n, stderr, stdout,)',
'from sys import stdin as si, stdout as so, stderr as se',
'from sys import (stdin as si, stdout as so, stderr as se)',
'from sys import (stdin as si, stdout as so, stderr as se,)',
]
fail = [
'import (os, sys)',
'import (os), (sys)',
'import ((os), (sys))',
'import (sys',
'import sys)',
'import (os,)',
'import os As bar',
'import os.path a bar',
'from sys import stdin As stdout',
'from sys import stdin a stdout',
'from (sys) import stdin',
'from __future__ import (nested_scopes',
'from __future__ import nested_scopes)',
'from __future__ import nested_scopes,\ngenerators',
'from sys import (stdin',
'from sys import stdin)',
'from sys import stdin, stdout,\nstderr',
'from sys import stdin si',
            'from sys import stdin,',
'from sys import (*)',
'from sys import (stdin,, stdout, stderr)',
'from sys import (stdin, stdout),',
]
for stmt in succeed:
compile(stmt, 'tmp', 'exec')
for stmt in fail:
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec')
def test_for_distinct_code_objects(self):
# SF bug 1048870
def f():
f1 = lambda x=1: x
f2 = lambda x=2: x
return f1, f2
f1, f2 = f()
self.assertNotEqual(id(f1.__code__), id(f2.__code__))
def test_lambda_doc(self):
l = lambda: "foo"
self.assertIsNone(l.__doc__)
def test_encoding(self):
code = b'# -*- coding: badencoding -*-\npass\n'
self.assertRaises(SyntaxError, compile, code, 'tmp', 'exec')
code = '# -*- coding: badencoding -*-\n"\xc2\xa4"\n'
compile(code, 'tmp', 'exec')
self.assertEqual(eval(code), '\xc2\xa4')
code = '"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\xa4')
code = b'"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xa4')
code = b'# -*- coding: latin1 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\xa4')
code = b'# -*- coding: utf-8 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xa4')
code = b'# -*- coding: iso8859-15 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\u20ac')
code = '"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n'
self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xc2\xa4')
code = b'"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n'
self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xa4')
def test_subscripts(self):
# SF bug 1448804
# Class to make testing subscript results easy
class str_map(object):
def __init__(self):
self.data = {}
def __getitem__(self, key):
return self.data[str(key)]
def __setitem__(self, key, value):
self.data[str(key)] = value
def __delitem__(self, key):
del self.data[str(key)]
def __contains__(self, key):
return str(key) in self.data
d = str_map()
# Index
d[1] = 1
self.assertEqual(d[1], 1)
d[1] += 1
self.assertEqual(d[1], 2)
del d[1]
self.assertNotIn(1, d)
# Tuple of indices
d[1, 1] = 1
self.assertEqual(d[1, 1], 1)
d[1, 1] += 1
self.assertEqual(d[1, 1], 2)
del d[1, 1]
self.assertNotIn((1, 1), d)
# Simple slice
d[1:2] = 1
self.assertEqual(d[1:2], 1)
d[1:2] += 1
self.assertEqual(d[1:2], 2)
del d[1:2]
self.assertNotIn(slice(1, 2), d)
# Tuple of simple slices
d[1:2, 1:2] = 1
self.assertEqual(d[1:2, 1:2], 1)
d[1:2, 1:2] += 1
self.assertEqual(d[1:2, 1:2], 2)
del d[1:2, 1:2]
self.assertNotIn((slice(1, 2), slice(1, 2)), d)
# Extended slice
d[1:2:3] = 1
self.assertEqual(d[1:2:3], 1)
d[1:2:3] += 1
self.assertEqual(d[1:2:3], 2)
del d[1:2:3]
self.assertNotIn(slice(1, 2, 3), d)
# Tuple of extended slices
d[1:2:3, 1:2:3] = 1
self.assertEqual(d[1:2:3, 1:2:3], 1)
d[1:2:3, 1:2:3] += 1
self.assertEqual(d[1:2:3, 1:2:3], 2)
del d[1:2:3, 1:2:3]
self.assertNotIn((slice(1, 2, 3), slice(1, 2, 3)), d)
# Ellipsis
d[...] = 1
self.assertEqual(d[...], 1)
d[...] += 1
self.assertEqual(d[...], 2)
del d[...]
self.assertNotIn(Ellipsis, d)
# Tuple of Ellipses
d[..., ...] = 1
self.assertEqual(d[..., ...], 1)
d[..., ...] += 1
self.assertEqual(d[..., ...], 2)
del d[..., ...]
self.assertNotIn((Ellipsis, Ellipsis), d)
def test_annotation_limit(self):
# 16 bits are available for # of annotations, but only 8 bits are
# available for the parameter count, hence 255
# is the max. Ensure the result of too many annotations is a
# SyntaxError.
s = "def f(%s): pass"
s %= ', '.join('a%d:%d' % (i,i) for i in range(256))
self.assertRaises(SyntaxError, compile, s, '?', 'exec')
# Test that the max # of annotations compiles.
s = "def f(%s): pass"
s %= ', '.join('a%d:%d' % (i,i) for i in range(255))
compile(s, '?', 'exec')
def test_mangling(self):
class A:
def f():
__mangled = 1
__not_mangled__ = 2
import __mangled_mod
import __package__.module
self.assertIn("_A__mangled", A.f.__code__.co_varnames)
self.assertIn("__not_mangled__", A.f.__code__.co_varnames)
self.assertIn("_A__mangled_mod", A.f.__code__.co_varnames)
self.assertIn("__package__", A.f.__code__.co_varnames)
def test_compile_ast(self):
fname = __file__
if fname.lower().endswith('pyc'):
fname = fname[:-1]
with open(fname, 'r') as f:
fcontents = f.read()
sample_code = [
['<assign>', 'x = 5'],
['<ifblock>', """if True:\n pass\n"""],
['<forblock>', """for n in [1, 2, 3]:\n print(n)\n"""],
['<deffunc>', """def foo():\n pass\nfoo()\n"""],
[fname, fcontents],
]
for fname, code in sample_code:
co1 = compile(code, '%s1' % fname, 'exec')
ast = compile(code, '%s2' % fname, 'exec', _ast.PyCF_ONLY_AST)
self.assertTrue(type(ast) == _ast.Module)
co2 = compile(ast, '%s3' % fname, 'exec')
self.assertEqual(co1, co2)
# the code object's filename comes from the second compilation step
self.assertEqual(co2.co_filename, '%s3' % fname)
# raise exception when node type doesn't match with compile mode
co1 = compile('print(1)', '<string>', 'exec', _ast.PyCF_ONLY_AST)
self.assertRaises(TypeError, compile, co1, '<ast>', 'eval')
# raise exception when node type is no start node
self.assertRaises(TypeError, compile, _ast.If(), '<ast>', 'exec')
# raise exception when node has invalid children
ast = _ast.Module()
ast.body = [_ast.BoolOp()]
self.assertRaises(TypeError, compile, ast, '<ast>', 'exec')
def test_dict_evaluation_order(self):
i = 0
def f():
nonlocal i
i += 1
return i
d = {f(): f(), f(): f()}
self.assertEqual(d, {1: 2, 3: 4})
@support.cpython_only
def test_same_filename_used(self):
s = """def f(): pass\ndef g(): pass"""
c = compile(s, "myfile", "exec")
for obj in c.co_consts:
if isinstance(obj, types.CodeType):
self.assertIs(obj.co_filename, c.co_filename)
def test_single_statement(self):
self.compile_single("1 + 2")
self.compile_single("\n1 + 2")
self.compile_single("1 + 2\n")
self.compile_single("1 + 2\n\n")
self.compile_single("1 + 2\t\t\n")
self.compile_single("1 + 2\t\t\n ")
self.compile_single("1 + 2 # one plus two")
self.compile_single("1; 2")
self.compile_single("import sys; sys")
self.compile_single("def f():\n pass")
self.compile_single("while False:\n pass")
self.compile_single("if x:\n f(x)")
self.compile_single("if x:\n f(x)\nelse:\n g(x)")
self.compile_single("class T:\n pass")
def test_bad_single_statement(self):
self.assertInvalidSingle('1\n2')
self.assertInvalidSingle('def f(): pass')
self.assertInvalidSingle('a = 13\nb = 187')
self.assertInvalidSingle('del x\ndel y')
self.assertInvalidSingle('f()\ng()')
self.assertInvalidSingle('f()\n# blah\nblah()')
self.assertInvalidSingle('f()\nxy # blah\nblah()')
self.assertInvalidSingle('x = 5 # comment\nx = 6\n')
def test_particularly_evil_undecodable(self):
# Issue 24022
src = b'0000\x00\n00000000000\n\x00\n\x9e\n'
with tempfile.TemporaryDirectory() as tmpd:
fn = os.path.join(tmpd, "bad.py")
with open(fn, "wb") as fp:
fp.write(src)
res = script_helper.run_python_until_end(fn)[0]
self.assertIn(b"Non-UTF-8", res.err)
@support.cpython_only
def test_compiler_recursion_limit(self):
# Expected limit is sys.getrecursionlimit() * the scaling factor
# in symtable.c (currently 3)
# We expect to fail *at* that limit, because we use up some of
# the stack depth limit in the test suite code
# So we check the expected limit and 75% of that
# XXX (ncoghlan): duplicating the scaling factor here is a little
# ugly. Perhaps it should be exposed somewhere...
fail_depth = sys.getrecursionlimit() * 3
success_depth = int(fail_depth * 0.75)
def check_limit(prefix, repeated):
expect_ok = prefix + repeated * success_depth
self.compile_single(expect_ok)
broken = prefix + repeated * fail_depth
details = "Compiling ({!r} + {!r} * {})".format(
prefix, repeated, fail_depth)
with self.assertRaises(RuntimeError, msg=details):
self.compile_single(broken)
check_limit("a", "()")
check_limit("a", ".b")
check_limit("a", "[0]")
check_limit("a", "*a")
class TestStackSize(unittest.TestCase):
# These tests check that the computed stack size for a code object
# stays within reasonable bounds (see issue #21523 for an example
# dysfunction).
N = 100
def check_stack_size(self, code):
# To assert that the alleged stack size is not O(N), we
# check that it is smaller than log(N).
if isinstance(code, str):
code = compile(code, "<foo>", "single")
max_size = math.ceil(math.log(len(code.co_code)))
self.assertLessEqual(code.co_stacksize, max_size)
def test_and(self):
self.check_stack_size("x and " * self.N + "x")
def test_or(self):
self.check_stack_size("x or " * self.N + "x")
def test_and_or(self):
self.check_stack_size("x and x or " * self.N + "x")
def test_chained_comparison(self):
self.check_stack_size("x < " * self.N + "x")
def test_if_else(self):
self.check_stack_size("x if x else " * self.N + "x")
def test_binop(self):
self.check_stack_size("x + " * self.N + "x")
def test_func_and(self):
code = "def f(x):\n"
code += " x and x\n" * self.N
self.check_stack_size(code)
if __name__ == "__main__":
unittest.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rally.common.i18n import _
from rally.common import logging
from rally.plugins.openstack import scenario
from rally.task import atomic
from rally.task import context
from rally import consts
from rally import osclients
import uuid
LOG = logging.getLogger(__name__)
class BrowbeatGnocchi(scenario.OpenStackScenario):
@scenario.configure(name='BrowbeatGnocchi.archive_policy_list')
def archive_policy_list(self):
"""List archive policies from Gnocchi client."""
gnocchi_client = self.admin_clients("gnocchi")
self._archive_policy_list(gnocchi_client)
@scenario.configure(name='BrowbeatGnocchi.archive_policy_rule_list')
def archive_policy_rule_list(self):
"""List archive policy rules from Gnocchi client."""
gnocchi_client = self.admin_clients("gnocchi")
self._archive_policy_rule_list(gnocchi_client)
@scenario.configure(name='BrowbeatGnocchi.capabilities_list')
def capabilities_list(self):
"""List capabilities from Gnocchi client."""
gnocchi_client = self.admin_clients("gnocchi")
self._capabilities_list(gnocchi_client)
@scenario.configure(name='BrowbeatGnocchi.create_archive_policy')
def create_archive_policy(self):
"""Create archive policy from Gnocchi client."""
gnocchi_client = self.admin_clients("gnocchi")
name = self.generate_random_name()
definition = [{'granularity': '0:00:01', 'timespan': '1:00:00'}]
aggregation_methods = ['std', 'count', '95pct', 'min', 'max', 'sum', 'median', 'mean']
self._create_archive_policy(gnocchi_client, name, definition, aggregation_methods)
@scenario.configure(name='BrowbeatGnocchi.create_delete_archive_policy')
def create_delete_archive_policy(self):
"""Create archive policy from Gnocchi client and then delete it."""
gnocchi_client = self.admin_clients("gnocchi")
name = self.generate_random_name()
definition = [{'granularity': '0:00:01', 'timespan': '1:00:00'}]
aggregation_methods = ['std', 'count', '95pct', 'min', 'max', 'sum', 'median', 'mean']
self._create_archive_policy(gnocchi_client, name, definition, aggregation_methods)
self._delete_archive_policy(gnocchi_client, name)
@scenario.configure(name='BrowbeatGnocchi.create_archive_policy_rule')
def create_archive_policy_rule(self):
"""Create archive policy rule from Gnocchi client."""
gnocchi_client = self.admin_clients("gnocchi")
name = self.generate_random_name()
metric_pattern = 'cpu_*'
archive_policy_name = 'low'
self._create_archive_policy_rule(gnocchi_client, name, metric_pattern, archive_policy_name)
@scenario.configure(name='BrowbeatGnocchi.create_delete_archive_policy_rule')
def create_delete_archive_policy_rule(self):
"""Create archive policy rule from Gnocchi client and then delete it."""
gnocchi_client = self.admin_clients("gnocchi")
name = self.generate_random_name()
metric_pattern = 'cpu_*'
archive_policy_name = 'low'
self._create_archive_policy_rule(gnocchi_client, name, metric_pattern, archive_policy_name)
self._delete_archive_policy_rule(gnocchi_client, name)
@scenario.configure(name='BrowbeatGnocchi.create_metric')
def create_metric(self, metric_name=None, archive_policy_name=None, unit=None,
resource_id=None):
"""Create metric from Gnocchi client."""
gnocchi_client = self.admin_clients("gnocchi")
self._create_metric(gnocchi_client, metric_name, archive_policy_name, unit, resource_id)
@scenario.configure(name='BrowbeatGnocchi.create_delete_metric')
def create_delete_metric(self, metric_name=None, archive_policy_name=None, unit=None,
resource_id=None):
"""Create metric from Gnocchi client and then delete it."""
gnocchi_client = self.admin_clients("gnocchi")
metric = self._create_metric(gnocchi_client, metric_name, archive_policy_name, unit,
resource_id)
self._delete_metric(gnocchi_client, metric['id'])
@scenario.configure(name='BrowbeatGnocchi.create_resource')
def create_resource(self, resource_type):
"""Create resource from Gnocchi client."""
gnocchi_client = self.admin_clients("gnocchi")
self._create_resource(gnocchi_client, resource_type)
@scenario.configure(name='BrowbeatGnocchi.create_delete_resource')
def create_delete_resource(self, resource_type):
"""Create resource from Gnocchi client and then delete it."""
gnocchi_client = self.admin_clients("gnocchi")
resource = self._create_resource(gnocchi_client, resource_type)
self._delete_resource(gnocchi_client, resource['id'])
@scenario.configure(name='BrowbeatGnocchi.create_resource_type')
def create_resource_type(self):
"""Create resource type from Gnocchi client."""
gnocchi_client = self.admin_clients("gnocchi")
self._create_resource_type(gnocchi_client, self.generate_random_name())
@scenario.configure(name='BrowbeatGnocchi.create_delete_resource_type')
def create_delete_resource_type(self):
"""Create resource type from Gnocchi client and then delete it."""
gnocchi_client = self.admin_clients("gnocchi")
resource_type = self._create_resource_type(gnocchi_client, self.generate_random_name())
self._delete_resource_type(gnocchi_client, resource_type['name'])
@scenario.configure(name='BrowbeatGnocchi.metric_aggregation')
def metric_aggregation(self, aggregation=None, refresh=False):
"""Get aggregation of metrics from Gnocchi client. The list of metrics to aggregate from
is determined through a context before the scenario starts.
"""
gnocchi_client = self.admin_clients("gnocchi")
metric_index = self.context['iteration'] % len(self.context['metric_ids'])
self._metric_aggregation(gnocchi_client, [self.context['metric_ids'][metric_index]],
aggregation, refresh)
@scenario.configure(name='BrowbeatGnocchi.metric_get_measures')
def metric_get_measures(self, aggregation=None, refresh=False):
"""Get measures from a metric from Gnocchi client. The list of metrics to get measures
from is determined through a context before the scenario starts.
"""
gnocchi_client = self.admin_clients("gnocchi")
metric_index = self.context['iteration'] % len(self.context['metric_ids'])
self._metric_get_measures(gnocchi_client, self.context['metric_ids'][metric_index],
aggregation, refresh)
@scenario.configure(name='BrowbeatGnocchi.metric_list')
def metric_list(self):
"""List metrics from Gnocchi client."""
gnocchi_client = self.admin_clients("gnocchi")
self._metric_list(gnocchi_client)
@scenario.configure(name='BrowbeatGnocchi.resource_list')
def resource_list(self):
"""List resources from Gnocchi client."""
gnocchi_client = self.admin_clients("gnocchi")
self._resource_list(gnocchi_client)
@scenario.configure(name='BrowbeatGnocchi.resource_type_list')
def resource_type_list(self):
"""List resource types from Gnocchi client."""
gnocchi_client = self.admin_clients("gnocchi")
self._resource_type_list(gnocchi_client)
@scenario.configure(name='BrowbeatGnocchi.status_get')
def status_get(self, detailed):
"""Get status of Gnocchi from Gnocchi client."""
gnocchi_client = self.admin_clients("gnocchi")
self._status_get(gnocchi_client, detailed)
@atomic.action_timer("gnocchi.archive_policy_list")
def _archive_policy_list(self, gnocchi_client):
return gnocchi_client.archive_policy.list()
@atomic.action_timer("gnocchi.archive_policy_rule_list")
def _archive_policy_rule_list(self, gnocchi_client):
return gnocchi_client.archive_policy_rule.list()
@atomic.action_timer("gnocchi.capabilities_list")
def _capabilities_list(self, gnocchi_client):
return gnocchi_client.capabilities.list()
@atomic.action_timer("gnocchi.archive_policy_create")
def _create_archive_policy(self, gnocchi_client, name, definition, aggregation_methods):
archive_policy = {}
archive_policy['name'] = name
archive_policy['definition'] = definition
archive_policy['aggregation_methods'] = aggregation_methods
return gnocchi_client.archive_policy.create(archive_policy)
@atomic.action_timer("gnocchi.archive_policy_rule_create")
def _create_archive_policy_rule(self, gnocchi_client, name, metric_pattern,
archive_policy_name):
archive_policy_rule = {}
archive_policy_rule['name'] = name
archive_policy_rule['metric_pattern'] = metric_pattern
archive_policy_rule['archive_policy_name'] = archive_policy_name
return gnocchi_client.archive_policy_rule.create(archive_policy_rule)
@atomic.action_timer("gnocchi.metric_create")
def _create_metric(self, gnocchi_client, name=None, archive_policy_name=None, unit=None,
resource_id=None):
metric = {}
if name:
metric['name'] = name
if archive_policy_name:
metric['archive_policy_name'] = archive_policy_name
if unit:
metric['unit'] = unit
if resource_id:
metric['resource_id'] = resource_id
return gnocchi_client.metric.create(metric)
@atomic.action_timer("gnocchi.resource_create")
def _create_resource(self, gnocchi_client, resource_type='generic'):
resource = {}
resource['id'] = str(uuid.uuid4())
return gnocchi_client.resource.create(resource_type, resource)
@atomic.action_timer("gnocchi.resource_type_create")
def _create_resource_type(self, gnocchi_client, name):
resource_type = {}
resource_type['name'] = name
return gnocchi_client.resource_type.create(resource_type)
@atomic.action_timer("gnocchi.archive_policy_delete")
def _delete_archive_policy(self, gnocchi_client, archive_policy_name):
return gnocchi_client.archive_policy.delete(archive_policy_name)
@atomic.action_timer("gnocchi.archive_policy_rule_delete")
def _delete_archive_policy_rule(self, gnocchi_client, archive_policy_rule_name):
return gnocchi_client.archive_policy_rule.delete(archive_policy_rule_name)
@atomic.action_timer("gnocchi.metric_delete")
def _delete_metric(self, gnocchi_client, metric_id):
return gnocchi_client.metric.delete(metric_id)
@atomic.action_timer("gnocchi.resource_delete")
def _delete_resource(self, gnocchi_client, resource_id):
return gnocchi_client.resource.delete(resource_id)
@atomic.action_timer("gnocchi._delete_resource_type")
def _delete_resource_type(self, gnocchi_client, resource_name):
return gnocchi_client.resource_type.delete(resource_name)
@atomic.action_timer("gnocchi._metric_aggregation")
def _metric_aggregation(self, gnocchi_client, metric_ids, aggregation, refresh):
return gnocchi_client.metric.aggregation(metrics=metric_ids, aggregation=aggregation,
refresh=refresh)
@atomic.action_timer("gnocchi.metric_get_measures")
def _metric_get_measures(self, gnocchi_client, metric_id, aggregation, refresh):
return gnocchi_client.metric.get_measures(metric=metric_id, aggregation=aggregation,
refresh=refresh)
@atomic.action_timer("gnocchi.metric_list")
def _metric_list(self, gnocchi_client):
return gnocchi_client.metric.list()
@atomic.action_timer("gnocchi.resource_list")
def _resource_list(self, gnocchi_client):
return gnocchi_client.resource.list()
@atomic.action_timer("gnocchi.resource_type_list")
def _resource_type_list(self, gnocchi_client):
return gnocchi_client.resource_type.list()
@atomic.action_timer("gnocchi.status_get")
def _status_get(self, gnocchi_client, detailed=False):
return gnocchi_client.status.get(detailed)
@context.configure(name="browbeat_gnocchi_metric_list", order=350)
class BrowbeatGnocchiMetricList(context.Context):
"""Grabs list of metric ids from Gnocchi for use with getting aggregates/measures."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"additionalProperties": False,
"properties": {
"all": {
"type": "boolean",
}
}
}
@logging.log_task_wrapper(LOG.info, _("Enter context: `browbeat_gnocchi_metric_list`"))
def setup(self):
gnocchi_client = osclients.Clients(self.context["admin"]["credential"]).gnocchi()
if self.config.get('all'):
metric_list = gnocchi_client.metric.list()
self.context['metric_ids'] = [x['id'] for x in metric_list]
while len(metric_list) >= 1000:
metric_list = gnocchi_client.metric.list(marker=metric_list[-1]['id'])
self.context['metric_ids'].extend([x['id'] for x in metric_list])
else:
self.context['metric_ids'] = [x['id'] for x in gnocchi_client.metric.list()]
LOG.debug('Total metric_ids: {}'.format(len(self.context['metric_ids'])))
@logging.log_task_wrapper(LOG.info, _("Exit context: `browbeat_gnocchi_metric_list`"))
def cleanup(self):
pass
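# A hedged sketch of how the scenarios and context above might be combined in a
# Rally task file (names and values below are illustrative, not taken from this
# module):
#
#   BrowbeatGnocchi.metric_aggregation:
#     - args:
#         aggregation: mean
#         refresh: false
#       runner:
#         type: constant
#         times: 100
#         concurrency: 5
#       context:
#         browbeat_gnocchi_metric_list:
#           all: false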
|
|
#!/usr/bin/env python
# Cloudeebus
#
# Copyright 2012 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Luc Yriarte <[email protected]>
# Christophe Guiraud <[email protected]>
# Frederic Paut <[email protected]>
#
import argparse, dbus, json, sys
from twisted.internet import glib2reactor
# Configure the twisted mainloop to be run inside the glib mainloop.
# This must be done before importing the other twisted modules
glib2reactor.install()
from twisted.internet import reactor
from autobahn.websocket import listenWS
from autobahn.wamp import WampServerFactory, WampCraServerProtocol
from dbus.mainloop.glib import DBusGMainLoop
import gobject
gobject.threads_init()
from dbus import glib
glib.init_threads()
# enable debug log
from twisted.python import log
###############################################################################
from cloudeebusengine import VERSION, SERVICELIST, CloudeebusService, cache
import cloudeebusengine
OPENDOOR = False
CREDENTIALS = {}
WHITELIST = []
NETMASK = []
###############################################################################
def ipV4ToHex(mask):
    ## Convert an IP address or an IP mask (given as a prefix length such as 24,
    ## or in dotted form such as 255.255.255.0) to a 32-bit hex value
maskHex = 0
byte = 0
if mask.rfind(".") == -1:
        if (int(mask) <= 32):  # a /32 prefix is a valid full host mask
maskHex = (2**(int(mask))-1)
maskHex = maskHex << (32-int(mask))
else:
raise Exception("Illegal mask (larger than 32 bits) " + mask)
else:
maskField = mask.split(".")
# Check if mask has four fields (byte)
if len(maskField) != 4:
raise Exception("Illegal ip address / mask (should be 4 bytes) " + mask)
for maskQuartet in maskField:
byte = int(maskQuartet)
# Check if each field is really a byte
if byte > 255:
raise Exception("Illegal ip address / mask (digit larger than a byte) " + mask)
maskHex += byte
maskHex = maskHex << 8
maskHex = maskHex >> 8
return maskHex
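# Worked examples for ipV4ToHex (a sketch based on the function above):
#   ipV4ToHex("24")            == 0xFFFFFF00   # /24 prefix length
#   ipV4ToHex("255.255.255.0") == 0xFFFFFF00   # same mask in dotted form
#   ipV4ToHex("192.168.2.0")   == 0xC0A80200   # plain address, used as ipAllowed
# The NETMASK filter below then admits a peer when
#   (ipV4ToHex(peer_ip) & mask) == (ipAllowed & mask)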
###############################################################################
class CloudeebusServerProtocol(WampCraServerProtocol):
'''
    Connection and session authentication management
'''
def onSessionOpen(self):
# CRA authentication options
self.clientAuthTimeout = 0
self.clientAuthAllowAnonymous = OPENDOOR
# CRA authentication init
WampCraServerProtocol.onSessionOpen(self)
def getAuthPermissions(self, key, extra):
return {'permissions': extra.get("permissions", None),
'authextra': extra.get("authextra", None),
'services': extra.get("services", None)}
def getAuthSecret(self, key):
secret = CREDENTIALS.get(key, None)
if secret is None:
return None
# secret must be of str type to be hashed
return str(secret)
def onAuthenticated(self, key, permissions):
if not OPENDOOR:
# check net filter
if NETMASK != []:
ipAllowed = False
for netfilter in NETMASK:
                    ipHex = ipV4ToHex(self.peer.host)
ipAllowed = (ipHex & netfilter['mask']) == netfilter['ipAllowed'] & netfilter['mask']
if ipAllowed:
break
if not ipAllowed:
raise Exception("host " + self.peer.host + " is not allowed!")
# check authentication key
if key is None:
raise Exception("Authentication failed")
# check permissions, array.index throws exception
            if permissions['permissions'] is not None:
                for req in permissions['permissions']:
                    WHITELIST.index(req)
# check allowed service creation, array.index throws exception
            if permissions['services'] is not None:
                for req in permissions['services']:
                    SERVICELIST.index(req)
# create cloudeebus service instance
self.cloudeebusService = CloudeebusService(permissions)
# register it for RPC
self.registerForRpc(self.cloudeebusService)
# register for Publish / Subscribe
self.registerForPubSub("", True)
def connectionLost(self, reason):
WampCraServerProtocol.connectionLost(self, reason)
if factory.getConnectionCount() == 0:
cache.reset()
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Javascript DBus bridge.')
parser.add_argument('-v', '--version', action='store_true',
help='print version and exit')
parser.add_argument('-d', '--debug', action='store_true',
help='log debug info on standard output')
parser.add_argument('-o', '--opendoor', action='store_true',
help='allow anonymous access to all services')
parser.add_argument('-p', '--port', default='9000',
help='port number')
parser.add_argument('-c', '--credentials',
help='path to credentials file')
parser.add_argument('-w', '--whitelist',
help='path to whitelist file (DBus services to use)')
parser.add_argument('-s', '--servicelist',
help='path to servicelist file (DBus services to export)')
parser.add_argument('-n', '--netmask',
        help='comma-separated IP/netmask filter, e.g.: -n 127.0.0.1,192.168.2.0/24,10.12.16.0/255.255.255.0')
args = parser.parse_args(sys.argv[1:])
if args.version:
print("Cloudeebus version " + VERSION)
exit(0)
if args.debug:
log.startLogging(sys.stdout)
OPENDOOR = args.opendoor
if args.credentials:
jfile = open(args.credentials)
CREDENTIALS = json.load(jfile)
jfile.close()
if args.whitelist:
jfile = open(args.whitelist)
WHITELIST.extend(json.load(jfile))
jfile.close()
if args.servicelist:
jfile = open(args.servicelist)
SERVICELIST.extend(json.load(jfile))
jfile.close()
if args.netmask:
iplist = args.netmask.split(",")
for ip in iplist:
if ip.rfind("/") != -1:
ip=ip.split("/")
ipAllowed = ip[0]
mask = ip[1]
else:
ipAllowed = ip
mask = "255.255.255.255"
NETMASK.append( {'ipAllowed': ipV4ToHex(ipAllowed), 'mask' : ipV4ToHex(mask)} )
uri = "ws://localhost:" + args.port
factory = WampServerFactory(uri, debugWamp = args.debug)
factory.protocol = CloudeebusServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
# Configure cloudeebus engine for WAMP.
cloudeebusengine.factory = factory
cloudeebusengine.OPENDOOR = OPENDOOR
listenWS(factory)
DBusGMainLoop(set_as_default=True)
reactor.run()
|
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns)
from numpy import random
from numpy.compat import asbytes
import sys
import warnings
class TestSeed(TestCase):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
class TestBinomial(TestCase):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
np.testing.assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
np.float(1))
class TestSetState(TestCase):
def setUp(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint(TestCase):
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=np.float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool else np.iinfo(dt).min
ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool else np.iinfo(dt).min
ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use a md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but np.bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianess
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(np.bool).name] == res)
class TestRandomDist(TestCase):
# Make sure the random distribution returns the correct value for a
# given seed
def setUp(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
actual = np.random.random_integers(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
desired = np.iinfo('l').max
np.testing.assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
np.testing.assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
np.testing.assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
np.testing.assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False,
p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_(np.random.choice(6, s, replace=True).shape, s)
assert_(np.random.choice(6, s, replace=False).shape, s)
assert_(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5')
np.testing.assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
np.testing.assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5,4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
np.testing.assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
np.testing.assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
np.testing.assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
np.testing.assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
np.testing.assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
np.testing.assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
# Hmm... not even symmetric.
cov = [[1, 0], [1, 0]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[-1.47027513018564449, 10.],
[-1.65915081534845532, 10.]],
[[-2.29186329304599745, 10.],
[-1.77505606019580053, 10.]],
[[-0.54970369430044119, 10.],
[0.29768848031692957, 10.]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([-0.79441224511977482, 10.])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance raises warning
mean = [0, 0]
cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
np.testing.assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[ 1.47145377828516666, 0.15052899268012659],
[ 0.00943803056963588, 1.02647251615666169],
[ 0.332334982684171 , 0.15451287602753125]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
np.testing.assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
np.testing.assert_raises(OverflowError, func, -np.inf, 0)
np.testing.assert_raises(OverflowError, func, 0, np.inf)
np.testing.assert_raises(OverflowError, func, fmin, fmax)
# (fmax / 1e17) - fmin is within range, so this should not throw
np.random.uniform(low=fmin, high=fmax / 1e17)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
np.testing.assert_array_equal(actual, desired)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setUp(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if (np.intp().dtype.itemsize == 4 and sys.platform == "win32"):
np.testing.assert_array_almost_equal(out1, out2)
else:
np.testing.assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000,6))
if __name__ == "__main__":
run_module_suite()
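# A minimal sketch of the reproducibility contract the tests above rely on:
# reseeding the global RandomState replays an identical stream of draws.
# (The seed below is arbitrary and unrelated to the self.seed used in the
# test classes.)
import numpy as np
np.random.seed(1234567890)
_first_draw = np.random.standard_normal(size=5)
np.random.seed(1234567890)
_second_draw = np.random.standard_normal(size=5)
np.testing.assert_array_equal(_first_draw, _second_draw)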
|
|
from nose.tools import eq_, ok_
from nose.plugins.skip import SkipTest
# Skip test on PY3
from flask_admin._compat import PY2, as_unicode
if not PY2:
raise SkipTest('MongoEngine is not Python 3 compatible')
from wtforms import fields, validators
from flask_admin import form
from flask_admin.contrib.mongoengine import ModelView
from . import setup
from datetime import datetime
class CustomModelView(ModelView):
def __init__(self, model,
name=None, category=None, endpoint=None, url=None,
**kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
super(CustomModelView, self).__init__(model,
name, category,
endpoint, url)
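# Illustrative note (not part of the original tests): because the constructor
# above copies arbitrary keyword arguments onto the view instance, any
# flask-admin ModelView option can be toggled per test. A sketch, with
# SomeModel standing in for a real document class:
#
#     view = CustomModelView(SomeModel, can_delete=False,
#                            column_filters=['test1'])
#     admin.add_view(view)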
def create_models(db):
class Model1(db.Document):
test1 = db.StringField(max_length=20)
test2 = db.StringField(max_length=20)
test3 = db.StringField()
test4 = db.StringField()
datetime_field = db.DateTimeField()
def __str__(self):
return self.test1
class Model2(db.Document):
string_field = db.StringField()
int_field = db.IntField()
float_field = db.FloatField()
bool_field = db.BooleanField()
model1 = db.ReferenceField(Model1)
Model1.objects.delete()
Model2.objects.delete()
return Model1, Model2
def fill_db(Model1, Model2):
Model1('test1_val_1', 'test2_val_1').save()
Model1('test1_val_2', 'test2_val_2').save()
Model1('test1_val_3', 'test2_val_3').save()
Model1('test1_val_4', 'test2_val_4').save()
Model1(None, 'empty_obj').save()
Model2('string_field_val_1', None, None, True).save()
Model2('string_field_val_2', None, None, False).save()
Model2('string_field_val_3', 5000, 25.9).save()
Model2('string_field_val_4', 9000, 75.5).save()
Model2('string_field_val_5', 6169453081680413441).save()
Model1('datetime_obj1', datetime_field=datetime(2014,4,3,1,9,0)).save()
Model1('datetime_obj2', datetime_field=datetime(2013,3,2,0,8,0)).save()
def test_model():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1)
admin.add_view(view)
eq_(view.model, Model1)
eq_(view.name, 'Model1')
eq_(view.endpoint, 'model1')
eq_(view._primary_key, 'id')
ok_('test1' in view._sortable_columns)
ok_('test2' in view._sortable_columns)
ok_('test3' in view._sortable_columns)
ok_('test4' in view._sortable_columns)
ok_(view._create_form_class is not None)
ok_(view._edit_form_class is not None)
eq_(view._search_supported, False)
eq_(view._filters, None)
eq_(view._create_form_class.test1.field_class, fields.StringField)
eq_(view._create_form_class.test2.field_class, fields.StringField)
eq_(view._create_form_class.test3.field_class, fields.TextAreaField)
eq_(view._create_form_class.test4.field_class, fields.TextAreaField)
# Make a test client
client = app.test_client()
rv = client.get('/admin/model1/')
eq_(rv.status_code, 200)
rv = client.get('/admin/model1/new/')
eq_(rv.status_code, 200)
rv = client.post('/admin/model1/new/',
data=dict(test1='test1large', test2='test2'))
eq_(rv.status_code, 302)
model = Model1.objects.first()
eq_(model.test1, 'test1large')
eq_(model.test2, 'test2')
eq_(model.test3, '')
eq_(model.test4, '')
rv = client.get('/admin/model1/')
eq_(rv.status_code, 200)
ok_('test1large' in rv.data)
url = '/admin/model1/edit/?id=%s' % model.id
rv = client.get(url)
eq_(rv.status_code, 200)
rv = client.post(url,
data=dict(test1='test1small', test2='test2large'))
eq_(rv.status_code, 302)
model = Model1.objects.first()
eq_(model.test1, 'test1small')
eq_(model.test2, 'test2large')
eq_(model.test3, '')
eq_(model.test4, '')
url = '/admin/model1/delete/?id=%s' % model.id
rv = client.post(url)
eq_(rv.status_code, 302)
eq_(Model1.objects.count(), 0)
def test_column_editable_list():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1,
column_editable_list=[
'test1', 'datetime_field'])
admin.add_view(view)
fill_db(Model1, Model2)
client = app.test_client()
# Test in-line edit field rendering
rv = client.get('/admin/model1/')
data = rv.data.decode('utf-8')
ok_('data-role="x-editable"' in data)
# Form - Test basic in-line edit functionality
obj1 = Model1.objects.get(test1 = 'test1_val_3')
rv = client.post('/admin/model1/ajax/update/', data={
'test1-' + str(obj1.id): 'change-success-1',
})
data = rv.data.decode('utf-8')
ok_('Record was successfully saved.' == data)
# confirm the value has changed
rv = client.get('/admin/model1/')
data = rv.data.decode('utf-8')
ok_('change-success-1' in data)
# Test validation error
obj2 = Model1.objects.get(test1 = 'datetime_obj1')
rv = client.post('/admin/model1/ajax/update/', data={
'datetime_field-' + str(obj2.id): 'problematic-input',
})
eq_(rv.status_code, 500)
# Test invalid primary key
rv = client.post('/admin/model1/ajax/update/', data={
'test1-1000': 'problematic-input',
})
data = rv.data.decode('utf-8')
eq_(rv.status_code, 500)
# Test editing column not in column_editable_list
rv = client.post('/admin/model1/ajax/update/', data={
'test2-1': 'problematic-input',
})
data = rv.data.decode('utf-8')
eq_(rv.status_code, 500)
# Test in-line editing for relations
view = CustomModelView(Model2,
column_editable_list=[
'model1'])
admin.add_view(view)
obj3 = Model2.objects.get(string_field = 'string_field_val_1')
rv = client.post('/admin/model2/ajax/update/', data={
'model1-' + str(obj3.id): str(obj1.id),
})
data = rv.data.decode('utf-8')
ok_('Record was successfully saved.' == data)
# confirm the value has changed
rv = client.get('/admin/model2/')
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
def test_details_view():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view_no_details = CustomModelView(Model1)
admin.add_view(view_no_details)
# fields are scaffolded
view_w_details = CustomModelView(Model2, can_view_details=True)
admin.add_view(view_w_details)
# show only specific fields in details w/ column_details_list
string_field_view = CustomModelView(Model2, can_view_details=True,
column_details_list=["string_field"],
endpoint="sf_view")
admin.add_view(string_field_view)
fill_db(Model1, Model2)
client = app.test_client()
m1_id = Model1.objects.first().id
m2_id = Model2.objects.first().id
# ensure link to details is hidden when can_view_details is disabled
rv = client.get('/admin/model1/')
data = rv.data.decode('utf-8')
ok_('/admin/model1/details/' not in data)
# ensure link to details view appears
rv = client.get('/admin/model2/')
data = rv.data.decode('utf-8')
ok_('/admin/model2/details/' in data)
# test redirection when details are disabled
url = '/admin/model1/details/?url=%2Fadmin%2Fmodel1%2F&id=' + str(m1_id)
rv = client.get(url)
eq_(rv.status_code, 302)
# test if correct data appears in details view when enabled
url = '/admin/model2/details/?url=%2Fadmin%2Fmodel2%2F&id=' + str(m2_id)
rv = client.get(url)
data = rv.data.decode('utf-8')
ok_('String Field' in data)
ok_('string_field_val_1' in data)
ok_('Int Field' in data)
# test column_details_list
url = '/admin/sf_view/details/?url=%2Fadmin%2Fsf_view%2F&id=' + str(m2_id)
rv = client.get(url)
data = rv.data.decode('utf-8')
ok_('String Field' in data)
ok_('string_field_val_1' in data)
ok_('Int Field' not in data)
def test_column_filters():
app, db, admin = setup()
Model1, Model2 = create_models(db)
# fill DB with values
fill_db(Model1, Model2)
# Test string filter
view = CustomModelView(Model1, column_filters=['test1'])
admin.add_view(view)
eq_(len(view._filters), 7)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Test1']],
[
(0, 'contains'),
(1, 'not contains'),
(2, 'equals'),
(3, 'not equal'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# Make a test client
client = app.test_client()
# string - contains
rv = client.get('/admin/model1/?flt0_0=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
# string - not contains
rv = client.get('/admin/model1/?flt0_1=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# string - equals
rv = client.get('/admin/model1/?flt0_2=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
# string - not equal
rv = client.get('/admin/model1/?flt0_3=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# string - empty
rv = client.get('/admin/model1/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('empty_obj' in data)
ok_('test1_val_1' not in data)
ok_('test1_val_2' not in data)
# string - not empty
rv = client.get('/admin/model1/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('empty_obj' not in data)
ok_('test1_val_1' in data)
ok_('test1_val_2' in data)
# string - in list
rv = client.get('/admin/model1/?flt0_5=test1_val_1%2Ctest1_val_2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test2_val_2' in data)
ok_('test1_val_3' not in data)
ok_('test1_val_4' not in data)
# string - not in list
rv = client.get('/admin/model1/?flt0_6=test1_val_1%2Ctest1_val_2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test2_val_2' not in data)
ok_('test1_val_3' in data)
ok_('test1_val_4' in data)
# Test numeric filter
view = CustomModelView(Model2, column_filters=['int_field'])
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Int Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# integer - equals
rv = client.get('/admin/model2/?flt0_0=5000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' in data)
ok_('string_field_val_4' not in data)
# integer - equals (huge number)
rv = client.get('/admin/model2/?flt0_0=6169453081680413441')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_5' in data)
ok_('string_field_val_4' not in data)
# integer - equals - test validation
rv = client.get('/admin/model2/?flt0_0=badval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# integer - not equal
rv = client.get('/admin/model2/?flt0_1=5000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' in data)
# integer - greater
rv = client.get('/admin/model2/?flt0_2=6000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' in data)
# integer - smaller
rv = client.get('/admin/model2/?flt0_3=6000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' in data)
ok_('string_field_val_4' not in data)
# integer - empty
rv = client.get('/admin/model2/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' in data)
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' not in data)
# integer - not empty
rv = client.get('/admin/model2/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' not in data)
ok_('string_field_val_3' in data)
ok_('string_field_val_4' in data)
# integer - in list
rv = client.get('/admin/model2/?flt0_5=5000%2C9000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' not in data)
ok_('string_field_val_3' in data)
ok_('string_field_val_4' in data)
# integer - in list (huge number)
rv = client.get('/admin/model2/?flt0_5=6169453081680413441')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_5' in data)
# integer - in list - test validation
rv = client.get('/admin/model2/?flt0_5=5000%2Cbadval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# integer - not in list
rv = client.get('/admin/model2/?flt0_6=5000%2C9000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' in data)
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' not in data)
# Test boolean filter
view = CustomModelView(Model2, column_filters=['bool_field'],
endpoint="_bools")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Bool Field']],
[
(0, 'equals'),
(1, 'not equal'),
])
# boolean - equals - Yes
rv = client.get('/admin/_bools/?flt0_0=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' not in data)
#ok_('string_field_val_3' not in data)
# boolean - equals - No
rv = client.get('/admin/_bools/?flt0_0=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' in data)
#ok_('string_field_val_3' in data)
# boolean - not equals - Yes
rv = client.get('/admin/_bools/?flt0_1=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' in data)
#ok_('string_field_val_3' in data)
# boolean - not equals - No
rv = client.get('/admin/_bools/?flt0_1=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' not in data)
#ok_('string_field_val_3' not in data)
# Test float filter
view = CustomModelView(Model2, column_filters=['float_field'],
endpoint="_float")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Float Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# float - equals
rv = client.get('/admin/_float/?flt0_0=25.9')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' in data)
ok_('string_field_val_4' not in data)
# float - equals - test validation
rv = client.get('/admin/_float/?flt0_0=badval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# float - not equal
rv = client.get('/admin/_float/?flt0_1=25.9')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' in data)
# float - greater
rv = client.get('/admin/_float/?flt0_2=60.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' in data)
# float - smaller
rv = client.get('/admin/_float/?flt0_3=60.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' in data)
ok_('string_field_val_4' not in data)
# float - empty
rv = client.get('/admin/_float/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' in data)
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' not in data)
# float - not empty
rv = client.get('/admin/_float/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' not in data)
ok_('string_field_val_3' in data)
ok_('string_field_val_4' in data)
# float - in list
rv = client.get('/admin/_float/?flt0_5=25.9%2C75.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' not in data)
ok_('string_field_val_3' in data)
ok_('string_field_val_4' in data)
# float - in list - test validation
rv = client.get('/admin/_float/?flt0_5=25.9%2Cbadval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# float - not in list
rv = client.get('/admin/_float/?flt0_6=25.9%2C75.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' in data)
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' not in data)
# Test datetime filter
view = CustomModelView(Model1,
column_filters=['datetime_field'],
endpoint="_datetime")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Datetime Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'between'),
(5, 'not between'),
(6, 'empty'),
])
# datetime - equals
rv = client.get('/admin/_datetime/?flt0_0=2014-04-03+01%3A09%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - not equal
rv = client.get('/admin/_datetime/?flt0_1=2014-04-03+01%3A09%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - greater
rv = client.get('/admin/_datetime/?flt0_2=2014-04-03+01%3A08%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - smaller
rv = client.get('/admin/_datetime/?flt0_3=2014-04-03+01%3A08%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - between
rv = client.get('/admin/_datetime/?flt0_4=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - not between
rv = client.get('/admin/_datetime/?flt0_5=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - empty
rv = client.get('/admin/_datetime/?flt0_6=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' not in data)
# datetime - not empty
rv = client.get('/admin/_datetime/?flt0_6=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('datetime_obj1' in data)
ok_('datetime_obj2' in data)
def test_default_sort():
app, db, admin = setup()
M1, _ = create_models(db)
M1(test1='c').save()
M1(test1='b').save()
M1(test1='a').save()
eq_(M1.objects.count(), 3)
view = CustomModelView(M1, column_default_sort='test1')
admin.add_view(view)
_, data = view.get_list(0, None, None, None, None)
eq_(data[0].test1, 'a')
eq_(data[1].test1, 'b')
eq_(data[2].test1, 'c')
def test_extra_fields():
app, db, admin = setup()
Model1, _ = create_models(db)
view = CustomModelView(
Model1,
form_extra_fields={
'extra_field': fields.StringField('Extra Field')
}
)
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/model1/new/')
eq_(rv.status_code, 200)
# Check presence and order
data = rv.data.decode('utf-8')
ok_('Extra Field' in data)
pos1 = data.find('Extra Field')
pos2 = data.find('Test1')
ok_(pos2 < pos1)
def test_extra_field_order():
    app, db, admin = setup()
    Model1, _ = create_models(db)
    # Unlike test_extra_fields above, pin the field order explicitly so the
    # extra field is rendered before the scaffolded one.
    view = CustomModelView(
        Model1,
        form_columns=('extra_field', 'test1'),
        form_extra_fields={
            'extra_field': fields.StringField('Extra Field')
        }
    )
    admin.add_view(view)
    client = app.test_client()
    rv = client.get('/admin/model1/new/')
    eq_(rv.status_code, 200)
    # Check presence and order
    data = rv.data.decode('utf-8')
    ok_('Extra Field' in data)
    pos1 = data.find('Extra Field')
    pos2 = data.find('Test1')
    ok_(pos2 > pos1)
def test_custom_form_base():
app, db, admin = setup()
class TestForm(form.BaseForm):
pass
Model1, _ = create_models(db)
view = CustomModelView(
Model1,
form_base_class=TestForm
)
admin.add_view(view)
ok_(hasattr(view._create_form_class, 'test1'))
create_form = view.create_form()
ok_(isinstance(create_form, TestForm))
def test_subdocument_config():
app, db, admin = setup()
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Model1(db.Document):
test1 = db.StringField(max_length=20)
subdoc = db.EmbeddedDocumentField(Comment)
# Check only
view1 = CustomModelView(
Model1,
form_subdocuments = {
'subdoc': {
'form_columns': ('name',)
}
}
)
ok_(hasattr(view1._create_form_class, 'subdoc'))
form = view1.create_form()
ok_('name' in dir(form.subdoc.form))
ok_('value' not in dir(form.subdoc.form))
# Check exclude
view2 = CustomModelView(
Model1,
form_subdocuments = {
'subdoc': {
'form_excluded_columns': ('value',)
}
}
)
form = view2.create_form()
ok_('name' in dir(form.subdoc.form))
ok_('value' not in dir(form.subdoc.form))
def test_subdocument_class_config():
app, db, admin = setup()
from flask_admin.contrib.mongoengine import EmbeddedForm
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Model1(db.Document):
test1 = db.StringField(max_length=20)
subdoc = db.EmbeddedDocumentField(Comment)
class EmbeddedConfig(EmbeddedForm):
form_columns = ('name',)
# Check only
view1 = CustomModelView(
Model1,
form_subdocuments = {
'subdoc': EmbeddedConfig()
}
)
form = view1.create_form()
ok_('name' in dir(form.subdoc.form))
ok_('value' not in dir(form.subdoc.form))
def test_nested_subdocument_config():
app, db, admin = setup()
# Check recursive
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Nested(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
comment = db.EmbeddedDocumentField(Comment)
class Model1(db.Document):
test1 = db.StringField(max_length=20)
nested = db.EmbeddedDocumentField(Nested)
view1 = CustomModelView(
Model1,
form_subdocuments = {
'nested': {
'form_subdocuments': {
'comment': {
'form_columns': ('name',)
}
}
}
}
)
form = view1.create_form()
ok_('name' in dir(form.nested.form.comment.form))
ok_('value' not in dir(form.nested.form.comment.form))
def test_nested_list_subdocument():
app, db, admin = setup()
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Model1(db.Document):
test1 = db.StringField(max_length=20)
subdoc = db.ListField(db.EmbeddedDocumentField(Comment))
# Check only
view1 = CustomModelView(
Model1,
form_subdocuments = {
'subdoc': {
'form_subdocuments': {
None: {
'form_columns': ('name',)
}
}
}
}
)
form = view1.create_form()
inline_form = form.subdoc.unbound_field.args[2]
ok_('name' in dir(inline_form))
ok_('value' not in dir(inline_form))
def test_list_subdocument_validation():
app, db, admin = setup()
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Model1(db.Document):
test1 = db.StringField(max_length=20)
subdoc = db.ListField(db.EmbeddedDocumentField(Comment))
view = CustomModelView(Model1)
admin.add_view(view)
client = app.test_client()
rv = client.post('/admin/model1/new/',
data={'test1': 'test1large', 'subdoc-0-name': 'comment', 'subdoc-0-value': 'test'})
eq_(rv.status_code, 302)
rv = client.post('/admin/model1/new/',
data={'test1': 'test1large', 'subdoc-0-name': '', 'subdoc-0-value': 'test'})
eq_(rv.status_code, 200)
ok_('This field is required' in rv.data)
def test_ajax_fk():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(
Model2,
url='view',
form_ajax_refs={
'model1': {
'fields': ('test1', 'test2')
}
}
)
admin.add_view(view)
ok_(u'model1' in view._form_ajax_refs)
model = Model1(test1=u'first')
model.save()
model2 = Model1(test1=u'foo', test2=u'bar').save()
# Check loader
loader = view._form_ajax_refs[u'model1']
mdl = loader.get_one(model.id)
eq_(mdl.test1, model.test1)
items = loader.get_list(u'fir')
eq_(len(items), 1)
eq_(items[0].id, model.id)
items = loader.get_list(u'bar')
eq_(len(items), 1)
eq_(items[0].test1, u'foo')
# Check form generation
form = view.create_form()
eq_(form.model1.__class__.__name__, u'AjaxSelectField')
with app.test_request_context('/admin/view/'):
ok_(u'value=""' not in form.model1())
form.model1.data = model
needle = u'data-json="[&quot;%s&quot;, &quot;first&quot;]"' % as_unicode(model.id)
ok_(needle in form.model1())
ok_(u'value="%s"' % as_unicode(model.id) in form.model1())
# Check querying
client = app.test_client()
req = client.get(u'/admin/view/ajax/lookup/?name=model1&query=foo')
eq_(req.data, u'[["%s", "foo"]]' % model2.id)
# Check submitting
client.post('/admin/view/new/', data={u'model1': as_unicode(model.id)})
mdl = Model2.objects.first()
ok_(mdl is not None)
ok_(mdl.model1 is not None)
eq_(mdl.model1.id, model.id)
eq_(mdl.model1.test1, u'first')
def test_nested_ajax_refs():
app, db, admin = setup()
# Check recursive
class Comment(db.Document):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Nested(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
comment = db.ReferenceField(Comment)
class Model1(db.Document):
test1 = db.StringField(max_length=20)
nested = db.EmbeddedDocumentField(Nested)
view1 = CustomModelView(
Model1,
form_subdocuments = {
'nested': {
'form_ajax_refs': {
'comment': {
'fields': ['name']
}
}
}
}
)
form = view1.create_form()
eq_(type(form.nested.form.comment).__name__, 'AjaxSelectField')
ok_('nested-comment' in view1._form_ajax_refs)
def test_form_flat_choices():
app, db, admin = setup()
class Model(db.Document):
name = db.StringField(max_length=20, choices=('a', 'b', 'c'))
view = CustomModelView(Model)
admin.add_view(view)
form = view.create_form()
eq_(form.name.choices, [('a', 'a'), ('b', 'b'), ('c', 'c')])
def test_form_args():
app, db, admin = setup()
class Model(db.Document):
test = db.StringField(required=True)
shared_form_args = {'test': {'validators': [validators.Regexp('test')]}}
view = CustomModelView(Model, form_args=shared_form_args)
admin.add_view(view)
# ensure shared field_args don't create duplicate validators
create_form = view.create_form()
eq_(len(create_form.test.validators), 2)
edit_form = view.edit_form()
eq_(len(edit_form.test.validators), 2)
def test_form_args_embeddeddoc():
app, db, admin = setup()
class Info(db.EmbeddedDocument):
name = db.StringField()
age = db.StringField()
class Model(db.Document):
info = db.EmbeddedDocumentField('Info')
timestamp = db.DateTimeField()
view = CustomModelView(
Model,
form_args= {
'info': {'label': 'Information'},
'timestamp': {'label': 'Last Updated Time'}
}
)
admin.add_view(view)
form = view.create_form()
eq_(form.timestamp.label.text, 'Last Updated Time')
# Regression check: the form_args label must also be applied to the EmbeddedDocumentField
eq_(form.info.label.text, 'Information')
def test_simple_list_pager():
app, db, admin = setup()
Model1, _ = create_models(db)
class TestModelView(CustomModelView):
simple_list_pager = True
def get_count_query(self):
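# With simple_list_pager enabled the count query must never be issued;
# failing loudly here is what proves get_list() below skips it.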
assert False
view = TestModelView(Model1)
admin.add_view(view)
count, data = view.get_list(0, None, None, None, None)
ok_(count is None)
def test_export_csv():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1, can_export=True,
column_list=['test1', 'test2'], export_max_rows=2,
endpoint='row_limit_2')
admin.add_view(view)
for x in range(5):
fill_db(Model1, Model2)
client = app.test_client()
# test export_max_rows
rv = client.get('/admin/row_limit_2/export/csv/')
data = rv.data.decode('utf-8')
eq_(rv.status_code, 200)
ok_("Test1,Test2\r\n"
"test1_val_1,test2_val_1\r\n"
"test1_val_2,test2_val_2\r\n" == data)
view = CustomModelView(Model1, can_export=True,
column_list=['test1', 'test2'],
endpoint='no_row_limit')
admin.add_view(view)
# test row limit without export_max_rows
rv = client.get('/admin/no_row_limit/export/csv/')
data = rv.data.decode('utf-8')
eq_(rv.status_code, 200)
ok_(len(data.splitlines()) > 21)
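# A minimal sketch (not one of the original assertions) of how the
# "in list" / "not in list" filter query strings used above are built:
# the filter value is a comma-separated list, with the comma URL-encoded
# as %2C.
import urllib
_filter_values = ['test1_val_1', 'test1_val_2']
_in_list_url = '/admin/model1/?flt0_5=' + urllib.quote(','.join(_filter_values))
assert _in_list_url == '/admin/model1/?flt0_5=test1_val_1%2Ctest1_val_2'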
|
|
#!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Regression test for dnsrmview
Make sure you are running this against a database that can be destroyed.
DO NOT EVER RUN THIS TEST AGAINST A PRODUCTION DATABASE.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import os
import sys
import socket
import threading
import time
import getpass
import unittest
import roster_core
import roster_server
from roster_user_tools import roster_client_lib
USER_CONFIG = 'test_data/roster_user_tools.conf'
CONFIG_FILE = 'test_data/roster.conf' # Example in test_data
SCHEMA_FILE = '../roster-core/data/database_schema.sql'
DATA_FILE = 'test_data/test_data.sql'
HOST = u'localhost'
USERNAME = u'sharrell'
PASSWORD = u'test'
KEYFILE=('test_data/dnsmgmt.key.pem')
CERTFILE=('test_data/dnsmgmt.cert.pem')
CREDFILE='%s/.dnscred' % os.getcwd()
EXEC='../roster-user-tools/scripts/dnsrmview'
class options(object):
password = u'test'
username = u'sharrell'
server = None
ldap = u'ldaps://ldap.cs.university.edu:636'
credfile = CREDFILE
view_name = None
ip_address = None
target = u'machine1'
ttl = 64
class DaemonThread(threading.Thread):
def __init__(self, config_instance, port):
threading.Thread.__init__(self)
self.config_instance = config_instance
self.port = port
self.daemon_instance = None
def run(self):
self.daemon_instance = roster_server.Server(self.config_instance, KEYFILE,
CERTFILE)
self.daemon_instance.Serve(port=self.port)
class Testdnsmkview(unittest.TestCase):
def setUp(self):
def PickUnusedPort():
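# Binding to port 0 asks the OS for an unused ephemeral port, which is then
# read back and released so the daemon thread can bind to it.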
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, 0))
addr, port = s.getsockname()
s.close()
return port
self.config_instance = roster_core.Config(file_name=CONFIG_FILE)
db_instance = self.config_instance.GetDb()
db_instance.CreateRosterDatabase()
data = open(DATA_FILE, 'r').read()
db_instance.StartTransaction()
db_instance.cursor.execute(data)
db_instance.EndTransaction()
db_instance.close()
self.port = PickUnusedPort()
self.server_name = 'https://%s:%s' % (HOST, self.port)
self.daemon_thread = DaemonThread(self.config_instance, self.port)
self.daemon_thread.daemon = True
self.daemon_thread.start()
self.core_instance = roster_core.Core(USERNAME, self.config_instance)
self.password = 'test'
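# Give the daemon thread a moment to start serving before requesting
# credentials against it (a fixed one-second sleep is presumably enough
# for these tests).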
time.sleep(1)
roster_client_lib.GetCredentials(USERNAME, u'test', credfile=CREDFILE,
server_name=self.server_name)
def tearDown(self):
if( os.path.exists(CREDFILE) ):
os.remove(CREDFILE)
def testMakeView(self):
self.core_instance.MakeACL(u'acl1', u'192.168.1.0/24')
self.core_instance.MakeView(u'test_view')
self.core_instance.MakeDnsServerSet(u'test_set')
self.core_instance.MakeDnsServerSetViewAssignments(u'test_view', 0, u'test_set')
self.core_instance.MakeViewToACLAssignments(u'test_view', u'test_set', u'acl1', 1)
command = os.popen('python %s view -v test_view '
'-c %s -u %s -p %s --config-file %s -s %s' % (
EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
self.server_name))
self.assertEqual(command.read(),
'REMOVED VIEW view_name: view_name: test_view\n')
command.close()
self.assertEqual(self.core_instance.ListViewToACLAssignments(), [])
def testMakeViewAclAssignment(self):
self.core_instance.MakeACL(u'acl1', u'192.168.1.0/24')
self.core_instance.MakeView(u'test_view')
self.core_instance.MakeDnsServerSet(u'test_set')
self.core_instance.MakeDnsServerSetViewAssignments(u'test_view', 0, u'test_set')
self.core_instance.MakeViewToACLAssignments(u'test_view', u'test_set', u'acl1', 1)
command = os.popen('python %s acl -v test_view -a acl1 -e test_set '
'-c %s -u %s -p %s --config-file %s -s %s --allow' % (
EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
self.server_name))
self.assertEqual(command.read(),
'REMOVED VIEW TO ACL ASSIGNMENT: view_name: test_view acl_name: acl1\n')
command.close()
def testMakeViewAssignment(self):
command = os.popen('python %s view_subset -v test_view -V test_view2 '
'-u %s -p %s --config-file %s -s %s' % (
EXEC, USERNAME, self.password, USER_CONFIG,
self.server_name))
self.assertEqual(command.read(),
'CLIENT ERROR: View "test_view" does not exist.\n')
command.close()
self.core_instance.MakeView(u'test_view')
self.core_instance.MakeView(u'test_view2')
self.core_instance.MakeViewAssignment(u'test_view', u'test_view2')
command = os.popen('python %s view_subset -v test_view -V test_view2 '
'-c %s -u %s -p %s --config-file %s -s %s' % (
EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
self.server_name))
self.assertEqual(command.read(),
'REMOVED VIEW ASSIGNMENT: view_name: test_view '
'view_subset: test_view2\n')
command.close()
def testMakeDnsServerSetAssignment(self):
self.core_instance.MakeACL(u'outside', u'192.168.1.0/24')
self.core_instance.MakeDnsServerSet(u'set2')
command = os.popen('python %s dns_server_set -v test_view -e set2 '
'-c %s -u %s -p %s --config-file %s -s %s' % (
EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
self.server_name))
self.assertEqual(command.read(),
'CLIENT ERROR: View "test_view" does not exist.\n')
command.close()
self.core_instance.MakeView(u'test_view')
command = os.popen('python %s dns_server_set -v test_view -e set1 '
'-c %s -u %s -p %s --config-file %s -s %s' % (
EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
self.server_name))
self.assertEqual(command.read(),
'CLIENT ERROR: Dns Server Set "set1" does not exist.\n')
command.close()
self.core_instance.MakeDnsServerSet(u'set1')
command = os.popen('python %s dns_server_set -v test_view -e set1 '
'-c %s -u %s -p %s '
'--config-file %s -s %s' % (
EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
self.server_name))
self.assertEqual(command.read(),
'REMOVED DNS SERVER SET VIEW ASSIGNMENT: view_name: '
'test_view dns_server_set: set1\n')
command.close()
def testErrors(self):
self.core_instance.MakeDnsServerSet(u'set1')
self.core_instance.MakeView(u'test_view')
self.core_instance.MakeView(u'test_view2')
command = os.popen('python %s dns_server_set -v test_view -e set1 '
'-V test_view2 '
'-c %s -u %s -p %s --config-file %s -s %s' % (
EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
self.server_name))
self.assertEqual(command.read(),
'CLIENT ERROR: The -V/--view-dep flag cannot be used with the '
'dns_server_set command.\n')
command.close()
command = os.popen('python %s view_subset -V test_view2 '
'-c %s -u %s -p %s --config-file %s -s %s' % (
EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
self.server_name))
self.assertEqual(command.read(),
'CLIENT ERROR: The -v/--view-name flag is required.\n')
command.close()
command = os.popen('python %s dns_server_set -e set1 '
'-c %s -u %s -p %s --config-file %s -s %s' % (
EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
self.server_name))
self.assertEqual(command.read(),
'CLIENT ERROR: The -v/--view-name flag is required.\n')
command.close()
self.core_instance.MakeACL(u'acl1', u'192.168.1.0/24')
self.core_instance.MakeDnsServerSet(u'test_set')
self.core_instance.MakeDnsServerSetViewAssignments(u'test_view', 0, u'test_set')
self.core_instance.MakeViewToACLAssignments(u'test_view', u'test_set', u'acl1', 1)
command = os.popen('python %s acl -v test_view -a acl1 -e test_set '
'-c %s -u %s -p %s --config-file %s -s %s' % (
EXEC, CREDFILE, USERNAME, self.password, USER_CONFIG,
self.server_name))
self.assertEqual(command.read(),
'USER ERROR: Either --allow or --deny must be used.\n')
command.close()
if( __name__ == '__main__' ):
unittest.main()
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Client-side transfer throttling for use with remote_api_stub.
This module is used to configure rate limiting for programs accessing
AppEngine services through remote_api.
See the Throttle class for more information.
An example with throttling:
---
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.ext.remote_api import throttle
from myapp import models
import getpass
import threading
def auth_func():
return (raw_input('Username:'), getpass.getpass('Password:'))
remote_api_stub.ConfigureRemoteDatastore('my-app', '/remote_api', auth_func)
full_throttle = throttle.DefaultThrottle(multiplier=1.0)
throttle.ThrottleRemoteDatastore(full_throttle)
# Register any threads that will be using the datastore with the throttler
full_throttle.Register(threading.currentThread())
# Now you can access the remote datastore just as if your code was running on
# App Engine, and you don't need to worry about exceeding quota limits!
houses = models.House.all().fetch(100)
for a_house in houses:
a_house.doors += 1
db.put(houses)
---
This example limits usage to the default free quota levels. The multiplier
kwarg to throttle.DefaultThrottle can be used to scale the throttle levels
higher or lower.
Throttles can also be constructed directly for more control over the limits
for different operations. See the Throttle class and the constants following
it for details.
"""
import functools
import logging
import os
import threading
import time
import urllib2
import urlparse
_HTTPLIB2_AVAILABLE = False
try:
import httplib2
from google.appengine.tools import appengine_rpc_httplib2
_HTTPLIB2_AVAILABLE = True
except ImportError:
pass
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import apiproxy_stub_map
else:
from google.appengine.api import apiproxy_stub_map
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.tools import appengine_rpc
logger = logging.getLogger('google.appengine.ext.remote_api.throttle')
MINIMUM_THROTTLE_SLEEP_DURATION = 0.001
class Error(Exception):
"""Base class for errors in this module."""
class ThreadNotRegisteredError(Error):
"""An unregistered thread has accessed the throttled datastore stub."""
class UnknownThrottleNameError(Error):
"""A transfer was added for an unknown throttle name."""
def InterruptibleSleep(sleep_time):
"""Puts thread to sleep, checking this threads exit_flag four times a second.
Args:
sleep_time: Time to sleep.
"""
slept = 0.0
epsilon = .0001
thread = threading.currentThread()
while slept < sleep_time - epsilon:
remaining = sleep_time - slept
this_sleep_time = min(remaining, 0.25)
time.sleep(this_sleep_time)
slept += this_sleep_time
if hasattr(thread, 'exit_flag') and thread.exit_flag:
return
class Throttle(object):
"""A base class for upload rate throttling.
Transferring a large number of entities too quickly can trigger
quota limits and cause the transfer process to halt. In order to
stay within the application's quota, we throttle the data transfer
to a specified limit (across all transfer threads).
This class tracks a moving average of some aspect of the transfer
rate (bandwidth, records per second, http connections per
second). It keeps two windows of counts of bytes transferred, on a
per-thread basis. One block is the "current" block, and the other is
the "prior" block. It will rotate the counts from current to prior
when ROTATE_PERIOD has passed. Thus, the current block will
represent from 0 seconds to ROTATE_PERIOD seconds of activity
(determined by: time.time() - self.last_rotate). The prior block
will always represent a full ROTATE_PERIOD.
Sleeping is performed just before a transfer of another block, and is
based on the counts transferred *before* the next transfer. It really
does not matter how much will be transferred, only that, for all the
data transferred SO FAR, we have interspersed enough pauses to keep
the aggregate transfer rate within the specified limit.
These counts are maintained on a per-thread basis, so we do not require
any interlocks around incrementing the counts. There IS an interlock on
the rotation of the counts because we do not want multiple threads to
multiply-rotate the counts.
There are various race conditions in the computation and collection
of these counts. We do not require precise values, but simply to
keep the overall transfer within the bandwidth limits. If a given
pause is a little short, or a little long, then the aggregate delays
will be correct.
"""
ROTATE_PERIOD = 600
def __init__(self,
get_time=time.time,
thread_sleep=InterruptibleSleep,
layout=None):
self.get_time = get_time
self.thread_sleep = thread_sleep
self.start_time = get_time()
self.transferred = {}
self.prior_block = {}
self.totals = {}
self.throttles = {}
self.last_rotate = {}
self.rotate_mutex = {}
if layout:
self.AddThrottles(layout)
def AddThrottle(self, name, limit):
self.throttles[name] = limit
self.transferred[name] = {}
self.prior_block[name] = {}
self.totals[name] = {}
self.last_rotate[name] = self.get_time()
self.rotate_mutex[name] = threading.Lock()
def AddThrottles(self, layout):
for key, value in layout.iteritems():
self.AddThrottle(key, value)
def Register(self, thread):
"""Register this thread with the throttler."""
thread_id = id(thread)
for throttle_name in self.throttles.iterkeys():
self.transferred[throttle_name][thread_id] = 0
self.prior_block[throttle_name][thread_id] = 0
self.totals[throttle_name][thread_id] = 0
def VerifyThrottleName(self, throttle_name):
if throttle_name not in self.throttles:
raise UnknownThrottleNameError('%s is not a registered throttle' %
throttle_name)
def AddTransfer(self, throttle_name, token_count):
"""Add a count to the amount this thread has transferred.
Each time a thread transfers some data, it should call this method to
note the amount sent. The counts may be rotated if sufficient time
has passed since the last rotation.
Args:
throttle_name: The name of the throttle to add to.
token_count: The number to add to the throttle counter.
"""
self.VerifyThrottleName(throttle_name)
transferred = self.transferred[throttle_name]
try:
transferred[id(threading.currentThread())] += token_count
except KeyError:
thread = threading.currentThread()
raise ThreadNotRegisteredError(
'Unregistered thread accessing throttled datastore stub: id = %s\n'
'name = %s' % (id(thread), thread.getName()))
if self.last_rotate[throttle_name] + self.ROTATE_PERIOD < self.get_time():
self._RotateCounts(throttle_name)
def Sleep(self, throttle_name=None):
"""Possibly sleep in order to limit the transfer rate.
Note that we sleep based on *prior* transfers rather than what we
may be about to transfer. The next transfer could put us under/over
and that will be rectified *after* that transfer. Net result is that
the average transfer rate will remain within bounds. Spiky behavior
or uneven rates among the threads could possibly bring the transfer
rate above the requested limit for short durations.
Args:
throttle_name: The name of the throttle to sleep on. If None or
omitted, then sleep on all throttles.
"""
if throttle_name is None:
for throttle_name in self.throttles:
self.Sleep(throttle_name=throttle_name)
return
self.VerifyThrottleName(throttle_name)
thread = threading.currentThread()
while True:
duration = self.get_time() - self.last_rotate[throttle_name]
total = 0
for count in self.prior_block[throttle_name].values():
total += count
if total:
duration += self.ROTATE_PERIOD
for count in self.transferred[throttle_name].values():
total += count
sleep_time = self._SleepTime(total, self.throttles[throttle_name],
duration)
if sleep_time < MINIMUM_THROTTLE_SLEEP_DURATION:
break
logger.debug('[%s] Throttling on %s. Sleeping for %.1f ms '
'(duration=%.1f ms, total=%d)',
thread.getName(), throttle_name,
sleep_time * 1000, duration * 1000, total)
self.thread_sleep(sleep_time)
if thread.exit_flag:
break
self._RotateCounts(throttle_name)
def _SleepTime(self, total, limit, duration):
"""Calculate the time to sleep on a throttle.
Args:
total: The total amount transferred.
limit: The amount per second that is allowed to be sent.
duration: The amount of time taken to send the total.
Returns:
A float for the amount of time to sleep.
"""
if not limit:
return 0.0
return max(0.0, (total / limit) - duration)
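# Worked example of the formula above: with limit=100 units/s, if total=500
# units were sent over duration=2.0 s, the thread sleeps
# max(0, 500/100 - 2.0) = 3.0 s, which brings the average rate back to the
# 100 units/s limit.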
def _RotateCounts(self, throttle_name):
"""Rotate the transfer counters.
If sufficient time has passed, then rotate the counters from active to
the prior-block of counts.
This rotation is interlocked to ensure that multiple threads do not
over-rotate the counts.
Args:
throttle_name: The name of the throttle to rotate.
"""
self.VerifyThrottleName(throttle_name)
self.rotate_mutex[throttle_name].acquire()
try:
next_rotate_time = self.last_rotate[throttle_name] + self.ROTATE_PERIOD
if next_rotate_time >= self.get_time():
return
for name, count in self.transferred[throttle_name].items():
self.prior_block[throttle_name][name] = count
self.transferred[throttle_name][name] = 0
self.totals[throttle_name][name] += count
self.last_rotate[throttle_name] = self.get_time()
finally:
self.rotate_mutex[throttle_name].release()
def TotalTransferred(self, throttle_name):
"""Return the total transferred, and over what period.
Args:
throttle_name: The name of the throttle to total.
Returns:
A tuple of the total count and running time for the given throttle name.
"""
total = 0
for count in self.totals[throttle_name].values():
total += count
for count in self.transferred[throttle_name].values():
total += count
return total, self.get_time() - self.start_time
BANDWIDTH_UP = 'http-bandwidth-up'
BANDWIDTH_DOWN = 'http-bandwidth-down'
REQUESTS = 'http-requests'
HTTPS_BANDWIDTH_UP = 'https-bandwidth-up'
HTTPS_BANDWIDTH_DOWN = 'https-bandwidth-down'
HTTPS_REQUESTS = 'https-requests'
DATASTORE_CALL_COUNT = 'datastore-call-count'
ENTITIES_FETCHED = 'entities-fetched'
ENTITIES_MODIFIED = 'entities-modified'
INDEX_MODIFICATIONS = 'index-modifications'
DEFAULT_LIMITS = {
BANDWIDTH_UP: 100000,
BANDWIDTH_DOWN: 100000,
REQUESTS: 15,
HTTPS_BANDWIDTH_UP: 100000,
HTTPS_BANDWIDTH_DOWN: 100000,
HTTPS_REQUESTS: 15,
DATASTORE_CALL_COUNT: 120,
ENTITIES_FETCHED: 400,
ENTITIES_MODIFIED: 400,
INDEX_MODIFICATIONS: 1600,
}
NO_LIMITS = {
BANDWIDTH_UP: None,
BANDWIDTH_DOWN: None,
REQUESTS: None,
HTTPS_BANDWIDTH_UP: None,
HTTPS_BANDWIDTH_DOWN: None,
HTTPS_REQUESTS: None,
DATASTORE_CALL_COUNT: None,
ENTITIES_FETCHED: None,
ENTITIES_MODIFIED: None,
INDEX_MODIFICATIONS: None,
}
def DefaultThrottle(multiplier=1.0):
"""Return a Throttle instance with multiplier * the quota limits."""
layout = dict([(name, multiplier * limit)
for (name, limit) in DEFAULT_LIMITS.iteritems()])
return Throttle(layout=layout)
class ThrottleHandler(urllib2.BaseHandler):
"""A urllib2 handler for http and https requests that adds to a throttle."""
def __init__(self, throttle):
"""Initialize a ThrottleHandler.
Args:
throttle: A Throttle instance to call for bandwidth and http/https request
throttling.
"""
self.throttle = throttle
def _CalculateRequestSize(self, req):
"""Calculates the request size.
May be overridden to support different types of requests.
Args:
req: A urllib2.Request.
Returns:
the size of the request, in bytes.
"""
(unused_scheme,
unused_host_port, url_path,
unused_query, unused_fragment) = urlparse.urlsplit(req.get_full_url())
size = len('%s %s HTTP/1.1\n' % (req.get_method(), url_path))
size += self._CalculateHeaderSize(req.headers)
size += self._CalculateHeaderSize(req.unredirected_hdrs)
data = req.get_data()
if data:
size += len(data)
return size
def _CalculateResponseSize(self, res):
"""Calculates the response size.
May be overridden to support different types of responses.
Args:
res: A urllib2.Response.
Returns:
the size of the response, in bytes.
"""
content = res.read()
def ReturnContent():
return content
res.read = ReturnContent
return len(content) + self._CalculateHeaderSize(dict(res.info().items()))
def _CalculateHeaderSize(self, headers):
"""Calculates the size of the headers.
Args:
headers: A dict of header values.
Returns:
the size of the headers.
"""
return sum([len('%s: %s\n' % (key, value))
for key, value in headers.iteritems()])
def AddRequest(self, throttle_name, req):
"""Add to bandwidth throttle for given request.
Args:
throttle_name: The name of the bandwidth throttle to add to.
req: The request whose size will be added to the throttle.
"""
self.throttle.AddTransfer(throttle_name, self._CalculateRequestSize(req))
def AddResponse(self, throttle_name, res):
"""Add to bandwidth throttle for given response.
Args:
throttle_name: The name of the bandwidth throttle to add to.
res: The response whose size will be added to the throttle.
"""
self.throttle.AddTransfer(throttle_name, self._CalculateResponseSize(res))
def http_request(self, req):
"""Process an HTTP request.
If the throttle is over quota, sleep first. Then add request size to
throttle before returning it to be sent.
Args:
req: A urllib2.Request object.
Returns:
The request passed in.
"""
self.throttle.Sleep(BANDWIDTH_UP)
self.throttle.Sleep(BANDWIDTH_DOWN)
self.AddRequest(BANDWIDTH_UP, req)
return req
def https_request(self, req):
"""Process an HTTPS request.
If the throttle is over quota, sleep first. Then add request size to
throttle before returning it to be sent.
Args:
req: A urllib2.Request object.
Returns:
The request passed in.
"""
self.throttle.Sleep(HTTPS_BANDWIDTH_UP)
self.throttle.Sleep(HTTPS_BANDWIDTH_DOWN)
self.AddRequest(HTTPS_BANDWIDTH_UP, req)
return req
def http_response(self, unused_req, res):
"""Process an HTTP response.
The size of the response is added to the bandwidth throttle and the request
throttle is incremented by one.
Args:
unused_req: The urllib2 request for this response.
res: A urllib2 response object.
Returns:
The response passed in.
"""
self.AddResponse(BANDWIDTH_DOWN, res)
self.throttle.AddTransfer(REQUESTS, 1)
return res
def https_response(self, unused_req, res):
"""Process an HTTPS response.
The size of the response is added to the bandwidth throttle and the request
throttle is incremented by one.
Args:
unused_req: The urllib2 request for this response.
res: A urllib2 response object.
Returns:
The response passed in.
"""
self.AddResponse(HTTPS_BANDWIDTH_DOWN, res)
self.throttle.AddTransfer(HTTPS_REQUESTS, 1)
return res
class ThrottledHttpRpcServer(appengine_rpc.HttpRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests.
This RPC server uses a Throttle to prevent exceeding quotas.
"""
def __init__(self, throttle, *args, **kwargs):
"""Initialize a ThrottledHttpRpcServer.
Also sets request_manager.rpc_server to the ThrottledHttpRpcServer instance.
Args:
throttle: A Throttle instance.
args: Positional arguments to pass through to
appengine_rpc.HttpRpcServer.__init__
kwargs: Keyword arguments to pass through to
appengine_rpc.HttpRpcServer.__init__
"""
self.throttle = throttle
appengine_rpc.HttpRpcServer.__init__(self, *args, **kwargs)
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = appengine_rpc.HttpRpcServer._GetOpener(self)
opener.add_handler(ThrottleHandler(self.throttle))
return opener
if _HTTPLIB2_AVAILABLE:
class ThrottledHttpRpcServerOAuth2(
appengine_rpc_httplib2.HttpRpcServerOAuth2):
def __init__(self, throttle, *args, **kwargs):
kwargs['http_class'] = functools.partial(_ThrottledHttp, throttle)
super(ThrottledHttpRpcServerOAuth2, self).__init__(*args, **kwargs)
class _ThrottledHttp(httplib2.Http):
"""An implementation of Http which throttles requests."""
def __init__(self, throttle, *args, **kwargs):
self.throttle_handler = _HttpThrottleHandler(throttle)
super(_ThrottledHttp, self).__init__(*args, **kwargs)
def request(self, uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
scheme = urlparse.urlparse(uri).scheme
request = (uri, method, body, headers)
if scheme == 'http':
self.throttle_handler.http_request(request)
elif scheme == 'https':
self.throttle_handler.https_request(request)
response = super(_ThrottledHttp, self).request(
uri, method, body, headers, redirections, connection_type)
if scheme == 'http':
self.throttle_handler.http_response(request, response)
elif scheme == 'https':
self.throttle_handler.https_response(request, response)
return response
class _HttpThrottleHandler(ThrottleHandler):
"""A ThrottleHandler designed to be used by ThrottledHttp."""
def _CalculateRequestSize(self, req):
"""Calculates the request size.
Args:
req: A tuple of (uri, method name, request body, header map)
Returns:
the size of the request, in bytes.
"""
uri, method, body, headers = req
(unused_scheme,
unused_host_port, url_path,
unused_query, unused_fragment) = urlparse.urlsplit(uri)
size = len('%s %s HTTP/1.1\n' % (method, url_path))
size += self._CalculateHeaderSize(headers)
if body:
size += len(body)
return size
def _CalculateResponseSize(self, res):
"""Calculates the response size.
May be overridden to support different types of responses.
Args:
res: A tuple of (header map, response body).
Returns:
the size of the response, in bytes.
"""
headers, content = res
return len(content) + self._CalculateHeaderSize(headers)
class ThrottledHttpRpcServerFactory(object):
"""A factory to produce ThrottledHttpRpcServer for a given throttle."""
def __init__(self, throttle, throttle_class=None):
"""Initialize a ThrottledHttpRpcServerFactory.
Args:
throttle: A Throttle instance to use for the ThrottledHttpRpcServer.
throttle_class: A class to use instead of the default
ThrottledHttpRpcServer.
Returns:
A factory to produce a ThrottledHttpRpcServer.
"""
self.throttle = throttle
self.throttle_class = throttle_class
def __call__(self, *args, **kwargs):
"""Factory to produce a ThrottledHttpRpcServer.
Args:
args: Positional args to pass to ThrottledHttpRpcServer.
kwargs: Keyword args to pass to ThrottledHttpRpcServer.
Returns:
A ThrottledHttpRpcServer instance.
"""
kwargs['account_type'] = 'HOSTED_OR_GOOGLE'
kwargs['save_cookies'] = True
if self.throttle_class:
rpc_server = self.throttle_class(self.throttle, *args, **kwargs)
else:
rpc_server = ThrottledHttpRpcServer(self.throttle, *args, **kwargs)
return rpc_server
class Throttler(object):
def PrehookHandler(self, service, call, request, response):
handler = getattr(self, '_Prehook_' + call, None)
if handler:
handler(request, response)
def PosthookHandler(self, service, call, request, response):
handler = getattr(self, '_Posthook_' + call, None)
if handler:
handler(request, response)
def SleepHandler(*throttle_names):
def SleepOnThrottles(self, request, response):
if throttle_names:
for throttle_name in throttle_names:
self._DatastoreThrottler__throttle.Sleep(throttle_name)
else:
self._DatastoreThrottler__throttle.Sleep()
return SleepOnThrottles
class DatastoreThrottler(Throttler):
def __init__(self, throttle):
Throttler.__init__(self)
self.__throttle = throttle
def AddCost(self, cost_proto):
"""Add costs from the Cost protobuf."""
self.__throttle.AddTransfer(INDEX_MODIFICATIONS, cost_proto.index_writes())
self.__throttle.AddTransfer(ENTITIES_MODIFIED, cost_proto.entity_writes())
self.__throttle.AddTransfer(BANDWIDTH_UP, cost_proto.entity_write_bytes())
_Prehook_Put = SleepHandler(ENTITIES_MODIFIED,
INDEX_MODIFICATIONS,
BANDWIDTH_UP)
def _Posthook_Put(self, request, response):
self.AddCost(response.cost())
_Prehook_Get = SleepHandler(ENTITIES_FETCHED)
def _Posthook_Get(self, request, response):
self.__throttle.AddTransfer(ENTITIES_FETCHED, response.entity_size())
_Prehook_RunQuery = SleepHandler(ENTITIES_FETCHED)
def _Posthook_RunQuery(self, request, response):
if not response.keys_only():
self.__throttle.AddTransfer(ENTITIES_FETCHED, response.result_size())
_Prehook_Next = SleepHandler(ENTITIES_FETCHED)
def _Posthook_Next(self, request, response):
if not response.keys_only():
self.__throttle.AddTransfer(ENTITIES_FETCHED, response.result_size())
_Prehook_Delete = SleepHandler(ENTITIES_MODIFIED, INDEX_MODIFICATIONS)
def _Posthook_Delete(self, request, response):
self.AddCost(response.cost())
_Prehook_Commit = SleepHandler()
def _Posthook_Commit(self, request, response):
self.AddCost(response.cost())
def ThrottleRemoteDatastore(throttle, remote_datastore_stub=None):
"""Install the given throttle for the remote datastore stub.
Args:
throttle: A Throttle instance to limit datastore access rates
remote_datastore_stub: The datastore stub instance to throttle, for
testing purposes.
"""
if not remote_datastore_stub:
remote_datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
if not isinstance(remote_datastore_stub, remote_api_stub.RemoteDatastoreStub):
raise remote_api_stub.ConfigurationError('remote_api is not configured.')
throttler = DatastoreThrottler(throttle)
remote_datastore_stub._PreHookHandler = throttler.PrehookHandler
remote_datastore_stub._PostHookHandler = throttler.PosthookHandler
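# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one way the pieces
# defined above could be wired together.  DefaultThrottle,
# ThrottledHttpRpcServerFactory and ThrottleRemoteDatastore come from this
# module; the remote_api configuration call mentioned in the comment is an
# assumption and its exact signature may differ between SDK versions.
def _example_install_throttling():
    throttle = DefaultThrottle(multiplier=0.75)  # run at 75% of the default quotas
    rpc_server_factory = ThrottledHttpRpcServerFactory(throttle)
    # A remote_api connection would normally be configured with this factory
    # (e.g. via remote_api_stub.ConfigureRemoteApi(..., rpc_server_factory=...),
    # signature assumed).  Once the 'datastore_v3' stub is registered, the
    # datastore-level hooks can be installed:
    ThrottleRemoteDatastore(throttle)
    return throttle, rpc_server_factory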
|
|
from unittest import skipUnless
from django.contrib.gis import forms
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import GEOSGeometry
from django.forms import ValidationError
from django.test import SimpleTestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.html import escape
@skipUnless(HAS_GDAL, "GeometryFieldTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class GeometryFieldTest(SimpleTestCase):
def test_init(self):
"Testing GeometryField initialization with defaults."
fld = forms.GeometryField()
for bad_default in ('blah', 3, 'FoO', None, 0):
self.assertRaises(ValidationError, fld.clean, bad_default)
def test_srid(self):
"Testing GeometryField with a SRID set."
# Input that doesn't specify the SRID is assumed to be in the SRID
# of the input field.
fld = forms.GeometryField(srid=4326)
geom = fld.clean('POINT(5 23)')
self.assertEqual(4326, geom.srid)
# Making the field in a different SRID from that of the geometry, and
# asserting it transforms.
fld = forms.GeometryField(srid=32140)
tol = 0.0000001
xform_geom = GEOSGeometry('POINT (951640.547328465 4219369.26171664)', srid=32140)
# The cleaned geometry should be transformed to 32140.
cleaned_geom = fld.clean('SRID=4326;POINT (-95.363151 29.763374)')
self.assertTrue(xform_geom.equals_exact(cleaned_geom, tol))
def test_null(self):
"Testing GeometryField's handling of null (None) geometries."
# Form fields, by default, are required (`required=True`)
fld = forms.GeometryField()
with six.assertRaisesRegex(self, forms.ValidationError,
"No geometry value provided."):
fld.clean(None)
# This will clean None as a geometry (See #10660).
fld = forms.GeometryField(required=False)
self.assertIsNone(fld.clean(None))
def test_geom_type(self):
"Testing GeometryField's handling of different geometry types."
# By default, all geometry types are allowed.
fld = forms.GeometryField()
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.clean(wkt))
pnt_fld = forms.GeometryField(geom_type='POINT')
self.assertEqual(GEOSGeometry('POINT(5 23)'), pnt_fld.clean('POINT(5 23)'))
# a WKT for any other geom_type will be properly transformed by `to_python`
self.assertEqual(GEOSGeometry('LINESTRING(0 0, 1 1)'), pnt_fld.to_python('LINESTRING(0 0, 1 1)'))
# but rejected by `clean`
self.assertRaises(forms.ValidationError, pnt_fld.clean, 'LINESTRING(0 0, 1 1)')
def test_to_python(self):
"""
Testing to_python returns a correct GEOSGeometry object or
a ValidationError
"""
fld = forms.GeometryField()
# to_python returns the same GEOSGeometry for a WKT
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.to_python(wkt))
# but raises a ValidationError for any other string
for wkt in ('POINT(5)', 'MULTI POLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'BLAH(0 0, 1 1)'):
self.assertRaises(forms.ValidationError, fld.to_python, wkt)
def test_field_with_text_widget(self):
class PointForm(forms.Form):
pt = forms.PointField(srid=4326, widget=forms.TextInput)
form = PointForm()
cleaned_pt = form.fields['pt'].clean('POINT(5 23)')
self.assertEqual(cleaned_pt, GEOSGeometry('POINT(5 23)'))
self.assertEqual(4326, cleaned_pt.srid)
point = GEOSGeometry('SRID=4326;POINT(5 23)')
form = PointForm(data={'pt': 'POINT(5 23)'}, initial={'pt': point})
self.assertFalse(form.has_changed())
@skipUnless(HAS_GDAL, "SpecializedFieldTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class SpecializedFieldTest(SimpleTestCase):
def setUp(self):
self.geometries = {
'point': GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
'multipoint': GEOSGeometry("SRID=4326;MULTIPOINT("
"(13.18634033203125 14.504356384277344),"
"(13.207969665527 14.490966796875),"
"(13.177070617675 14.454917907714))"),
'linestring': GEOSGeometry("SRID=4326;LINESTRING("
"-8.26171875 -0.52734375,"
"-7.734375 4.21875,"
"6.85546875 3.779296875,"
"5.44921875 -3.515625)"),
'multilinestring': GEOSGeometry("SRID=4326;MULTILINESTRING("
"(-16.435546875 -2.98828125,"
"-17.2265625 2.98828125,"
"-0.703125 3.515625,"
"-1.494140625 -3.33984375),"
"(-8.0859375 -5.9765625,"
"8.525390625 -8.7890625,"
"12.392578125 -0.87890625,"
"10.01953125 7.646484375))"),
'polygon': GEOSGeometry("SRID=4326;POLYGON("
"(-1.669921875 6.240234375,"
"-3.8671875 -0.615234375,"
"5.9765625 -3.955078125,"
"18.193359375 3.955078125,"
"9.84375 9.4921875,"
"-1.669921875 6.240234375))"),
'multipolygon': GEOSGeometry("SRID=4326;MULTIPOLYGON("
"((-17.578125 13.095703125,"
"-17.2265625 10.8984375,"
"-13.974609375 10.1953125,"
"-13.359375 12.744140625,"
"-15.732421875 13.7109375,"
"-17.578125 13.095703125)),"
"((-8.525390625 5.537109375,"
"-8.876953125 2.548828125,"
"-5.888671875 1.93359375,"
"-5.09765625 4.21875,"
"-6.064453125 6.240234375,"
"-8.525390625 5.537109375)))"),
'geometrycollection': GEOSGeometry("SRID=4326;GEOMETRYCOLLECTION("
"POINT(5.625 -0.263671875),"
"POINT(6.767578125 -3.603515625),"
"POINT(8.525390625 0.087890625),"
"POINT(8.0859375 -2.13134765625),"
"LINESTRING("
"6.273193359375 -1.175537109375,"
"5.77880859375 -1.812744140625,"
"7.27294921875 -2.230224609375,"
"7.657470703125 -1.25244140625))"),
}
def assertMapWidget(self, form_instance):
"""
Make sure the MapWidget js is passed in the form media and a MapWidget
is actually created
"""
self.assertTrue(form_instance.is_valid())
rendered = form_instance.as_p()
self.assertIn('new MapWidget(options);', rendered)
self.assertIn('gis/js/OLMapWidget.js', str(form_instance.media))
def assertTextarea(self, geom, rendered):
"""Makes sure the wkt and a textarea are in the content"""
self.assertIn('<textarea ', rendered)
self.assertIn('required', rendered)
self.assertIn(geom.wkt, rendered)
def test_pointfield(self):
class PointForm(forms.Form):
p = forms.PointField()
geom = self.geometries['point']
form = PointForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PointForm().is_valid())
invalid = PointForm(data={'p': 'some invalid geom'})
self.assertFalse(invalid.is_valid())
self.assertIn('Invalid geometry value', str(invalid.errors))
for invalid in [geo for key, geo in self.geometries.items() if key != 'point']:
self.assertFalse(PointForm(data={'p': invalid.wkt}).is_valid())
def test_multipointfield(self):
class PointForm(forms.Form):
p = forms.MultiPointField()
geom = self.geometries['multipoint']
form = PointForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PointForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'multipoint']:
self.assertFalse(PointForm(data={'p': invalid.wkt}).is_valid())
def test_linestringfield(self):
class LineStringForm(forms.Form):
l = forms.LineStringField()
geom = self.geometries['linestring']
form = LineStringForm(data={'l': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(LineStringForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'linestring']:
self.assertFalse(LineStringForm(data={'l': invalid.wkt}).is_valid())
def test_multilinestringfield(self):
class LineStringForm(forms.Form):
l = forms.MultiLineStringField()
geom = self.geometries['multilinestring']
form = LineStringForm(data={'l': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(LineStringForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'multilinestring']:
self.assertFalse(LineStringForm(data={'l': invalid.wkt}).is_valid())
def test_polygonfield(self):
class PolygonForm(forms.Form):
p = forms.PolygonField()
geom = self.geometries['polygon']
form = PolygonForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PolygonForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'polygon']:
self.assertFalse(PolygonForm(data={'p': invalid.wkt}).is_valid())
def test_multipolygonfield(self):
class PolygonForm(forms.Form):
p = forms.MultiPolygonField()
geom = self.geometries['multipolygon']
form = PolygonForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PolygonForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'multipolygon']:
self.assertFalse(PolygonForm(data={'p': invalid.wkt}).is_valid())
def test_geometrycollectionfield(self):
class GeometryForm(forms.Form):
g = forms.GeometryCollectionField()
geom = self.geometries['geometrycollection']
form = GeometryForm(data={'g': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(GeometryForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'geometrycollection']:
self.assertFalse(GeometryForm(data={'g': invalid.wkt}).is_valid())
@skipUnless(HAS_GDAL, "OSMWidgetTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class OSMWidgetTest(SimpleTestCase):
def setUp(self):
self.geometries = {
'point': GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
}
def test_osm_widget(self):
class PointForm(forms.Form):
p = forms.PointField(widget=forms.OSMWidget)
geom = self.geometries['point']
form = PointForm(data={'p': geom})
rendered = form.as_p()
self.assertIn("OpenStreetMap (Mapnik)", rendered)
self.assertIn("id: 'id_p',", rendered)
def test_default_lat_lon(self):
class PointForm(forms.Form):
p = forms.PointField(
widget=forms.OSMWidget(attrs={
'default_lon': 20, 'default_lat': 30
}),
)
form = PointForm()
rendered = form.as_p()
self.assertIn("options['default_lon'] = 20;", rendered)
self.assertIn("options['default_lat'] = 30;", rendered)
if forms.OSMWidget.default_lon != 20:
self.assertNotIn(
"options['default_lon'] = %d;" % forms.OSMWidget.default_lon,
rendered)
if forms.OSMWidget.default_lat != 30:
self.assertNotIn(
"options['default_lat'] = %d;" % forms.OSMWidget.default_lat,
rendered)
@skipUnless(HAS_GDAL, "CustomGeometryWidgetTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class CustomGeometryWidgetTest(SimpleTestCase):
def test_custom_serialization_widget(self):
class CustomGeometryWidget(forms.BaseGeometryWidget):
template_name = 'gis/openlayers.html'
deserialize_called = 0
def serialize(self, value):
return value.json if value else ''
def deserialize(self, value):
self.deserialize_called += 1
return GEOSGeometry(value)
class PointForm(forms.Form):
p = forms.PointField(widget=CustomGeometryWidget)
point = GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)")
form = PointForm(data={'p': point})
self.assertIn(escape(point.json), form.as_p())
CustomGeometryWidget.called = 0
widget = form.fields['p'].widget
# Force deserialize use due to a string value
self.assertIn(escape(point.json), widget.render('p', point.json))
self.assertEqual(widget.deserialize_called, 1)
form = PointForm(data={'p': point.json})
self.assertTrue(form.is_valid())
# Ensure that resulting geometry has srid set
self.assertEqual(form.cleaned_data['p'].srid, 4326)
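# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test module): the SRID behaviour the
# tests above exercise, written as plain usage.  Assumes a configured Django
# project with GEOS/GDAL available.
def _example_geometry_field_srid():
    fld = forms.GeometryField(srid=32140)
    # WKT without an SRID is assumed to be in the field's SRID; EWKT with an
    # explicit SRID is transformed to the field's SRID by clean().
    geom = fld.clean('SRID=4326;POINT (-95.363151 29.763374)')
    assert geom.srid == 32140
    return geom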
|
|
import copy
import datetime
from django.core.exceptions import EmptyResultSet, FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.query_utils import Q
from django.utils.deconstruct import deconstructible
from django.utils.functional import cached_property
class Combinable:
"""
Provide the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = '+'
SUB = '-'
MUL = '*'
DIV = '/'
POW = '^'
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = '%%'
# Bitwise operators - note that these are generated by .bitand() and
# .bitor(); '&' and '|' are reserved for boolean operator usage.
BITAND = '&'
BITOR = '|'
BITLEFTSHIFT = '<<'
BITRIGHTSHIFT = '>>'
def _combine(self, other, connector, reversed, node=None):
if not hasattr(other, 'resolve_expression'):
# everything must be resolvable to an expression
if isinstance(other, datetime.timedelta):
other = DurationValue(other, output_field=fields.DurationField())
else:
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __or__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
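# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how Combinable's
# operator overloads build an expression tree.  F and Value are defined later
# in this module; the field names are hypothetical.
def _example_combinable_tree():
    # Each operator goes through _combine(), so this produces nested
    # CombinedExpression nodes equivalent to ((price * quantity) + 5).
    return F('price') * F('quantity') + Value(5)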
@deconstructible
class BaseExpression:
"""Base class for all query expressions."""
# aggregate specific fields
is_summary = False
_output_field = None
def __init__(self, output_field=None):
if output_field is not None:
self._output_field = output_field
def get_db_converters(self, connection):
return [self.convert_value] + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert len(exprs) == 0
def _parse_expressions(self, *expressions):
return [
arg if hasattr(arg, 'resolve_expression') else (
F(arg) if isinstance(arg, str) else Value(arg)
) for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super().as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Return: (sql, params)
Where `sql` is a string containing ordered sql parameters to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
for expr in self.get_source_expressions():
if expr and expr.contains_aggregate:
return True
return False
@cached_property
def contains_column_references(self):
for expr in self.get_source_expressions():
if expr and expr.contains_column_references:
return True
return False
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
"""
Provide the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
* for_save: whether this expression about to be used in a save or update
Return: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions([
expr.resolve_expression(query, allow_joins, reuse, summarize)
for expr in c.get_source_expressions()
])
return c
def _prepare(self, field):
"""Hook used by Lookup.get_prep_lookup() to do custom preparation."""
return self
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""Return the output type of this expressions."""
if self._output_field_or_none is None:
raise FieldError("Cannot resolve expression type, unknown output_field")
return self._output_field_or_none
@cached_property
def _output_field_or_none(self):
"""
Return the output field of this expression, or None if no output type
can be resolved. Note that the 'output_field' property will raise
FieldError if no type can be resolved, but this attribute allows for
None values.
"""
if self._output_field is None:
self._resolve_output_field()
return self._output_field
def _resolve_output_field(self):
"""
Attempt to infer the output type of the expression. If the output
fields of all source fields match, then simply infer the same type
here. This isn't always correct, but it makes sense most of the time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
supply their own output_field with more complex computations.
If a source does not have an `_output_field` then we exclude it from
this check. If all sources are `None`, then an error will be thrown
higher up the stack in the `output_field` property.
"""
if self._output_field is None:
sources = self.get_source_fields()
num_sources = len(sources)
if num_sources == 0:
self._output_field = None
else:
for source in sources:
if self._output_field is None:
self._output_field = source
if source is not None and not isinstance(self._output_field, source.__class__):
raise FieldError(
"Expression contains mixed types. You must set output_field")
def convert_value(self, value, expression, connection, context):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if value is None:
return value
elif internal_type == 'FloatField':
return float(value)
elif internal_type.endswith('IntegerField'):
return int(value)
elif internal_type == 'DecimalField':
return backend_utils.typecast_decimal(value)
return value
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[e.relabeled_clone(change_map) for e in self.get_source_expressions()])
return clone
def copy(self):
c = copy.copy(self)
c.copied = True
return c
def get_group_by_cols(self):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""Return the underlying field types used by this aggregate."""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
yield from expr.flatten()
def __eq__(self, other):
if self.__class__ != other.__class__:
return False
path, args, kwargs = self.deconstruct()
other_path, other_args, other_kwargs = other.deconstruct()
if (path, args) == (other_path, other_args):
kwargs = kwargs.copy()
other_kwargs = other_kwargs.copy()
output_field = type(kwargs.pop('output_field', None))
other_output_field = type(other_kwargs.pop('output_field', None))
if output_field == other_output_field:
return kwargs == other_kwargs
return False
def __hash__(self):
path, args, kwargs = self.deconstruct()
h = hash(path) ^ hash(args)
for kwarg in kwargs.items():
h ^= hash(kwarg)
return h
class Expression(BaseExpression, Combinable):
"""An expression that can be combined with other expressions."""
pass
class CombinedExpression(Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def as_sql(self, compiler, connection):
try:
lhs_output = self.lhs.output_field
except FieldError:
lhs_output = None
try:
rhs_output = self.rhs.output_field
except FieldError:
rhs_output = None
if (not connection.features.has_native_duration_field and
((lhs_output and lhs_output.get_internal_type() == 'DurationField') or
(rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
if (lhs_output and rhs_output and self.connector == self.SUB and
lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and
lhs_output.get_internal_type() == rhs_output.get_internal_type()):
return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection)
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
if not isinstance(side, DurationValue):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == 'DurationField':
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
class TemporalSubtraction(CombinedExpression):
def __init__(self, lhs, rhs):
super().__init__(lhs, self.SUB, rhs, output_field=fields.DurationField())
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs, connection)
rhs = compiler.compile(self.rhs, connection)
return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs)
@deconstructible
class F(Combinable):
"""An object capable of resolving references to existing query objects."""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self):
return hash(self.name)
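# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): typical F() usage.
# `Article` is a hypothetical model with integer `views` and `rating` fields;
# a configured Django project is assumed.
def _example_f_usage(Article):
    Article.objects.update(views=F('views') + 1)           # atomic, done in SQL
    return Article.objects.filter(rating__gt=F('views'))   # field-to-field compare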
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
def as_sql(self, *args, **kwargs):
raise ValueError(
'This queryset contains a reference to an outer query and may '
'only be used in a subquery.'
)
def _prepare(self, output_field=None):
return self
class OuterRef(F):
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def _prepare(self, output_field=None):
return self
class Func(Expression):
"""An SQL function call."""
function = None
template = '%(function)s(%(expressions)s)'
arg_joiner = ', '
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)" % (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = ', '.join(str(key) + '=' + str(val) for key, val in self.extra.items())
if extra:
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
arg_sql, arg_params = compiler.compile(arg)
sql_parts.append(arg_sql)
params.extend(arg_params)
data = self.extra.copy()
data.update(**extra_context)
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data['function'] = function
else:
data.setdefault('function', self.function)
template = template or data.get('template', self.template)
arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner)
data['expressions'] = data['field'] = arg_joiner.join(sql_parts)
return template % data, params
def as_sqlite(self, compiler, connection):
sql, params = self.as_sql(compiler, connection)
try:
if self.output_field.get_internal_type() == 'DecimalField':
sql = 'CAST(%s AS NUMERIC)' % sql
except FieldError:
pass
return sql, params
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
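# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal Func
# subclass.  It assumes the database exposes a LOWER() SQL function.
class _ExampleLower(Func):
    function = 'LOWER'
    arity = 1
# Hypothetical usage: Author.objects.annotate(lower_name=_ExampleLower('name'))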
class Value(Expression):
"""Represent a wrapped value as a node within an expression."""
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super().__init__(output_field=output_field)
self.value = value
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.value)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
# check _output_field to avoid triggering an exception
if self._output_field is not None:
if self.for_save:
val = self.output_field.get_db_prep_save(val, connection=connection)
else:
val = self.output_field.get_db_prep_value(val, connection=connection)
if hasattr(self._output_field, 'get_placeholder'):
return self._output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return 'NULL', []
return '%s', [val]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self):
return []
class DurationValue(Value):
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
if connection.features.has_native_duration_field:
return super().as_sql(compiler, connection)
return connection.ops.date_interval_sql(self.value)
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super().__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return '(%s)' % self.sql, self.params
def get_group_by_cols(self):
return [self]
def __hash__(self):
h = hash(self.sql) ^ hash(self._output_field)
for param in self.params:
h ^= hash(param)
return h
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return '*', []
class Random(Expression):
def __init__(self):
super().__init__(output_field=fields.FloatField())
def __repr__(self):
return "Random()"
def as_sql(self, compiler, connection):
return connection.ops.random_function_sql(), []
class Col(Expression):
contains_column_references = True
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, self.alias, self.target)
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return "%s.%s" % (qn(self.alias), qn(self.target.column)), []
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field)
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return (self.output_field.get_db_converters(connection) +
self.target.get_db_converters(connection))
class Ref(Expression):
"""
Reference to column alias of the query. For example, Ref('sum_cost') in
qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super().__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
self.source, = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return "%s" % connection.ops.quote_name(self.refs), []
def get_group_by_cols(self):
return [self]
class ExpressionWrapper(Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection):
return self.expression.as_sql(compiler, connection)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
class When(Expression):
template = 'WHEN %(condition)s THEN %(result)s'
def __init__(self, condition=None, then=None, **lookups):
if lookups and condition is None:
condition, lookups = Q(**lookups), None
if condition is None or not isinstance(condition, Q) or lookups:
raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
super().__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, 'resolve_expression'):
c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params['condition'] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params['result'] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
class Case(Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = 'CASE %(cases)s ELSE %(default)s END'
case_joiner = ' '
def __init__(self, *cases, default=None, output_field=None, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
super().__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
self.cases = exprs[:-1]
self.default = exprs[-1]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def copy(self):
c = super().copy()
c.cases = c.cases[:]
return c
def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = self.extra.copy()
template_params.update(extra_context)
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params['cases'] = case_joiner.join(case_parts)
template_params['default'] = default_sql
sql_params.extend(default_params)
template = template or template_params.get('template', self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
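# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a searched CASE built
# from When() clauses.  `Order` is a hypothetical model with an integer
# `amount` field.
def _example_case_when(Order):
    return Order.objects.annotate(
        size=Case(
            When(amount__gte=100, then=Value('large')),
            When(amount__gte=10, then=Value('medium')),
            default=Value('small'),
            output_field=fields.CharField(),
        )
    )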
class Subquery(Expression):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = '(%(subquery)s)'
def __init__(self, queryset, output_field=None, **extra):
self.queryset = queryset
self.extra = extra
if output_field is None and len(self.queryset.query.select) == 1:
output_field = self.queryset.query.select[0].field
super().__init__(output_field)
def copy(self):
clone = super().copy()
clone.queryset = clone.queryset.all()
return clone
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
clone = self.copy()
clone.is_summary = summarize
clone.queryset.query.bump_prefix(query)
# Need to recursively resolve these.
def resolve_all(child):
if hasattr(child, 'children'):
[resolve_all(_child) for _child in child.children]
if hasattr(child, 'rhs'):
child.rhs = resolve(child.rhs)
def resolve(child):
if hasattr(child, 'resolve_expression'):
resolved = child.resolve_expression(
query=query, allow_joins=allow_joins, reuse=reuse,
summarize=summarize, for_save=for_save,
)
# Add table alias to the parent query's aliases to prevent
# quoting.
if hasattr(resolved, 'alias'):
clone.queryset.query.external_aliases.add(resolved.alias)
return resolved
return child
resolve_all(clone.queryset.query.where)
for key, value in clone.queryset.query.annotations.items():
if isinstance(value, Subquery):
clone.queryset.query.annotations[key] = resolve(value)
return clone
def get_source_expressions(self):
return [
x for x in [
getattr(expr, 'lhs', None)
for expr in self.queryset.query.where.children
] if x
]
def relabeled_clone(self, change_map):
clone = self.copy()
clone.queryset.query = clone.queryset.query.relabeled_clone(change_map)
clone.queryset.query.external_aliases.update(
alias for alias in change_map.values()
if alias not in clone.queryset.query.tables
)
return clone
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = self.extra.copy()
template_params.update(extra_context)
template_params['subquery'], sql_params = self.queryset.query.get_compiler(connection=connection).as_sql()
template = template or template_params.get('template', self.template)
sql = template % template_params
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
def _prepare(self, output_field):
# This method will only be called if this instance is the "rhs" in an
# expression: the wrapping () must be removed (as the expression that
# contains this will provide them). SQLite evaluates ((subquery))
# differently than the other databases.
if self.template == '(%(subquery)s)':
clone = self.copy()
clone.template = '%(subquery)s'
return clone
return self
class Exists(Subquery):
template = 'EXISTS(%(subquery)s)'
def __init__(self, *args, negated=False, **kwargs):
self.negated = negated
super().__init__(*args, **kwargs)
def __invert__(self):
return type(self)(self.queryset, self.output_field, negated=(not self.negated), **self.extra)
@property
def output_field(self):
return fields.BooleanField()
def resolve_expression(self, query=None, **kwargs):
# As a performance optimization, remove ordering since EXISTS doesn't
# care about it, just whether or not a row matches.
self.queryset = self.queryset.order_by()
return super().resolve_expression(query, **kwargs)
def as_sql(self, compiler, connection, template=None, **extra_context):
sql, params = super().as_sql(compiler, connection, template, **extra_context)
if self.negated:
sql = 'NOT {}'.format(sql)
return sql, params
def as_oracle(self, compiler, connection, template=None, **extra_context):
# Oracle doesn't allow EXISTS() in the SELECT list, so wrap it with a
# CASE WHEN expression. Change the template since the When expression
# requires a left hand side (column) to compare against.
sql, params = self.as_sql(compiler, connection, template, **extra_context)
sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql)
return sql, params
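# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): Subquery and Exists
# combined with OuterRef.  `Post` and `Comment` are hypothetical models where
# Comment has a `post` foreign key and a `created` timestamp.
def _example_subquery_exists(Post, Comment):
    newest = Comment.objects.filter(post=OuterRef('pk')).order_by('-created')
    return Post.objects.annotate(
        newest_comment_id=Subquery(newest.values('pk')[:1]),
        has_comments=Exists(newest),
    )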
class OrderBy(BaseExpression):
template = '%(expression)s %(ordering)s'
def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False):
if nulls_first and nulls_last:
raise ValueError('nulls_first and nulls_last are mutually exclusive')
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, 'resolve_expression'):
raise ValueError('expression must be an expression type')
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
if not template:
if self.nulls_last:
template = '%s NULLS LAST' % self.template
elif self.nulls_first:
template = '%s NULLS FIRST' % self.template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
'expression': expression_sql,
'ordering': 'DESC' if self.descending else 'ASC',
}
placeholders.update(extra_context)
template = template or self.template
return (template % placeholders).rstrip(), params
def as_sqlite(self, compiler, connection):
template = None
if self.nulls_last:
template = '%(expression)s IS NULL, %(expression)s %(ordering)s'
elif self.nulls_first:
template = '%(expression)s IS NOT NULL, %(expression)s %(ordering)s'
return self.as_sql(compiler, connection, template=template)
def as_mysql(self, compiler, connection):
template = None
if self.nulls_last:
template = 'IF(ISNULL(%(expression)s),1,0), %(expression)s %(ordering)s '
elif self.nulls_first:
template = 'IF(ISNULL(%(expression)s),0,1), %(expression)s %(ordering)s '
return self.as_sql(compiler, connection, template=template)
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
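# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): OrderBy is normally
# reached through F().asc()/.desc(), which forward their keyword arguments to
# OrderBy.  `Ticket` is a hypothetical model with a nullable `due_date` field.
def _example_order_by_nulls(Ticket):
    return Ticket.objects.order_by(F('due_date').desc(nulls_last=True))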
|
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package session
# Module caffe2.python.session
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.task import Cluster, Task, TaskGroup, WorkspaceType
class CompiledRunnable(object):
""" Wrapper for compiled runnable returned from session.compile() """
def __init__(self, obj, session_class):
self.obj = obj
self.session_class = session_class
class Session(object):
"""
Allows running Nets, ExecutionSteps, Plans, Tasks and TaskGroups.
A session can potentially run in multiple nodes concurrently.
Example:
from caffe2.python import core
from caffe2.python.core import Net
from caffe2.python.task import Node, Task, TaskGroup, WorkspaceType
net = Net('test1')
net.Add([net.Const(1), net.Const(2)])
net2 = net.Clone()
step = core.execution_step('step1', [net2])
with TaskGroup(WorkspaceType.GLOBAL) as init_tg:
with Node('node1'):
n1setup = net.Net('n1setup')
n1msg = n1setup.Const('Hello from node 1.')
Task(step=n1setup)
with TaskGroup() as private_tg:
with Node('node1'):
n1 = net.Net('n1')
n1.Print(n1msg, 0)
Task(step=n1)
with Node('node2'):
n2 = net.Net('n2')
n2.Print(n2.Const('Hello from node 2.'), 0)
Task(step=n2)
session = LocalSession()
session.run(net)
session.run(step)
session.run(init_tg)
session.run(private_tg)
Global Workspace:
At the beginning of the session, a global workspace is created and kept
alive for the duration of the session.
Private Workspace:
Tasks can be run either directly on the global workspace, or they can
instantiate a private child workspace that is released after each run.
Blob visibility:
Tasks running in different nodes in parallel will always run under
different workspaces, so it must be assumed that they won't be able to
access each other's blobs. Tasks running on the same node will follow
Workspace hierarchy rules: tasks running on separate private workspaces
will only be able to share blobs defined on a common parent Workspace.
"""
_compiled_cache = {}
def __init__(self):
self._open = True
def is_open(self):
return self._open
@classmethod
def compile(cls, runnable, workspace_type=None, setup_net_list=None):
if isinstance(runnable, CompiledRunnable):
assert cls == runnable.session_class, (
'Runnable was compiled for different session type. ' +
'Need: %s, got: %s' % (
cls.__name__, runnable.session_class.__name__))
return runnable
if runnable in cls._compiled_cache:
return cls._compiled_cache[runnable]
if isinstance(runnable, TaskGroup):
if workspace_type:
if runnable.workspace_type():
assert runnable.workspace_type() == workspace_type, \
"Require {} but already have {}".format(
workspace_type, runnable.workspace_type())
else:
runnable._workspace_type = workspace_type
tg = runnable
else:
if workspace_type is None:
workspace_type = WorkspaceType.GLOBAL
tg = TaskGroup(workspace_type=workspace_type)
if isinstance(runnable, Task):
tg.add(runnable)
elif isinstance(runnable, core.ExecutionStep):
tg.add(Task(step=runnable))
elif isinstance(runnable, core.Plan):
# ExecutionSteps in a Plan() object are supposed to run sequentially, while
# tasks in a TaskGroup run in parallel. So if we have multiple
# ExecutionSteps in a Plan() object, we choose to have a root
# ExecutionStep that wraps all of them.
assert len(runnable.Steps()) > 0
if len(runnable.Steps()) == 1:
tg.add(Task(step=runnable.Steps()[0]))
else:
# Task takes a list of ExecutionSteps and automatically wraps them into
# a root ExecutionStep
tg.add(Task(step=runnable.Steps()))
else:
step = core.execution_step('runnable', runnable)
tg.add(Task(step=step))
compiled = CompiledRunnable(
cls._compile_task_group(tg, setup_net_list), session_class=cls)
cls._compiled_cache[runnable] = compiled
return compiled
def run(self, runnable, workspace_type=None, setup_net_list=None):
"""Run the given runnable.
Args:
runnable: Object recognized by the Session. Currently, we support
TaskGroup, Task, Plan, ExecutionStep, and Net.
workspace_type: A string defined in the WorkspaceType object.
setup_net_list: A list of Net objects or a list of NetDef protos.
So far this is only used by the DistributedSession, in which we
need to pass a list of special nets to set up the master.
"""
assert self.is_open(), 'Session is closed.'
assert runnable is not None, 'Got a none runnable.'
self._run_compiled(self.compile(runnable, workspace_type,
setup_net_list).obj)
def close(self):
if self.is_open():
self._do_close()
self._open = False
def fetch_output(self, output):
raise NotImplementedError()
def _run_compiled(self, task_group):
raise NotImplementedError()
@classmethod
def _compile_task_group(cls, task_group, setup_net_list=None):
return task_group
def _do_close(self):
pass
def __enter__(self):
assert self._open, 'Session already closed.'
return self
def __exit__(self, ex_type, value, traceback):
if ex_type is None:
self.close()
class LocalSession(Session):
"""
Session that runs in a single node.
Tasks are all remapped to run in parallel in the 'local' node.
Currently, LocalSession runs all parallel tasks in the same workspace,
but this behavior may change in the future. Only tasks pointing to the
same logical node are guaranteed to always run in the same workspace.
"""
def __init__(self, ws=None):
Session.__init__(self)
self._ws = ws or workspace.C.Workspace.current
@classmethod
def _compile_task_group(cls, task_group, setup_net_list=None):
with Cluster():
task = task_group.to_task()
plan = core.Plan('task_group_plan')
plan.AddStep(task.get_step())
return (plan, task.output_list(), task.workspace_type)
def _run_compiled(self, compiled):
plan, output_list, workspace_type = compiled
# make sure the output blobs belong to the parent workspace
outputs = []
for name in output_list.names():
self._ws.create_blob(str(name))
outputs.append(core.BlobReference(str(name)))
output_list.set_values(outputs, _fetch_func=self._fetch_output)
task_ws = (
workspace.C.Workspace(self._ws)
if workspace_type == WorkspaceType.PRIVATE else self._ws)
with workspace.WorkspaceGuard(task_ws):
task_ws.run(plan)
def _fetch_output(self, output):
return self._ws.blobs[str(output)].fetch()
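# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): compiling a runnable
# once and running the compiled object repeatedly with LocalSession.  The
# task_group argument is assumed to be a caffe2 TaskGroup built elsewhere.
def _example_local_session(task_group):
    session = LocalSession()
    compiled = LocalSession.compile(task_group)  # cached per runnable
    for _ in range(3):
        session.run(compiled)                    # reuses the compiled plan
    session.close()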
|