"""This file is a collection of widgets to produce some common signs and symbols.
Widgets include:
- ETriangle (an equilateral triangle),
- RTriangle (a right angled triangle),
- Octagon,
- Crossbox,
- Tickbox,
- SmileyFace,
- StopSign,
- NoEntry,
- NotAllowed (the red roundel from 'no smoking' signs),
- NoSmoking,
- DangerSign (a black exclamation point in a yellow triangle),
- YesNo (returns a tickbox or a crossbox depending on a testvalue),
- FloppyDisk,
- ArrowOne, and
- ArrowTwo
"""
__version__=''' $Id: signsandsymbols.py 2385 2004-06-17 15:26:05Z rgbecker $ '''
from reportlab.lib import colors
from reportlab.lib.validators import *
from reportlab.lib.attrmap import *
from reportlab.graphics import shapes
from reportlab.graphics.widgetbase import Widget
from reportlab.graphics import renderPDF
class _Symbol(Widget):
"""Abstract base widget
possible attributes:
'x', 'y', 'size', 'fillColor', 'strokeColor'
"""
_nodoc = 1
_attrMap = AttrMap(
x = AttrMapValue(isNumber,desc='symbol x coordinate'),
y = AttrMapValue(isNumber,desc='symbol y coordinate'),
dx = AttrMapValue(isNumber,desc='symbol x coordinate adjustment'),
        dy = AttrMapValue(isNumber,desc='symbol y coordinate adjustment'),
size = AttrMapValue(isNumber),
fillColor = AttrMapValue(isColorOrNone),
strokeColor = AttrMapValue(isColorOrNone),
strokeWidth = AttrMapValue(isNumber),
)
def __init__(self):
assert self.__class__.__name__!='_Symbol', 'Abstract class _Symbol instantiated'
self.x = self.y = self.dx = self.dy = 0
self.size = 100
self.fillColor = colors.red
self.strokeColor = None
self.strokeWidth = 0.1
def demo(self):
D = shapes.Drawing(200, 100)
s = float(self.size)
ob = self.__class__()
ob.x=50
ob.y=0
ob.draw()
D.add(ob)
D.add(shapes.String(ob.x+(s/2),(ob.y-12),
ob.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=10))
return D
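# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It shows the typical pattern for these widgets: instantiate one,
# set its position and size, add it to a Drawing and render it. The output
# filename 'example_sign.pdf' is an arbitrary choice for illustration.
def _exampleUsage():
    D = shapes.Drawing(120, 120)
    sign = Octagon()  # any widget defined in this module works the same way
    sign.x = 10
    sign.y = 10
    sign.size = 100
    D.add(sign)
    renderPDF.drawToFile(D, 'example_sign.pdf', 'signsandsymbols usage sketch')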
class ETriangle(_Symbol):
"""This draws an equilateral triangle."""
    def __init__(self):
        _Symbol.__init__(self)  # use the _Symbol defaults for x, y, size and colors
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# Triangle specific bits
ae = s*0.125 #(ae = 'an eighth')
triangle = shapes.Polygon(points = [
self.x, self.y,
self.x+s, self.y,
self.x+(s/2),self.y+s],
fillColor = self.fillColor,
strokeColor = self.strokeColor,
strokeWidth=s/50.)
g.add(triangle)
return g
class RTriangle(_Symbol):
"""This draws a right-angled triangle.
possible attributes:
'x', 'y', 'size', 'fillColor', 'strokeColor'
"""
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.fillColor = colors.green
self.strokeColor = None
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# Triangle specific bits
ae = s*0.125 #(ae = 'an eighth')
triangle = shapes.Polygon(points = [
self.x, self.y,
self.x+s, self.y,
self.x,self.y+s],
fillColor = self.fillColor,
strokeColor = self.strokeColor,
strokeWidth=s/50.)
g.add(triangle)
return g
class Octagon(_Symbol):
"""This widget draws an Octagon.
possible attributes:
'x', 'y', 'size', 'fillColor', 'strokeColor'
"""
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.fillColor = colors.yellow
self.strokeColor = None
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# Octagon specific bits
athird=s/3
octagon = shapes.Polygon(points=[self.x+athird, self.y,
self.x, self.y+athird,
self.x, self.y+(athird*2),
self.x+athird, self.y+s,
self.x+(athird*2), self.y+s,
self.x+s, self.y+(athird*2),
self.x+s, self.y+athird,
self.x+(athird*2), self.y],
strokeColor = self.strokeColor,
fillColor = self.fillColor,
strokeWidth=10)
g.add(octagon)
return g
class Crossbox(_Symbol):
"""This draws a black box with a red cross in it - a 'checkbox'.
possible attributes:
'x', 'y', 'size', 'crossColor', 'strokeColor', 'crosswidth'
"""
_attrMap = AttrMap(BASE=_Symbol,
crossColor = AttrMapValue(isColorOrNone),
crosswidth = AttrMapValue(isNumber),
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.fillColor = colors.white
self.crossColor = colors.red
self.strokeColor = colors.black
self.crosswidth = 10
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# crossbox specific bits
box = shapes.Rect(self.x+1, self.y+1, s-2, s-2,
fillColor = self.fillColor,
strokeColor = self.strokeColor,
strokeWidth=2)
g.add(box)
crossLine1 = shapes.Line(self.x+(s*0.15), self.y+(s*0.15), self.x+(s*0.85), self.y+(s*0.85),
fillColor = self.crossColor,
strokeColor = self.crossColor,
strokeWidth = self.crosswidth)
g.add(crossLine1)
crossLine2 = shapes.Line(self.x+(s*0.15), self.y+(s*0.85), self.x+(s*0.85) ,self.y+(s*0.15),
fillColor = self.crossColor,
strokeColor = self.crossColor,
strokeWidth = self.crosswidth)
g.add(crossLine2)
return g
class Tickbox(_Symbol):
"""This draws a black box with a red tick in it - another 'checkbox'.
possible attributes:
'x', 'y', 'size', 'tickColor', 'strokeColor', 'tickwidth'
"""
_attrMap = AttrMap(BASE=_Symbol,
tickColor = AttrMapValue(isColorOrNone),
tickwidth = AttrMapValue(isNumber),
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.tickColor = colors.red
self.strokeColor = colors.black
self.fillColor = colors.white
self.tickwidth = 10
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# tickbox specific bits
box = shapes.Rect(self.x+1, self.y+1, s-2, s-2,
fillColor = self.fillColor,
strokeColor = self.strokeColor,
strokeWidth=2)
g.add(box)
tickLine = shapes.PolyLine(points = [self.x+(s*0.15), self.y+(s*0.35), self.x+(s*0.35), self.y+(s*0.15),
self.x+(s*0.35), self.y+(s*0.15), self.x+(s*0.85) ,self.y+(s*0.85)],
fillColor = self.tickColor,
strokeColor = self.tickColor,
strokeWidth = self.tickwidth)
g.add(tickLine)
return g
class SmileyFace(_Symbol):
"""This draws a classic smiley face.
possible attributes:
'x', 'y', 'size', 'fillColor'
"""
def __init__(self):
_Symbol.__init__(self)
self.x = 0
self.y = 0
self.size = 100
self.fillColor = colors.yellow
self.strokeColor = colors.black
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# SmileyFace specific bits
g.add(shapes.Circle(cx=self.x+(s/2), cy=self.y+(s/2), r=s/2,
fillColor=self.fillColor, strokeColor=self.strokeColor,
strokeWidth=max(s/38.,self.strokeWidth)))
for i in (1,2):
g.add(shapes.Ellipse(self.x+(s/3)*i,self.y+(s/3)*2, s/30, s/10,
fillColor=self.strokeColor, strokeColor = self.strokeColor,
strokeWidth=max(s/38.,self.strokeWidth)))
# calculate a pointslist for the mouth
# THIS IS A HACK! - don't use if there is a 'shapes.Arc'
centerx=self.x+(s/2)
centery=self.y+(s/2)
radius=s/3
yradius = radius
xradius = radius
startangledegrees=200
endangledegrees=340
degreedelta = 1
pointslist = []
a = pointslist.append
from math import sin, cos, pi
degreestoradians = pi/180.0
radiansdelta = degreedelta*degreestoradians
startangle = startangledegrees*degreestoradians
endangle = endangledegrees*degreestoradians
while endangle<startangle:
endangle = endangle+2*pi
angle = startangle
while angle<endangle:
x = centerx + cos(angle)*radius
y = centery + sin(angle)*yradius
a(x); a(y)
angle = angle+radiansdelta
# make the mouth
smile = shapes.PolyLine(pointslist,
fillColor = self.strokeColor,
strokeColor = self.strokeColor,
strokeWidth = max(s/38.,self.strokeWidth))
g.add(smile)
return g
class StopSign(_Symbol):
"""This draws a (British) stop sign.
possible attributes:
'x', 'y', 'size'
"""
_attrMap = AttrMap(BASE=_Symbol,
stopColor = AttrMapValue(isColorOrNone,desc='color of the word stop'),
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.strokeColor = colors.black
self.fillColor = colors.orangered
self.stopColor = colors.ghostwhite
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# stop-sign specific bits
athird=s/3
outerOctagon = shapes.Polygon(points=[self.x+athird, self.y,
self.x, self.y+athird,
self.x, self.y+(athird*2),
self.x+athird, self.y+s,
self.x+(athird*2), self.y+s,
self.x+s, self.y+(athird*2),
self.x+s, self.y+athird,
self.x+(athird*2), self.y],
strokeColor = self.strokeColor,
fillColor = None,
strokeWidth=1)
g.add(outerOctagon)
innerOctagon = shapes.Polygon(points=[self.x+athird+(s/75), self.y+(s/75),
self.x+(s/75), self.y+athird+(s/75),
self.x+(s/75), self.y+(athird*2)-(s/75),
self.x+athird+(s/75), self.y+s-(s/75),
self.x+(athird*2)-(s/75), (self.y+s)-(s/75),
(self.x+s)-(s/75), self.y+(athird*2)-(s/75),
(self.x+s)-(s/75), self.y+athird+(s/75),
self.x+(athird*2)-(s/75), self.y+(s/75)],
strokeColor = None,
fillColor = self.fillColor,
strokeWidth=0)
g.add(innerOctagon)
if self.stopColor:
g.add(shapes.String(self.x+(s*0.5),self.y+(s*0.4),
'STOP', fillColor=self.stopColor, textAnchor='middle',
fontSize=s/3, fontName="Helvetica-Bold"))
return g
class NoEntry(_Symbol):
"""This draws a (British) No Entry sign - a red circle with a white line on it.
possible attributes:
'x', 'y', 'size'
"""
_attrMap = AttrMap(BASE=_Symbol,
innerBarColor = AttrMapValue(isColorOrNone,desc='color of the inner bar'),
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.strokeColor = colors.black
self.fillColor = colors.orangered
self.innerBarColor = colors.ghostwhite
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# no-entry-sign specific bits
if self.strokeColor:
g.add(shapes.Circle(cx = (self.x+(s/2)), cy = (self.y+(s/2)), r = s/2, fillColor = None, strokeColor = self.strokeColor, strokeWidth=1))
if self.fillColor:
g.add(shapes.Circle(cx = (self.x+(s/2)), cy =(self.y+(s/2)), r = ((s/2)-(s/50)), fillColor = self.fillColor, strokeColor = None, strokeWidth=0))
innerBarColor = self.innerBarColor
if innerBarColor:
g.add(shapes.Rect(self.x+(s*0.1), self.y+(s*0.4), width=s*0.8, height=s*0.2, fillColor = innerBarColor, strokeColor = innerBarColor, strokeLineCap = 1, strokeWidth = 0))
return g
class NotAllowed(_Symbol):
"""This draws a 'forbidden' roundel (as used in the no-smoking sign).
possible attributes:
'x', 'y', 'size'
"""
_attrMap = AttrMap(BASE=_Symbol,
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.strokeColor = colors.red
self.fillColor = colors.white
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
strokeColor = self.strokeColor
        # not-allowed specific bits
outerCircle = shapes.Circle(cx = (self.x+(s/2)), cy = (self.y+(s/2)), r = (s/2)-(s/10), fillColor = self.fillColor, strokeColor = strokeColor, strokeWidth=s/10.)
g.add(outerCircle)
centerx=self.x+s
centery=self.y+(s/2)-(s/6)
radius=s-(s/6)
yradius = radius/2
xradius = radius/2
startangledegrees=100
endangledegrees=-80
degreedelta = 90
pointslist = []
a = pointslist.append
from math import sin, cos, pi
degreestoradians = pi/180.0
radiansdelta = degreedelta*degreestoradians
startangle = startangledegrees*degreestoradians
endangle = endangledegrees*degreestoradians
while endangle<startangle:
endangle = endangle+2*pi
angle = startangle
while angle<endangle:
x = centerx + cos(angle)*radius
y = centery + sin(angle)*yradius
a(x); a(y)
angle = angle+radiansdelta
crossbar = shapes.PolyLine(pointslist, fillColor = strokeColor, strokeColor = strokeColor, strokeWidth = s/10.)
g.add(crossbar)
return g
class NoSmoking(NotAllowed):
"""This draws a no-smoking sign.
possible attributes:
'x', 'y', 'size'
"""
def __init__(self):
NotAllowed.__init__(self)
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = NotAllowed.draw(self)
# no-smoking-sign specific bits
newx = self.x+(s/2)-(s/3.5)
newy = self.y+(s/2)-(s/32)
cigarrette1 = shapes.Rect(x = newx, y = newy, width = (s/2), height =(s/16),
fillColor = colors.ghostwhite, strokeColor = colors.gray, strokeWidth=0)
newx=newx+(s/2)+(s/64)
g.insert(-1,cigarrette1)
cigarrette2 = shapes.Rect(x = newx, y = newy, width = (s/80), height =(s/16),
fillColor = colors.orangered, strokeColor = None, strokeWidth=0)
newx= newx+(s/35)
g.insert(-1,cigarrette2)
cigarrette3 = shapes.Rect(x = newx, y = newy, width = (s/80), height =(s/16),
fillColor = colors.orangered, strokeColor = None, strokeWidth=0)
newx= newx+(s/35)
g.insert(-1,cigarrette3)
cigarrette4 = shapes.Rect(x = newx, y = newy, width = (s/80), height =(s/16),
fillColor = colors.orangered, strokeColor = None, strokeWidth=0)
newx= newx+(s/35)
g.insert(-1,cigarrette4)
return g
class DangerSign(_Symbol):
"""This draws a 'danger' sign: a yellow box with a black exclamation point.
possible attributes:
'x', 'y', 'size', 'strokeColor', 'fillColor', 'strokeWidth'
"""
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.strokeColor = colors.black
self.fillColor = colors.gold
self.strokeWidth = self.size*0.125
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
ew = self.strokeWidth
ae = s*0.125 #(ae = 'an eighth')
# danger sign specific bits
ew = self.strokeWidth
ae = s*0.125 #(ae = 'an eighth')
outerTriangle = shapes.Polygon(points = [
self.x, self.y,
self.x+s, self.y,
self.x+(s/2),self.y+s],
fillColor = None,
strokeColor = self.strokeColor,
strokeWidth=0)
g.add(outerTriangle)
innerTriangle = shapes.Polygon(points = [
self.x+(s/50), self.y+(s/75),
(self.x+s)-(s/50), self.y+(s/75),
self.x+(s/2),(self.y+s)-(s/50)],
fillColor = self.fillColor,
strokeColor = None,
strokeWidth=0)
g.add(innerTriangle)
exmark = shapes.Polygon(points=[
((self.x+s/2)-ew/2), self.y+ae*2.5,
((self.x+s/2)+ew/2), self.y+ae*2.5,
((self.x+s/2)+((ew/2))+(ew/6)), self.y+ae*5.5,
((self.x+s/2)-((ew/2))-(ew/6)), self.y+ae*5.5],
fillColor = self.strokeColor,
strokeColor = None)
g.add(exmark)
exdot = shapes.Polygon(points=[
((self.x+s/2)-ew/2), self.y+ae,
((self.x+s/2)+ew/2), self.y+ae,
((self.x+s/2)+ew/2), self.y+ae*2,
((self.x+s/2)-ew/2), self.y+ae*2],
fillColor = self.strokeColor,
strokeColor = None)
g.add(exdot)
return g
class YesNo(_Symbol):
"""This widget draw a tickbox or crossbox depending on 'testValue'.
If this widget is supplied with a 'True' or 1 as a value for
testValue, it will use the tickbox widget. Otherwise, it will
produce a crossbox.
possible attributes:
'x', 'y', 'size', 'tickcolor', 'crosscolor', 'testValue'
"""
_attrMap = AttrMap(BASE=_Symbol,
tickcolor = AttrMapValue(isColor),
crosscolor = AttrMapValue(isColor),
testValue = AttrMapValue(isBoolean),
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.tickcolor = colors.green
self.crosscolor = colors.red
self.testValue = 1
def draw(self):
if self.testValue:
yn=Tickbox()
yn.tickColor=self.tickcolor
else:
yn=Crossbox()
yn.crossColor=self.crosscolor
yn.x=self.x
yn.y=self.y
yn.size=self.size
yn.draw()
return yn
def demo(self):
D = shapes.Drawing(200, 100)
yn = YesNo()
yn.x = 15
yn.y = 25
yn.size = 70
yn.testValue = 0
yn.draw()
D.add(yn)
yn2 = YesNo()
yn2.x = 120
yn2.y = 25
yn2.size = 70
yn2.testValue = 1
yn2.draw()
D.add(yn2)
labelFontSize = 8
D.add(shapes.String(yn.x+(yn.size/2),(yn.y-(1.2*labelFontSize)),
'testValue=0', fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
D.add(shapes.String(yn2.x+(yn2.size/2),(yn2.y-(1.2*labelFontSize)),
'testValue=1', fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
labelFontSize = 10
D.add(shapes.String(yn.x+85,(yn.y-20),
self.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
return D
class FloppyDisk(_Symbol):
"""This widget draws an icon of a floppy disk.
possible attributes:
    'x', 'y', 'size', 'diskColor'
"""
_attrMap = AttrMap(BASE=_Symbol,
diskColor = AttrMapValue(isColor),
)
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.diskColor = colors.black
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# floppy disk specific bits
diskBody = shapes.Rect(x=self.x, y=self.y+(s/100), width=s, height=s-(s/100),
fillColor = self.diskColor,
strokeColor = None,
strokeWidth=0)
g.add(diskBody)
label = shapes.Rect(x=self.x+(s*0.1), y=(self.y+s)-(s*0.5), width=s*0.8, height=s*0.48,
fillColor = colors.whitesmoke,
strokeColor = None,
strokeWidth=0)
g.add(label)
labelsplash = shapes.Rect(x=self.x+(s*0.1), y=(self.y+s)-(s*0.1), width=s*0.8, height=s*0.08,
fillColor = colors.royalblue,
strokeColor = None,
strokeWidth=0)
g.add(labelsplash)
line1 = shapes.Line(x1=self.x+(s*0.15), y1=self.y+(0.6*s), x2=self.x+(s*0.85), y2=self.y+(0.6*s),
fillColor = colors.black,
strokeColor = colors.black,
strokeWidth=0)
g.add(line1)
line2 = shapes.Line(x1=self.x+(s*0.15), y1=self.y+(0.7*s), x2=self.x+(s*0.85), y2=self.y+(0.7*s),
fillColor = colors.black,
strokeColor = colors.black,
strokeWidth=0)
g.add(line2)
line3 = shapes.Line(x1=self.x+(s*0.15), y1=self.y+(0.8*s), x2=self.x+(s*0.85), y2=self.y+(0.8*s),
fillColor = colors.black,
strokeColor = colors.black,
strokeWidth=0)
g.add(line3)
metalcover = shapes.Rect(x=self.x+(s*0.2), y=(self.y), width=s*0.5, height=s*0.35,
fillColor = colors.silver,
strokeColor = None,
strokeWidth=0)
g.add(metalcover)
coverslot = shapes.Rect(x=self.x+(s*0.28), y=(self.y)+(s*0.035), width=s*0.12, height=s*0.28,
fillColor = self.diskColor,
strokeColor = None,
strokeWidth=0)
g.add(coverslot)
return g
class ArrowOne(_Symbol):
"""This widget draws an arrow (style one).
possible attributes:
'x', 'y', 'size', 'fillColor'
"""
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.fillColor = colors.red
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# arrow specific bits
body = shapes.Rect(x=self.x, y=(self.y+(s/2))-(s/6), width=2*(s/3), height=(s/3),
fillColor = self.fillColor,
strokeColor = None,
strokeWidth=0)
g.add(body)
head = shapes.Polygon(points = [self.x+(3*(s/6)), (self.y+(s/2)),
self.x+(3*(s/6)), self.y+8*(s/10),
self.x+s, self.y+(s/2),
self.x+(3*(s/6)), self.y+2*(s/10)],
fillColor = self.fillColor,
strokeColor = None,
strokeWidth=0)
g.add(head)
return g
class ArrowTwo(ArrowOne):
"""This widget draws an arrow (style two).
possible attributes:
'x', 'y', 'size', 'fillColor'
"""
def __init__(self):
self.x = 0
self.y = 0
self.size = 100
self.fillColor = colors.blue
def draw(self):
# general widget bits
s = float(self.size) # abbreviate as we will use this a lot
g = shapes.Group()
# arrow specific bits
body = shapes.Rect(x=self.x, y=(self.y+(s/2))-(s/24), width=9*(s/10), height=(s/12),
fillColor = self.fillColor,
strokeColor = None,
strokeWidth=0)
g.add(body)
head = shapes.Polygon(points = [self.x+(2.5*(s/3)), (self.y+(s/2)),
self.x+(4*(s/6)), self.y+4*(s/6),
self.x+s, self.y+(s/2),
self.x+(4*(s/6)), self.y+2*(s/6)],
fillColor = self.fillColor,
strokeColor = None,
strokeWidth=0)
g.add(head)
return g
def test():
"""This function produces a pdf with examples of all the signs and symbols from this file.
"""
labelFontSize = 10
D = shapes.Drawing(450,650)
cb = Crossbox()
cb.x = 20
cb.y = 530
D.add(cb)
D.add(shapes.String(cb.x+(cb.size/2),(cb.y-(1.2*labelFontSize)),
cb.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
tb = Tickbox()
tb.x = 170
tb.y = 530
D.add(tb)
D.add(shapes.String(tb.x+(tb.size/2),(tb.y-(1.2*labelFontSize)),
tb.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
yn = YesNo()
yn.x = 320
yn.y = 530
D.add(yn)
tempstring = yn.__class__.__name__ + '*'
D.add(shapes.String(yn.x+(tb.size/2),(yn.y-(1.2*labelFontSize)),
tempstring, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
D.add(shapes.String(130,6,
"(The 'YesNo' widget returns a tickbox if testvalue=1, and a crossbox if testvalue=0)", fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize*0.75))
ss = StopSign()
ss.x = 20
ss.y = 400
D.add(ss)
D.add(shapes.String(ss.x+(ss.size/2), ss.y-(1.2*labelFontSize),
ss.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
ne = NoEntry()
ne.x = 170
ne.y = 400
D.add(ne)
D.add(shapes.String(ne.x+(ne.size/2),(ne.y-(1.2*labelFontSize)),
ne.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
sf = SmileyFace()
sf.x = 320
sf.y = 400
D.add(sf)
D.add(shapes.String(sf.x+(sf.size/2),(sf.y-(1.2*labelFontSize)),
sf.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
ds = DangerSign()
ds.x = 20
ds.y = 270
D.add(ds)
D.add(shapes.String(ds.x+(ds.size/2),(ds.y-(1.2*labelFontSize)),
ds.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
na = NotAllowed()
na.x = 170
na.y = 270
D.add(na)
D.add(shapes.String(na.x+(na.size/2),(na.y-(1.2*labelFontSize)),
na.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
ns = NoSmoking()
ns.x = 320
ns.y = 270
D.add(ns)
D.add(shapes.String(ns.x+(ns.size/2),(ns.y-(1.2*labelFontSize)),
ns.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
a1 = ArrowOne()
a1.x = 20
a1.y = 140
D.add(a1)
D.add(shapes.String(a1.x+(a1.size/2),(a1.y-(1.2*labelFontSize)),
a1.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
a2 = ArrowTwo()
a2.x = 170
a2.y = 140
D.add(a2)
D.add(shapes.String(a2.x+(a2.size/2),(a2.y-(1.2*labelFontSize)),
a2.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
fd = FloppyDisk()
fd.x = 320
fd.y = 140
D.add(fd)
D.add(shapes.String(fd.x+(fd.size/2),(fd.y-(1.2*labelFontSize)),
fd.__class__.__name__, fillColor=colors.black, textAnchor='middle',
fontSize=labelFontSize))
renderPDF.drawToFile(D, 'signsandsymbols.pdf', 'signsandsymbols.py')
print 'wrote file: signsandsymbols.pdf'
if __name__=='__main__':
test() | {
"content_hash": "58153cc07c4e1515fc428770a6254bf0",
"timestamp": "",
"source": "github",
"line_count": 913,
"max_line_length": 181,
"avg_line_length": 33.71960569550931,
"alnum_prop": 0.49811602676541283,
"repo_name": "jwheare/digest",
"id": "11262f00f5649fb991be5cb410ead376df03e563",
"size": "31093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/reportlab/graphics/widgets/signsandsymbols.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "75169"
},
{
"name": "Python",
"bytes": "3874016"
}
],
"symlink_target": ""
} |
"""
Created on Wed Mar 10 14:58:01 2021
@author: Lucian
"""
import tellurium as te
import phrasedml
import libsedml
import sys
r = te.loada("""
model case_01
species S1=10, S2=5
S1 -> S2; S1*k
k = 0.3
end
""")
SBML = r.getSBML()
p_str = """
model0 = model "case_01.xml"
sim0 = simulate uniform(0, 10, 10)
task0 = run sim0 on model0
plot "UniformTimecourse" time vs S1, S2
report task0.time vs task0.S1
"""
te.saveToFile("case_01.xml", SBML)
# phrasedml.setReferencedSBML("case_01.xml", SBML)
sed = phrasedml.convertString(p_str)
if sed is None:
print(phrasedml.getLastError())
sys.exit()
sedml = libsedml.readSedMLFromString(sed)
sedml.setVersion(4)
plot = sedml.getOutput(0)
curve = plot.getCurve(0)
curve.setType(libsedml.SEDML_CURVETYPE_HORIZONTALBARSTACKED)
curve.setStyle("blue_with_purple")
curve = plot.getCurve(1)
curve.setType(libsedml.SEDML_CURVETYPE_HORIZONTALBARSTACKED)
curve.setStyle("blue_with_red")
style = sedml.createStyle()
style.setId("blue_with_purple")
line = style.createLineStyle()
line.setColor("#FF00FF")
line.setThickness(4)
fill = style.createFillStyle()
fill.setColor("#0000FF")
style = sedml.createStyle()
style.setId("blue_with_red")
line = style.createLineStyle()
line.setColor("#FF0000")
line.setThickness(4)
fill = style.createFillStyle()
fill.setColor("#aaaaFF")
sedstr = libsedml.writeSedMLToString(sedml)
print(sedstr)
te.saveToFile("case_01", SBML)
te.executeSEDML(sedstr)
import os
sedfile = os.path.basename(__file__)
sedfile = sedfile.replace(".py", ".sedml")
sedfile = sedfile.replace("create_sedml_", "")
te.saveToFile(sedfile, sedstr)
| {
"content_hash": "e08de9b36e8ac297b314392a65213f3a",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 60,
"avg_line_length": 21.467532467532468,
"alnum_prop": 0.7114337568058077,
"repo_name": "luciansmith/sedml-test-suite",
"id": "8fdbac5d55befe00fbcbaea5994a3e8f6cff6720",
"size": "1677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contributions/HARMONY 2021/create_sedml_test_hbar_stacked.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""This tool builds or repacks the client binaries.
This handles build invocations across the supported platforms, including
Visual Studio, PyInstaller and other packaging mechanisms.
"""
import getpass
import logging
import os
import platform
import time
# pylint: disable=unused-import
from grr.client import client_plugins
# pylint: enable=unused-import
from grr.lib import build
from grr.lib import builders
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import startup
from grr.lib.builders import signing
parser = flags.PARSER
# Guess which arch we should be building based on where we are running.
if platform.architecture()[0] == "32bit":
default_arch = "i386"
else:
default_arch = "amd64"
default_platform = platform.system().lower()
parser.add_argument(
"--platform", choices=["darwin", "linux", "windows"],
default=default_platform,
help="The platform to build or repack for. This will default to "
"the current platform: %s." % platform.system())
parser.add_argument(
"--arch", choices=["amd64", "i386"],
default=default_arch,
help="The architecture to build or repack for.")
parser.add_argument("--sign", action="store_true", default=False,
help="Sign executables.")
# Guess which package format we should be building based on where we are
# running.
if default_platform == "linux":
distro = platform.linux_distribution()[0]
if distro in ["Ubuntu", "debian"]:
default_package = "deb"
elif distro in ["CentOS Linux", "CentOS", "centos", "redhat", "fedora"]:
default_package = "rpm"
else:
default_package = None
elif default_platform == "darwin":
default_package = "dmg"
elif default_platform == "windows":
default_package = "exe"
parser.add_argument(
"--package_format", choices=["deb", "rpm"],
default=default_package,
help="The packaging format to use when building a Linux client.")
# Initialize sub parsers and their arguments.
subparsers = parser.add_subparsers(
title="subcommands", dest="subparser_name", description="valid subcommands")
# Build arguments.
parser_build = subparsers.add_parser(
"build", help="Build a client from source.")
parser_repack = subparsers.add_parser(
"repack", help="Repack a zip file into an installer (Only useful when "
"signing).")
parser_repack.add_argument("--template", default=None,
help="The template zip file to repack.")
parser_repack.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_repack.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_repack.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_repack.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy = subparsers.add_parser(
"deploy", help="Build a deployable self installer from a package.")
parser_deploy.add_argument("--template", default=None,
help="The template zip file to deploy.")
parser_deploy.add_argument("--templatedir", default="",
help="Directory containing template zip files to "
"repack. Incompatible with --template")
parser_deploy.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_deploy.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_deploy.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_buildanddeploy = subparsers.add_parser(
"buildanddeploy",
help="Build and deploy clients for multiple labels and architectures.")
parser_buildanddeploy.add_argument("--templatedir", default="", help="Directory"
"containing template zip files to repack.")
parser_buildanddeploy.add_argument("--debug_build", action="store_true",
default=False, help="Create a debug client.")
args = parser.parse_args()
def GetBuilder(context):
"""Get instance of builder class based on flags."""
try:
if "Target:Darwin" in context:
builder_class = builders.DarwinClientBuilder
elif "Target:Windows" in context:
builder_class = builders.WindowsClientBuilder
elif "Target:LinuxDeb" in context:
builder_class = builders.LinuxClientBuilder
elif "Target:LinuxRpm" in context:
builder_class = builders.CentosClientBuilder
else:
parser.error("Bad build context: %s" % context)
except AttributeError:
raise RuntimeError("Unable to build for platform %s when running "
"on current platform." % args.platform)
return builder_class(context=context)
def GetDeployer(context, signer=None):
"""Get the appropriate client deployer based on the selected flags."""
# TODO(user): The builder-deployer separation probably can be consolidated
# into something simpler under the vagrant build system.
if "Target:Darwin" in context:
deployer_class = build.DarwinClientDeployer
elif "Target:Windows" in context:
deployer_class = build.WindowsClientDeployer
elif "Target:LinuxDeb" in context:
deployer_class = build.LinuxClientDeployer
elif "Target:LinuxRpm" in context:
deployer_class = build.CentosClientDeployer
else:
parser.error("Bad build context: %s" % context)
return deployer_class(context=context, signer=signer)
def GetSigner(context):
if args.platform == "windows" and args.subparser_name in ["deploy", "repack",
"buildanddeploy"]:
print "Enter passphrase for code signing cert:"
passwd = getpass.getpass()
cert = config_lib.CONFIG.Get(
"ClientBuilder.windows_signing_cert", context=context)
key = config_lib.CONFIG.Get(
"ClientBuilder.windows_signing_key", context=context)
app_name = config_lib.CONFIG.Get(
"ClientBuilder.windows_signing_application_name", context=context)
return signing.WindowsCodeSigner(cert, key, passwd, app_name)
else:
parser.error("Signing only supported on windows for deploy, repack,"
" buildanddeploy")
def TemplateInputFilename(context):
"""Build template file name from config."""
if args.templatedir:
filename = config_lib.CONFIG.Get("PyInstaller.template_filename",
context=context)
return os.path.join(args.templatedir, filename)
return None
def BuildAndDeployWindows(signer=None):
"""Run buildanddeploy for 32/64 dbg/prod."""
build_combos = [
{"arch": "amd64", "debug_build": True},
{"arch": "amd64", "debug_build": False},
{"arch": "i386", "debug_build": True},
{"arch": "i386", "debug_build": False}]
timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
args.package_format = "exe"
context_orig = SetOSContextFromArgs([])
# Take a copy of the context list so we can reset back to clean state for each
# buildanddeploy run
context = list(context_orig)
for argset in build_combos:
for key, value in argset.items():
setattr(args, key, value)
context = SetArchContextFromArgs(context)
context = SetDebugContextFromArgs(context)
print "Building for: %s" % context
BuildAndDeploy(context, timestamp=timestamp, signer=signer)
context = list(context_orig)
def BuildAndDeploy(context, signer=None, timestamp=None):
"""Run build and deploy to create installers."""
# ISO 8601 date
timestamp = timestamp or time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
# Output directory like: 2015-02-13T21:48:47-0800/linux_amd64_deb/
spec = "_".join((args.platform, args.arch, args.package_format))
output_dir = os.path.join(config_lib.CONFIG.Get(
"ClientBuilder.executables_path", context=context), timestamp, spec)
# If we weren't passed a template, build one
if args.templatedir:
template_path = TemplateInputFilename(context)
else:
template_path = os.path.join(output_dir, config_lib.CONFIG.Get(
"PyInstaller.template_filename", context=context))
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate(output_file=template_path)
# Get the list of contexts which we should be building.
context_list = config_lib.CONFIG.Get("ClientBuilder.BuildTargets")
logging.info("Building installers for: %s", context_list)
deployed_list = []
for deploycontext in context_list:
# Add the settings for this context
for newcontext in deploycontext.split(","):
context.append(newcontext)
try:
deployer = GetDeployer(context, signer=signer)
# If the ClientBuilder.target_platforms doesn't match our environment,
# skip.
if not config_lib.CONFIG.MatchBuildContext(args.platform, args.arch,
args.package_format,
context=deployer.context):
continue
# Make a nicer filename out of the context string.
context_filename = deploycontext.replace(
"AllPlatforms Context,", "").replace(",", "_").replace(" ", "_")
deployed_list.append(context_filename)
output_filename = os.path.join(
output_dir, context_filename,
config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context))
logging.info("Deploying %s as %s with labels: %s", deploycontext,
config_lib.CONFIG.Get(
"Client.name", context=deployer.context),
config_lib.CONFIG.Get(
"Client.labels", context=deployer.context))
deployer.MakeDeployableBinary(template_path, output_filename)
finally:
# Remove the custom settings for the next deploy
for newcontext in deploycontext.split(","):
context.remove(newcontext)
logging.info("Complete, installers for %s are in %s", deployed_list,
output_dir)
def Deploy(context, signer=None):
"""Reconfigure a client template to match config.
Args:
context: config_lib context
signer: lib.builders.signing.CodeSigner object
"""
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
deployer = GetDeployer(context, signer=signer)
template_path = (args.template or TemplateInputFilename(deployer.context) or
config_lib.CONFIG.Get("ClientBuilder.template_path",
context=deployer.context))
# If neither output filename or output directory is specified,
# use the default location from the config file.
output = None
if args.output:
output = args.output
elif args.outputdir:
# If output filename isn't specified, write to args.outputdir with a
# .deployed extension so we can distinguish it from repacked binaries.
filename = ".".join(
(config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context), "deployed"))
output = os.path.join(args.outputdir, filename)
deployer.MakeDeployableBinary(template_path, output)
def Repack(context, signer=None):
"""Turn a template into an installer.
Args:
context: config_lib context
signer: lib.builders.signing.CodeSigner object
"""
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
deployer = GetDeployer(context, signer=signer)
output_filename = os.path.join(
args.outputdir, config_lib.CONFIG.Get(
"ClientBuilder.output_filename", context=deployer.context))
deployer.RepackInstaller(open(args.template, "rb").read(), args.output or
output_filename)
def SetOSContextFromArgs(context):
"""Set OS context sections based on args."""
context.append("ClientBuilder Context")
if args.platform == "darwin":
context = ["Platform:Darwin", "Target:Darwin"] + context
elif args.platform == "windows":
context = ["Platform:Windows", "Target:Windows"] + context
elif args.platform == "linux":
context = ["Platform:Linux", "Target:Linux"] + context
if args.package_format == "deb":
context = ["Target:LinuxDeb"] + context
elif args.package_format == "rpm":
context = ["Target:LinuxRpm"] + context
else:
parser.error("Couldn't guess packaging format for: %s" %
platform.linux_distribution()[0])
else:
parser.error("Unsupported build platform: %s" % args.platform)
return context
def SetArchContextFromArgs(context):
if args.arch == "amd64":
context.append("Arch:amd64")
else:
context.append("Arch:i386")
return context
def SetDebugContextFromArgs(context):
if args.subparser_name != "build" and args.debug_build:
context += ["DebugClientBuild Context"]
return context
def SetContextFromArgs(context):
context = SetArchContextFromArgs(context)
context = SetDebugContextFromArgs(context)
return SetOSContextFromArgs(context)
def main(_):
"""Launch the appropriate builder."""
config_lib.CONFIG.AddContext(
"ClientBuilder Context",
"Context applied when we run the client builder script.")
startup.ClientInit()
# Make sure we have all the secondary configs since they may be set under the
# ClientBuilder Context
for secondconfig in config_lib.CONFIG["ConfigIncludes"]:
config_lib.CONFIG.LoadSecondaryConfig(secondconfig)
# Use basic console output logging so we can see what is happening.
logger = logging.getLogger()
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
logger.handlers = [handler]
context = flags.FLAGS.context
context = SetContextFromArgs(context)
signer = None
if args.sign:
signer = GetSigner(context)
if args.subparser_name == "build":
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate()
elif args.subparser_name == "repack":
Repack(context, signer=signer)
elif args.subparser_name == "deploy":
Deploy(context, signer=signer)
elif args.subparser_name == "buildanddeploy":
if args.platform == "windows":
# Handle windows differently because we do 32, 64, and debug builds all at
# once.
BuildAndDeployWindows(signer=signer)
else:
BuildAndDeploy(context, signer=signer)
if __name__ == "__main__":
flags.StartMain(main)
| {
"content_hash": "bccfc08d16d2108e6f699048a7342aa2",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 80,
"avg_line_length": 35.8997668997669,
"alnum_prop": 0.6631387572235569,
"repo_name": "pchaigno/grr",
"id": "01dd6cdeccd23c0ea55b50c2e14b8aa24f0655ca",
"size": "15423",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "client/client_build.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "14896"
},
{
"name": "C",
"bytes": "10598"
},
{
"name": "C++",
"bytes": "276081"
},
{
"name": "CMake",
"bytes": "3044"
},
{
"name": "CSS",
"bytes": "12677"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "71587"
},
{
"name": "JavaScript",
"bytes": "228300"
},
{
"name": "Makefile",
"bytes": "6232"
},
{
"name": "Protocol Buffer",
"bytes": "197889"
},
{
"name": "Python",
"bytes": "5172085"
},
{
"name": "Ruby",
"bytes": "5103"
},
{
"name": "Shell",
"bytes": "43112"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
} |
class VarEnv:
def __init__(self):
pass
| {
"content_hash": "efacba9a8c113af34a3742fe1904af73",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 21,
"avg_line_length": 15,
"alnum_prop": 0.5777777777777777,
"repo_name": "jonathanmarvens/jeeves",
"id": "efc88792a414113b8830a04c61a338f14d8ebf3c",
"size": "45",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "env/VarEnv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24672"
},
{
"name": "HTML",
"bytes": "183277"
},
{
"name": "JavaScript",
"bytes": "81040"
},
{
"name": "Makefile",
"bytes": "9025"
},
{
"name": "Python",
"bytes": "673325"
},
{
"name": "Shell",
"bytes": "110"
}
],
"symlink_target": ""
} |
"""
test_motome_notemodel
----------------------------------
Tests for `Motome.Models.NoteModel`
"""
import glob
import os
import shutil
import unittest
from Motome.Models.NoteModel import NoteModel
from Motome.config import NOTE_EXTENSION, HISTORY_FOLDER, END_OF_TEXT
TESTS_DIR = os.path.join(os.getcwd(), 'tests')
TESTER_NOTES_PATH = os.path.join(os.getcwd(), 'tests', 'notes_for_testing')
ZEN_TEXT_FILE = os.path.join(os.getcwd(), 'tests', 'zen.txt')
class TestNoteModel(unittest.TestCase):
def setUp(self):
self.notepaths = set(glob.glob(TESTER_NOTES_PATH + '/*' + NOTE_EXTENSION))
self.db_notes = dict()
for filepath in self.notepaths:
filename = os.path.basename(filepath)
if filename not in self.db_notes.keys():
note = NoteModel(filepath)
self.db_notes[note.filename] = note
def test_add_remove(self):
filepath = os.path.join(TESTER_NOTES_PATH, 'zen' + NOTE_EXTENSION)
zen_note = NoteModel(filepath)
# file doesn't exist yet
self.assertFalse(os.path.exists(filepath))
self.assertEqual(zen_note.content, '')
self.assertEqual(zen_note.timestamp, -1)
self.assertEqual(zen_note.metadata, dict())
self.assertEqual(zen_note.history, list())
self.assertEqual(zen_note.wordset, '')
self.assertFalse(zen_note.recorded)
self.assertFalse(zen_note.pinned)
# add content
content = NoteModel.enc_read(ZEN_TEXT_FILE)
zen_note.content = content
self.assertTrue(os.path.exists(filepath))
self.assertNotEqual(zen_note.metadata, dict())
self.assertNotEqual(zen_note.timestamp, -1)
# remove note
zen_note.remove()
self.assertFalse(os.path.exists(filepath))
self.assertEqual(zen_note.content, '')
self.assertEqual(zen_note.timestamp, -1)
self.assertEqual(zen_note.metadata, dict())
self.assertEqual(zen_note.history, list())
self.assertEqual(zen_note.wordset, '')
self.assertFalse(zen_note.recorded)
self.assertFalse(zen_note.pinned)
def test_add_rename(self):
filepath = os.path.join(TESTER_NOTES_PATH, 'zen' + NOTE_EXTENSION)
zen_note = NoteModel(filepath)
# add content
content = NoteModel.enc_read(ZEN_TEXT_FILE)
zen_note.content = content
self.assertTrue(os.path.exists(filepath))
self.assertNotEqual(zen_note.metadata, dict())
self.assertNotEqual(zen_note.timestamp, -1)
# rename
filepath2 = os.path.join(TESTER_NOTES_PATH, 'zen2' + NOTE_EXTENSION)
zen_note.notename = 'zen2'
self.assertTrue(os.path.exists(filepath2))
self.assertEqual(zen_note.notename, 'zen2')
def test_add_record(self):
filepath = os.path.join(TESTER_NOTES_PATH, 'zen' + NOTE_EXTENSION)
zen_note = NoteModel(filepath)
# add content
content = NoteModel.enc_read(ZEN_TEXT_FILE)
zen_note.content = content
# record
self.assertFalse(os.path.exists(zen_note.historypath))
self.assertEqual(zen_note.load_old_note(0), (None, None))
zen_note.record(TESTER_NOTES_PATH)
self.assertTrue(os.path.exists(zen_note.historypath))
self.assertNotEqual(zen_note.history, list())
self.assertNotEqual(zen_note.load_old_note(0), (None, None))
def test_get_changed_content(self):
notename = self.db_notes.keys()[0]
note = self.db_notes[notename]
filepath = note.filepath
# Read data from file, not using NoteModel
raw_data = NoteModel.enc_read(filepath)
content, metadata = NoteModel.parse_note_content(raw_data)
timestamp = os.stat(filepath).st_mtime
self.assertEqual(note.content, content)
self.assertEqual(note.timestamp, timestamp)
# Make a change
new_content = content + '\nNew line\n'
self.assertNotEqual(note.content, new_content)
# Write changed data not from NoteModel
filedata = new_content + END_OF_TEXT + '\n'
for key, value in metadata.items():
filedata = filedata + '{0}:{1}\n'.format(key, value)
NoteModel.enc_write(filepath, filedata)
# Change happened?
self.assertNotEqual(note.timestamp, timestamp)
# And the content automatically updates when accessed
self.assertEqual(note.content, new_content)
# Reset file
filedata = content + END_OF_TEXT + '\n'
for key, value in metadata.items():
filedata = filedata + '{0}:{1}\n'.format(key, value)
NoteModel.enc_write(filepath, filedata)
def tearDown(self):
# Clear out any vestiges of the zen files
zenpaths = glob.glob(TESTER_NOTES_PATH + '/zen*' + NOTE_EXTENSION)
for zen in zenpaths:
os.remove(zen)
# Remove the archive folder
if os.path.exists(os.path.join(TESTER_NOTES_PATH, HISTORY_FOLDER)):
shutil.rmtree(os.path.join(TESTER_NOTES_PATH, HISTORY_FOLDER))
if __name__ == '__main__':
unittest.main() | {
"content_hash": "f905f39db0f6cc44b95015b371d65e20",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 82,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.6308411214953271,
"repo_name": "akehrer/Motome",
"id": "5b12b28ae4c463dd2e531fa0721f11477252ff53",
"size": "5183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_motome_notemodel.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "4437"
},
{
"name": "Python",
"bytes": "323928"
}
],
"symlink_target": ""
} |
"""
Aho-Corasick string search algorithm.
Author : Wojciech Muła, [email protected]
WWW : http://0x80.pl
License : public domain
"""
import ahocorasick
A = ahocorasick.Automaton()
for index, word in enumerate("he her hers she".split()):
A.add_word(word, (index, word))
A.clear()
| {
"content_hash": "c5aab7aa52fc2ecb0b6e87c16ce67e67",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 23.428571428571427,
"alnum_prop": 0.6432926829268293,
"repo_name": "pombredanne/pyahocorasick",
"id": "1df22ce4153755f81cf0203ad5ec409976de64e0",
"size": "353",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "regression/issue_19.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "980"
},
{
"name": "C",
"bytes": "160031"
},
{
"name": "C++",
"bytes": "2464"
},
{
"name": "Makefile",
"bytes": "1362"
},
{
"name": "Python",
"bytes": "111411"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
"""Diagnostics support for Kostal Plenticore."""
from __future__ import annotations
from typing import Any
from homeassistant.components.diagnostics import REDACTED, async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD
from homeassistant.core import HomeAssistant
from .const import DOMAIN
from .helper import Plenticore
TO_REDACT = {CONF_PASSWORD}
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, config_entry: ConfigEntry
) -> dict[str, dict[str, Any]]:
"""Return diagnostics for a config entry."""
data = {"config_entry": async_redact_data(config_entry.as_dict(), TO_REDACT)}
plenticore: Plenticore = hass.data[DOMAIN][config_entry.entry_id]
# Get information from Kostal Plenticore library
available_process_data = await plenticore.client.get_process_data()
available_settings_data = await plenticore.client.get_settings()
data["client"] = {
"version": str(await plenticore.client.get_version()),
"me": str(await plenticore.client.get_me()),
"available_process_data": available_process_data,
"available_settings_data": {
module_id: [str(setting) for setting in settings]
for module_id, settings in available_settings_data.items()
},
}
device_info = {**plenticore.device_info}
device_info["identifiers"] = REDACTED # contains serial number
data["device"] = device_info
return data
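# Minimal sketch of the redaction used above (illustrative only; the sample
# dict and host value are made up). async_redact_data returns a copy with the
# keys named in TO_REDACT replaced by the REDACTED placeholder:
#     sample = {"host": "192.0.2.1", CONF_PASSWORD: "hunter2"}
#     async_redact_data(sample, TO_REDACT)
#     # -> {"host": "192.0.2.1", "password": REDACTED}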
| {
"content_hash": "1d790f4086d4faaef1ead53dd34b9c0e",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 81,
"avg_line_length": 35.54761904761905,
"alnum_prop": 0.7119892833221702,
"repo_name": "rohitranjan1991/home-assistant",
"id": "2e061d35528a4bc7dc20068bf434f168b2ad71ad",
"size": "1493",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/kostal_plenticore/diagnostics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
import astropy.units as u
import numpy as np
from astropy import log
from emcee import autocorr
from .extern.interruptible_pool import InterruptiblePool as Pool
from .extern.validator import validate_array
from .utils import sed_conversion, validate_data_table
__all__ = ["plot_chain", "plot_fit", "plot_data", "plot_blob", "plot_corner"]
marker_cycle = ["o", "s", "d", "p", "*"]
# from seaborn: sns.color_palette('deep',6)
color_cycle = [
(0.2980392156862745, 0.4470588235294118, 0.6901960784313725),
(0.3333333333333333, 0.6588235294117647, 0.40784313725490196),
(0.7686274509803922, 0.3058823529411765, 0.3215686274509804),
(0.5058823529411764, 0.4470588235294118, 0.6980392156862745),
(0.8, 0.7254901960784313, 0.4549019607843137),
(0.39215686274509803, 0.7098039215686275, 0.803921568627451),
]
def plot_chain(sampler, p=None, **kwargs):
"""Generate a diagnostic plot of the sampler chains.
Parameters
----------
sampler : `emcee.EnsembleSampler`
Sampler containing the chains to be plotted.
p : int (optional)
Index of the parameter to plot. If omitted, all chains are plotted.
last_step : bool (optional)
Whether to plot the last step of the chain or the complete chain
(default).
Returns
-------
figure : `matplotlib.figure.Figure`
Figure
"""
if p is None:
npars = sampler.get_chain().shape[-1]
for pp in range(npars):
_plot_chain_func(sampler, pp, **kwargs)
fig = None
else:
fig = _plot_chain_func(sampler, p, **kwargs)
return fig
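# Illustrative helper (not part of the original module): save one diagnostic
# chain figure per free parameter. It assumes `sampler` is the
# emcee.EnsembleSampler produced by naima's fitting routines (which attaches
# the `labels` list used above); the filename prefix is an arbitrary choice.
def _example_save_chain_figures(sampler, prefix="chain"):
    npars = sampler.get_chain().shape[-1]
    for p in range(npars):
        fig = _plot_chain_func(sampler, p)
        fig.savefig("{0}_{1}.png".format(prefix, p))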
def _latex_float(f, format=".3g"):
"""http://stackoverflow.com/a/13490601"""
float_str = "{{0:{0}}}".format(format).format(f)
if "e" in float_str:
base, exponent = float_str.split("e")
return r"{0}\times 10^{{{1}}}".format(base, int(exponent))
else:
return float_str
def round2(x, n):
y = round(x, n)
if n < 1:
y = str(int(y))
else:
# preserve trailing zeroes
y = ("{{0:.{0}f}}".format(n)).format(x)
return y
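# Worked examples for round2 (checked against the branches above):
#     round2(3.14159, 2) -> "3.14"
#     round2(2.5, 3)     -> "2.500"   (trailing zeroes preserved)
#     round2(7.6, 0)     -> "8"       (n < 1 collapses to an integer string)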
def _latex_value_error(val, elo, ehi=0, tol=0.25):
order = int(np.log10(abs(val)))
if order > 2 or order < -2:
val /= 10 ** order
elo /= 10 ** order
ehi /= 10 ** order
else:
order = 0
nlo = -int(np.floor(np.log10(elo)))
if elo * 10 ** nlo < 2:
nlo += 1
if ehi:
# elo = round(elo,nlo)
nhi = -int(np.floor(np.log10(ehi)))
if ehi * 10 ** nhi < 2:
nhi += 1
# ehi = round(ehi,nhi)
if np.abs(elo - ehi) / ((elo + ehi) / 2.0) > tol:
n = max(nlo, nhi)
string = "{0}^{{+{1}}}_{{-{2}}}".format(
*[
round2(x, nn)
for x, nn in zip([val, ehi, elo], [n, nhi, nlo])
]
)
else:
e = (elo + ehi) / 2.0
n = -int(np.floor(np.log10(e)))
if e * 10 ** n < 2:
n += 1
string = "{0} \\pm {1}".format(*[round2(x, n) for x in [val, e]])
else:
string = "{0} \\pm {1}".format(*[round2(x, nlo) for x in [val, elo]])
if order != 0:
string = "(" + string + r")\times10^{{{0}}}".format(order)
return "$" + string + "$"
def _plot_chain_func(sampler, p, last_step=False):
chain = sampler.get_chain()
label = sampler.labels[p]
import matplotlib.pyplot as plt
from scipy import stats
if len(chain.shape) > 2:
# transpose from (step, walker) to (walker, step)
traces = chain[:, :, p].T
if last_step:
# keep only last step
dist = traces[:, -1]
else:
# convert chain to flatchain
dist = traces.flatten()
else:
log.warning(
"we need the full chain to plot the traces, not a flatchain!"
)
return None
nwalkers = traces.shape[0]
nsteps = traces.shape[1]
f = plt.figure()
ax1 = f.add_subplot(221)
ax2 = f.add_subplot(122)
f.subplots_adjust(left=0.1, bottom=0.15, right=0.95, top=0.9)
# plot five percent of the traces darker
if nwalkers < 60:
thresh = 1 - 3.0 / nwalkers
else:
thresh = 0.95
red = np.arange(nwalkers) / float(nwalkers) >= thresh
ax1.set_rasterization_zorder(1)
for t in traces[~red]: # range(nwalkers):
ax1.plot(t, color=(0.1,) * 3, lw=1.0, alpha=0.25, zorder=0)
for t in traces[red]:
ax1.plot(t, color=color_cycle[0], lw=1.5, alpha=0.75, zorder=0)
ax1.set_xlabel("step number")
# [l.set_rotation(45) for l in ax1.get_yticklabels()]
ax1.set_ylabel(label)
ax1.yaxis.set_label_coords(-0.15, 0.5)
ax1.set_title("Walker traces")
nbins = min(max(25, int(len(dist) / 100.0)), 100)
xlabel = label
n, x, _ = ax2.hist(
dist,
nbins,
histtype="stepfilled",
color=color_cycle[0],
lw=0,
density=True,
)
kde = stats.kde.gaussian_kde(dist)
ax2.plot(x, kde(x), color="k", label="KDE")
quant = [16, 50, 84]
xquant = np.percentile(dist, quant)
quantiles = dict(zip(quant, xquant))
ax2.axvline(
quantiles[50],
ls="--",
color="k",
alpha=0.5,
lw=2,
label="50% quantile",
)
ax2.axvspan(
quantiles[16],
quantiles[84],
color=(0.5,) * 3,
alpha=0.25,
label="68% CI",
lw=0,
)
# ax2.legend()
for xticklabel in ax2.get_xticklabels():
xticklabel.set_rotation(45)
ax2.set_xlabel(xlabel)
ax2.xaxis.set_label_coords(0.5, -0.1)
ax2.set_title("posterior distribution")
ax2.set_ylim(top=n.max() * 1.05)
# Print distribution parameters on lower-left
try:
autocorr_message = "{0:.1f}".format(autocorr.integrated_time(chain)[p])
except autocorr.AutocorrError:
# Raised when chain is too short for meaningful auto-correlation
# estimation
autocorr_message = None
if last_step:
clen = "last ensemble"
else:
clen = "whole chain"
chain_props = "Walkers: {0} \nSteps in chain: {1} \n".format(
nwalkers, nsteps
)
if autocorr_message is not None:
chain_props += "Autocorrelation time: {0}\n".format(autocorr_message)
chain_props += "Mean acceptance fraction: {0:.3f}\n".format(
np.mean(sampler.acceptance_fraction)
) + "Distribution properties for the {clen}:\n \
$-$ median: ${median}$, std: ${std}$ \n \
$-$ median with uncertainties based on \n \
the 16th and 84th percentiles ($\\sim$1$\\sigma$):\n".format(
median=_latex_float(quantiles[50]),
std=_latex_float(np.std(dist)),
clen=clen,
)
info_line = (
" " * 10
+ label
+ " = "
+ _latex_value_error(
quantiles[50],
quantiles[50] - quantiles[16],
quantiles[84] - quantiles[50],
)
)
chain_props += info_line
if "log10(" in label or "log(" in label:
nlabel = label.split("(")[-1].split(")")[0]
ltype = label.split("(")[0]
if ltype == "log10":
new_dist = 10 ** dist
elif ltype == "log":
new_dist = np.exp(dist)
quant = [16, 50, 84]
quantiles = dict(zip(quant, np.percentile(new_dist, quant)))
label_template = "\n" + " " * 10 + "{{label:>{0}}}".format(len(label))
new_line = label_template.format(label=nlabel)
new_line += " = " + _latex_value_error(
quantiles[50],
quantiles[50] - quantiles[16],
quantiles[84] - quantiles[50],
)
chain_props += new_line
info_line += new_line
log.info("{0:-^50}\n".format(label) + info_line)
f.text(0.05, 0.45, chain_props, ha="left", va="top")
return f
def _process_blob(sampler, modelidx, last_step=False, energy=None):
"""
Process binary blob in sampler. If blob in position modelidx is:
- a Quantity array of len(blob[i])=len(data['energy']: use blob as model,
data['energy'] as modelx
- a tuple: use first item as modelx, second as model
- a Quantity scalar: return array of scalars
"""
# Allow process blob to be used by _calc_samples and _calc_ML by sending
# only blobs, not full sampler
try:
blobs = sampler.get_blobs()
blob0 = blobs[-1][0][modelidx]
energy = sampler.data["energy"]
except AttributeError:
blobs = [sampler]
blob0 = sampler[0][modelidx]
last_step = True
if isinstance(blob0, u.Quantity):
if blob0.size == energy.size:
# Energy array for blob is not provided, use data['energy']
modelx = energy
elif blob0.size == 1:
modelx = None
if last_step:
model = u.Quantity([m[modelidx] for m in blobs[-1]])
else:
model = []
for step in blobs:
for walkerblob in step:
model.append(walkerblob[modelidx])
model = u.Quantity(model)
elif np.isscalar(blob0):
modelx = None
if last_step:
model = u.Quantity([m[modelidx] for m in blobs[-1]])
else:
model = []
for step in blobs:
for walkerblob in step:
model.append(walkerblob[modelidx])
model = u.Quantity(model)
elif isinstance(blob0, list) or isinstance(blob0, tuple):
if (
len(blob0) == 2
and isinstance(blob0[0], u.Quantity)
and isinstance(blob0[1], u.Quantity)
):
# Energy array for model is item 0 in blob, model flux is item 1
modelx = blob0[0]
if last_step:
model = u.Quantity([m[modelidx][1] for m in blobs[-1]])
else:
model = []
for step in blobs:
for walkerblob in step:
model.append(walkerblob[modelidx][1])
model = u.Quantity(model)
else:
raise TypeError("Model {0} has wrong blob format".format(modelidx))
else:
raise TypeError("Model {0} has wrong blob format".format(modelidx))
return modelx, model
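# Illustrative blob layouts accepted above (a hedged sketch; the names
# `energy_grid`, `ic_flux` and `W_e` are made up). A model function used with
# the sampler could return any of these per model index:
#     flux                     # Quantity array aligned with data['energy']
#     (energy_grid, ic_flux)   # explicit energy grid plus model flux
#     W_e                      # scalar Quantity, e.g. total particle energy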
def _read_or_calc_samples(
sampler,
modelidx=0,
n_samples=100,
last_step=False,
e_range=None,
e_npoints=100,
threads=None,
):
"""Get samples from blob or compute them from chain and sampler.modelfn"""
if e_range is None:
# return the results saved in blobs
modelx, model = _process_blob(sampler, modelidx, last_step=last_step)
else:
# prepare bogus data for calculation
e_range = validate_array(
"e_range", u.Quantity(e_range), physical_type="energy"
)
e_unit = e_range.unit
energy = (
np.logspace(
np.log10(e_range[0].value),
np.log10(e_range[1].value),
e_npoints,
)
* e_unit
)
data = {
"energy": energy,
"flux": np.zeros(energy.shape) * sampler.data["flux"].unit,
}
# init pool and select parameters
chain = (
sampler.get_chain()[-1]
if last_step
else sampler.get_chain(flat=True)
)
pars = chain[np.random.randint(len(chain), size=n_samples)]
args = ((p, data) for p in pars)
blobs = []
pool = Pool(threads)
modelouts = pool.starmap(sampler.modelfn, args)
pool.close()
pool.terminate()
for modelout in modelouts:
if isinstance(modelout, np.ndarray):
blobs.append([modelout])
else:
blobs.append(modelout)
modelx, model = _process_blob(
blobs, modelidx=modelidx, energy=data["energy"]
)
return modelx, model
def _calc_ML(sampler, modelidx=0, e_range=None, e_npoints=100):
"""Get ML model from blob or compute them from chain and sampler.modelfn"""
ML, MLp, MLerr, ML_model = find_ML(sampler, modelidx)
if e_range is not None:
# prepare bogus data for calculation
e_range = validate_array(
"e_range", u.Quantity(e_range), physical_type="energy"
)
e_unit = e_range.unit
energy = (
np.logspace(
np.log10(e_range[0].value),
np.log10(e_range[1].value),
e_npoints,
)
* e_unit
)
data = {
"energy": energy,
"flux": np.zeros(energy.shape) * sampler.data["flux"].unit,
}
modelout = sampler.modelfn(MLp, data)
if isinstance(modelout, np.ndarray):
blob = modelout
else:
blob = modelout[modelidx]
if isinstance(blob, u.Quantity):
modelx = data["energy"].copy()
model_ML = blob.copy()
elif len(blob) == 2:
modelx = blob[0].copy()
model_ML = blob[1].copy()
else:
raise TypeError("Model {0} has wrong blob format".format(modelidx))
ML_model = (modelx, model_ML)
return ML, MLp, MLerr, ML_model
def _calc_CI(
sampler,
modelidx=0,
confs=[3, 1],
last_step=False,
e_range=None,
e_npoints=100,
threads=None,
):
"""Calculate confidence interval."""
from scipy import stats
# If we are computing the samples for the confidence intervals, we need at
# least one sample to constrain the highest confidence band
# 1 sigma -> 6 samples
# 2 sigma -> 43 samples
# 3 sigma -> 740 samples
# 4 sigma -> 31574 samples
# 5 sigma -> 3488555 samples
# We limit it to 1000 samples and warn that it might not be enough
if e_range is not None:
maxconf = np.max(confs)
        minsamples = max(100, int(1 / stats.norm.cdf(-maxconf) + 1))
if minsamples > 1000:
log.warning(
"In order to sample the confidence band for {0} sigma,"
" {1} new samples need to be computed, but we are limiting"
" it to 1000 samples, so the confidence band might not be"
" well constrained."
" Consider reducing the maximum"
" confidence significance or using the samples stored in"
" the sampler by setting e_range"
" to None".format(maxconf, minsamples)
)
minsamples = 1000
else:
minsamples = None
modelx, model = _read_or_calc_samples(
sampler,
modelidx,
last_step=last_step,
e_range=e_range,
e_npoints=e_npoints,
n_samples=minsamples,
threads=threads,
)
nwalkers = len(model) - 1
CI = []
for conf in confs:
fmin = stats.norm.cdf(-conf)
fmax = stats.norm.cdf(conf)
ymin, ymax = [], []
for fr, y in ((fmin, ymin), (fmax, ymax)):
nf = int((fr * nwalkers))
for i in range(len(modelx)):
ysort = np.sort(model[:, i])
y.append(ysort[nf])
# create an array from lists ymin and ymax preserving units
CI.append((u.Quantity(ymin), u.Quantity(ymax)))
return modelx, CI
def _plot_MLmodel(ax, sampler, modelidx, e_range, e_npoints, e_unit, sed):
"""compute and plot ML model"""
ML, MLp, MLerr, ML_model = _calc_ML(
sampler, modelidx, e_range=e_range, e_npoints=e_npoints
)
f_unit, sedf = sed_conversion(ML_model[0], ML_model[1].unit, sed)
ax.loglog(
ML_model[0].to(e_unit).value,
(ML_model[1] * sedf).to(f_unit).value,
color="k",
lw=2,
alpha=0.8,
)
def plot_CI(
ax,
sampler,
modelidx=0,
sed=True,
confs=[3, 1, 0.5],
e_unit=u.eV,
label=None,
e_range=None,
e_npoints=100,
threads=None,
last_step=False,
):
"""Plot confidence interval.
Parameters
----------
ax : `matplotlib.Axes`
Axes to plot on.
sampler : `emcee.EnsembleSampler`
Sampler
modelidx : int, optional
Model index. Default is 0
sed : bool, optional
Whether to plot SED or differential spectrum. If `None`, the units of
the observed spectrum will be used.
confs : list, optional
List of confidence levels (in sigma) to use for generating the
confidence intervals. Default is `[3,1,0.5]`
e_unit : :class:`~astropy.units.Unit` or str parseable to unit
        Unit in which to plot energy axis.
    e_range : list of `~astropy.units.Quantity`, length 2, optional
        Limits in energy for the computation of the model samples and ML model.
        Note that setting this parameter will mean that the samples for the
        model are recomputed and depending on the model speed might be quite
        slow.
e_npoints : int, optional
How many points to compute for the model samples and ML model if
`e_range` is set.
threads : int, optional
How many parallel processing threads to use when computing the samples.
Defaults to the number of available cores.
last_step : bool, optional
        Whether to only use the positions in the final step of the run (True)
        or the whole chain (False, the default).
"""
confs.sort(reverse=True)
modelx, CI = _calc_CI(
sampler,
modelidx=modelidx,
confs=confs,
e_range=e_range,
e_npoints=e_npoints,
last_step=last_step,
threads=threads,
)
# pick first confidence interval curve for units
f_unit, sedf = sed_conversion(modelx, CI[0][0].unit, sed)
for (ymin, ymax), conf in zip(CI, confs):
color = np.log(conf) / np.log(20) + 0.4
ax.fill_between(
modelx.to(e_unit).value,
(ymax * sedf).to(f_unit).value,
(ymin * sedf).to(f_unit).value,
lw=0.001,
color=(color,) * 3,
alpha=0.6,
zorder=-10,
)
_plot_MLmodel(ax, sampler, modelidx, e_range, e_npoints, e_unit, sed)
if label is not None:
ax.set_ylabel(
"{0} [{1}]".format(label, f_unit.to_string("latex_inline"))
)
def plot_samples(
ax,
sampler,
modelidx=0,
sed=True,
n_samples=100,
e_unit=u.eV,
e_range=None,
e_npoints=100,
threads=None,
label=None,
last_step=False,
):
"""Plot a number of samples from the sampler chain.
Parameters
----------
ax : `matplotlib.Axes`
Axes to plot on.
sampler : `emcee.EnsembleSampler`
Sampler
modelidx : int, optional
Model index. Default is 0
sed : bool, optional
Whether to plot SED or differential spectrum. If `None`, the units of
the observed spectrum will be used.
n_samples : int, optional
Number of samples to plot. Default is 100.
e_unit : :class:`~astropy.units.Unit` or str parseable to unit
Unit in which to plot energy axis.
e_range : list of `~astropy.units.Quantity`, length 2, optional
Limits in energy for the computation of the model samples and ML model.
Note that setting this parameter will mean that the samples for the
model are recomputed and depending on the model speed might be quite
slow.
e_npoints : int, optional
How many points to compute for the model samples and ML model if
`e_range` is set.
threads : int, optional
How many parallel processing threads to use when computing the samples.
Defaults to the number of available cores.
last_step : bool, optional
        Whether to only use the positions in the final step of the run (True)
        or the whole chain (False, the default).
"""
modelx, model = _read_or_calc_samples(
sampler,
modelidx,
last_step=last_step,
e_range=e_range,
e_npoints=e_npoints,
threads=threads,
)
# pick first model sample for units
f_unit, sedf = sed_conversion(modelx, model[0].unit, sed)
sample_alpha = min(5.0 / n_samples, 0.5)
for my in model[np.random.randint(len(model), size=n_samples)]:
ax.loglog(
modelx.to(e_unit).value,
(my * sedf).to(f_unit).value,
color=(0.1,) * 3,
alpha=sample_alpha,
lw=1.0,
)
_plot_MLmodel(ax, sampler, modelidx, e_range, e_npoints, e_unit, sed)
if label is not None:
ax.set_ylabel(
"{0} [{1}]".format(label, f_unit.to_string("latex_inline"))
)
def find_ML(sampler, modelidx):
"""
Find Maximum Likelihood parameters as those in the chain with a highest log
probability.
"""
lnprobability = sampler.get_log_prob()
index = np.unravel_index(np.argmax(lnprobability), lnprobability.shape)
MLp = sampler.get_chain()[index]
blobs = sampler.get_blobs()
if modelidx is not None and blobs is not None:
blob = blobs[index][modelidx]
if isinstance(blob, u.Quantity):
modelx = sampler.data["energy"].copy()
model_ML = blob.copy()
elif len(blob) == 2:
modelx = blob[0].copy()
model_ML = blob[1].copy()
else:
raise TypeError("Model {0} has wrong blob format".format(modelidx))
elif modelidx is not None and hasattr(sampler, "modelfn"):
blob = _process_blob(
[sampler.modelfn(MLp, sampler.data)],
modelidx,
energy=sampler.data["energy"],
)
modelx, model_ML = blob[0], blob[1][0]
else:
modelx, model_ML = None, None
MLerr = []
for dist in sampler.get_chain(flat=True).T:
hilo = np.percentile(dist, [16.0, 84.0])
MLerr.append((hilo[1] - hilo[0]) / 2.0)
ML = lnprobability[index]
return ML, MLp, MLerr, (modelx, model_ML)
def plot_blob(
sampler, blobidx=0, label=None, last_step=False, figure=None, **kwargs
):
"""
Plot a metadata blob as a fit to spectral data or value distribution
Additional ``kwargs`` are passed to `plot_fit`.
Parameters
----------
sampler : `emcee.EnsembleSampler`
Sampler with a stored chain.
blobidx : int, optional
Metadata blob index to plot.
label : str, optional
Label for the value distribution. Labels for the fit plot can be passed
as ``xlabel`` and ``ylabel`` and will be passed to `plot_fit`.
Returns
-------
figure : `matplotlib.pyplot.Figure`
`matplotlib` figure instance containing the plot.
"""
modelx, model = _process_blob(sampler, blobidx, last_step)
if label is None:
label = "Model output {0}".format(blobidx)
if modelx is None:
# Blob is scalar, plot distribution
f = plot_distribution(model, label, figure=figure)
else:
f = plot_fit(
sampler,
modelidx=blobidx,
last_step=last_step,
label=label,
figure=figure,
**kwargs
)
return f
def plot_fit(
sampler,
modelidx=0,
label=None,
sed=True,
last_step=False,
n_samples=100,
confs=None,
ML_info=False,
figure=None,
plotdata=None,
plotresiduals=None,
e_unit=None,
e_range=None,
e_npoints=100,
threads=None,
xlabel=None,
ylabel=None,
ulim_opts={},
errorbar_opts={},
):
"""
Plot data with fit confidence regions.
Parameters
----------
sampler : `emcee.EnsembleSampler`
Sampler with a stored chain.
modelidx : int, optional
Model index to plot.
label : str, optional
Label for the title of the plot.
sed : bool, optional
Whether to plot SED or differential spectrum.
last_step : bool, optional
Whether to use only the samples of the last step in the run when
showing either the model samples or the confidence intervals.
n_samples : int, optional
If not ``None``, number of sample models to plot. If ``None``,
confidence bands will be plotted instead of samples. Default is 100.
confs : list, optional
List of confidence levels (in sigma) to use for generating the
confidence intervals. Default is to plot sample models instead of
confidence bands.
ML_info : bool, optional
Whether to plot information about the maximum likelihood parameters and
        the standard deviation of their distributions. Default is False.
figure : `matplotlib.figure.Figure`, optional
`matplotlib` figure to plot on. If omitted a new one will be generated.
plotdata : bool, optional
        Whether to plot data on top of model confidence intervals. Default is
True if the physical types of the data and the model match.
plotresiduals : bool, optional
        Whether to plot the residuals with respect to the maximum likelihood
model. Default is True if ``plotdata`` is True and either ``confs`` or
``n_samples`` are set.
e_unit : `~astropy.units.Unit`, optional
Units for the energy axis of the plot. The default is to use the units
of the energy array of the observed data.
e_range : list of `~astropy.units.Quantity`, length 2, optional
Limits in energy for the computation of the model samples and ML model.
Note that setting this parameter will mean that the samples for the
model are recomputed and depending on the model speed might be quite
slow.
e_npoints : int, optional
How many points to compute for the model samples and ML model if
`e_range` is set.
threads : int, optional
How many parallel processing threads to use when computing the samples.
Defaults to the number of available cores.
xlabel : str, optional
Label for the ``x`` axis of the plot.
ylabel : str, optional
Label for the ``y`` axis of the plot.
ulim_opts : dict
        Options for upper-limit plotting. Available options are capsize (arrow
width) and height_fraction (arrow length in fraction of flux value).
errorbar_opts : dict
        Additional options to pass to `matplotlib.pyplot.errorbar` for plotting
spectral flux points.
"""
import matplotlib.pyplot as plt
ML, MLp, MLerr, model_ML = find_ML(sampler, modelidx)
infostr = "Maximum log probability: {0:.3g}\n".format(ML)
infostr += "Maximum Likelihood values:\n"
maxlen = np.max([len(ilabel) for ilabel in sampler.labels])
vartemplate = "{{2:>{0}}}: {{0:>8.3g}} +/- {{1:<8.3g}}\n".format(maxlen)
for p, v, ilabel in zip(MLp, MLerr, sampler.labels):
infostr += vartemplate.format(p, v, ilabel)
# log.info(infostr)
data = sampler.data
if e_range is None and not hasattr(sampler, "blobs"):
e_range = data["energy"][[0, -1]] * np.array((1.0 / 3.0, 3.0))
if plotdata is None and len(model_ML[0]) == len(data["energy"]):
model_unit, _ = sed_conversion(model_ML[0], model_ML[1].unit, sed)
data_unit, _ = sed_conversion(data["energy"], data["flux"].unit, sed)
plotdata = model_unit.is_equivalent(data_unit)
elif plotdata is None:
plotdata = False
if plotresiduals is None and plotdata and (confs is not None or n_samples):
plotresiduals = True
if confs is None and not n_samples and plotdata and not plotresiduals:
# We actually only want to plot the data, so let's go there
return plot_data(
sampler.data,
xlabel=xlabel,
ylabel=ylabel,
sed=sed,
figure=figure,
e_unit=e_unit,
ulim_opts=ulim_opts,
errorbar_opts=errorbar_opts,
)
if figure is None:
f = plt.figure()
else:
f = figure
if plotdata and plotresiduals:
ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=3)
ax2 = plt.subplot2grid((4, 1), (3, 0), sharex=ax1)
for subp in [ax1, ax2]:
f.add_subplot(subp)
else:
ax1 = f.add_subplot(111)
if e_unit is None:
e_unit = data["energy"].unit
if confs is not None:
plot_CI(
ax1,
sampler,
modelidx,
sed=sed,
confs=confs,
e_unit=e_unit,
label=label,
e_range=e_range,
e_npoints=e_npoints,
last_step=last_step,
threads=threads,
)
elif n_samples:
plot_samples(
ax1,
sampler,
modelidx,
sed=sed,
n_samples=n_samples,
e_unit=e_unit,
label=label,
e_range=e_range,
e_npoints=e_npoints,
last_step=last_step,
threads=threads,
)
else:
# plot only ML model
_plot_MLmodel(ax1, sampler, modelidx, e_range, e_npoints, e_unit, sed)
xlaxis = ax1
if plotdata:
_plot_data_to_ax(
data,
ax1,
e_unit=e_unit,
sed=sed,
ylabel=ylabel,
ulim_opts=ulim_opts,
errorbar_opts=errorbar_opts,
)
if plotresiduals:
_plot_residuals_to_ax(
data,
model_ML,
ax2,
e_unit=e_unit,
sed=sed,
errorbar_opts=errorbar_opts,
)
xlaxis = ax2
for tl in ax1.get_xticklabels():
tl.set_visible(False)
xmin = 10 ** np.floor(
np.log10(
np.min(data["energy"] - data["energy_error_lo"])
.to(e_unit)
.value
)
)
xmax = 10 ** np.ceil(
np.log10(
np.max(data["energy"] + data["energy_error_hi"])
.to(e_unit)
.value
)
)
ax1.set_xlim(xmin, xmax)
else:
ax1.set_xscale("log")
ax1.set_yscale("log")
if sed:
ndecades = 10
else:
ndecades = 20
# restrict y axis to ndecades to avoid autoscaling deep exponentials
xmin, xmax, ymin, ymax = ax1.axis()
ymin = max(ymin, ymax / 10 ** ndecades)
ax1.set_ylim(bottom=ymin)
# scale x axis to largest model_ML x point within ndecades decades of
# maximum
f_unit, sedf = sed_conversion(model_ML[0], model_ML[1].unit, sed)
hi = np.where((model_ML[1] * sedf).to(f_unit).value > ymin)
xmax = np.max(model_ML[0][hi])
ax1.set_xlim(right=10 ** np.ceil(np.log10(xmax.to(e_unit).value)))
if e_range is not None:
# ensure that xmin/xmax contains e_range
xmin, xmax, ymin, ymax = ax1.axis()
xmin = min(xmin, e_range[0].to(e_unit).value)
xmax = max(xmax, e_range[1].to(e_unit).value)
ax1.set_xlim(xmin, xmax)
if ML_info and (confs is not None or n_samples):
ax1.text(
0.05,
0.05,
infostr,
ha="left",
va="bottom",
transform=ax1.transAxes,
family="monospace",
)
if label is not None:
ax1.set_title(label)
if xlabel is None:
xlaxis.set_xlabel(
"Energy [{0}]".format(e_unit.to_string("latex_inline"))
)
else:
xlaxis.set_xlabel(xlabel)
f.subplots_adjust(hspace=0)
return f
def _plot_ulims(
ax, x, y, xerr, color, capsize=5, height_fraction=0.25, elinewidth=2
):
"""
Plot upper limits as arrows with cap at value of upper limit.
uplim behaviour has been fixed in matplotlib 1.4
"""
ax.errorbar(
x, y, xerr=xerr, ls="", color=color, elinewidth=elinewidth, capsize=0
)
from distutils.version import LooseVersion
import matplotlib
mpl_version = LooseVersion(matplotlib.__version__)
if mpl_version >= LooseVersion("1.4.0"):
ax.errorbar(
x,
y,
yerr=height_fraction * y,
ls="",
uplims=True,
color=color,
elinewidth=elinewidth,
capsize=capsize,
zorder=10,
)
else:
ax.errorbar(
x,
(1 - height_fraction) * y,
yerr=height_fraction * y,
ls="",
lolims=True,
color=color,
elinewidth=elinewidth,
capsize=capsize,
zorder=10,
)
def _plot_data_to_ax(
data_all,
ax1,
e_unit=None,
sed=True,
ylabel=None,
ulim_opts={},
errorbar_opts={},
):
"""Plots data errorbars and upper limits onto ax.
X label is left to plot_data and plot_fit because they depend on whether
residuals are plotted.
"""
if e_unit is None:
e_unit = data_all["energy"].unit
f_unit, sedf = sed_conversion(
data_all["energy"], data_all["flux"].unit, sed
)
if "group" not in data_all.keys():
data_all["group"] = np.zeros(len(data_all))
groups = np.unique(data_all["group"])
for g in groups:
data = data_all[np.where(data_all["group"] == g)]
_, sedfg = sed_conversion(data["energy"], data["flux"].unit, sed)
# wrap around color and marker cycles
color = color_cycle[int(g) % len(color_cycle)]
marker = marker_cycle[int(g) % len(marker_cycle)]
ul = data["ul"]
notul = ~ul
# Hack to show y errors compatible with 0 in loglog plot
yerr_lo = data["flux_error_lo"][notul]
y = data["flux"][notul].to(yerr_lo.unit)
bad_err = np.where((y - yerr_lo) <= 0.0)
yerr_lo[bad_err] = y[bad_err] * (1.0 - 1e-7)
yerr = u.Quantity((yerr_lo, data["flux_error_hi"][notul]))
xerr = u.Quantity((data["energy_error_lo"], data["energy_error_hi"]))
opts = dict(
zorder=100,
marker=marker,
ls="",
elinewidth=2,
capsize=0,
mec=color,
mew=0.1,
ms=5,
color=color,
)
opts.update(**errorbar_opts)
ax1.errorbar(
data["energy"][notul].to(e_unit).value,
(data["flux"][notul] * sedfg[notul]).to(f_unit).value,
yerr=(yerr * sedfg[notul]).to(f_unit).value,
xerr=xerr[:, notul].to(e_unit).value,
**opts
)
if np.any(ul):
if "elinewidth" in errorbar_opts:
ulim_opts["elinewidth"] = errorbar_opts["elinewidth"]
_plot_ulims(
ax1,
data["energy"][ul].to(e_unit).value,
(data["flux"][ul] * sedfg[ul]).to(f_unit).value,
(xerr[:, ul]).to(e_unit).value,
color,
**ulim_opts
)
ax1.set_xscale("log")
ax1.set_yscale("log")
xmin = 10 ** np.floor(
np.log10(
np.min(data["energy"] - data["energy_error_lo"]).to(e_unit).value
)
)
xmax = 10 ** np.ceil(
np.log10(
np.max(data["energy"] + data["energy_error_hi"]).to(e_unit).value
)
)
ax1.set_xlim(xmin, xmax)
# avoid autoscaling to errorbars to 0
notul = ~data_all["ul"]
if np.any(data_all["flux_error_lo"][notul] >= data_all["flux"][notul]):
elo = (data_all["flux"][notul] * sedf[notul]).to(f_unit).value - (
data_all["flux_error_lo"][notul] * sedf[notul]
).to(f_unit).value
gooderr = np.where(
data_all["flux_error_lo"][notul] < data_all["flux"][notul]
)
ymin = 10 ** np.floor(np.log10(np.min(elo[gooderr])))
ax1.set_ylim(bottom=ymin)
if ylabel is None:
if sed:
ax1.set_ylabel(
r"$E^2\mathrm{{d}}N/\mathrm{{d}}E$"
" [{0}]".format(u.Unit(f_unit).to_string("latex_inline"))
)
else:
ax1.set_ylabel(
r"$\mathrm{{d}}N/\mathrm{{d}}E$"
" [{0}]".format(u.Unit(f_unit).to_string("latex_inline"))
)
else:
ax1.set_ylabel(ylabel)
def _plot_residuals_to_ax(
data_all, model_ML, ax, e_unit=u.eV, sed=True, errorbar_opts={}
):
"""Function to compute and plot residuals in units of the uncertainty"""
if "group" not in data_all.keys():
data_all["group"] = np.zeros(len(data_all))
groups = np.unique(data_all["group"])
MLf_unit, MLsedf = sed_conversion(model_ML[0], model_ML[1].unit, sed)
MLene = model_ML[0].to(e_unit)
MLflux = (model_ML[1] * MLsedf).to(MLf_unit)
ax.axhline(0, color="k", lw=1, ls="--")
interp = False
if data_all["energy"].size != MLene.size or not np.allclose(
data_all["energy"].value, MLene.value
):
interp = True
from scipy.interpolate import interp1d
modelfunc = interp1d(MLene.value, MLflux.value, bounds_error=False)
for g in groups:
groupidx = np.where(data_all["group"] == g)
data = data_all[groupidx]
notul = ~data["ul"]
df_unit, dsedf = sed_conversion(data["energy"], data["flux"].unit, sed)
ene = data["energy"].to(e_unit)
xerr = u.Quantity((data["energy_error_lo"], data["energy_error_hi"]))
flux = (data["flux"] * dsedf).to(df_unit)
dflux = (data["flux_error_lo"] + data["flux_error_hi"]) / 2.0
dflux = (dflux * dsedf).to(df_unit)[notul]
if interp:
difference = flux[notul] - modelfunc(ene[notul]) * flux.unit
else:
difference = flux[notul] - MLflux[groupidx][notul]
# wrap around color and marker cycles
color = color_cycle[int(g) % len(color_cycle)]
marker = marker_cycle[int(g) % len(marker_cycle)]
opts = dict(
zorder=100,
marker=marker,
ls="",
elinewidth=2,
capsize=0,
mec=color,
mew=0.1,
ms=6,
color=color,
)
opts.update(errorbar_opts)
ax.errorbar(
ene[notul].value,
(difference / dflux).decompose().value,
yerr=(dflux / dflux).decompose().value,
xerr=xerr[:, notul].to(e_unit).value,
**opts
)
from matplotlib.ticker import MaxNLocator
ax.yaxis.set_major_locator(
        MaxNLocator(5, integer=True, prune="upper", symmetric=True)
)
ax.set_ylabel(r"$\Delta\sigma$")
ax.set_xscale("log")
def plot_data(
input_data,
xlabel=None,
ylabel=None,
sed=True,
figure=None,
e_unit=None,
ulim_opts={},
errorbar_opts={},
):
"""
Plot spectral data.
Parameters
----------
input_data : `emcee.EnsembleSampler`, `astropy.table.Table`, or `dict`
Spectral data to plot. Can be given as a data table, a dict generated
with `validate_data_table` or a `emcee.EnsembleSampler` with a data
property.
xlabel : str, optional
Label for the ``x`` axis of the plot.
ylabel : str, optional
Label for the ``y`` axis of the plot.
sed : bool, optional
Whether to plot SED or differential spectrum.
figure : `matplotlib.figure.Figure`, optional
`matplotlib` figure to plot on. If omitted a new one will be generated.
e_unit : `astropy.unit.Unit`, optional
Units for energy axis. Defaults to those of the data.
ulim_opts : dict
Options for upper-limit plotting. Available options are capsize (arrow
width) and height_fraction (arrow length in fraction of flux value).
errorbar_opts : dict
        Additional options to pass to `matplotlib.pyplot.errorbar` for plotting
spectral flux points.
"""
import matplotlib.pyplot as plt
try:
data = validate_data_table(input_data)
except TypeError as exc:
if hasattr(input_data, "data"):
data = input_data.data
elif isinstance(input_data, dict) and "energy" in input_data.keys():
data = input_data
else:
log.warning(
"input_data format unknown, no plotting data! "
"Data loading exception: {}".format(exc)
)
raise
if figure is None:
f = plt.figure()
else:
f = figure
if len(f.axes) > 0:
ax1 = f.axes[0]
else:
ax1 = f.add_subplot(111)
# try to get units from previous plot in figure
try:
old_e_unit = u.Unit(ax1.get_xlabel().split("[")[-1].split("]")[0])
except ValueError:
old_e_unit = u.Unit("")
if e_unit is None and old_e_unit.physical_type == "energy":
e_unit = old_e_unit
elif e_unit is None:
e_unit = data["energy"].unit
_plot_data_to_ax(
data,
ax1,
e_unit=e_unit,
sed=sed,
ylabel=ylabel,
ulim_opts=ulim_opts,
errorbar_opts=errorbar_opts,
)
if xlabel is not None:
ax1.set_xlabel(xlabel)
elif xlabel is None and ax1.get_xlabel() == "":
ax1.set_xlabel(
r"$\mathrm{Energy}$"
+ " [{0}]".format(e_unit.to_string("latex_inline"))
)
ax1.autoscale()
return f
def plot_distribution(samples, label, figure=None):
"""Plot a distribution and print statistics about it"""
import matplotlib.pyplot as plt
from scipy import stats
quant = [16, 50, 84]
quantiles = dict(zip(quant, np.percentile(samples, quant)))
std = np.std(samples)
if isinstance(samples[0], u.Quantity):
unit = samples[0].unit
std = std.value
quantiles = {k: v.value for k, v in quantiles.items()}
else:
unit = ""
dist_props = "{label} distribution properties:\n \
$-$ median: ${median}$ {unit}, std: ${std}$ {unit}\n \
$-$ Median with uncertainties based on \n \
the 16th and 84th percentiles ($\\sim$1$\\sigma$):\n\
{label} = {value_error} {unit}".format(
label=label,
median=_latex_float(quantiles[50]),
std=_latex_float(std),
value_error=_latex_value_error(
quantiles[50],
quantiles[50] - quantiles[16],
quantiles[84] - quantiles[50],
),
unit=unit,
)
if figure is None:
f = plt.figure()
else:
f = figure
ax = f.add_subplot(111)
f.subplots_adjust(bottom=0.40, top=0.93, left=0.06, right=0.95)
f.text(0.2, 0.27, dist_props, ha="left", va="top")
histnbins = min(max(25, int(len(samples) / 100.0)), 100)
xlabel = "" if label is None else label
if isinstance(samples, u.Quantity):
samples_nounit = samples.value
else:
samples_nounit = samples
n, x, _ = ax.hist(
samples_nounit,
histnbins,
histtype="stepfilled",
color=color_cycle[0],
lw=0,
density=True,
)
kde = stats.kde.gaussian_kde(samples_nounit)
ax.plot(x, kde(x), color="k", label="KDE")
ax.axvline(
quantiles[50],
ls="--",
color="k",
alpha=0.5,
lw=2,
label="50% quantile",
)
ax.axvspan(
quantiles[16],
quantiles[84],
color=(0.5,) * 3,
alpha=0.25,
label="68% CI",
lw=0,
)
for xticklabel in ax.get_xticklabels():
xticklabel.set_rotation(45)
if unit != "":
xlabel += " [{0}]".format(unit)
ax.set_xlabel(xlabel)
ax.set_title("Posterior distribution of {0}".format(label))
ax.set_ylim(top=n.max() * 1.05)
return f
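# Illustrative usage sketch for plot_distribution (assumed inputs, not part of
# the module): it accepts either a plain array or an astropy Quantity array of
# posterior samples, e.g.
#     f = plot_distribution(np.random.normal(10.0, 1.0, 2000), "x")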
def plot_corner(sampler, show_ML=True, **kwargs):
"""
A plot that summarizes the parameter samples by showing them as individual
histograms and 2D histograms against each other. The maximum likelihood
parameter vector is indicated by a cross.
This function is a thin wrapper around `corner.corner`, found at
    https://github.com/dfm/corner.py.
Parameters
----------
sampler : `emcee.EnsembleSampler`
Sampler with a stored chain.
show_ML : bool, optional
Whether to show the maximum likelihood parameter vector as a cross on
the 2D histograms.
"""
import matplotlib.pyplot as plt
oldlw = plt.rcParams["lines.linewidth"]
plt.rcParams["lines.linewidth"] = 0.7
try:
from corner import corner
if show_ML:
_, MLp, _, _ = find_ML(sampler, 0)
else:
MLp = None
corner_opts = {
"labels": sampler.labels,
"truths": MLp,
"quantiles": [0.16, 0.5, 0.84],
"verbose": False,
"truth_color": color_cycle[0],
}
corner_opts.update(kwargs)
f = corner(sampler.get_chain(flat=True), **corner_opts)
except ImportError:
log.warning(
"The corner package is not installed;" " corner plot not available"
)
f = None
plt.rcParams["lines.linewidth"] = oldlw
return f
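# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not executed on import). Assumes `sampler` is an
# emcee.EnsembleSampler carrying the extra attributes these helpers rely on
# (`data`, `labels`, `modelfn`); the object and output file names below are
# assumptions for demonstration only:
#
#     f1 = plot_fit(sampler, modelidx=0, sed=True, n_samples=100)
#     f2 = plot_corner(sampler)
#     f1.savefig("fit.png")
#     f2.savefig("corner.png")
# ---------------------------------------------------------------------------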
| {
"content_hash": "3a6e9b01a5d22ea2e30420ca243f3455",
"timestamp": "",
"source": "github",
"line_count": 1500,
"max_line_length": 79,
"avg_line_length": 29.917333333333332,
"alnum_prop": 0.553614404135841,
"repo_name": "zblz/naima",
"id": "6bd53158182bb5d11d694417dace8a75b178ab5a",
"size": "44940",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/naima/plot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "254295"
}
],
"symlink_target": ""
} |
import pickle
import logging
import numpy as np
import mxnet as mx
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Define the network
data = mx.sym.Variable('data')
fc1 = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=2)
sigmoid1 = mx.sym.Activation(data=fc1, name='sigmoid1', act_type='sigmoid')
fc2 = mx.sym.FullyConnected(data=sigmoid1, name='fc2', num_hidden=2)
mlp = mx.sym.SoftmaxOutput(data=fc2, name='softmax')
shape = {'data': (2,)}
mlp_dot = mx.viz.plot_network(symbol=mlp, shape=shape)
mlp_dot.render('simple_mlp.gv', view=True)
# Load data & train the model
with open('../data.pkl', 'rb') as f:
samples, labels = pickle.load(f)
logging.getLogger().setLevel(logging.DEBUG)
batch_size = len(labels)
samples = np.array(samples)
labels = np.array(labels)
train_iter = mx.io.NDArrayIter(samples, labels, batch_size)
model = mx.model.FeedForward.create(
symbol=mlp,
X=train_iter,
num_epoch=1000,
learning_rate=0.1,
momentum=0.99)
'''
# Alternative interface to train the model
model = mx.model.FeedForward(
symbol=mlp,
num_epoch=1000,
learning_rate=0.1,
momentum=0.99)
model.fit(X=train_iter)
'''
print(model.predict(mx.nd.array([[0.5, 0.5]])))
# Visualize result
X = np.arange(0, 1.05, 0.05)
Y = np.arange(0, 1.05, 0.05)
X, Y = np.meshgrid(X, Y)
grids = mx.nd.array([[X[i][j], Y[i][j]] for i in range(X.shape[0]) for j in range(X.shape[1])])
grid_probs = model.predict(grids)[:, 1].reshape(X.shape)
fig = plt.figure('Sample Surface')
ax = fig.gca(projection='3d')
ax.plot_surface(X, Y, grid_probs, alpha=0.15, color='k', rstride=2, cstride=2, lw=0.5)
samples0 = samples[labels==0]
samples0_probs = model.predict(samples0)[:, 1]
samples1 = samples[labels==1]
samples1_probs = model.predict(samples1)[:, 1]
ax.scatter(samples0[:, 0], samples0[:, 1], samples0_probs, c='b', marker='^', s=50)
ax.scatter(samples1[:, 0], samples1[:, 1], samples1_probs, c='r', marker='o', s=50)
plt.show()
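# ---------------------------------------------------------------------------
# Note: '../data.pkl' is assumed to hold a (samples, labels) tuple of 2-D
# points in the unit square with binary labels. An illustrative way to build
# compatible toy data (names and distribution are assumptions, not the
# original dataset):
#
#     import pickle
#     import numpy as np
#     samples = np.random.rand(200, 2)
#     labels = (samples.sum(axis=1) > 1).astype(int)
#     with open('data.pkl', 'wb') as f:
#         pickle.dump((samples.tolist(), labels.tolist()), f)
# ---------------------------------------------------------------------------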
| {
"content_hash": "216ac95466db9cb7b14ca2a8c44ce6b3",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 95,
"avg_line_length": 28.3,
"alnum_prop": 0.6824835941443715,
"repo_name": "frombeijingwithlove/dlcv_for_beginners",
"id": "dded1ce7665e0691b0655ce99af2f7548d7075b2",
"size": "1981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chap7/mxnet/simple_mlp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "134476"
},
{
"name": "Shell",
"bytes": "1053"
}
],
"symlink_target": ""
} |
import os
import subprocess
from textwrap import dedent
from mock import patch, Mock
import pytest
from pretend import stub
import pip
from pip.exceptions import (RequirementsFileParseError)
from pip.download import PipSession
from pip.index import PackageFinder
from pip.req.req_install import InstallRequirement
from pip.req.req_file import (parse_requirements, process_line, join_lines,
ignore_comments, break_args_options)
@pytest.fixture
def session():
return PipSession()
@pytest.fixture
def finder(session):
return PackageFinder([], [], session=session)
@pytest.fixture
def options(session):
return stub(
isolated_mode=False, default_vcs=None, index_url='default_url',
skip_requirements_regex=False,
format_control=pip.index.FormatControl(set(), set()))
class TestIgnoreComments(object):
"""tests for `ignore_comment`"""
def test_strip_empty_line(self):
lines = ['req1', '', 'req2']
result = ignore_comments(lines)
assert list(result) == ['req1', 'req2']
def test_strip_comment(self):
lines = ['req1', '# comment', 'req2']
result = ignore_comments(lines)
assert list(result) == ['req1', 'req2']
class TestJoinLines(object):
"""tests for `join_lines`"""
def test_join_lines(self):
lines = dedent('''\
line 1
line 2:1 \\
line 2:2
line 3:1 \\
line 3:2 \\
line 3:3
line 4
''').splitlines()
expect = [
'line 1',
'line 2:1 line 2:2',
'line 3:1 line 3:2 line 3:3',
'line 4',
]
assert expect == list(join_lines(lines))
class TestProcessLine(object):
"""tests for `process_line`"""
def test_parser_error(self):
with pytest.raises(RequirementsFileParseError):
list(process_line("--bogus", "file", 1))
def test_only_one_req_per_line(self):
# pkg_resources raises the ValueError
with pytest.raises(ValueError):
list(process_line("req1 req2", "file", 1))
def test_yield_line_requirement(self):
line = 'SomeProject'
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_line(line, comes_from=comes_from)
assert repr(list(process_line(line, filename, 1))[0]) == repr(req)
def test_yield_line_constraint(self):
line = 'SomeProject'
filename = 'filename'
comes_from = '-c %s (line %s)' % (filename, 1)
req = InstallRequirement.from_line(
line, comes_from=comes_from, constraint=True)
found_req = list(process_line(line, filename, 1, constraint=True))[0]
assert repr(found_req) == repr(req)
assert found_req.constraint is True
def test_yield_line_requirement_with_spaces_in_specifier(self):
line = 'SomeProject >= 2'
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_line(line, comes_from=comes_from)
assert repr(list(process_line(line, filename, 1))[0]) == repr(req)
assert req.req.specs == [('>=', '2')]
def test_yield_editable_requirement(self):
url = 'git+https://url#egg=SomeProject'
line = '-e %s' % url
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_editable(url, comes_from=comes_from)
assert repr(list(process_line(line, filename, 1))[0]) == repr(req)
def test_yield_editable_constraint(self):
url = 'git+https://url#egg=SomeProject'
line = '-e %s' % url
filename = 'filename'
comes_from = '-c %s (line %s)' % (filename, 1)
req = InstallRequirement.from_editable(
url, comes_from=comes_from, constraint=True)
found_req = list(process_line(line, filename, 1, constraint=True))[0]
assert repr(found_req) == repr(req)
assert found_req.constraint is True
def test_nested_requirements_file(self, monkeypatch):
line = '-r another_file'
req = InstallRequirement.from_line('SomeProject')
import pip.req.req_file
def stub_parse_requirements(req_url, finder, comes_from, options,
session, wheel_cache, constraint):
return [(req, constraint)]
parse_requirements_stub = stub(call=stub_parse_requirements)
monkeypatch.setattr(pip.req.req_file, 'parse_requirements',
parse_requirements_stub.call)
assert list(process_line(line, 'filename', 1)) == [(req, False)]
def test_nested_constraints_file(self, monkeypatch):
line = '-c another_file'
req = InstallRequirement.from_line('SomeProject')
import pip.req.req_file
def stub_parse_requirements(req_url, finder, comes_from, options,
session, wheel_cache, constraint):
return [(req, constraint)]
parse_requirements_stub = stub(call=stub_parse_requirements)
monkeypatch.setattr(pip.req.req_file, 'parse_requirements',
parse_requirements_stub.call)
assert list(process_line(line, 'filename', 1)) == [(req, True)]
def test_options_on_a_requirement_line(self):
line = 'SomeProject --install-option=yo1 --install-option yo2 '\
'--global-option="yo3" --global-option "yo4"'
filename = 'filename'
req = list(process_line(line, filename, 1))[0]
assert req.options == {
'global_options': ['yo3', 'yo4'],
'install_options': ['yo1', 'yo2']}
def test_set_isolated(self, options):
line = 'SomeProject'
filename = 'filename'
options.isolated_mode = True
result = process_line(line, filename, 1, options=options)
assert list(result)[0].isolated
def test_set_default_vcs(self, options):
url = 'https://url#egg=SomeProject'
line = '-e %s' % url
filename = 'filename'
options.default_vcs = 'git'
result = process_line(line, filename, 1, options=options)
assert list(result)[0].link.url == 'git+' + url
def test_set_finder_no_index(self, finder):
list(process_line("--no-index", "file", 1, finder=finder))
assert finder.index_urls == []
def test_set_finder_index_url(self, finder):
list(process_line("--index-url=url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_set_finder_find_links(self, finder):
list(process_line("--find-links=url", "file", 1, finder=finder))
assert finder.find_links == ['url']
def test_set_finder_extra_index_urls(self, finder):
list(process_line("--extra-index-url=url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_set_finder_use_wheel(self, finder):
list(process_line("--use-wheel", "file", 1, finder=finder))
no_use_wheel_fmt = pip.index.FormatControl(set(), set())
assert finder.format_control == no_use_wheel_fmt
def test_set_finder_no_use_wheel(self, finder):
list(process_line("--no-use-wheel", "file", 1, finder=finder))
no_use_wheel_fmt = pip.index.FormatControl(set([':all:']), set())
assert finder.format_control == no_use_wheel_fmt
def test_set_finder_trusted_host(self, finder):
list(process_line("--trusted-host=url", "file", 1, finder=finder))
assert finder.secure_origins == [('*', 'url', '*')]
def test_noop_always_unzip(self, finder):
# noop, but confirm it can be set
list(process_line("--always-unzip", "file", 1, finder=finder))
def test_noop_finder_no_allow_unsafe(self, finder):
# noop, but confirm it can be set
list(process_line("--no-allow-insecure", "file", 1, finder=finder))
def test_relative_local_find_links(self, finder, monkeypatch):
"""
Test a relative find_links path is joined with the req file directory
"""
req_file = '/path/req_file.txt'
nested_link = '/path/rel_path'
exists_ = os.path.exists
def exists(path):
if path == nested_link:
return True
else:
                return exists_(path)
monkeypatch.setattr(os.path, 'exists', exists)
list(process_line("--find-links=rel_path", req_file, 1,
finder=finder))
assert finder.find_links == [nested_link]
def test_relative_http_nested_req_files(self, finder, monkeypatch):
"""
Test a relative nested req file path is joined with the req file url
"""
req_file = 'http://me.com/me/req_file.txt'
def parse(*args, **kwargs):
return iter([])
mock_parse = Mock()
mock_parse.side_effect = parse
monkeypatch.setattr(pip.req.req_file, 'parse_requirements', mock_parse)
list(process_line("-r reqs.txt", req_file, 1, finder=finder))
call = mock_parse.mock_calls[0]
assert call[1][0] == 'http://me.com/me/reqs.txt'
def test_relative_local_nested_req_files(self, finder, monkeypatch):
"""
Test a relative nested req file path is joined with the req file dir
"""
req_file = '/path/req_file.txt'
def parse(*args, **kwargs):
return iter([])
mock_parse = Mock()
mock_parse.side_effect = parse
monkeypatch.setattr(pip.req.req_file, 'parse_requirements', mock_parse)
list(process_line("-r reqs.txt", req_file, 1, finder=finder))
call = mock_parse.mock_calls[0]
assert call[1][0] == '/path/reqs.txt'
def test_absolute_local_nested_req_files(self, finder, monkeypatch):
"""
Test an absolute nested req file path
"""
req_file = '/path/req_file.txt'
def parse(*args, **kwargs):
return iter([])
mock_parse = Mock()
mock_parse.side_effect = parse
monkeypatch.setattr(pip.req.req_file, 'parse_requirements', mock_parse)
list(process_line("-r /other/reqs.txt", req_file, 1, finder=finder))
call = mock_parse.mock_calls[0]
assert call[1][0] == '/other/reqs.txt'
def test_absolute_http_nested_req_file_in_local(self, finder, monkeypatch):
"""
Test a nested req file url in a local req file
"""
req_file = '/path/req_file.txt'
def parse(*args, **kwargs):
return iter([])
mock_parse = Mock()
mock_parse.side_effect = parse
monkeypatch.setattr(pip.req.req_file, 'parse_requirements', mock_parse)
list(process_line("-r http://me.com/me/reqs.txt", req_file, 1,
finder=finder))
call = mock_parse.mock_calls[0]
assert call[1][0] == 'http://me.com/me/reqs.txt'
def test_set_finder_process_dependency_links(self, finder):
list(process_line(
"--process-dependency-links", "file", 1, finder=finder))
assert finder.process_dependency_links
class TestBreakOptionsArgs(object):
def test_no_args(self):
assert ('', '--option') == break_args_options('--option')
def test_no_options(self):
assert ('arg arg', '') == break_args_options('arg arg')
def test_args_short_options(self):
result = break_args_options('arg arg -s')
assert ('arg arg', '-s') == result
def test_args_long_options(self):
result = break_args_options('arg arg --long')
assert ('arg arg', '--long') == result
class TestOptionVariants(object):
# this suite is really just testing optparse, but added it anyway
def test_variant1(self, finder):
list(process_line("-i url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_variant2(self, finder):
list(process_line("-i 'url'", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_variant3(self, finder):
list(process_line("--index-url=url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_variant4(self, finder):
list(process_line("--index-url url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_variant5(self, finder):
list(process_line("--index-url='url'", "file", 1, finder=finder))
assert finder.index_urls == ['url']
class TestParseRequirements(object):
"""tests for `parse_requirements`"""
@pytest.mark.network
def test_remote_reqs_parse(self):
"""
Test parsing a simple remote requirements file
"""
        # this requirements file just contains a comment; previously this has
# failed in py3: https://github.com/pypa/pip/issues/760
for req in parse_requirements(
'https://raw.githubusercontent.com/pypa/'
'pip-test-package/master/'
'tests/req_just_comment.txt', session=PipSession()):
pass
def test_multiple_appending_options(self, tmpdir, finder, options):
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("--extra-index-url url1 \n")
fp.write("--extra-index-url url2 ")
list(parse_requirements(tmpdir.join("req1.txt"), finder=finder,
session=PipSession(), options=options))
assert finder.index_urls == ['url1', 'url2']
def test_skip_regex(self, tmpdir, finder, options):
options.skip_requirements_regex = '.*Bad.*'
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("--extra-index-url Bad \n")
fp.write("--extra-index-url Good ")
list(parse_requirements(tmpdir.join("req1.txt"), finder=finder,
options=options, session=PipSession()))
assert finder.index_urls == ['Good']
def test_join_lines(self, tmpdir, finder):
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("--extra-index-url url1 \\\n--extra-index-url url2")
list(parse_requirements(tmpdir.join("req1.txt"), finder=finder,
session=PipSession()))
assert finder.index_urls == ['url1', 'url2']
def test_req_file_parse_no_only_binary(self, data, finder):
list(parse_requirements(
data.reqfiles.join("supported_options2.txt"), finder,
session=PipSession()))
expected = pip.index.FormatControl(set(['fred']), set(['wilma']))
assert finder.format_control == expected
def test_req_file_parse_comment_start_of_line(self, tmpdir, finder):
"""
Test parsing comments in a requirements file
"""
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("# Comment ")
reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder,
session=PipSession()))
assert not reqs
def test_req_file_parse_comment_end_of_line_with_url(self, tmpdir, finder):
"""
Test parsing comments in a requirements file
"""
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("https://example.com/foo.tar.gz # Comment ")
reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder,
session=PipSession()))
assert len(reqs) == 1
assert reqs[0].link.url == "https://example.com/foo.tar.gz"
def test_req_file_parse_egginfo_end_of_line_with_url(self, tmpdir, finder):
"""
Test parsing comments in a requirements file
"""
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("https://example.com/foo.tar.gz#egg=wat")
reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder,
session=PipSession()))
assert len(reqs) == 1
assert reqs[0].name == "wat"
def test_req_file_no_finder(self, tmpdir):
"""
Test parsing a requirements file without a finder
"""
with open(tmpdir.join("req.txt"), "w") as fp:
fp.write("""
--find-links https://example.com/
--index-url https://example.com/
--extra-index-url https://two.example.com/
--no-use-wheel
--no-index
""")
parse_requirements(tmpdir.join("req.txt"), session=PipSession())
def test_install_requirements_with_options(self, tmpdir, finder, session,
options):
global_option = '--dry-run'
install_option = '--prefix=/opt'
content = '''
--only-binary :all:
INITools==2.0 --global-option="{global_option}" \
--install-option "{install_option}"
'''.format(global_option=global_option, install_option=install_option)
req_path = tmpdir.join('requirements.txt')
with open(req_path, 'w') as fh:
fh.write(content)
req = next(parse_requirements(
req_path, finder=finder, options=options, session=session))
req.source_dir = os.curdir
with patch.object(subprocess, 'Popen') as popen:
popen.return_value.stdout.readline.return_value = ""
try:
req.install([])
except:
pass
call = popen.call_args_list[0][0][0]
assert call.index(install_option) > \
call.index('install') > \
call.index(global_option) > 0
assert options.format_control.no_binary == set([':all:'])
assert options.format_control.only_binary == set([])
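# ---------------------------------------------------------------------------
# For reference, a requirements file exercising the features covered by these
# tests might look like the following (illustrative example only, not a
# fixture used by the test suite):
#
#     # comments and blank lines are ignored
#     --index-url url1 \
#         --extra-index-url url2
#     SomeProject >= 2 --install-option="--prefix=/opt"
#     -e git+https://url#egg=SomeProject
#     -r nested-requirements.txt
# ---------------------------------------------------------------------------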
| {
"content_hash": "1f106d144d6cebb7c2d9b48c3b4d15c9",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 79,
"avg_line_length": 36.86278586278586,
"alnum_prop": 0.5862049517793695,
"repo_name": "zorosteven/pip",
"id": "0c2b3ae3f45a40069abee8274a8970e9ad9cfa1b",
"size": "17731",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/unit/test_req_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "2200122"
},
{
"name": "Shell",
"bytes": "2326"
}
],
"symlink_target": ""
} |
import mimetypes
from datetime import datetime
from operator import itemgetter
import wtforms
from werkzeug.datastructures import FileStorage
from wtforms.validators import InputRequired, NumberRange, ValidationError
from indico.modules.events.registration.fields.base import RegistrationFormBillableField, RegistrationFormFieldBase
from indico.util.countries import get_countries, get_country
from indico.util.date_time import strftime_all_years
from indico.util.fs import secure_client_filename
from indico.util.i18n import L_, _
from indico.util.string import normalize_phone_number
from indico.web.forms.fields import IndicoRadioField
from indico.web.forms.validators import IndicoEmail
class TextField(RegistrationFormFieldBase):
name = 'text'
wtf_field_class = wtforms.StringField
class NumberField(RegistrationFormBillableField):
name = 'number'
wtf_field_class = wtforms.IntegerField
required_validator = InputRequired
@property
def validators(self):
min_value = self.form_item.data.get('min_value', None)
return [NumberRange(min=min_value)] if min_value else None
def calculate_price(self, reg_data, versioned_data):
if not versioned_data.get('is_billable'):
return 0
return versioned_data.get('price', 0) * int(reg_data or 0)
def get_friendly_data(self, registration_data, for_humans=False, for_search=False):
if registration_data.data is None:
return ''
return str(registration_data.data) if for_humans else registration_data.data
class TextAreaField(RegistrationFormFieldBase):
name = 'textarea'
wtf_field_class = wtforms.StringField
class CheckboxField(RegistrationFormBillableField):
name = 'checkbox'
wtf_field_class = wtforms.BooleanField
friendly_data_mapping = {None: '',
True: L_('Yes'),
False: L_('No')}
def calculate_price(self, reg_data, versioned_data):
if not versioned_data.get('is_billable') or not reg_data:
return 0
return versioned_data.get('price', 0)
def get_friendly_data(self, registration_data, for_humans=False, for_search=False):
return self.friendly_data_mapping[registration_data.data]
def get_places_used(self):
places_used = 0
if self.form_item.data.get('places_limit'):
for registration in self.form_item.registration_form.active_registrations:
if self.form_item.id not in registration.data_by_field:
continue
if registration.data_by_field[self.form_item.id].data:
places_used += 1
return places_used
@property
def view_data(self):
return dict(super().view_data, places_used=self.get_places_used())
@property
def filter_choices(self):
return {str(val).lower(): caption for val, caption in self.friendly_data_mapping.items()
if val is not None}
@property
def validators(self):
def _check_number_of_places(form, field):
if form.modified_registration:
old_data = form.modified_registration.data_by_field.get(self.form_item.id)
if not old_data or not self.has_data_changed(field.data, old_data):
return
if field.data and self.form_item.data.get('places_limit'):
places_left = self.form_item.data.get('places_limit') - self.get_places_used()
if not places_left:
raise ValidationError(_('There are no places left for this option.'))
return [_check_number_of_places]
@property
def default_value(self):
return None
class DateField(RegistrationFormFieldBase):
name = 'date'
wtf_field_class = wtforms.StringField
def process_form_data(self, registration, value, old_data=None, billable_items_locked=False):
if value:
date_format = self.form_item.data['date_format']
value = datetime.strptime(value, date_format).isoformat()
return super().process_form_data(registration, value, old_data, billable_items_locked)
def get_friendly_data(self, registration_data, for_humans=False, for_search=False):
date_string = registration_data.data
if not date_string:
return ''
elif for_search:
return date_string # already in isoformat
dt = datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S')
return strftime_all_years(dt, self.form_item.data['date_format'])
@property
def view_data(self):
has_time = ' ' in self.form_item.data['date_format']
return dict(super().view_data, has_time=has_time)
class BooleanField(RegistrationFormBillableField):
name = 'bool'
wtf_field_class = IndicoRadioField
required_validator = InputRequired
friendly_data_mapping = {None: '',
True: L_('Yes'),
False: L_('No')}
@property
def wtf_field_kwargs(self):
return {'choices': [('yes', _('Yes')), ('no', _('No'))],
'coerce': lambda x: {'yes': True, 'no': False}.get(x, None)}
@property
def filter_choices(self):
return {str(val).lower(): caption for val, caption in self.friendly_data_mapping.items()
if val is not None}
@property
def view_data(self):
return dict(super().view_data, places_used=self.get_places_used())
@property
def validators(self):
def _check_number_of_places(form, field):
if form.modified_registration:
old_data = form.modified_registration.data_by_field.get(self.form_item.id)
if not old_data or not self.has_data_changed(field.data, old_data):
return
if field.data and self.form_item.data.get('places_limit'):
places_left = self.form_item.data.get('places_limit') - self.get_places_used()
if field.data and not places_left:
raise ValidationError(_('There are no places left for this option.'))
return [_check_number_of_places]
@property
def default_value(self):
return None
def get_places_used(self):
places_used = 0
if self.form_item.data.get('places_limit'):
for registration in self.form_item.registration_form.active_registrations:
if self.form_item.id not in registration.data_by_field:
continue
if registration.data_by_field[self.form_item.id].data:
places_used += 1
return places_used
def calculate_price(self, reg_data, versioned_data):
if not versioned_data.get('is_billable'):
return 0
return versioned_data.get('price', 0) if reg_data else 0
def get_friendly_data(self, registration_data, for_humans=False, for_search=False):
return self.friendly_data_mapping[registration_data.data]
class PhoneField(RegistrationFormFieldBase):
name = 'phone'
wtf_field_class = wtforms.StringField
wtf_field_kwargs = {'filters': [lambda x: normalize_phone_number(x) if x else '']}
class CountryField(RegistrationFormFieldBase):
name = 'country'
wtf_field_class = wtforms.SelectField
@property
def wtf_field_kwargs(self):
return {'choices': sorted(get_countries().items(), key=itemgetter(1))}
@classmethod
def unprocess_field_data(cls, versioned_data, unversioned_data):
choices = sorted(({'caption': v, 'countryKey': k} for k, v in get_countries().items()),
key=itemgetter('caption'))
return {'choices': choices}
@property
def filter_choices(self):
return dict(self.wtf_field_kwargs['choices'])
def get_friendly_data(self, registration_data, for_humans=False, for_search=False):
if registration_data.data == 'None':
# XXX: Not sure where this garbage data is coming from, but it resulted in
# this method returning `None` and thus breaking the participant list..
return ''
return get_country(registration_data.data) if registration_data.data else ''
class _DeletableFileField(wtforms.FileField):
def process_formdata(self, valuelist):
if not valuelist:
self.data = {'keep_existing': False, 'uploaded_file': None}
else:
# This expects a form with a hidden field and a file field with the same name.
# If the hidden field is empty, it indicates that an existing file should be
# deleted or replaced with the newly uploaded file.
keep_existing = '' not in valuelist
uploaded_file = next((x for x in valuelist if isinstance(x, FileStorage)), None)
if not uploaded_file or not uploaded_file.filename:
uploaded_file = None
self.data = {'keep_existing': keep_existing, 'uploaded_file': uploaded_file}
class FileField(RegistrationFormFieldBase):
name = 'file'
wtf_field_class = _DeletableFileField
def process_form_data(self, registration, value, old_data=None, billable_items_locked=False):
data = {'field_data': self.form_item.current_data}
if not value:
return data
file_ = value['uploaded_file']
if file_:
# we have a file -> always save it
data['file'] = {
'data': file_.stream,
'name': secure_client_filename(file_.filename),
'content_type': mimetypes.guess_type(file_.filename)[0] or file_.mimetype or 'application/octet-stream'
}
elif not value['keep_existing']:
data['file'] = None
return data
@property
def default_value(self):
return None
def get_friendly_data(self, registration_data, for_humans=False, for_search=False):
if not registration_data:
return ''
return registration_data.filename
class EmailField(RegistrationFormFieldBase):
name = 'email'
wtf_field_class = wtforms.StringField
wtf_field_kwargs = {'filters': [lambda x: x.lower() if x else x]}
@property
def validators(self):
return [IndicoEmail()]
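# Illustrative sketch (not part of Indico): a further field type following the
# same pattern as TextField and EmailField above. The name 'TokenField', its
# `name` identifier and its normalisation behaviour are assumptions chosen for
# demonstration only; a real field type would typically need additional wiring
# elsewhere in Indico.
class TokenField(RegistrationFormFieldBase):
    name = 'token'
    wtf_field_class = wtforms.StringField
    # strip surrounding whitespace and lower-case the submitted value,
    # passing empty input through unchanged (mirrors EmailField's filter)
    wtf_field_kwargs = {'filters': [lambda x: x.strip().lower() if x else x]}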
| {
"content_hash": "d404e97b0beb330b1e094af858da32a4",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 119,
"avg_line_length": 38.2639405204461,
"alnum_prop": 0.6357718838045273,
"repo_name": "ThiefMaster/indico",
"id": "acc5169200151d14d92be1ecaeb3f3e7a0cb0cb4",
"size": "10507",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/events/registration/fields/simple.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1411006"
},
{
"name": "JavaScript",
"bytes": "2083786"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5133951"
},
{
"name": "SCSS",
"bytes": "476568"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
"""
pyrseas.dbobject.operfamily
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module defines two classes: OperatorFamily derived from
DbSchemaObject and OperatorFamilyDict derived from DbObjectDict.
"""
from pyrseas.dbobject import DbObjectDict, DbSchemaObject
from pyrseas.dbobject import commentable, ownable, split_schema_obj
class OperatorFamily(DbSchemaObject):
"""An operator family"""
keylist = ['schema', 'name', 'index_method']
single_extern_file = True
catalog = 'pg_opfamily'
def __init__(self, name, schema, index_method, description, owner,
oid=None):
"""Initialize the operator family
:param name: operator name (from opfname)
:param schema: schema name (from opfnamespace)
:param index_method: index access method (from amname via opfmethod)
:param description: comment text (from obj_description())
:param owner: owner name (from rolname via opfowner)
"""
super(OperatorFamily, self).__init__(name, schema, description)
self._init_own_privs(owner, [])
self.index_method = index_method
self.oid = oid
@property
def objtype(self):
return "OPERATOR FAMILY"
def extern_key(self):
"""Return the key to be used in external maps for the operator family
:return: string
"""
return '%s %s using %s' % (self.objtype.lower(), self.name,
self.index_method)
def identifier(self):
"""Return a full identifier for an operator family object
:return: string
"""
return "%s USING %s" % (self.qualname(), self.index_method)
@commentable
@ownable
def create(self):
"""Return SQL statements to CREATE the operator family
:return: SQL statements
"""
return ["CREATE OPERATOR FAMILY %s USING %s" % (
self.qualname(), self.index_method)]
class OperatorFamilyDict(DbObjectDict):
"The collection of operator families in a database"
cls = OperatorFamily
query = \
"""SELECT o.oid,
nspname AS schema, opfname AS name, rolname AS owner,
amname AS index_method,
obj_description(o.oid, 'pg_opfamily') AS description
FROM pg_opfamily o
JOIN pg_roles r ON (r.oid = opfowner)
JOIN pg_am a ON (opfmethod = a.oid)
JOIN pg_namespace n ON (opfnamespace = n.oid)
WHERE (nspname != 'pg_catalog' AND nspname != 'information_schema')
AND o.oid NOT IN (
SELECT objid FROM pg_depend WHERE deptype = 'e'
AND classid = 'pg_opfamily'::regclass)
ORDER BY opfnamespace, opfname, amname"""
def from_map(self, schema, inopfams):
"""Initalize the dict of operator families by converting the input map
:param schema: schema owning the operators
:param inopfams: YAML map defining the operator families
"""
for key in inopfams:
if not key.startswith('operator family ') or ' using ' not in key:
raise KeyError("Unrecognized object type: %s" % key)
pos = key.rfind(' using ')
opf = key[16:pos] # 16 = len('operator family ')
idx = key[pos + 7:] # 7 = len(' using ')
inopfam = inopfams[key]
self[(schema.name, opf, idx)] = opfam = OperatorFamily(
opf, schema.name, idx, inopfam.pop('description', None),
inopfam.pop('owner', None))
if 'oldname' in inopfam:
opfam.oldname = inopfam.get('oldname')
def find(self, obj, meth):
schema, name = split_schema_obj(obj)
return self.get((schema, name, meth))
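# Illustrative example (made-up names) of the external YAML map that
# OperatorFamilyDict.from_map consumes for a given schema; each key combines
# the object type, the family name and the index access method:
#
#     operator family integer_ops using btree:
#       owner: postgres
#       description: Btree operator family for integer types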
| {
"content_hash": "b937f938547a68a768bd8987a0010371",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 78,
"avg_line_length": 36.70192307692308,
"alnum_prop": 0.5858003667801939,
"repo_name": "dvarrazzo/Pyrseas",
"id": "ed7d366dc3690c59c2998be267b2adc044268cda",
"size": "3841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrseas/dbobject/operfamily.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "352"
},
{
"name": "PLpgSQL",
"bytes": "55358"
},
{
"name": "Python",
"bytes": "758329"
}
],
"symlink_target": ""
} |
"""A dummy exception subclass used by core/discover.py's unit tests."""
from unittest_data.discoverable_classes import discover_dummyclass
class DummyExceptionWithParameterImpl2(discover_dummyclass.DummyException):
def __init__(self, parameter1, parameter2):
super(DummyExceptionWithParameterImpl2, self).__init__()
del parameter1, parameter2
| {
"content_hash": "b6b80b511b6e5818c3a48ef0ce37712b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 75,
"avg_line_length": 50.57142857142857,
"alnum_prop": 0.7909604519774012,
"repo_name": "SaschaMester/delicium",
"id": "f299f82a12d1f212ca7bc28a4098684b7af2607e",
"size": "517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/unittest_data/discoverable_classes/parameter_discover_dummyclass.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23829"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "4171711"
},
{
"name": "C++",
"bytes": "243066171"
},
{
"name": "CSS",
"bytes": "935112"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27211018"
},
{
"name": "Java",
"bytes": "14285999"
},
{
"name": "JavaScript",
"bytes": "20413885"
},
{
"name": "Makefile",
"bytes": "23496"
},
{
"name": "Objective-C",
"bytes": "1725804"
},
{
"name": "Objective-C++",
"bytes": "9880229"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "478406"
},
{
"name": "Python",
"bytes": "8261413"
},
{
"name": "Shell",
"bytes": "482077"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
from tensorflowjs import version
# File name for the indexing JSON file in an artifact directory.
ARTIFACT_MODEL_JSON_FILE_NAME = 'model.json'
ASSETS_DIRECTORY_NAME = 'assets'
# JSON string keys for fields of the indexing JSON.
ARTIFACT_MODEL_TOPOLOGY_KEY = 'modelTopology'
ARTIFACT_MODEL_INITIALIZER = 'modelInitializer'
ARTIFACT_WEIGHTS_MANIFEST_KEY = 'weightsManifest'
FORMAT_KEY = 'format'
TFJS_GRAPH_MODEL_FORMAT = 'graph-model'
TFJS_LAYERS_MODEL_FORMAT = 'layers-model'
GENERATED_BY_KEY = 'generatedBy'
CONVERTED_BY_KEY = 'convertedBy'
SIGNATURE_KEY = 'signature'
INITIALIZER_SIGNATURE_KEY = 'initializerSignature'
USER_DEFINED_METADATA_KEY = 'userDefinedMetadata'
STRUCTURED_OUTPUTS_KEYS_KEY = 'structuredOutputKeys'
RESOURCE_ID_KEY = 'resourceId'
# Model formats.
KERAS_SAVED_MODEL = 'keras_saved_model'
KERAS_MODEL = 'keras'
TF_SAVED_MODEL = 'tf_saved_model'
TF_HUB_MODEL = 'tf_hub'
TFJS_GRAPH_MODEL = 'tfjs_graph_model'
TFJS_LAYERS_MODEL = 'tfjs_layers_model'
TF_FROZEN_MODEL = 'tf_frozen_model'
# CLI argument strings.
INPUT_PATH = 'input_path'
OUTPUT_PATH = 'output_path'
INPUT_FORMAT = 'input_format'
OUTPUT_FORMAT = 'output_format'
OUTPUT_NODE = 'output_node_names'
SIGNATURE_NAME = 'signature_name'
SAVED_MODEL_TAGS = 'saved_model_tags'
QUANTIZATION_BYTES = 'quantization_bytes'
QUANTIZATION_TYPE_FLOAT16 = 'quantize_float16'
QUANTIZATION_TYPE_UINT8 = 'quantize_uint8'
QUANTIZATION_TYPE_UINT16 = 'quantize_uint16'
SPLIT_WEIGHTS_BY_LAYER = 'split_weights_by_layer'
VERSION = 'version'
SKIP_OP_CHECK = 'skip_op_check'
STRIP_DEBUG_OPS = 'strip_debug_ops'
USE_STRUCTURED_OUTPUTS_NAMES = 'use_structured_outputs_names'
WEIGHT_SHARD_SIZE_BYTES = 'weight_shard_size_bytes'
CONTROL_FLOW_V2 = 'control_flow_v2'
EXPERIMENTS = 'experiments'
METADATA = 'metadata'
def get_converted_by():
"""Get the convertedBy string for storage in model artifacts."""
return 'TensorFlow.js Converter v%s' % version.version
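# Illustrative sketch (added; the field values are assumptions, not defined in
# this file): the constants above name the top-level keys of model.json, so a
# minimal layers-model artifact could look roughly like
#   {
#       FORMAT_KEY: TFJS_LAYERS_MODEL_FORMAT,
#       GENERATED_BY_KEY: 'keras v2.x',
#       CONVERTED_BY_KEY: get_converted_by(),
#       ARTIFACT_MODEL_TOPOLOGY_KEY: {...},
#       ARTIFACT_WEIGHTS_MANIFEST_KEY: [{'paths': [...], 'weights': [...]}],
#   }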
| {
"content_hash": "1a50a2f845a22139b639f917937a5751",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 66,
"avg_line_length": 32.610169491525426,
"alnum_prop": 0.7598752598752598,
"repo_name": "tensorflow/tfjs",
"id": "1ed96dd5a2c19c1436c51886efe5cab714e2ee56",
"size": "2579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfjs-converter/python/tensorflowjs/converters/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2165"
},
{
"name": "C",
"bytes": "1149"
},
{
"name": "C++",
"bytes": "511030"
},
{
"name": "CSS",
"bytes": "27067"
},
{
"name": "Dockerfile",
"bytes": "1840"
},
{
"name": "HTML",
"bytes": "132169"
},
{
"name": "Java",
"bytes": "4081"
},
{
"name": "JavaScript",
"bytes": "1200362"
},
{
"name": "Objective-C",
"bytes": "5247"
},
{
"name": "Python",
"bytes": "518704"
},
{
"name": "Ruby",
"bytes": "1981"
},
{
"name": "Shell",
"bytes": "76252"
},
{
"name": "Starlark",
"bytes": "176198"
},
{
"name": "TypeScript",
"bytes": "10878537"
}
],
"symlink_target": ""
} |
import os
from oslo.config import cfg
from oslo.utils import strutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common import image_service as service
from ironic.common import keystone
from ironic.common import states
from ironic.common import utils
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import image_cache
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(rameshg87): This file now registers some of the opts in the pxe group.
# This is acceptable for now as a future refactoring into
# separate boot and deploy interfaces is planned, and moving config
# options twice is not recommended. Hence we would move the parameters
# to the appropriate place in the final refactoring.
pxe_opts = [
cfg.StrOpt('pxe_append_params',
default='nofb nomodeset vga=normal',
help='Additional append parameters for baremetal PXE boot.'),
cfg.StrOpt('default_ephemeral_format',
default='ext4',
help='Default file system format for ephemeral partition, '
'if one is created.'),
cfg.StrOpt('images_path',
default='/var/lib/ironic/images/',
help='Directory where images are stored on disk.'),
cfg.StrOpt('instance_master_path',
default='/var/lib/ironic/master_images',
help='Directory where master instance images are stored on '
'disk.'),
cfg.IntOpt('image_cache_size',
default=20480,
help='Maximum size (in MiB) of cache for master images, '
'including those in use.'),
# 10080 here is 1 week - 60*24*7. It is entirely arbitrary in the absence
# of a facility to disable the ttl entirely.
cfg.IntOpt('image_cache_ttl',
default=10080,
help='Maximum TTL (in minutes) for old master images in '
'cache.'),
cfg.StrOpt('disk_devices',
default='cciss/c0d0,sda,hda,vda',
help='The disk devices to scan while doing the deploy.'),
]
CONF = cfg.CONF
CONF.register_opts(pxe_opts, group='pxe')
@image_cache.cleanup(priority=50)
class InstanceImageCache(image_cache.ImageCache):
def __init__(self, image_service=None):
super(self.__class__, self).__init__(
CONF.pxe.instance_master_path,
# MiB -> B
cache_size=CONF.pxe.image_cache_size * 1024 * 1024,
# min -> sec
cache_ttl=CONF.pxe.image_cache_ttl * 60,
image_service=image_service)
def _get_image_dir_path(node_uuid):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.pxe.images_path, node_uuid)
def _get_image_file_path(node_uuid):
"""Generate the full path for an instances disk."""
return os.path.join(_get_image_dir_path(node_uuid), 'disk')
def parse_instance_info(node):
"""Gets the instance specific Node deployment info.
This method validates whether the 'instance_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node.
:returns: A dict with the instance_info values.
:raises: MissingParameterValue, if any of the required parameters are
missing.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
"""
info = node.instance_info
i_info = {}
i_info['image_source'] = info.get('image_source')
i_info['root_gb'] = info.get('root_gb')
error_msg = _("Cannot validate iSCSI deploy")
deploy_utils.check_for_missing_params(i_info, error_msg)
# Internal use only
i_info['deploy_key'] = info.get('deploy_key')
i_info['swap_mb'] = info.get('swap_mb', 0)
i_info['ephemeral_gb'] = info.get('ephemeral_gb', 0)
i_info['ephemeral_format'] = info.get('ephemeral_format')
err_msg_invalid = _("Cannot validate parameter for iSCSI deploy. "
"Invalid parameter %(param)s. Reason: %(reason)s")
for param in ('root_gb', 'swap_mb', 'ephemeral_gb'):
try:
int(i_info[param])
except ValueError:
reason = _("'%s' is not an integer value.") % i_info[param]
raise exception.InvalidParameterValue(err_msg_invalid %
{'param': param, 'reason': reason})
if i_info['ephemeral_gb'] and not i_info['ephemeral_format']:
i_info['ephemeral_format'] = CONF.pxe.default_ephemeral_format
preserve_ephemeral = info.get('preserve_ephemeral', False)
try:
i_info['preserve_ephemeral'] = strutils.bool_from_string(
preserve_ephemeral, strict=True)
except ValueError as e:
raise exception.InvalidParameterValue(err_msg_invalid %
{'param': 'preserve_ephemeral', 'reason': e})
return i_info
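# Illustrative sketch (added; the values below are assumptions): for this driver
# node.instance_info is expected to look roughly like
#   {'image_source': '<glance image UUID>', 'root_gb': 10, 'swap_mb': 0,
#    'ephemeral_gb': 0, 'deploy_key': '<32-char key>', 'preserve_ephemeral': False}
# and parse_instance_info() raises MissingParameterValue/InvalidParameterValue
# when required entries are absent or malformed.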
def check_image_size(task):
"""Check if the requested image is larger than the root partition size.
:param task: a TaskManager instance containing the node to act on.
:raises: InstanceDeployFailure if size of the image is greater than root
partition.
"""
i_info = parse_instance_info(task.node)
image_path = _get_image_file_path(task.node.uuid)
image_mb = deploy_utils.get_image_mb(image_path)
root_mb = 1024 * int(i_info['root_gb'])
if image_mb > root_mb:
msg = (_('Root partition is too small for requested image. '
'Image size: %(image_mb)d MB, Root size: %(root_mb)d MB')
% {'image_mb': image_mb, 'root_mb': root_mb})
raise exception.InstanceDeployFailure(msg)
def cache_instance_image(ctx, node):
"""Fetch the instance's image from Glance
    This method pulls the AMI and writes it to the appropriate place
on local disk.
:param ctx: context
:param node: an ironic node object
:returns: a tuple containing the uuid of the image and the path in
the filesystem where image is cached.
"""
i_info = parse_instance_info(node)
fileutils.ensure_tree(_get_image_dir_path(node.uuid))
image_path = _get_image_file_path(node.uuid)
uuid = i_info['image_source']
LOG.debug("Fetching image %(ami)s for node %(uuid)s",
{'ami': uuid, 'uuid': node.uuid})
deploy_utils.fetch_images(ctx, InstanceImageCache(), [(uuid, image_path)])
return (uuid, image_path)
def destroy_images(node_uuid):
"""Delete instance's image file.
:param node_uuid: the uuid of the ironic node.
"""
utils.unlink_without_raise(_get_image_file_path(node_uuid))
utils.rmtree_without_raise(_get_image_dir_path(node_uuid))
InstanceImageCache().clean_up()
def get_deploy_info(node, **kwargs):
"""Returns the information required for doing iSCSI deploy in a
dictionary.
:param node: ironic node object
:param kwargs: the keyword args passed from the conductor node.
:raises: MissingParameterValue, if some required parameters were not
passed.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
"""
deploy_key = kwargs.get('key')
i_info = parse_instance_info(node)
if i_info['deploy_key'] != deploy_key:
raise exception.InvalidParameterValue(_("Deploy key does not match"))
params = {'address': kwargs.get('address'),
'port': kwargs.get('port', '3260'),
'iqn': kwargs.get('iqn'),
'lun': kwargs.get('lun', '1'),
'image_path': _get_image_file_path(node.uuid),
'root_mb': 1024 * int(i_info['root_gb']),
'swap_mb': int(i_info['swap_mb']),
'ephemeral_mb': 1024 * int(i_info['ephemeral_gb']),
'preserve_ephemeral': i_info['preserve_ephemeral'],
'node_uuid': node.uuid,
}
missing = [key for key in params if params[key] is None]
if missing:
raise exception.MissingParameterValue(_(
"Parameters %s were not passed to ironic"
" for deploy.") % missing)
# ephemeral_format is nullable
params['ephemeral_format'] = i_info.get('ephemeral_format')
return params
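# Illustrative sketch (added; the values below are assumptions): the deploy
# ramdisk posts back something along the lines of
#   {'address': '192.168.1.10', 'port': '3260', 'iqn': 'iqn-<node uuid>',
#    'lun': '1', 'key': '<deploy_key from instance_info>'}
# which get_deploy_info() merges with parse_instance_info(node) into the
# parameter dict consumed by deploy_utils.deploy().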
def set_failed_state(task, msg):
"""Sets the deploy status as failed with relevant messages.
This method sets the deployment as fail with the given message.
It sets node's provision_state to DEPLOYFAIL and updates last_error
with the given error message. It also powers off the baremetal node.
:param task: a TaskManager instance containing the node to act on.
:param msg: the message to set in last_error of the node.
"""
node = task.node
node.provision_state = states.DEPLOYFAIL
node.target_provision_state = states.NOSTATE
node.save()
try:
manager_utils.node_power_action(task, states.POWER_OFF)
except Exception:
msg2 = (_('Node %s failed to power off while handling deploy '
'failure. This may be a serious condition. Node '
'should be removed from Ironic or put in maintenance '
'mode until the problem is resolved.') % node.uuid)
LOG.exception(msg2)
finally:
# NOTE(deva): node_power_action() erases node.last_error
# so we need to set it again here.
node.last_error = msg
node.save()
def continue_deploy(task, **kwargs):
"""Resume a deployment upon getting POST data from deploy ramdisk.
This method raises no exceptions because it is intended to be
invoked asynchronously as a callback from the deploy ramdisk.
:param task: a TaskManager instance containing the node to act on.
:param kwargs: the kwargs to be passed to deploy.
:returns: UUID of the root partition or None on error.
"""
node = task.node
node.provision_state = states.DEPLOYING
node.save()
params = get_deploy_info(node, **kwargs)
ramdisk_error = kwargs.get('error')
if ramdisk_error:
LOG.error(_LE('Error returned from deploy ramdisk: %s'),
ramdisk_error)
set_failed_state(task, _('Failure in deploy ramdisk.'))
destroy_images(node.uuid)
return
LOG.info(_LI('Continuing deployment for node %(node)s, params %(params)s'),
{'node': node.uuid, 'params': params})
root_uuid = None
try:
root_uuid = deploy_utils.deploy(**params)
except Exception as e:
LOG.error(_LE('Deploy failed for instance %(instance)s. '
'Error: %(error)s'),
{'instance': node.instance_uuid, 'error': e})
set_failed_state(task, _('Failed to continue iSCSI deployment.'))
destroy_images(node.uuid)
return root_uuid
def build_deploy_ramdisk_options(node):
"""Build the ramdisk config options for a node
This method builds the ramdisk options for a node,
given all the required parameters for doing iscsi deploy.
:param node: a single Node.
:returns: A dictionary of options to be passed to ramdisk for performing
the deploy.
"""
# NOTE: we should strip '/' from the end because this is intended for
# hardcoded ramdisk script
ironic_api = (CONF.conductor.api_url or
keystone.get_service_url()).rstrip('/')
deploy_key = utils.random_alnum(32)
i_info = node.instance_info
i_info['deploy_key'] = deploy_key
node.instance_info = i_info
node.save()
deploy_options = {
'deployment_id': node['uuid'],
'deployment_key': deploy_key,
'iscsi_target_iqn': "iqn-%s" % node.uuid,
'ironic_api_url': ironic_api,
'disk': CONF.pxe.disk_devices,
}
return deploy_options
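# Illustrative sketch (added; the API URL is an assumption): for a node with
# uuid 'abc-123' the options returned above would look roughly like
#   {'deployment_id': 'abc-123', 'deployment_key': '<random 32-char string>',
#    'iscsi_target_iqn': 'iqn-abc-123',
#    'ironic_api_url': 'http://ironic-api.example.com:6385',
#    'disk': 'cciss/c0d0,sda,hda,vda'}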
def validate_glance_image_properties(ctx, deploy_info, properties):
"""Validate the image in Glance.
Check if the image exist in Glance and if it contains the
properties passed.
:param ctx: security context
:param deploy_info: the deploy_info to be validated
:param properties: the list of image meta-properties to be validated.
:raises: InvalidParameterValue if connection to glance failed or
authorization for accessing image failed or if image doesn't exist.
:raises: MissingParameterValue if the glance image doesn't contain
the mentioned properties.
"""
image_id = deploy_info['image_source']
try:
glance_service = service.Service(version=1, context=ctx)
image_props = glance_service.show(image_id)['properties']
except (exception.GlanceConnectionFailed,
exception.ImageNotAuthorized,
exception.Invalid):
raise exception.InvalidParameterValue(_(
"Failed to connect to Glance to get the properties "
"of the image %s") % image_id)
except exception.ImageNotFound:
raise exception.InvalidParameterValue(_(
"Image %s not found in Glance") % image_id)
missing_props = []
for prop in properties:
if not image_props.get(prop):
missing_props.append(prop)
if missing_props:
props = ', '.join(missing_props)
raise exception.MissingParameterValue(_(
"Image %(image)s is missing the following properties: "
"%(properties)s") % {'image': image_id, 'properties': props})
def validate(task):
"""Validates the pre-requisites for iSCSI deploy.
    Validates whether the node in the provided task has any ports enrolled.
    This method also validates whether the conductor API URL is available,
    either from the CONF file or from the keystone catalog.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if no ports are enrolled for the given node.
"""
node = task.node
if not driver_utils.get_node_mac_addresses(task):
raise exception.InvalidParameterValue(_("Node %s does not have "
"any port associated with it.") % node.uuid)
try:
# TODO(lucasagomes): Validate the format of the URL
CONF.conductor.api_url or keystone.get_service_url()
except (exception.CatalogFailure,
exception.CatalogNotFound,
exception.CatalogUnauthorized):
raise exception.InvalidParameterValue(_(
"Couldn't get the URL of the Ironic API service from the "
"configuration file or keystone catalog."))
| {
"content_hash": "5542bdf1648cbc0494020eac190fd219",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 79,
"avg_line_length": 37.3756345177665,
"alnum_prop": 0.6407714246910227,
"repo_name": "debayanray/ironic_backup",
"id": "aa914e27c3f708a02af3dcab4ba007ff957f268c",
"size": "15383",
"binary": false,
"copies": "3",
"ref": "refs/heads/fix_for_bug_1418327_node_boot_mode",
"path": "ironic/drivers/modules/iscsi_deploy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "2208014"
}
],
"symlink_target": ""
} |
from __future__ import with_statement, division, unicode_literals
import json
from pkgutil import get_data
from jsonschema import validate, RefResolver, FormatChecker, ValidationError
SCHEMA_FILE = 'api.json'
REQUEST_SCHEMA = '#/definitions/request'
RESPONSE_SCHEMA = '#/definitions/response'
def load_schema():
    # Read the resource from the same directory as the (potentially-zipped) module
schema_data = get_data(__package__, SCHEMA_FILE).decode('utf-8')
return json.loads(schema_data)
def validate_subschema(data, schema_selector):
schema = load_schema()
resolver = RefResolver.from_schema(schema)
format_checker = FormatChecker()
    subschema = resolver.resolve_from_url(schema_selector)
    validate(data, subschema, resolver=resolver, format_checker=format_checker)
def validate_request(data):
validate_subschema(data, REQUEST_SCHEMA)
def validate_response(data):
validate_subschema(data, RESPONSE_SCHEMA)
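# Illustrative usage sketch (added; the payload is a made-up example, the real
# required fields are defined by '#/definitions/request' in api.json):
#   try:
#       validate_request({'patient': {}})
#   except ValidationError as err:
#       print(err)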
| {
"content_hash": "b996e0cb7e17210272cde63ec044bb7d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 84,
"avg_line_length": 28.90909090909091,
"alnum_prop": 0.749475890985325,
"repo_name": "MatchmakerExchange/reference-server",
"id": "209baaae6eb87ce3f2471e729a660ec12c74375d",
"size": "954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mme_server/schemas/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62016"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.webpubsub import WebPubSubManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-webpubsub
# USAGE
python web_pub_sub_custom_domains_delete.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = WebPubSubManagementClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-0000-0000-0000-000000000000",
)
response = client.web_pub_sub_custom_domains.begin_delete(
resource_group_name="myResourceGroup",
resource_name="myWebPubSubService",
name="example",
).result()
print(response)
# x-ms-original-file: specification/webpubsub/resource-manager/Microsoft.SignalRService/preview/2022-08-01-preview/examples/WebPubSubCustomDomains_Delete.json
if __name__ == "__main__":
main()
| {
"content_hash": "898fa69e0c1298030918aae4fbbbcb80",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 158,
"avg_line_length": 35.35294117647059,
"alnum_prop": 0.737936772046589,
"repo_name": "Azure/azure-sdk-for-python",
"id": "aa78c6ec7f179b4fa1e93e89578e59e6a2e96949",
"size": "1670",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/webpubsub/azure-mgmt-webpubsub/generated_samples/web_pub_sub_custom_domains_delete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
    # Hack to detect whether we are running under the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(map(lambda x: x / float(n_instances), runtimes))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
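# Illustrative usage sketch (added; the estimator and sizes are arbitrary):
#   X_train, y_train, X_test, y_test = generate_dataset(1000, 100, 100)
#   ridge = Ridge().fit(X_train, y_train)
#   atomic, bulk = benchmark_estimator(ridge, X_test)
#   # atomic holds one timing per test instance (here 100 values);
#   # bulk holds one per-instance average per bulk repeat (here 30 values).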
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : benchmark configuration dict (supplies the estimator labels)
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
xtick_names = plt.setp(ax1, xticklabels=cls_infos)
plt.setp(xtick_names)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.iteritems():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.iterkeys()):
x = np.array(sorted([n for n in percentiles[cls_name].iterkeys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
ax1.legend()
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
ax.legend()
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| {
"content_hash": "a73f10911345e6d16054d616c424b4ca",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 79,
"avg_line_length": 36.05079365079365,
"alnum_prop": 0.6177351179992955,
"repo_name": "loli/sklearn-ensembletrees",
"id": "ad2334a8669f78884b5c909bfff39788ec545464",
"size": "11356",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/applications/plot_prediction_latency.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18283366"
},
{
"name": "C++",
"bytes": "1807769"
},
{
"name": "JavaScript",
"bytes": "20564"
},
{
"name": "Python",
"bytes": "5190027"
},
{
"name": "Shell",
"bytes": "6015"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from functools import reduce
from utils.geo import get_nearest_destination, get_distances
from utils.morudall import get_last_lat_lon, get_avg_velocity, get_last_time_online, get_last_positions
def get_nearest_stop(latitude, longitude, kinds=None):
from .models import BusStop
# TODO: Flat select is better...
stops = BusStop.objects.all()
if kinds:
stops = stops.filter(stop_type__in=kinds)
if stops.count() > 0:
pos_list = [(x.latitude, x.longitude) for x in stops]
index, distance = get_nearest_destination((latitude, longitude), pos_list)
return stops[index], distance
return None, None
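# Illustrative usage sketch (added; the coordinates are arbitrary and the kinds
# shown are the ones used by is_parked() below):
#   stop, distance = get_nearest_stop(-26.91, -49.07, kinds=['bus-station', 'garage'])
#   if stop is not None:
#       print(stop, distance)  # distance is treated as metres elsewhere in this module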
def current_location(device_id):
return get_last_lat_lon(device_id)
def avg_velocity(device_id):
'''
Returns the average velocity from the
    last 10 (if available) data points collected.
'''
return get_avg_velocity(device_id, 10)
def is_online(device_id):
'''
Returns True if the Bus was online in
the past 5 minutes.
'''
time = get_last_time_online(device_id)
if time:
min_time = datetime.utcnow() - timedelta(minutes=5)
return time > min_time
return False
def is_moving(device_id):
'''
Returns True if the Bus has moved
    more than 1000m in the last 5 minutes.
'''
lat, lon = get_last_lat_lon(device_id)
positions = list(get_last_positions(device_id))
if lat and lon and len(positions) > 1:
distances = get_distances([(lat, lon)], [positions[0], positions[-1]])
if distances:
distances = list(distances.values())
distance = abs(distances[0].meters - distances[-1].meters)
return distance >= 1000
return False
def is_parked(device_id):
'''
    Returns True if the Bus is not moving
    and is less than 250m from any station
or garage.
'''
if not is_moving(device_id):
lat, lon = current_location(device_id)
if lat and lon:
stop, distance = get_nearest_stop(lat, lon, ['bus-station', 'garage'])
if stop:
return distance < 250
return False
| {
"content_hash": "25fe533ddeb314450728bc50a84b9121",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 103,
"avg_line_length": 26.902439024390244,
"alnum_prop": 0.6246600181323663,
"repo_name": "alexandrevicenzi/tcc",
"id": "72ff4a43fe24b76c2b0d27da73153fe8999bb90c",
"size": "2231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bustracker/apps/core/services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1455"
},
{
"name": "HTML",
"bytes": "19682"
},
{
"name": "JavaScript",
"bytes": "4341"
},
{
"name": "Lua",
"bytes": "8825"
},
{
"name": "Python",
"bytes": "66097"
},
{
"name": "Shell",
"bytes": "572"
}
],
"symlink_target": ""
} |
import math
import ConfigParser
import os
import logging
import StringIO
import sys
import textwrap
import datetime
class configuration(object):
"""Configuration settings. Any user-specific values are read from an external file
and parsed by an instance of the built-in ConfigParser class"""
def __init__(self):
# doesn't do anything
pass
def configure(self, configFile=None, use_logging=True):
# get a logger
logger = logging.getLogger("configuration")
# this (and only this) logger needs to be configured immediately, otherwise it won't work
# we can't use the full user-supplied configuration mechanism in this particular case,
# because we haven't loaded it yet!
#
# so, just use simple console-only logging
logger.setLevel(logging.DEBUG) # this level is hardwired here - should change it to INFO
# add a handler & its formatter - will write only to console
ch = logging.StreamHandler()
logger.addHandler(ch)
formatter = logging.Formatter('%(asctime)s %(levelname)8s%(name)15s: %(message)s')
ch.setFormatter(formatter)
# first, set up some default configuration values
self.initial_configuration()
# next, load in any user-supplied configuration values
# that might over-ride the default values
self.user_configuration(configFile)
# now that we have loaded the user's configuration, we can load the
# separate config file for logging (the name of that file will be specified in the config file)
if use_logging:
self.logging_configuration()
# finally, set up all remaining configuration values
# that depend upon either default or user-supplied values
self.complete_configuration()
logger.debug('configuration completed')
def initial_configuration(self):
# to be called before loading any user specific values
# things to put here are
# 1. variables that the user cannot change
# 2. variables that need to be set before loading the user's config file
        self.UTTID_REGEX = '(.*)\..*'
def user_configuration(self,configFile=None):
# get a logger
logger = logging.getLogger("configuration")
# load and parse the provided configFile, if provided
if not configFile:
logger.warn('no user configuration file provided; using only built-in default settings')
return
# load the config file
try:
configparser = ConfigParser.ConfigParser()
configparser.readfp(open(configFile))
logger.debug('successfully read and parsed user configuration file %s' % configFile)
except:
logger.fatal('error reading user configuration file %s' % configFile)
raise
#work_dir must be provided before initialising other directories
self.work_dir = None
if self.work_dir == None:
try:
self.work_dir = configparser.get('Paths', 'work')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
if self.work_dir == None:
logger.critical('Paths:work has no value!')
raise Exception
# look for those items that are user-configurable, and get their values
# sptk_bindir= ....
# a list instead of a dict because OrderedDict is not available until 2.7
# and I don't want to import theano here just for that one class
# each entry is a tuple of (variable name, default value, section in config file, option name in config file)
#
# the type of the default value is important and controls the type that the corresponding
# variable will have
#
# to set a default value of 'undefined' use an empty string
# or the special value 'impossible', as appropriate
#
impossible_int=int(-99999)
impossible_float=float(-99999.0)
user_options = [
('work_dir', self.work_dir, 'Paths','work'),
('data_dir', '', 'Paths','data'),
('plot_dir', '', 'Paths','plot'),
('plot', False, 'Utility', 'plot'),
('profile', False, 'Utility', 'profile'),
('file_id_scp' , os.path.join(self.work_dir, 'data/file_id_list.scp') , 'Paths', 'file_id_list'),
('test_id_scp' , os.path.join(self.work_dir, 'data/test_id_list.scp') , 'Paths', 'test_id_list'),
('GV_dir' , os.path.join(self.work_dir, 'data/GV' ) , 'Paths', 'GV_dir'),
('in_stepw_dir' , os.path.join(self.work_dir, 'data/stepw'), 'Paths', 'in_stepw_dir'),
('in_mgc_dir' , os.path.join(self.work_dir, 'data/mgc') , 'Paths', 'in_mgc_dir'),
('in_lf0_dir' , os.path.join(self.work_dir, 'data/lf0') , 'Paths', 'in_lf0_dir'),
('in_bap_dir' , os.path.join(self.work_dir, 'data/bap') , 'Paths', 'in_bap_dir'),
('in_sp_dir' , os.path.join(self.work_dir, 'data/sp' ) , 'Paths', 'in_sp_dir'),
('in_seglf0_dir', os.path.join(self.work_dir, 'data/lf03') , 'Paths', 'in_seglf0_dir'),
## for glottHMM
('in_F0_dir' , os.path.join(self.work_dir, 'data/F0') , 'Paths', 'in_F0_dir'),
('in_Gain_dir' , os.path.join(self.work_dir, 'data/Gain') , 'Paths', 'in_Gain_dir'),
('in_HNR_dir' , os.path.join(self.work_dir, 'data/HNR') , 'Paths', 'in_HNR_dir'),
('in_LSF_dir' , os.path.join(self.work_dir, 'data/LSF') , 'Paths', 'in_LSF_dir'),
('in_LSFsource_dir' , os.path.join(self.work_dir, 'data/LSFsource') , 'Paths', 'in_LSFsource_dir'),
## for joint duration
('in_seq_dur_dir' , os.path.join(self.work_dir, 'data/S2S_dur') , 'Paths', 'in_seq_dur_dir'),
('in_dur_dir' , os.path.join(self.work_dir, 'data/dur') , 'Paths', 'in_dur_dir'),
('nn_norm_temp_dir', os.path.join(self.work_dir, 'data/step_hidden9'), 'Paths', 'nn_norm_temp_dir'),
('process_labels_in_work_dir', False, 'Labels', 'process_labels_in_work_dir'),
('label_style' , 'HTS' , 'Labels', 'label_style'),
('label_type' , 'state_align' , 'Labels', 'label_type'),
('in_label_align_dir' , os.path.join(self.work_dir, 'data/label_state_align') , 'Labels', 'label_align'),
('question_file_name' , os.path.join(self.work_dir, 'data/questions.hed') , 'Labels', 'question_file_name'),
('silence_pattern' , ['*-#+*'] , 'Labels', 'silence_pattern'),
('subphone_feats' , 'full' , 'Labels', 'subphone_feats'),
('additional_features', {} , 'Labels', 'additional_features'),
('xpath_file_name', os.path.join(self.work_dir, 'data/xml_labels/xpaths.txt'), 'Labels', 'xpath_file_name'),
('label_config_file', 'configuration/examplelabelconfigfile.py', 'Labels', 'label_config'),
('add_frame_features', True, 'Labels', 'add_frame_features'),
('fill_missing_values', False, 'Labels', 'fill_missing_values'),
('xpath_label_align_dir', os.path.join(self.work_dir, 'data/label_state_align'), 'Labels', 'xpath_label_align'),
('enforce_silence', False, 'Labels', 'enforce_silence'),
('remove_silence_using_binary_labels', False, 'Labels', 'remove_silence_using_binary_labels'),
('precompile_xpaths', True, 'Labels', 'precompile_xpaths'),
('iterate_over_frames', True, 'Labels', 'iterate_over_frames'),
('appended_input_dim' , 0 , 'Labels' , 'appended_input_dim'),
('buffer_size', 200000, 'Data', 'buffer_size'),
('train_file_number', impossible_int, 'Data','train_file_number'),
('valid_file_number', impossible_int, 'Data','valid_file_number'),
('test_file_number' , impossible_int, 'Data','test_file_number'),
('log_path', os.path.join(self.work_dir, 'log'), 'Paths', 'log_path'),
('log_file', '', 'Paths','log_file'),
('log_config_file', 'configuration/exampleloggingconfigfile.conf', 'Paths', 'log_config_file'),
('sptk_bindir', 'tools/bin/SPTK-3.9', 'Paths','sptk'),
('straight_bindir', 'tools/bin/straight', 'Paths','straight'),
('world_bindir', 'tools/bin/WORLD', 'Paths','world'),
('network_type' , 'RNN' , 'Architecture', 'network_type'),
('model_type' , 'DNN' , 'Architecture', 'model_type'),
('hidden_layer_type' , ['TANH', 'TANH', 'TANH', 'TANH', 'TANH', 'TANH'] , 'Architecture', 'hidden_layer_type'),
('output_layer_type' , 'LINEAR' , 'Architecture', 'output_layer_type'),
('sequential_training' , False , 'Architecture', 'sequential_training'),
('dropout_rate' , 0.0 , 'Architecture', 'dropout_rate'),
## some config variables for token projection DNN
('scheme' , 'stagewise' , 'Architecture', 'scheme'),
('index_to_project' , 0 , 'Architecture', 'index_to_project'),
('projection_insize' , 10000 , 'Architecture', 'projection_insize'),
('projection_outsize' , 10 , 'Architecture', 'projection_outsize'),
('initial_projection_distrib' , 'gaussian' , 'Architecture', 'initial_projection_distrib'),
('projection_weights_output_dir' , 'some_path', 'Architecture', 'projection_weights_output_dir'),
('layers_with_projection_input' , [0], 'Architecture', 'layers_with_projection_input'),
('projection_learning_rate_scaling' , 1.0, 'Architecture', 'projection_learning_rate_scaling'),
('learning_rate' , 0.0002 , 'Architecture', 'learning_rate'),
('l2_reg' , 0.00001 , 'Architecture', 'L2_regularization'),
('l1_reg' , 0.0 , 'Architecture', 'L1_regularization'),
('batch_size' , 16 , 'Architecture', 'batch_size'),
('training_epochs' , 25 , 'Architecture', 'training_epochs'),
('hidden_activation' , 'tanh' , 'Architecture', 'hidden_activation'),
('output_activation' , 'linear' , 'Architecture', 'output_activation'),
('hidden_layer_size' , [1024, 1024, 1024, 1024, 1024, 1024], 'Architecture', 'hidden_layer_size'),
('private_hidden_sizes' , [1024] , 'Architecture', 'private_hidden_sizes'),
('stream_weights' , [1.0] , 'Architecture', 'stream_weights'),
('private_l2_reg' , 0.00001 , 'Architecture', 'private_l2_reg'),
('warmup_epoch' , 5 , 'Architecture', 'warmup_epoch'),
('warmup_momentum' , 0.3 , 'Architecture', 'warmup_momentum'),
('momentum' , 0.9 , 'Architecture', 'momentum'),
('warmup_epoch' , 5 , 'Architecture', 'warmup_epoch'),
('mdn_component', 1 , 'Architecture', 'mdn_component'),
('var_floor', 0.01 , 'Architecture', 'var_floor'),
('beta_opt', False , 'Architecture', 'beta_opt'),
('eff_sample_size', 0.8 , 'Architecture', 'eff_sample_size'),
('mean_log_det', -100.0 , 'Architecture', 'mean_log_det'),
('start_from_trained_model', '_' , 'Architecture', 'start_from_trained_model'),
('use_rprop', 0 , 'Architecture', 'use_rprop'),
('mgc_dim' ,60 ,'Outputs','mgc'),
('dmgc_dim',60 * 3 ,'Outputs','dmgc'),
('vuv_dim' ,1 ,'Outputs','vuv'),
('lf0_dim' ,1 ,'Outputs','lf0'),
('dlf0_dim',1 * 3 ,'Outputs','dlf0'),
('bap_dim' ,25 ,'Outputs','bap'),
('dbap_dim',25 * 3 ,'Outputs','dbap'),
('cmp_dim' ,(60 * 3) + 1 + (1 * 3) + (25 * 3) ,'Outputs','cmp'),
('stepw_dim' , 55, 'Outputs', 'stepw_dim'),
('temp_sp_dim' , 1025, 'Outputs', 'temp_sp_dim'),
('seglf0_dim' , 7 , 'Outputs', 'seglf0_dim'),
('delta_win' , [-0.5, 0.0, 0.5] , 'Outputs', 'delta_win'),
('acc_win' , [1.0, -2.0, 1.0] , 'Outputs', 'acc_win'),
('do_MLPG' , True , 'Outputs', 'do_MLPG'),
## for GlottHMM
('F0_dim' ,1 ,'Outputs','F0'),
('dF0_dim',1 * 3 ,'Outputs','dF0'),
('Gain_dim' ,1 ,'Outputs','Gain'),
('dGain_dim',1 * 3 ,'Outputs','dGain'),
('HNR_dim' ,5 ,'Outputs','HNR'),
('dHNR_dim',5 * 3 ,'Outputs','dHNR'),
('LSF_dim' ,30 ,'Outputs','LSF'),
('dLSF_dim',30 * 3 ,'Outputs','dLSF'),
('LSFsource_dim' ,10 ,'Outputs','LSFsource'),
('dLSFsource_dim',10 * 3 ,'Outputs','dLSFsource'),
## for joint dur:-
('seq_dur_dim' ,1 ,'Outputs','seq_dur'),
('remove_silence_from_dur' , True , 'Outputs', 'remove_silence_from_dur'),
('dur_dim' ,5 ,'Outputs','dur'),
('dur_feature_type' , 'numerical' , 'Outputs', 'dur_feature_type'),
('output_feature_normalisation', 'MVN', 'Outputs', 'output_feature_normalisation'),
('multistream_switch' , False , 'Streams', 'multistream_switch'),
# ('use_private_hidden' , False, 'Streams', 'use_private_hidden'),
('output_features' , ['mgc','lf0', 'vuv', 'bap'], 'Streams', 'output_features'),
('gen_wav_features', ['mgc', 'bap', 'lf0'] , 'Streams', 'gen_wav_features'),
# ('stream_mgc_hidden_size' , 192 , 'Streams', 'stream_mgc_hidden_size'),
# ('stream_lf0_hidden_size' , 32 , 'Streams', 'stream_lf0_hidden_size'),
# ('stream_vuv_hidden_size' , 32 , 'Streams', 'stream_vuv_hidden_size'),
# ('stream_bap_hidden_size' , 128 , 'Streams', 'stream_bap_hidden_size'),
# ('stream_stepw_hidden_size' , 64 , 'Streams', 'stream_stepw_hidden_size'),
# ('stream_seglf0_hidden_size', 64 , 'Streams', 'stream_seglf0_hidden_size'),
# ('stream_cmp_hidden_size' , 256 , 'Streams', 'stream_cmp_hidden_size'), #when multi-stream is disabled, use this to indicate the final hidden layer size
#if this is also not provided, use the top common hidden layer size
## Glott HMM -- dummy values -- haven't used private streams:--
# ('stream_F0_hidden_size' , 192 , 'Streams', 'stream_F0_hidden_size'),
# ('stream_Gain_hidden_size' , 192 , 'Streams', 'stream_Gain_hidden_size'),
# ('stream_HNR_hidden_size' , 192 , 'Streams', 'stream_HNR_hidden_size'),
# ('stream_LSF_hidden_size' , 192 , 'Streams', 'stream_LSF_hidden_size'),
# ('stream_LSFsource_hidden_size' , 192 , 'Streams', 'stream_LSFsource_hidden_size'),
## joint dur -- dummy values -- haven't used private streams:--
# ('stream_dur_hidden_size' , 192 , 'Streams', 'stream_dur_hidden_size'),
# ('stream_sp_hidden_size' , 1024, 'Streams', 'stream_sp_hidden_size'),
# ('stream_weight_mgc' , 1.0, 'Streams', 'stream_weight_mgc'),
# ('stream_weight_lf0' , 3.0, 'Streams', 'stream_weight_lf0'),
# ('stream_weight_vuv' , 1.0, 'Streams', 'stream_weight_vuv'),
# ('stream_weight_bap' , 1.0, 'Streams', 'stream_weight_bap'),
# ('stream_weight_stepw' , 0.0, 'Streams', 'stream_weight_stepw'),
# ('stream_weight_seglf0', 1.0, 'Streams', 'stream_weight_seglf0'),
# ('stream_weight_sp' , 1.0, 'Streams', 'stream_weight_sp'),
## Glott HMM - unused?
# ('stream_weight_F0' , 1.0, 'Streams', 'stream_weight_F0'),
# ('stream_weight_Gain' , 1.0, 'Streams', 'stream_weight_Gain'),
# ('stream_weight_HNR' , 1.0, 'Streams', 'stream_weight_HNR'),
# ('stream_weight_LSF' , 1.0, 'Streams', 'stream_weight_LSF'),
# ('stream_weight_LSFsource' , 1.0, 'Streams', 'stream_weight_LSFsource'),
## dur - unused?
# ('stream_weight_dur' , 1.0, 'Streams', 'stream_weight_dur'),
# ('stream_lf0_lr' , 0.5, 'Streams', 'stream_lf0_lr'),
# ('stream_vuv_lr' , 0.5, 'Streams', 'stream_vuv_lr'),
('vocoder_type' ,'STRAIGHT' ,'Waveform' , 'vocoder_type'),
('sr' ,48000 ,'Waveform' , 'samplerate'),
('fl' ,4096 ,'Waveform' , 'framelength'),
('shift' ,1000 * 240 / 48000 ,'Waveform' , 'frameshift'),
('sp_dim' ,(4096 / 2) + 1 ,'Waveform' , 'sp_dim'),
            # fw_alpha: 'Bark' or 'ERB' allowing deduction of alpha, or an explicit float value (e.g. 0.77)
('fw_alpha' ,0.77 ,'Waveform' , 'fw_alpha'),
('pf_coef' ,1.4 ,'Waveform' , 'postfilter_coef'),
('co_coef' ,2047 ,'Waveform' , 'minimum_phase_order'),
('use_cep_ap' ,True ,'Waveform' , 'use_cep_ap'),
('do_post_filtering',True ,'Waveform' , 'do_post_filtering'),
('apply_GV' ,False ,'Waveform' , 'apply_GV'),
('test_synth_dir' ,'test_synthesis/wav' ,'Waveform' , 'test_synth_dir'),
('DurationModel' , False, 'Processes', 'DurationModel'),
('AcousticModel' , False, 'Processes', 'AcousticModel'),
('GenTestList' , False, 'Processes', 'GenTestList'),
('NORMLAB' , False, 'Processes', 'NORMLAB'),
('MAKEDUR' , False, 'Processes', 'MAKEDUR'),
('MAKECMP' , False, 'Processes', 'MAKECMP'),
('NORMCMP' , False, 'Processes', 'NORMCMP'),
('TRAINDNN' , False, 'Processes', 'TRAINDNN'),
('DNNGEN' , False, 'Processes', 'DNNGEN'),
('GENWAV' , False, 'Processes', 'GENWAV'),
('CALMCD' , False, 'Processes', 'CALMCD'),
('NORMSTEP' , False, 'Processes', 'NORMSTEP'),
('GENBNFEA' , False, 'Processes', 'GENBNFEA'),
('mgc_ext' , '.mgc' , 'Extensions', 'mgc_ext'),
('bap_ext' , '.bap' , 'Extensions', 'bap_ext'),
('lf0_ext' , '.lf0' , 'Extensions', 'lf0_ext'),
('cmp_ext' , '.cmp' , 'Extensions', 'cmp_ext'),
('lab_ext' , '.lab' , 'Extensions', 'lab_ext'),
('utt_ext' , '.utt' , 'Extensions', 'utt_ext'),
('stepw_ext' , '.stepw' , 'Extensions', 'stepw_ext'),
('sp_ext' , '.sp' , 'Extensions', 'sp_ext'),
## GlottHMM
('F0_ext' , '.F0' , 'Extensions', 'F0_ext'),
('Gain_ext' , '.Gain' , 'Extensions', 'Gain_ext'),
('HNR_ext' , '.HNR' , 'Extensions', 'HNR_ext'),
('LSF_ext' , '.LSF' , 'Extensions', 'LSF_ext'),
('LSFsource_ext' , '.LSFsource' , 'Extensions', 'LSFsource_ext'),
## joint dur
('dur_ext' , '.dur' , 'Extensions', 'dur_ext'),
]
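        # Illustrative sketch (added; the paths and values are assumptions): the
        # configFile parsed above is a plain ConfigParser INI file whose sections
        # and options mirror the tuples in user_options, e.g.
        #
        #   [Paths]
        #   work: /path/to/experiment
        #   data: /path/to/experiment/data
        #
        #   [Architecture]
        #   hidden_layer_size: [1024, 1024, 1024, 1024, 1024, 1024]
        #   training_epochs: 25
        #
        #   [Processes]
        #   TRAINDNN: True
        #   DNNGEN: True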
# this uses exec(...) which is potentially dangerous since arbitrary code could be executed
for (variable,default,section,option) in user_options:
value=None
try:
# first, look for a user-set value for this variable in the config file
value = configparser.get(section,option)
user_or_default='user'
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
# use default value, if there is one
if (default == None) or \
(default == '') or \
((type(default) == int) and (default == impossible_int)) or \
((type(default) == float) and (default == impossible_float)) :
logger.critical('%20s has no value!' % (section+":"+option) )
raise Exception
else:
value = default
user_or_default='default'
if type(default) == str:
exec('self.%s = "%s"' % (variable,value))
elif type(default) == int:
exec('self.%s = int(%s)' % (variable,value))
elif type(default) == float:
exec('self.%s = float(%s)' % (variable,value))
elif type(default) == bool:
exec('self.%s = bool(%s)' % (variable,value))
elif type(default) == list:
exec('self.%s = list(%s)' % (variable,value))
elif type(default) == dict:
exec('self.%s = dict(%s)' % (variable,value))
else:
logger.critical('Variable %s has default value of unsupported type %s',variable,type(default))
raise Exception('Internal error in configuration settings: unsupported default type')
logger.info('%20s has %7s value %s' % (section+":"+option,user_or_default,value) )
self.combined_feature_name = ''
for feature_name in self.output_features:
self.combined_feature_name += '_'
self.combined_feature_name += feature_name
self.combined_model_name = self.model_type
for hidden_type in self.hidden_layer_type:
self.combined_model_name += '_' + hidden_type
self.combined_model_name += '_' + self.output_layer_type
def complete_configuration(self):
# to be called after reading any user-specific settings
# because the values set here depend on those user-specific settings
# get a logger
logger = logging.getLogger("configuration")
# tools
self.SPTK = {
'X2X' : os.path.join(self.sptk_bindir,'x2x'),
'MERGE' : os.path.join(self.sptk_bindir,'merge'),
'BCP' : os.path.join(self.sptk_bindir,'bcp'),
'MLPG' : os.path.join(self.sptk_bindir,'mlpg'),
'MGC2SP' : os.path.join(self.sptk_bindir,'mgc2sp'),
'VSUM' : os.path.join(self.sptk_bindir,'vsum'),
'VSTAT' : os.path.join(self.sptk_bindir,'vstat'),
'SOPR' : os.path.join(self.sptk_bindir,'sopr'),
'VOPR' : os.path.join(self.sptk_bindir,'vopr'),
'FREQT' : os.path.join(self.sptk_bindir,'freqt'),
'C2ACR' : os.path.join(self.sptk_bindir,'c2acr'),
'MC2B' : os.path.join(self.sptk_bindir,'mc2b'),
'B2MC' : os.path.join(self.sptk_bindir,'b2mc')
}
# self.NND = {
# 'FEATN' : os.path.join(self.nndata_bindir,'FeatureNormalization'),
# 'LF0IP' : os.path.join(self.nndata_bindir,'F0Interpolation'),
# 'F0VUV' : os.path.join(self.nndata_bindir,'F0VUVComposition')
# }
self.STRAIGHT = {
'SYNTHESIS_FFT' : os.path.join(self.straight_bindir, 'synthesis_fft'),
'BNDAP2AP' : os.path.join(self.straight_bindir, 'bndap2ap'),
}
self.WORLD = {
'SYNTHESIS' : os.path.join(self.world_bindir, 'synth'),
'ANALYSIS' : os.path.join(self.world_bindir, 'analysis'),
}
# STILL TO DO - test that all the above tools exist and are executable
        ### dimensions for the output features
        ### key names must follow those in self.in_dimension_dict.
        ### If you do not want to include dynamic features, just use the same dimension as in self.in_dimension_dict
        ### if lf0 is one of the acoustic features, the out_dimension_dict must have an additional 'vuv' key
        ### a bit confusing
        ### need to control the order of the keys?
self.in_dir_dict = {} ##dimensions for each raw acoustic (output of NN) feature
self.out_dimension_dict = {}
self.in_dimension_dict = {}
self.private_hidden_sizes = []
self.stream_weights = []
logger.debug('setting up output features')
self.cmp_dim = 0
for feature_name in self.output_features:
logger.debug(' %s' % feature_name)
in_dimension = 0
out_dimension = 0
in_directory = ''
# current_stream_hidden_size = 0
# current_stream_weight = 0.0
# stream_lr_ratio = 0.0
if feature_name == 'mgc':
in_dimension = self.mgc_dim
out_dimension = self.dmgc_dim
in_directory = self.in_mgc_dir
# current_stream_hidden_size = self.stream_mgc_hidden_size
# current_stream_weight = self.stream_weight_mgc
elif feature_name == 'bap':
in_dimension = self.bap_dim
out_dimension = self.dbap_dim
in_directory = self.in_bap_dir
# current_stream_hidden_size = self.stream_bap_hidden_size
# current_stream_weight = self.stream_weight_bap
elif feature_name == 'lf0':
in_dimension = self.lf0_dim
out_dimension = self.dlf0_dim
in_directory = self.in_lf0_dir
# current_stream_hidden_size = self.stream_lf0_hidden_size
# current_stream_weight = self.stream_weight_lf0
elif feature_name == 'vuv':
out_dimension = 1
# current_stream_hidden_size = self.stream_vuv_hidden_size
# current_stream_weight = self.stream_weight_vuv
elif feature_name == 'stepw':
in_dimension = self.stepw_dim
out_dimension = self.stepw_dim
in_directory = self.in_stepw_dir
# current_stream_hidden_size = self.stream_stepw_hidden_size
# current_stream_weight = self.stream_weight_stepw
elif feature_name == 'sp':
in_dimension = self.sp_dim
out_dimension = self.sp_dim
in_directory = self.in_sp_dir
# current_stream_hidden_size = self.stream_sp_hidden_size
# current_stream_weight = self.stream_weight_sp
elif feature_name == 'seglf0':
in_dimension = self.seglf0_dim
out_dimension = self.seglf0_dim
in_directory = self.in_seglf0_dir
# current_stream_hidden_size = self.stream_seglf0_hidden_size
# current_stream_weight = self.stream_weight_seglf0
## for GlottHMM (start)
elif feature_name == 'F0':
in_dimension = self.F0_dim
out_dimension = self.dF0_dim
in_directory = self.in_F0_dir
# current_stream_hidden_size = self.stream_F0_hidden_size
# current_stream_weight = self.stream_weight_F0
elif feature_name == 'Gain':
in_dimension = self.Gain_dim
out_dimension = self.dGain_dim
in_directory = self.in_Gain_dir
# current_stream_hidden_size = self.stream_Gain_hidden_size
# current_stream_weight = self.stream_weight_Gain
elif feature_name == 'HNR':
in_dimension = self.HNR_dim
out_dimension = self.dHNR_dim
in_directory = self.in_HNR_dir
# current_stream_hidden_size = self.stream_HNR_hidden_size
# current_stream_weight = self.stream_weight_HNR
elif feature_name == 'LSF':
in_dimension = self.LSF_dim
out_dimension = self.dLSF_dim
in_directory = self.in_LSF_dir
# current_stream_hidden_size = self.stream_LSF_hidden_size
# current_stream_weight = self.stream_weight_LSF
elif feature_name == 'LSFsource':
in_dimension = self.LSFsource_dim
out_dimension = self.dLSFsource_dim
in_directory = self.in_LSFsource_dir
# current_stream_hidden_size = self.stream_LSFsource_hidden_size
# current_stream_weight = self.stream_weight_LSFsource
## for GlottHMM (end)
## for joint dur (start)
elif feature_name == 'dur':
in_dimension = self.dur_dim
out_dimension = self.dur_dim
in_directory = self.in_dur_dir
# current_stream_hidden_size = self.stream_dur_hidden_size
# current_stream_weight = self.stream_weight_dur
## for joint dur (end)
else:
logger.critical('%s feature is not supported right now. Please change the configuration.py to support it' %(feature_name))
raise
logger.info(' in_dimension: %d' % in_dimension)
logger.info(' out_dimension : %d' % out_dimension)
logger.info(' in_directory : %s' % in_directory)
# logger.info(' current_stream_hidden_size: %d' % current_stream_hidden_size)
# logger.info(' current_stream_weight: %d' % current_stream_weight)
if in_dimension > 0:
self.in_dimension_dict[feature_name] = in_dimension
if in_directory == '':
logger.critical('please provide the path for %s feature' %(feature_name))
raise
if out_dimension < in_dimension:
logger.critical('the dimensionality setting for %s feature is not correct!' %(feature_name))
raise
self.in_dir_dict[feature_name] = in_directory
if out_dimension > 0:
self.out_dimension_dict[feature_name] = out_dimension
# if (current_stream_hidden_size <= 0 or current_stream_weight <= 0.0) and self.multistream_switch:
                # logger.critical('the hidden layer size or stream weight is not correctly set for %s feature' %(feature_name))
# raise
# if self.multistream_switch:
# self.private_hidden_sizes.append(current_stream_hidden_size)
# self.stream_weights.append(current_stream_weight)
self.cmp_dim += out_dimension
# if not self.multistream_switch:
# self.private_hidden_sizes = []
# if self.stream_cmp_hidden_size > 0:
# self.private_hidden_sizes.append(self.stream_cmp_hidden_size)
# else:
# self.private_hidden_sizes.append(self.hidden_layer_size[-1]) ## use the same number of hidden layers if multi-stream is not supported
# self.stream_weights = []
# self.stream_weights.append(1.0)
self.stream_lr_weights = []
self.multistream_outs = []
if self.multistream_switch:
for feature_name in self.out_dimension_dict.keys():
self.multistream_outs.append(self.out_dimension_dict[feature_name])
# stream_lr_ratio = 0.5
# if feature_name == 'lf0':
# stream_lr_ratio = self.stream_lf0_lr
# if feature_name == 'vuv':
# stream_lr_ratio = self.stream_vuv_lr
# self.stream_lr_weights.append(stream_lr_ratio)
else:
            ### the new cmp is not the HTS-style cmp: it includes all the features, i.e. those for the main task and those for any additional tasks
self.multistream_outs.append(self.cmp_dim)
# self.stream_lr_weights.append(0.5)
logger.info('multistream dimensions: %s' %(self.multistream_outs))
# to check whether all the input and output features' file extensions are here
self.file_extension_dict = {}
self.file_extension_dict['mgc'] = self.mgc_ext
self.file_extension_dict['lf0'] = self.lf0_ext
self.file_extension_dict['bap'] = self.bap_ext
self.file_extension_dict['stepw'] = self.stepw_ext
self.file_extension_dict['cmp'] = self.cmp_ext
self.file_extension_dict['seglf0'] = self.lf0_ext
## gHMM:
self.file_extension_dict['F0'] = self.F0_ext
self.file_extension_dict['Gain'] = self.Gain_ext
self.file_extension_dict['HNR'] = self.HNR_ext
self.file_extension_dict['LSF'] = self.LSF_ext
self.file_extension_dict['LSFsource'] = self.LSFsource_ext
## joint dur
self.file_extension_dict['dur'] = self.dur_ext
        ## hyper parameters for the DNN. These need to be set by the user, as they depend on the architecture
self.hyper_params = { 'learning_rate' : '0.0002', ###
'l2_reg' : '0.00001',
'l1_reg' : '0.0',
'batch_size' : '16',
'training_epochs' : '25',
'early_stop_epochs' : '5',
'hidden_activation' : 'tanh',
'output_activation' : 'linear',
'do_pretraining' : False,
'pretraining_epochs' : '10',
'pretraining_lr' : '0.0001'}
self.hyper_params['warmup_momentum'] = self.warmup_momentum
self.hyper_params['momentum'] = self.momentum
self.hyper_params['warmup_epoch'] = self.warmup_epoch
self.hyper_params['learning_rate'] = self.learning_rate
self.hyper_params['l2_reg'] = self.l2_reg
self.hyper_params['l1_reg'] = self.l1_reg
self.hyper_params['batch_size'] = self.batch_size
self.hyper_params['training_epochs'] = self.training_epochs
self.hyper_params['hidden_activation'] = self.hidden_activation
self.hyper_params['output_activation'] = self.output_activation
self.hyper_params['hidden_layer_size'] = self.hidden_layer_size
self.hyper_params['warmup_epoch'] = self.warmup_epoch
self.hyper_params['use_rprop'] = self.use_rprop
# self.hyper_params['private_hidden_sizes'] = self.private_hidden_sizes
# self.hyper_params['stream_weights'] = self.stream_weights
# self.hyper_params['private_l2_reg'] = self.private_l2_reg
# self.hyper_params['stream_lr_weights'] = self.stream_lr_weights
# self.hyper_params['use_private_hidden'] = self.use_private_hidden
self.hyper_params['model_type'] = self.model_type
self.hyper_params['hidden_layer_type'] = self.hidden_layer_type
self.hyper_params['index_to_project'] = self.index_to_project
self.hyper_params['projection_insize'] = self.projection_insize
self.hyper_params['projection_outsize'] = self.projection_outsize
self.hyper_params['initial_projection_distrib'] = self.initial_projection_distrib
self.hyper_params['layers_with_projection_input'] = self.layers_with_projection_input
self.hyper_params['projection_learning_rate_scaling'] = self.projection_learning_rate_scaling
self.hyper_params['sequential_training'] = self.sequential_training
self.hyper_params['dropout_rate'] = self.dropout_rate
for hidden_type in self.hidden_layer_type:
if 'LSTM' in hidden_type or 'RNN' in hidden_type or 'GRU' in hidden_type:
self.hyper_params['sequential_training'] = self.sequential_training
#To be recorded in the logging file for reference
for param_name in self.hyper_params.keys():
logger.info('%s : %s' %(param_name, str(self.hyper_params[param_name])))
# input files
# set up the label processing
# currently must be one of two styles
if self.label_style == 'HTS':
# xpath_file_name is now obsolete - to remove
self.xpath_file_name=None
elif self.label_style == 'HTS_duration':
self.xpath_file_name=None
elif self.label_style == 'composed':
self.question_file_name=None
else:
logger.critical('unsupported label style requested: %s' % self.label_style)
raise Exception
def logging_configuration(self):
# get a logger
logger = logging.getLogger("configuration")
# logging configuration, see here for format description
# https://docs.python.org/2/library/logging.config.html#logging-config-fileformat
        # what we really want to do is this dictionary-based configuration, but it's only available from Python 2.7 onwards
# logging.config.dictConfig(cfg.logging_configuration)
# so we will settle for this file-based configuration procedure instead
try:
# open the logging configuration file
fp = open(self.log_config_file,'r')
logger.debug("loading logging configuration from %s" % self.log_config_file)
# load the logging configuration file into a string
config_string = fp.read()
fp.close()
except ValueError:
# this means that cfg.log_config_file does not exist and that no default was provided
# NOTE: currently this will never run
logging.warn('no logging configuration file provided - using default (console only, DEBUG level)')
# set up a default level and default handlers
# first, get the root logger - all other loggers will inherit its configuration
            rootlogger = logging.getLogger("")
# default logging level is DEBUG (a highly-verbose level)
rootlogger.setLevel(logging.DEBUG)
# add a handler to write to console
ch = logging.StreamHandler()
rootlogger.addHandler(ch)
# and a formatter
formatter = logging.Formatter('%(asctime)s %(levelname)8s%(name)15s: %(message)s')
ch.setFormatter(formatter)
except IOError:
# this means that open(...) threw an error
logger.critical('could not load logging configuration file %s' % self.log_config_file)
raise
else:
# inject the config lines for the file handler, now that we know the name of the file it will write to
if not os.path.exists(self.log_path):
os.makedirs(self.log_path, 0755)
log_file_name = '%s_%s_%d_%d_%d_%d_%f_%s.log' %(self.combined_model_name, self.combined_feature_name, self.train_file_number,
self.cmp_dim, len(self.hidden_layer_size),
self.hidden_layer_size[-1], self.learning_rate,
datetime.datetime.now().strftime("%I_%M%p_%B_%d_%Y"))
self.log_file = os.path.join(self.log_path, log_file_name)
to_inject="""
[handler_file]
class=FileHandler
formatter=file
args=('"""+self.log_file+"""', 'w')
"""
# config file format doesn't allow leading white space on lines, so remove it with dedent
config_string = config_string + textwrap.dedent(to_inject)
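            # Illustrative sketch (path is hypothetical, not from the original source): after
            # dedent the injected section is valid fileConfig syntax with no leading whitespace,
            # e.g.
            #   [handler_file]
            #   class=FileHandler
            #   formatter=file
            #   args=('/some/log/dir/run.log', 'w')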
try:
# pass that string as a filehandle
fh = StringIO.StringIO(config_string)
logging.config.fileConfig(fh)
fh.close()
logger.info("logging is now fully configured")
except IOError:
logger.critical('could not configure logging: perhaps log file path is wrong?')
sys.exit(1)
| {
"content_hash": "26b5652008322a1b2a734d70b01f40ae",
"timestamp": "",
"source": "github",
"line_count": 836,
"max_line_length": 169,
"avg_line_length": 49.938995215311,
"alnum_prop": 0.5233179237826056,
"repo_name": "ligz07/merlin",
"id": "4786e9e98c26f17b7cfead5a56fa044aaf9168a6",
"size": "43809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/configuration/configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "10290"
},
{
"name": "Python",
"bytes": "1296946"
},
{
"name": "Scheme",
"bytes": "5478"
},
{
"name": "Shell",
"bytes": "107422"
}
],
"symlink_target": ""
} |
from ginga import GingaPlugin
from ginga.misc import Bunch
from ginga.gw import Widgets, ColorBar
class Colorbar(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(Colorbar, self).__init__(fv)
self._image = None
self.channel = {}
self.active = None
self.info = None
prefs = self.fv.get_preferences()
self.settings = prefs.createCategory('plugin_ColorBar')
#self.settings.addDefaults()
self.settings.load(onError='silent')
fv.add_callback('add-channel', self.add_channel_cb)
fv.add_callback('delete-channel', self.delete_channel_cb)
def build_gui(self, container):
cbar = ColorBar.ColorBar(self.logger)
cbar.set_cmap(self.fv.cm)
cbar.set_imap(self.fv.im)
cbar_w = Widgets.wrap(cbar)
self.colorbar = cbar
self.fv.add_callback('channel-change', self.change_cbar, cbar)
cbar.add_callback('motion', self.cbar_value_cb)
fr = Widgets.Frame()
fr.set_border_width(0)
fr.set_widget(cbar_w)
container.add_widget(fr, stretch=0)
def add_channel_cb(self, viewer, channel):
settings = channel.settings
settings.getSetting('cuts').add_callback('set',
self.change_range_cb, channel.fitsimage, self.colorbar)
chname = channel.name
info = Bunch.Bunch(chname=chname, channel=channel)
self.channel[chname] = info
fi = channel.fitsimage
rgbmap = fi.get_rgbmap()
rgbmap.add_callback('changed', self.rgbmap_cb, channel)
def delete_channel_cb(self, viewer, channel):
chname = channel.name
self.logger.debug("deleting channel %s" % (chname))
self.active = None
self.info = None
del self.channel[chname]
def _match_cmap(self, fitsimage, colorbar):
"""
        Helper method to change the ColorBar to match the cut levels or
colormap used in a ginga ImageView.
"""
rgbmap = fitsimage.get_rgbmap()
loval, hival = fitsimage.get_cut_levels()
colorbar.set_range(loval, hival)
        # If we are sharing a ColorBar for all channels, then
        # change the ColorBar's rgbmap to match this channel's rgbmap
colorbar.set_rgbmap(rgbmap)
def change_cbar(self, viewer, channel, cbar):
self._match_cmap(channel.fitsimage, cbar)
# def focus_cb(self, viewer, channel):
# chname = channel.name
# if self.active != chname:
# self.active = chname
# self.info = self.channel[self.active]
# image = channel.fitsimage.get_image()
# if image is None:
# return
# # install rgbmap
def change_range_cb(self, setting, value, fitsimage, cbar):
"""
This method is called when the cut level values (lo/hi) have
changed in a channel. We adjust them in the ColorBar to match.
"""
if cbar is None:
return
if fitsimage != self.fv.getfocus_fitsimage():
# values have changed in a channel that doesn't have the focus
return False
loval, hival = value
cbar.set_range(loval, hival)
def cbar_value_cb(self, cbar, value, event):
"""
This method is called when the user moves the mouse over the
ColorBar. It displays the value of the mouse position in the
ColorBar in the Readout (if any).
"""
channel = self.fv.get_channelInfo()
if channel is None:
return
readout = channel.extdata.get('readout', None)
if readout is not None:
maxv = readout.maxv
text = "Value: %-*.*s" % (maxv, maxv, value)
readout.set_text(text)
def rgbmap_cb(self, rgbmap, channel):
"""
This method is called when the RGBMap is changed. We update
the ColorBar to match.
"""
fitsimage = channel.fitsimage
if fitsimage != self.fv.getfocus_fitsimage():
return False
if self.colorbar is not None:
self.change_cbar(self.fv, channel, self.colorbar)
def start(self):
## names = self.fv.get_channelNames()
## for name in names:
## channel = self.fv.get_channelInfo(name)
## self.add_channel_cb(self.fv, channel)
pass
def __str__(self):
return 'colorbar'
#END
| {
"content_hash": "869d75fc2629ecd1632792727655db55",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 85,
"avg_line_length": 33.080882352941174,
"alnum_prop": 0.5941320293398533,
"repo_name": "Cadair/ginga",
"id": "ff0faba6456990df9e91ac7cce887992f55040e9",
"size": "4752",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ginga/misc/plugins/Colorbar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1939"
},
{
"name": "JavaScript",
"bytes": "8724"
},
{
"name": "Python",
"bytes": "2458171"
}
],
"symlink_target": ""
} |
'''Load fonts and render text.
This is a fairly low-level interface to text rendering. Obtain a font using
`load`::
from pyglet import font
arial = font.load('Arial', 14, bold=True, italic=False)
pyglet will load any system-installed fonts. You can add additional fonts
(for example, from your program resources) using `add_file` or
`add_directory`.
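For example, a bundled font file can be registered and then loaded by its face
name (the file and face names below are only illustrative)::
    font.add_file('action_man.ttf')
    action_man = font.load('Action Man', 16)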
Obtain a list of `Glyph` objects for a string of text using the `Font`
object::
text = 'Hello, world!'
glyphs = arial.get_glyphs(text)
The most efficient way to render these glyphs is with a `GlyphString`::
glyph_string = GlyphString(text, glyphs)
glyph_string.draw()
There are also a variety of methods in both `Font` and
`GlyphString` to facilitate word-wrapping.
A convenient way to render a string of text is with a `Text`::
text = Text(font, text)
text.draw()
See the `pyglet.font.base` module for documentation on the base classes used
by this package.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import sys
import os
import math
import weakref
import pyglet
from pyglet.gl import *
from pyglet import gl
from pyglet import image
from pyglet import window
class GlyphString(object):
'''An immutable string of glyphs that can be rendered quickly.
This class is ideal for quickly rendering single or multi-line strings
of text that use the same font. To wrap text using a glyph string,
call `get_break_index` to find the optimal breakpoint for each line,
    then repeatedly call `draw` for each breakpoint.
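    A minimal wrapping loop might look like this (a sketch only: it assumes
    ``glyph_string`` is a `GlyphString` and ``width`` is the wrap width in
    pixels, and it ignores the vertical offset you would apply between lines)::
        start = 0
        while start < len(glyph_string.text):
            end = glyph_string.get_break_index(start, width)
            if end == start:
                break
            glyph_string.draw(start, end)
            start = end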
:deprecated: Use `pyglet.text.layout` classes.
'''
def __init__(self, text, glyphs, x=0, y=0):
'''Create a glyph string.
The `text` string is used to determine valid breakpoints; all glyphs
must have already been determined using
`pyglet.font.base.Font.get_glyphs`. The string
will be positioned with the baseline of the left-most glyph at the
given coordinates.
:Parameters:
`text` : str or unicode
String to represent.
`glyphs` : list of `pyglet.font.base.Glyph`
Glyphs representing `text`.
`x` : float
X coordinate of the left-side bearing of the left-most glyph.
`y` : float
Y coordinate of the baseline.
'''
# Create an interleaved array in GL_T2F_V3F format and determine
# state changes required.
lst = []
texture = None
self.text = text
self.states = []
self.cumulative_advance = [] # for fast post-string breaking
state_from = 0
state_length = 0
for i, glyph in enumerate(glyphs):
if glyph.owner != texture:
if state_length:
self.states.append((state_from, state_length, texture))
texture = glyph.owner
state_from = i
state_length = 0
state_length += 1
t = glyph.tex_coords
lst += [t[0], t[1], t[2], 1.,
x + glyph.vertices[0], y + glyph.vertices[1], 0., 1.,
t[3], t[4], t[5], 1.,
x + glyph.vertices[2], y + glyph.vertices[1], 0., 1.,
t[6], t[7], t[8], 1.,
x + glyph.vertices[2], y + glyph.vertices[3], 0., 1.,
t[9], t[10], t[11], 1.,
x + glyph.vertices[0], y + glyph.vertices[3], 0., 1.]
x += glyph.advance
self.cumulative_advance.append(x)
self.states.append((state_from, state_length, texture))
self.array = (c_float * len(lst))(*lst)
self.width = x
def get_break_index(self, from_index, width):
'''Find a breakpoint within the text for a given width.
Returns a valid breakpoint after `from_index` so that the text
between `from_index` and the breakpoint fits within `width` pixels.
        This method uses precomputed cumulative glyph widths to give a quick
        answer, and so is much faster than
`pyglet.font.base.Font.get_glyphs_for_width`.
:Parameters:
`from_index` : int
Index of text to begin at, or 0 for the beginning of the
string.
`width` : float
Maximum width to use.
:rtype: int
:return: the index of text which will be used as the breakpoint, or
`from_index` if there is no valid breakpoint.
'''
to_index = from_index
if from_index >= len(self.text):
return from_index
if from_index:
width += self.cumulative_advance[from_index-1]
for i, (c, w) in enumerate(
zip(self.text[from_index:],
self.cumulative_advance[from_index:])):
if c in u'\u0020\u200b':
to_index = i + from_index + 1
if c == '\n':
return i + from_index + 1
if w > width:
return to_index
return to_index
def get_subwidth(self, from_index, to_index):
'''Return the width of a slice of this string.
:Parameters:
`from_index` : int
The start index of the string to measure.
`to_index` : int
The end index (exclusive) of the string to measure.
:rtype: float
'''
if to_index <= from_index:
return 0
width = self.cumulative_advance[to_index-1]
if from_index:
width -= self.cumulative_advance[from_index-1]
return width
def draw(self, from_index=0, to_index=None):
'''Draw a region of the glyph string.
Assumes texture state is enabled. To enable the texture state::
from pyglet.gl import *
glEnable(GL_TEXTURE_2D)
:Parameters:
`from_index` : int
Start index of text to render.
`to_index` : int
End index (exclusive) of text to render.
'''
if from_index >= len(self.text) or \
from_index == to_index or \
not self.text:
return
# XXX Safe to assume all required textures will use same blend state I
# think. (otherwise move this into loop)
self.states[0][2].apply_blend_state()
if from_index:
glPushMatrix()
glTranslatef(-self.cumulative_advance[from_index-1], 0, 0)
if to_index is None:
to_index = len(self.text)
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
glInterleavedArrays(GL_T4F_V4F, 0, self.array)
for state_from, state_length, texture in self.states:
if state_from + state_length < from_index:
continue
state_from = max(state_from, from_index)
state_length = min(state_length, to_index - state_from)
if state_length <= 0:
break
glBindTexture(GL_TEXTURE_2D, texture.id)
glDrawArrays(GL_QUADS, state_from * 4, state_length * 4)
glPopClientAttrib()
if from_index:
glPopMatrix()
class _TextZGroup(pyglet.graphics.Group):
z = 0
def set_state(self):
glTranslatef(0, 0, self.z)
def unset_state(self):
glTranslatef(0, 0, -self.z)
class Text(object):
'''Simple displayable text.
This is a convenience class for rendering strings of text. It takes
care of caching the vertices so the text can be rendered every frame with
little performance penalty.
Text can be word-wrapped by specifying a `width` to wrap into. If the
width is not specified, it gives the width of the text as laid out.
:Ivariables:
`x` : int
X coordinate of the text
`y` : int
Y coordinate of the text
:deprecated: Use `pyglet.text.Label`.
'''
# Alignment constants
#: Align the left edge of the text to the given X coordinate.
LEFT = 'left'
#: Align the horizontal center of the text to the given X coordinate.
CENTER = 'center'
#: Align the right edge of the text to the given X coordinate.
RIGHT = 'right'
#: Align the bottom of the descender of the final line of text with the
#: given Y coordinate.
BOTTOM = 'bottom'
#: Align the baseline of the first line of text with the given Y
#: coordinate.
BASELINE = 'baseline'
#: Align the top of the ascender of the first line of text with the given
#: Y coordinate.
TOP = 'top'
# None: no multiline
# 'width': multiline, wrapped to width
# 'multiline': multiline, no wrap
_wrap = None
# Internal bookkeeping for wrap only.
_width = None
def __init__(self, font, text='', x=0, y=0, z=0, color=(1,1,1,1),
width=None, halign=LEFT, valign=BASELINE):
'''Create displayable text.
:Parameters:
`font` : `Font`
Font to render the text in.
`text` : str
Initial string to render.
`x` : float
X coordinate of the left edge of the text.
`y` : float
Y coordinate of the baseline of the text. If the text is
word-wrapped, this refers to the first line of text.
`z` : float
Z coordinate of the text plane.
`color` : 4-tuple of float
Color to render the text in. Alpha values can be specified
in the fourth component.
`width` : float
Width to limit the rendering to. Text will be word-wrapped
if necessary.
`halign` : str
Alignment of the text. See `Text.halign` for details.
`valign` : str
Controls positioning of the text based off the y coordinate.
One of BASELINE, BOTTOM, CENTER or TOP. Defaults to BASELINE.
'''
multiline = False
if width is not None:
self._width = width
self._wrap = 'width'
multiline = True
elif '\n' in text:
self._wrap = 'multiline'
multiline = True
self._group = _TextZGroup()
self._document = pyglet.text.decode_text(text)
self._layout = pyglet.text.layout.TextLayout(self._document,
width=width,
multiline=multiline,
wrap_lines=width is not None,
dpi=font.dpi,
group=self._group)
self._layout.begin_update()
if self._wrap == 'multiline':
self._document.set_style(0, len(text), dict(wrap=False))
self.font = font
self.color = color
self._x = x
self.y = y
self.z = z
self.width = width
self.halign = halign
self.valign = valign
self._update_layout_halign()
self._layout.end_update()
def _get_font(self):
return self._font
def _set_font(self, font):
self._font = font
self._layout.begin_update()
self._document.set_style(0, len(self._document.text), {
'font_name': font.name,
'font_size': font.size,
'bold': font.bold,
'italic': font.italic,
})
self._layout._dpi = font.dpi
self._layout.end_update()
font = property(_get_font, _set_font)
def _get_color(self):
color = self._document.get_style('color')
if color is None:
return (1., 1., 1., 1.)
return tuple([c/255. for c in color])
def _set_color(self, color):
color = [int(c * 255) for c in color]
self._document.set_style(0, len(self._document.text), {
'color': color,
})
color = property(_get_color, _set_color)
def _update_layout_halign(self):
if self._layout.multiline:
# TextLayout has a different interpretation of halign that doesn't
# consider the width to be a special factor; here we emulate the
# old behaviour by fudging the layout x value.
if self._layout.anchor_x == 'left':
self._layout.x = self.x
elif self._layout.anchor_x == 'center':
self._layout.x = self.x + self._layout.width - \
self._layout.content_width // 2
elif self._layout.anchor_x == 'right':
self._layout.x = self.x + 2 * self._layout.width - \
self._layout.content_width
else:
self._layout.x = self.x
def _get_x(self):
return self._x
def _set_x(self, x):
self._x = x
self._update_layout_halign()
x = property(_get_x, _set_x)
def _get_y(self):
return self._layout.y
def _set_y(self, y):
self._layout.y = y
y = property(_get_y, _set_y)
def _get_z(self):
return self._group.z
def _set_z(self, z):
self._group.z = z
z = property(_get_z, _set_z)
def _update_wrap(self):
if self._width is not None:
self._wrap = 'width'
elif '\n' in self.text:
self._wrap = 'multiline'
self._layout.begin_update()
        if self._wrap is None:
self._layout.multiline = False
elif self._wrap == 'width':
self._layout.width = self._width
self._layout.multiline = True
self._document.set_style(0, len(self.text), dict(wrap=True))
elif self._wrap == 'multiline':
self._layout.multiline = True
self._document.set_style(0, len(self.text), dict(wrap=False))
self._update_layout_halign()
self._layout.end_update()
def _get_width(self):
if self._wrap == 'width':
return self._layout.width
else:
return self._layout.content_width
def _set_width(self, width):
self._width = width
self._layout._wrap_lines_flag = width is not None
self._update_wrap()
width = property(_get_width, _set_width,
doc='''Width of the text.
When set, this enables word-wrapping to the specified width.
Otherwise, the width of the text as it will be rendered can be
determined.
:type: float
''')
def _get_height(self):
return self._layout.content_height
height = property(_get_height,
doc='''Height of the text.
This property is the ascent minus the descent of the font, unless
there is more than one line of word-wrapped text, in which case
the height takes into account the line leading. Read-only.
:type: float
''')
def _get_text(self):
return self._document.text
def _set_text(self, text):
self._document.text = text
self._update_wrap()
text = property(_get_text, _set_text,
doc='''Text to render.
The glyph vertices are only recalculated as needed, so multiple
changes to the text can be performed with no performance penalty.
:type: str
''')
def _get_halign(self):
return self._layout.anchor_x
def _set_halign(self, halign):
self._layout.anchor_x = halign
self._update_layout_halign()
halign = property(_get_halign, _set_halign,
doc='''Horizontal alignment of the text.
The text is positioned relative to `x` and `width` according to this
property, which must be one of the alignment constants `LEFT`,
`CENTER` or `RIGHT`.
:type: str
''')
def _get_valign(self):
return self._layout.anchor_y
def _set_valign(self, valign):
self._layout.anchor_y = valign
valign = property(_get_valign, _set_valign,
doc='''Vertical alignment of the text.
The text is positioned relative to `y` according to this property,
which must be one of the alignment constants `BOTTOM`, `BASELINE`,
`CENTER` or `TOP`.
:type: str
''')
def _get_leading(self):
return self._document.get_style('leading') or 0
def _set_leading(self, leading):
self._document.set_style(0, len(self._document.text), {
'leading': leading,
})
leading = property(_get_leading, _set_leading,
doc='''Vertical space between adjacent lines, in pixels.
:type: int
''')
def _get_line_height(self):
return self._font.ascent - self._font.descent + self.leading
def _set_line_height(self, line_height):
self.leading = line_height - (self._font.ascent - self._font.descent)
line_height = property(_get_line_height, _set_line_height,
doc='''Vertical distance between adjacent baselines, in pixels.
:type: int
''')
def draw(self):
self._layout.draw()
if not getattr(sys, 'is_epydoc', False):
if sys.platform == 'darwin':
if pyglet.options['darwin_cocoa']:
from pyglet.font.quartz import QuartzFont
_font_class = QuartzFont
else:
from pyglet.font.carbon import CarbonFont
_font_class = CarbonFont
elif sys.platform in ('win32', 'cygwin'):
if pyglet.options['font'][0] == 'win32':
from pyglet.font.win32 import Win32Font
_font_class = Win32Font
elif pyglet.options['font'][0] == 'gdiplus':
from pyglet.font.win32 import GDIPlusFont
_font_class = GDIPlusFont
else:
assert False, 'Unknown font driver'
else:
from pyglet.font.freetype import FreeTypeFont
_font_class = FreeTypeFont
def load(name=None, size=None, bold=False, italic=False, dpi=None):
'''Load a font for rendering.
:Parameters:
`name` : str, or list of str
Font family, for example, "Times New Roman". If a list of names
is provided, the first one matching a known font is used. If no
font can be matched to the name(s), a default font is used. In
pyglet 1.1, the name may be omitted.
`size` : float
Size of the font, in points. The returned font may be an exact
match or the closest available. In pyglet 1.1, the size may be
omitted, and defaults to 12pt.
`bold` : bool
If True, a bold variant is returned, if one exists for the given
family and size.
`italic` : bool
If True, an italic variant is returned, if one exists for the given
family and size.
`dpi` : float
The assumed resolution of the display device, for the purposes of
determining the pixel size of the font. Defaults to 96.
:rtype: `Font`
'''
# Arbitrary default size
if size is None:
size = 12
if dpi is None:
dpi = 96
# Find first matching name
if type(name) in (tuple, list):
for n in name:
if _font_class.have_font(n):
name = n
break
else:
name = None
# Locate or create font cache
shared_object_space = gl.current_context.object_space
if not hasattr(shared_object_space, 'pyglet_font_font_cache'):
shared_object_space.pyglet_font_font_cache = \
weakref.WeakValueDictionary()
shared_object_space.pyglet_font_font_hold = []
font_cache = shared_object_space.pyglet_font_font_cache
font_hold = shared_object_space.pyglet_font_font_hold
# Look for font name in font cache
descriptor = (name, size, bold, italic, dpi)
if descriptor in font_cache:
return font_cache[descriptor]
# Not in cache, create from scratch
font = _font_class(name, size, bold=bold, italic=italic, dpi=dpi)
# Save parameters for new-style layout classes to recover
font.name = name
font.size = size
font.bold = bold
font.italic = italic
font.dpi = dpi
# Cache font in weak-ref dictionary to avoid reloading while still in use
font_cache[descriptor] = font
# Hold onto refs of last three loaded fonts to prevent them being
# collected if momentarily dropped.
del font_hold[3:]
font_hold.insert(0, font)
return font
def add_file(font):
'''Add a font to pyglet's search path.
In order to load a font that is not installed on the system, you must
call this method to tell pyglet that it exists. You can supply
either a filename or any file-like object.
The font format is platform-dependent, but is typically a TrueType font
file containing a single font face. Note that to load this file after
adding it you must specify the face name to `load`, not the filename.
:Parameters:
`font` : str or file
Filename or file-like object to load fonts from.
'''
if type(font) in (str, unicode):
font = open(font, 'rb')
if hasattr(font, 'read'):
font = font.read()
_font_class.add_font_data(font)
def add_directory(dir):
'''Add a directory of fonts to pyglet's search path.
This function simply calls `add_file` for each file with a ``.ttf``
extension in the given directory. Subdirectories are not searched.
:Parameters:
`dir` : str
Directory that contains font files.
'''
for file in os.listdir(dir):
if file[-4:].lower() == '.ttf':
add_file(os.path.join(dir, file))
| {
"content_hash": "11d8548c326fa9a30f67211b1a3b96f6",
"timestamp": "",
"source": "github",
"line_count": 666,
"max_line_length": 79,
"avg_line_length": 32.55705705705706,
"alnum_prop": 0.5730295623299358,
"repo_name": "niklaskorz/pyglet",
"id": "8739688047c7f64437b3256ab8cd3abf48533c9a",
"size": "23399",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyglet/font/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "6751"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "6365413"
},
{
"name": "Shell",
"bytes": "222"
}
],
"symlink_target": ""
} |
import os
import sys
from setuptools import find_packages, setup
VERSION = "2.0.0"
LONG_DESCRIPTION = """
.. image:: http://pinaxproject.com/pinax-design/patches/pinax-forums.svg
:target: https://pypi.python.org/pypi/pinax-forums/
============
Pinax Forums
============
.. image:: https://img.shields.io/pypi/v/pinax-forums.svg
:target: https://pypi.python.org/pypi/pinax-forums/
\
.. image:: https://img.shields.io/circleci/project/github/pinax/pinax-forums.svg
:target: https://circleci.com/gh/pinax/pinax-forums
.. image:: https://img.shields.io/codecov/c/github/pinax/pinax-forums.svg
:target: https://codecov.io/gh/pinax/pinax-forums
.. image:: https://img.shields.io/github/contributors/pinax/pinax-forums.svg
:target: https://github.com/pinax/pinax-forums/graphs/contributors
.. image:: https://img.shields.io/github/issues-pr/pinax/pinax-forums.svg
:target: https://github.com/pinax/pinax-forums/pulls
.. image:: https://img.shields.io/github/issues-pr-closed/pinax/pinax-forums.svg
:target: https://github.com/pinax/pinax-forums/pulls?q=is%3Apr+is%3Aclosed
\
.. image:: http://slack.pinaxproject.com/badge.svg
:target: http://slack.pinaxproject.com/
.. image:: https://img.shields.io/badge/license-MIT-blue.svg
:target: https://opensource.org/licenses/MIT
\
``pinax-forums`` is an extensible forums app for Django and Pinax. It is
focused on core forum functionality and hence is expected to be combined with
other Pinax apps for broader features.
See ``pinax-project-forums`` for a full Django project incorporating numerous
apps with the goal of providing an out of the box forums solution.
Supported Django and Python Versions
------------------------------------
+-----------------+-----+-----+-----+
| Django / Python | 3.6 | 3.7 | 3.8 |
+=================+=====+=====+=====+
| 2.2 | * | * | * |
+-----------------+-----+-----+-----+
| 3.0 | * | * | * |
+-----------------+-----+-----+-----+
"""
setup(
author="Pinax Team",
author_email="[email protected]",
description="an extensible forum app for Django and Pinax",
name="pinax-forums",
long_description=LONG_DESCRIPTION,
version=VERSION,
url="http://github.com/pinax/pinax-forums/",
license="MIT",
packages=find_packages(),
package_data={
"forums": []
},
install_requires=[
"django>=2.2",
"django-appconf>=1.0.2"
],
test_suite="runtests.runtests",
tests_require=[
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries :: Python Modules",
]
)
| {
"content_hash": "f991e37f3f124e5111556648f8937e14",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 33.774193548387096,
"alnum_prop": 0.6115886660299268,
"repo_name": "pinax/pinax-forums",
"id": "85acca0288c5486696a406d3055dceebdad1c420",
"size": "3141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "49116"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(name='pywwt',
packages = ['pywwt'],
version='0.2.0',
description = 'Python interface to World Wide Telescope',
author='John ZuHone',
author_email='[email protected]',
url='http://github.com/jzuhone/pywwt',
download_url='https://github.com/jzuhone/pywwt/tarball/0.2.0',
install_requires = ["numpy","beautifulsoup4","matplotlib",
"astropy","requests","lxml"],
classifiers=[
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering :: Visualization',
],
)
| {
"content_hash": "5493461cef43355c203f1deaaeec4ac8",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 68,
"avg_line_length": 39.35,
"alnum_prop": 0.5883100381194409,
"repo_name": "jzuhone/pywwt",
"id": "4a3894ecc4aad650379c08c931629b906461c5d0",
"size": "809",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "25804"
},
{
"name": "Shell",
"bytes": "105"
}
],
"symlink_target": ""
} |
from ..common import _load_win_dll # this also ensures that libdyndt is loaded first
import os, os.path
import sys
if os.name == 'nt':
_load_win_dll(os.path.dirname(os.path.dirname(__file__)), 'libdynd.dll')
from dynd.config import *
from .array import array, asarray, type_of, dshape_of, as_py, view, \
ones, zeros, empty, is_c_contiguous, is_f_contiguous, old_range, \
parse_json, squeeze, dtype_of, old_linspace, fields, ndim_of
from .callable import callable
inf = float('inf')
nan = float('nan')
from . import functional
| {
"content_hash": "4b9ec8e39112cf730633bf525f579252",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 84,
"avg_line_length": 30.166666666666668,
"alnum_prop": 0.6998158379373849,
"repo_name": "insertinterestingnamehere/dynd-python",
"id": "1cbfb93e45caa885ed97f5f49ddfbd9526a07297",
"size": "543",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "dynd/nd/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4340"
},
{
"name": "C++",
"bytes": "306604"
},
{
"name": "CMake",
"bytes": "49463"
},
{
"name": "Makefile",
"bytes": "150"
},
{
"name": "Python",
"bytes": "302422"
},
{
"name": "Ruby",
"bytes": "758"
},
{
"name": "Shell",
"bytes": "6449"
}
],
"symlink_target": ""
} |
import sqlite3
from keySearch import *
print "Beggining test on keySearch..."
conn = sqlite3.connect('test2.db')
c = conn.cursor()
for row in c.execute('SELECT source_code FROM appvulnerability WHERE vulnerability_id=10'):
result = keySearch(row[0])
if result[0]:
print "Passed Key test:\t" + inQuotes(row[0])
#print "line of code:\t" + row[0]
print "End of test on keySearch..."
print "DOES entry exist test"
print doesEntryExist("DasdfadsfdEBUG") #should return false
print doesEntryExist("DEBUG") # Should return true
| {
"content_hash": "f86cb403a9f8e440be37c84b23ac74d0",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 91,
"avg_line_length": 28.36842105263158,
"alnum_prop": 0.7198515769944341,
"repo_name": "ksparakis/apekit",
"id": "ffa925fdf71e9481d38efdea94f1fc90ba1147be",
"size": "539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vulns/keySearch.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "1480"
},
{
"name": "Python",
"bytes": "35847"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.iothubprovisioningservices import IotDpsClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-iothubprovisioningservices
# USAGE
python dps_get_operation_result.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = IotDpsClient(
credential=DefaultAzureCredential(),
subscription_id="91d12660-3dec-467a-be2a-213b5544ddc0",
)
response = client.iot_dps_resource.get_operation_result(
operation_id="MTY5OTNmZDctODI5Yy00N2E2LTkxNDQtMDU1NGIyYzY1ZjRl",
resource_group_name="myResourceGroup",
provisioning_service_name="myFirstProvisioningService",
)
print(response)
# x-ms-original-file: specification/deviceprovisioningservices/resource-manager/Microsoft.Devices/stable/2022-02-05/examples/DPSGetOperationResult.json
if __name__ == "__main__":
main()
| {
"content_hash": "307085cbbc5719c7865a223a6cb10c6d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 151,
"avg_line_length": 36.8235294117647,
"alnum_prop": 0.7531948881789138,
"repo_name": "Azure/azure-sdk-for-python",
"id": "b403e64ded242e2bbb2db160b736fea7b2272156",
"size": "1720",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/iothub/azure-mgmt-iothubprovisioningservices/generated_samples/dps_get_operation_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('addons', '0016_addonreviewerflags_auto_approval_disabled_until_next_approval'),
]
operations = [
migrations.AddField(
model_name='addonreviewerflags',
name='notified_about_expiring_delayed_rejections',
field=models.NullBooleanField(default=None),
),
]
| {
"content_hash": "6a60a01f2f3025b3081bd2d4797cdfb7",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 89,
"avg_line_length": 26.9375,
"alnum_prop": 0.6473317865429234,
"repo_name": "bqbn/addons-server",
"id": "38f557de42b843f4fab9966ddd51aacb1ecd6d65",
"size": "481",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/olympia/addons/migrations/0017_addonreviewerflags_notified_about_expiring_delayed_rejections.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "810080"
},
{
"name": "Dockerfile",
"bytes": "2868"
},
{
"name": "HTML",
"bytes": "585550"
},
{
"name": "JavaScript",
"bytes": "1071952"
},
{
"name": "Makefile",
"bytes": "827"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5323934"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11171"
},
{
"name": "Smarty",
"bytes": "1503"
}
],
"symlink_target": ""
} |
"""
Test suite entry point module.
"""
# Add here all your test suites and import from them every test case.
# These test cases will be documented by autoapi:
# http://autoapi.readthedocs.org/
from .test_ovs_openflow import test_ovs_openflow
# Don't forget to list all your test cases here:
__all__ = [
'test_ovs_openflow'
]
| {
"content_hash": "d58829dd07d187bcfa899ac1eef1b4bc",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 69,
"avg_line_length": 22.2,
"alnum_prop": 0.7117117117117117,
"repo_name": "open-switch/ops-switchd",
"id": "57a5e8979c20e4191443c187a86128bda5c0facd",
"size": "945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ops-tests/feature/openflow/test/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "513647"
},
{
"name": "C++",
"bytes": "888"
},
{
"name": "CMake",
"bytes": "8929"
},
{
"name": "Python",
"bytes": "126428"
}
],
"symlink_target": ""
} |
"""I hold HTML generation helpers.
"""
from cgi import escape
from twisted.python import log
from twisted.python.compat import NativeStringIO as StringIO
from twisted.python.deprecate import deprecated
from incremental import Version
@deprecated(Version('Twisted', 15, 3, 0), replacement='twisted.web.template')
def PRE(text):
"Wrap <pre> tags around some text and HTML-escape it."
return "<pre>"+escape(text)+"</pre>"
@deprecated(Version('Twisted', 15, 3, 0), replacement='twisted.web.template')
def UL(lst):
io = StringIO()
io.write("<ul>\n")
for el in lst:
io.write("<li> %s</li>\n" % el)
io.write("</ul>")
return io.getvalue()
@deprecated(Version('Twisted', 15, 3, 0), replacement='twisted.web.template')
def linkList(lst):
io = StringIO()
io.write("<ul>\n")
for hr, el in lst:
io.write('<li> <a href="%s">%s</a></li>\n' % (hr, el))
io.write("</ul>")
return io.getvalue()
@deprecated(Version('Twisted', 15, 3, 0), replacement='twisted.web.template')
def output(func, *args, **kw):
"""output(func, *args, **kw) -> html string
Either return the result of a function (which presumably returns an
HTML-legal string) or a sparse HTMLized error message and a message
in the server log.
"""
try:
return func(*args, **kw)
except:
log.msg("Error calling %r:" % (func,))
log.err()
return PRE("An error occurred.")
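# Illustrative usage only (not part of the original module): render a callable
# defensively, falling back to an HTML-escaped error notice if it raises.
#
#     def greet(name):
#         return "<h1>Hello, %s!</h1>" % escape(name)
#
#     fragment = output(greet, "world")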
| {
"content_hash": "cebfbc962a963948030b50fd1ad9819c",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 77,
"avg_line_length": 26.77777777777778,
"alnum_prop": 0.6327800829875518,
"repo_name": "EricMuller/mywebmarks-backend",
"id": "b8fd0ab5b688d9866aa98ee1b0918e96b91758ed",
"size": "1573",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "requirements/twisted/Twisted-17.1.0/build/lib.linux-x86_64-3.5/twisted/web/html.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "23736"
},
{
"name": "Batchfile",
"bytes": "3516"
},
{
"name": "C",
"bytes": "37168"
},
{
"name": "CSS",
"bytes": "66211"
},
{
"name": "DIGITAL Command Language",
"bytes": "1032"
},
{
"name": "GAP",
"bytes": "36244"
},
{
"name": "HTML",
"bytes": "1087560"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Objective-C",
"bytes": "2584"
},
{
"name": "Python",
"bytes": "23014526"
},
{
"name": "Roff",
"bytes": "160293"
},
{
"name": "Shell",
"bytes": "15482"
},
{
"name": "Smarty",
"bytes": "1366"
}
],
"symlink_target": ""
} |
from .port_handler import *
from .packet_handler import *
from .group_sync_read import *
from .group_sync_write import *
from .group_bulk_read import *
from .group_bulk_write import *
| {
"content_hash": "1b8fd0883460665e9eed45583dbf89ef",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 31,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.75,
"repo_name": "ROBOTIS-GIT/DynamixelSDK",
"id": "bc227b66df87cb238f7ca9d7b208b17107712085",
"size": "1007",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ros/dynamixel_sdk/src/dynamixel_sdk/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "366407"
},
{
"name": "C#",
"bytes": "789021"
},
{
"name": "C++",
"bytes": "763888"
},
{
"name": "CMake",
"bytes": "7413"
},
{
"name": "CSS",
"bytes": "31495"
},
{
"name": "Java",
"bytes": "217334"
},
{
"name": "JavaScript",
"bytes": "30966"
},
{
"name": "LabVIEW",
"bytes": "8871"
},
{
"name": "MATLAB",
"bytes": "246704"
},
{
"name": "Makefile",
"bytes": "421100"
},
{
"name": "Python",
"bytes": "288179"
}
],
"symlink_target": ""
} |
try:
from setuptools import setup, find_packages
except ImportError:
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
import os
import sys
import codecs
import re
import ast
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('pymongo_driver/__init__.py', 'rb') as f:
_version = str(ast.literal_eval(_version_re.search(f.read().decode('utf-8')).group(1)))
_name = 'pymongo_driver'
_keywords = ('pymongo', 'driver', 'mongodb', 'egg')
_packages = find_packages()
_zip_safe = False
_description = 'Python Mongodb Driver'
_long_description = codecs.open('README.rst', 'r', 'utf-8').read()
_author = 'Combo'
_author_email = '[email protected]'
_license = 'The MIT License'
_platforms = 'Independant'
_url = 'http://lecly.github.io/pymongo-driver/'
_classifiers = [
    'Development Status :: 5 - Production/Stable',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: The MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
]
_install_requires = [
'pep8>=1.6',
'pymongo>=2.8',
]
setup(
name=_name,
version=_version,
keywords=_keywords,
packages=_packages,
zip_safe=_zip_safe,
description=_description,
long_description=_long_description,
author=_author,
author_email=_author_email,
license=_license,
platforms=_platforms,
url=_url,
classifiers=_classifiers,
install_requires=_install_requires,
)
# vim:ts=4:sw=4
| {
"content_hash": "e77e7aab2fed3d780988658abc3607b0",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 91,
"avg_line_length": 25.671875,
"alnum_prop": 0.660377358490566,
"repo_name": "lecly/pymongo-driver",
"id": "684f8be3d59d4442ea6156801480d4135421ad84",
"size": "1700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "705"
},
{
"name": "Python",
"bytes": "31079"
}
],
"symlink_target": ""
} |
from inspect import isfunction
from collections import namedtuple
from copy import deepcopy
import os
import json
import numpy as np
from scipy import linalg
from .ecg import (qrs_detector, _get_ecg_channel_index, _make_ecg,
create_ecg_epochs)
from .eog import _find_eog_events, _get_eog_channel_index
from .infomax_ import infomax
from ..cov import compute_whitener
from .. import Covariance, Evoked
from ..io.pick import (pick_types, pick_channels, pick_info,
_pick_data_channels, _DATA_CH_TYPES_SPLIT)
from ..io.write import (write_double_matrix, write_string,
write_name_list, write_int, start_block,
end_block)
from ..io.tree import dir_tree_find
from ..io.open import fiff_open
from ..io.tag import read_tag
from ..io.meas_info import write_meas_info, read_meas_info
from ..io.constants import Bunch, FIFF
from ..io.base import _BaseRaw
from ..epochs import _BaseEpochs
from ..viz import (plot_ica_components, plot_ica_scores,
plot_ica_sources, plot_ica_overlay)
from ..viz.utils import (_prepare_trellis, tight_layout, plt_show,
_setup_vmin_vmax)
from ..viz.topomap import (_prepare_topo_plot, _check_outlines,
plot_topomap, _hide_frame)
from ..channels.channels import _contains_ch_type, ContainsMixin
from ..io.write import start_file, end_file, write_id
from ..utils import (check_version, logger, check_fname, verbose,
_reject_data_segments, check_random_state,
_get_fast_dot, compute_corr)
from ..fixes import _get_args
from ..filter import band_pass_filter
from .bads import find_outliers
from .ctps_ import ctps
from ..externals.six import string_types, text_type
__all__ = ['ICA', 'ica_find_ecg_events', 'ica_find_eog_events',
'get_score_funcs', 'read_ica', 'run_ica']
def _make_xy_sfunc(func, ndim_output=False):
"""Aux function"""
if ndim_output:
def sfunc(x, y):
return np.array([func(a, y.ravel()) for a in x])[:, 0]
else:
def sfunc(x, y):
return np.array([func(a, y.ravel()) for a in x])
sfunc.__name__ = '.'.join(['score_func', func.__module__, func.__name__])
sfunc.__doc__ = func.__doc__
return sfunc
# makes score funcs attr accessible for users
def get_score_funcs():
"""Helper to get the score functions"""
from scipy import stats
from scipy.spatial import distance
score_funcs = Bunch()
xy_arg_dist_funcs = [(n, f) for n, f in vars(distance).items()
if isfunction(f) and not n.startswith('_')]
xy_arg_stats_funcs = [(n, f) for n, f in vars(stats).items()
if isfunction(f) and not n.startswith('_')]
score_funcs.update(dict((n, _make_xy_sfunc(f))
for n, f in xy_arg_dist_funcs
if _get_args(f) == ['u', 'v']))
score_funcs.update(dict((n, _make_xy_sfunc(f, ndim_output=True))
for n, f in xy_arg_stats_funcs
if _get_args(f) == ['x', 'y']))
return score_funcs
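# Illustrative sketch (not part of the original module): the Bunch returned by
# get_score_funcs() maps score names to vectorized callables, e.g.
#
#     score_funcs = get_score_funcs()
#     scores = score_funcs['pearsonr'](sources, target)  # one score per row of `sources`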
class ICA(ContainsMixin):
"""M/EEG signal decomposition using Independent Component Analysis (ICA)
This object can be used to estimate ICA components and then
remove some from Raw or Epochs for data exploration or artifact
correction.
Caveat! If supplying a noise covariance keep track of the projections
available in the cov or in the raw object. For example, if you are
interested in EOG or ECG artifacts, EOG and ECG projections should be
temporally removed before fitting the ICA. You can say::
>> projs, raw.info['projs'] = raw.info['projs'], []
>> ica.fit(raw)
>> raw.info['projs'] = projs
.. note:: Methods implemented are FastICA (default), Infomax and
Extended-Infomax. Infomax can be quite sensitive to differences
in floating point arithmetic due to exponential non-linearity.
Extended-Infomax seems to be more stable in this respect
enhancing reproducibility and stability of results.
Parameters
----------
n_components : int | float | None
The number of components used for ICA decomposition. If int, it must be
        smaller than max_pca_components. If None, all PCA components will be
used. If float between 0 and 1 components will be selected by the
cumulative percentage of explained variance.
max_pca_components : int | None
The number of components used for PCA decomposition. If None, no
dimension reduction will be applied and max_pca_components will equal
the number of channels supplied on decomposing data. Defaults to None.
n_pca_components : int | float
The number of PCA components used after ICA recomposition. The ensuing
attribute allows to balance noise reduction against potential loss of
features due to dimensionality reduction. If greater than
``self.n_components_``, the next ``n_pca_components`` minus
``n_components_`` PCA components will be added before restoring the
        sensor space data. The attribute gets updated each time the corresponding
        parameter in .pick_sources_raw or .pick_sources_epochs is changed.
If float, the number of components selected matches the number of
components with a cumulative explained variance below
`n_pca_components`.
noise_cov : None | instance of mne.cov.Covariance
Noise covariance used for whitening. If None, channels are just
z-scored.
random_state : None | int | instance of np.random.RandomState
np.random.RandomState to initialize the FastICA estimation.
As the estimation is non-deterministic it can be useful to
fix the seed to have reproducible results. Defaults to None.
method : {'fastica', 'infomax', 'extended-infomax'}
The ICA method to use. Defaults to 'fastica'.
fit_params : dict | None.
Additional parameters passed to the ICA estimator chosen by `method`.
max_iter : int, optional
Maximum number of iterations during fit.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
current_fit : str
Flag informing about which data type (raw or epochs) was used for
the fit.
ch_names : list-like
Channel names resulting from initial picking.
    n_components : int | float | None
        The number of components used for ICA decomposition.
``n_components_`` : int
If fit, the actual number of components used for ICA decomposition.
n_pca_components : int
See above.
max_pca_components : int
The number of components used for PCA dimensionality reduction.
verbose : bool, str, int, or None
See above.
    ``pca_components_`` : ndarray
If fit, the PCA components
``pca_mean_`` : ndarray
If fit, the mean vector used to center the data before doing the PCA.
``pca_explained_variance_`` : ndarray
If fit, the variance explained by each PCA component
``mixing_matrix_`` : ndarray
If fit, the mixing matrix to restore observed data, else None.
``unmixing_matrix_`` : ndarray
If fit, the matrix to unmix observed data, else None.
exclude : list
List of sources indices to exclude, i.e. artifact components identified
throughout the ICA solution. Indices added to this list, will be
dispatched to the .pick_sources methods. Source indices passed to
the .pick_sources method via the 'exclude' argument are added to the
.exclude attribute. When saving the ICA also the indices are restored.
Hence, artifact components once identified don't have to be added
again. To dump this 'artifact memory' say: ica.exclude = []
info : None | instance of Info
The measurement info copied from the object fitted.
`n_samples_` : int
the number of samples used on fit.
`labels_` : dict
A dictionary of independent component indices, grouped by types of
independent components. This attribute is set by some of the artifact
detection functions.
"""
@verbose
def __init__(self, n_components=None, max_pca_components=None,
n_pca_components=None, noise_cov=None, random_state=None,
method='fastica', fit_params=None, max_iter=200,
verbose=None):
methods = ('fastica', 'infomax', 'extended-infomax')
if method not in methods:
raise ValueError('`method` must be "%s". You passed: "%s"' %
('" or "'.join(methods), method))
if not check_version('sklearn', '0.12'):
            raise RuntimeError('the scikit-learn package (version >= 0.12) '
                               'is required for ICA')
self.noise_cov = noise_cov
if max_pca_components is not None and \
n_components > max_pca_components:
raise ValueError('n_components must be smaller than '
'max_pca_components')
if isinstance(n_components, float) \
and not 0 < n_components <= 1:
raise ValueError('Selecting ICA components by explained variance '
'needs values between 0.0 and 1.0 ')
self.current_fit = 'unfitted'
self.verbose = verbose
self.n_components = n_components
self.max_pca_components = max_pca_components
self.n_pca_components = n_pca_components
self.ch_names = None
self.random_state = random_state
if fit_params is None:
fit_params = {}
fit_params = deepcopy(fit_params) # avoid side effects
if "extended" in fit_params:
raise ValueError("'extended' parameter provided. You should "
"rather use method='extended-infomax'.")
if method == 'fastica':
update = {'algorithm': 'parallel', 'fun': 'logcosh',
'fun_args': None}
fit_params.update(dict((k, v) for k, v in update.items() if k
not in fit_params))
elif method == 'infomax':
fit_params.update({'extended': False})
elif method == 'extended-infomax':
fit_params.update({'extended': True})
if 'max_iter' not in fit_params:
fit_params['max_iter'] = max_iter
self.max_iter = max_iter
self.fit_params = fit_params
self.exclude = []
self.info = None
self.method = method
def __repr__(self):
"""ICA fit information"""
if self.current_fit == 'unfitted':
s = 'no'
elif self.current_fit == 'raw':
s = 'raw data'
else:
s = 'epochs'
s += ' decomposition, '
s += 'fit (%s): %s samples, ' % (self.method,
str(getattr(self, 'n_samples_', '')))
s += ('%s components' % str(self.n_components_) if
hasattr(self, 'n_components_') else
'no dimension reduction')
if self.info is not None:
ch_fit = ['"%s"' % c for c in _DATA_CH_TYPES_SPLIT if c in self]
s += ', channels used: {0}'.format('; '.join(ch_fit))
if self.exclude:
s += ', %i sources marked for exclusion' % len(self.exclude)
return '<ICA | %s>' % s
@verbose
def fit(self, inst, picks=None, start=None, stop=None, decim=None,
reject=None, flat=None, tstep=2.0, verbose=None):
"""Run the ICA decomposition on raw data
        Caveat! If supplying a noise covariance, keep track of the projections
        available in the cov, the raw or the epochs object. For example,
        if you are interested in EOG or ECG artifacts, EOG and ECG projections
        should be temporarily removed before fitting the ICA.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Raw measurements to be decomposed.
picks : array-like of int
Channels to be included. This selection remains throughout the
initialized ICA solution. If None only good data channels are used.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
decim : int | None
Increment for selecting each nth time slice. If None, all samples
within ``start`` and ``stop`` are used.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'ecog', 'eog', 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
It only applies if `inst` is of type Raw.
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'ecog', 'eog', 'ecg'.
Values are floats that set the minimum acceptable peak-to-peak
amplitude. If flat is None then no rejection is done.
It only applies if `inst` is of type Raw.
tstep : float
Length of data chunks for artifact rejection in seconds.
It only applies if `inst` is of type Raw.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
self : instance of ICA
Returns the modified instance.
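        For example, a minimal sketch assuming ``raw`` is a preloaded
        instance of Raw (the rejection values below are illustrative only)::
            ica = ICA(n_components=0.95, method='fastica', random_state=42)
            ica.fit(raw, decim=3, reject=dict(mag=4e-12, grad=4000e-13))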
"""
if isinstance(inst, _BaseRaw):
self._fit_raw(inst, picks, start, stop, decim, reject, flat,
tstep, verbose)
elif isinstance(inst, _BaseEpochs):
self._fit_epochs(inst, picks, decim, verbose)
else:
raise ValueError('Data input must be of Raw or Epochs type')
return self
def _reset(self):
"""Aux method"""
del self._pre_whitener
del self.unmixing_matrix_
del self.mixing_matrix_
del self.n_components_
del self.n_samples_
del self.pca_components_
del self.pca_explained_variance_
del self.pca_mean_
if hasattr(self, 'drop_inds_'):
del self.drop_inds_
def _fit_raw(self, raw, picks, start, stop, decim, reject, flat, tstep,
verbose):
"""Aux method
"""
if self.current_fit != 'unfitted':
self._reset()
if picks is None: # just use good data channels
picks = _pick_data_channels(raw.info, exclude='bads',
with_ref_meg=False)
logger.info('Fitting ICA to data using %i channels. \n'
'Please be patient, this may take some time' % len(picks))
if self.max_pca_components is None:
self.max_pca_components = len(picks)
logger.info('Inferring max_pca_components from picks.')
self.info = pick_info(raw.info, picks)
if self.info['comps']:
self.info['comps'] = []
self.ch_names = self.info['ch_names']
start, stop = _check_start_stop(raw, start, stop)
# this will be a copy
data = raw[picks, start:stop][0]
# this will be a view
if decim is not None:
data = data[:, ::decim]
# this will make a copy
if (reject is not None) or (flat is not None):
data, self.drop_inds_ = _reject_data_segments(data, reject, flat,
decim, self.info,
tstep)
self.n_samples_ = data.shape[1]
# this may operate inplace or make a copy
data, self._pre_whitener = self._pre_whiten(data, raw.info, picks)
self._fit(data, self.max_pca_components, 'raw')
return self
def _fit_epochs(self, epochs, picks, decim, verbose):
"""Aux method
"""
if self.current_fit != 'unfitted':
self._reset()
if picks is None:
picks = _pick_data_channels(epochs.info, exclude='bads',
with_ref_meg=False)
logger.info('Fitting ICA to data using %i channels. \n'
'Please be patient, this may take some time' % len(picks))
# filter out all the channels the raw wouldn't have initialized
self.info = pick_info(epochs.info, picks)
if self.info['comps']:
self.info['comps'] = []
self.ch_names = self.info['ch_names']
if self.max_pca_components is None:
self.max_pca_components = len(picks)
logger.info('Inferring max_pca_components from picks.')
# this should be a copy (picks a list of int)
data = epochs.get_data()[:, picks]
# this will be a view
if decim is not None:
data = data[:, :, ::decim]
self.n_samples_ = np.prod(data[:, 0, :].shape)
# This will make at least one copy (one from hstack, maybe one
# more from _pre_whiten)
data, self._pre_whitener = \
self._pre_whiten(np.hstack(data), epochs.info, picks)
self._fit(data, self.max_pca_components, 'epochs')
return self
def _pre_whiten(self, data, info, picks):
"""Aux function"""
fast_dot = _get_fast_dot()
has_pre_whitener = hasattr(self, '_pre_whitener')
if not has_pre_whitener and self.noise_cov is None:
# use standardization as whitener
# Scale (z-score) the data by channel type
info = pick_info(info, picks)
pre_whitener = np.empty([len(data), 1])
for ch_type in _DATA_CH_TYPES_SPLIT:
if _contains_ch_type(info, ch_type):
if ch_type == 'seeg':
this_picks = pick_types(info, meg=False, seeg=True)
elif ch_type == 'ecog':
this_picks = pick_types(info, meg=False, ecog=True)
elif ch_type == 'eeg':
this_picks = pick_types(info, meg=False, eeg=True)
else:
this_picks = pick_types(info, meg=ch_type)
pre_whitener[this_picks] = np.std(data[this_picks])
data /= pre_whitener
elif not has_pre_whitener and self.noise_cov is not None:
pre_whitener, _ = compute_whitener(self.noise_cov, info, picks)
assert data.shape[0] == pre_whitener.shape[1]
data = fast_dot(pre_whitener, data)
elif has_pre_whitener and self.noise_cov is None:
data /= self._pre_whitener
pre_whitener = self._pre_whitener
else:
data = fast_dot(self._pre_whitener, data)
pre_whitener = self._pre_whitener
return data, pre_whitener
def _fit(self, data, max_pca_components, fit_type):
"""Aux function """
from sklearn.decomposition import RandomizedPCA
random_state = check_random_state(self.random_state)
# XXX fix copy==True later. Bug in sklearn, see PR #2273
pca = RandomizedPCA(n_components=max_pca_components, whiten=True,
copy=True, random_state=random_state)
if isinstance(self.n_components, float):
# compute full feature variance before doing PCA
full_var = np.var(data, axis=1).sum()
data = pca.fit_transform(data.T)
if isinstance(self.n_components, float):
            # compute explained variance manually, cf. sklearn bug
# fixed in #2664
explained_variance_ratio_ = pca.explained_variance_ / full_var
n_components_ = np.sum(explained_variance_ratio_.cumsum() <=
self.n_components)
if n_components_ < 1:
raise RuntimeError('One PCA component captures most of the '
'explained variance, your threshold resu'
'lts in 0 components. You should select '
'a higher value.')
logger.info('Selection by explained variance: %i components' %
n_components_)
sel = slice(n_components_)
else:
if self.n_components is not None: # normal n case
sel = slice(self.n_components)
logger.info('Selection by number: %i components' %
self.n_components)
else: # None case
logger.info('Using all PCA components: %i'
% len(pca.components_))
sel = slice(len(pca.components_))
# the things to store for PCA
self.pca_mean_ = pca.mean_
self.pca_components_ = pca.components_
        # unwhiten pca components and put scaling in unmixing matrix later.
self.pca_explained_variance_ = exp_var = pca.explained_variance_
self.pca_components_ *= np.sqrt(exp_var[:, None])
del pca
# update number of components
self.n_components_ = sel.stop
if self.n_pca_components is not None:
if self.n_pca_components > len(self.pca_components_):
self.n_pca_components = len(self.pca_components_)
# Take care of ICA
if self.method == 'fastica':
from sklearn.decomposition import FastICA # to avoid strong dep.
ica = FastICA(whiten=False,
random_state=random_state, **self.fit_params)
ica.fit(data[:, sel])
# get unmixing and add scaling
self.unmixing_matrix_ = getattr(ica, 'components_',
'unmixing_matrix_')
elif self.method in ('infomax', 'extended-infomax'):
self.unmixing_matrix_ = infomax(data[:, sel],
random_state=random_state,
**self.fit_params)
self.unmixing_matrix_ /= np.sqrt(exp_var[sel])[None, :]
self.mixing_matrix_ = linalg.pinv(self.unmixing_matrix_)
self.current_fit = fit_type
def _transform(self, data):
"""Compute sources from data (operates inplace)"""
fast_dot = _get_fast_dot()
if self.pca_mean_ is not None:
data -= self.pca_mean_[:, None]
# Apply first PCA
pca_data = fast_dot(self.pca_components_[:self.n_components_], data)
# Apply unmixing to low dimension PCA
sources = fast_dot(self.unmixing_matrix_, pca_data)
return sources
def _transform_raw(self, raw, start, stop):
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
start, stop = _check_start_stop(raw, start, stop)
picks = pick_types(raw.info, include=self.ch_names, exclude='bads',
meg=False, ref_meg=False)
if len(picks) != len(self.ch_names):
raise RuntimeError('Raw doesn\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Raw compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
data, _ = self._pre_whiten(raw[picks, start:stop][0], raw.info, picks)
return self._transform(data)
def _transform_epochs(self, epochs, concatenate):
"""Aux method
"""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA')
picks = pick_types(epochs.info, include=self.ch_names, exclude='bads',
meg=False, ref_meg=False)
# special case where epochs come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Epochs don\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Epochs compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
data = np.hstack(epochs.get_data()[:, picks])
data, _ = self._pre_whiten(data, epochs.info, picks)
sources = self._transform(data)
if not concatenate:
# Put the data back in 3D
sources = np.array(np.split(sources, len(epochs.events), 1))
return sources
def _transform_evoked(self, evoked):
"""Aux method
"""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please first fit ICA')
picks = pick_types(evoked.info, include=self.ch_names, exclude='bads',
meg=False, ref_meg=False)
if len(picks) != len(self.ch_names):
raise RuntimeError('Evoked doesn\'t match fitted data: %i channels'
' fitted but %i channels supplied. \nPlease '
'provide Evoked compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
data, _ = self._pre_whiten(evoked.data[picks], evoked.info, picks)
sources = self._transform(data)
return sources
def get_sources(self, inst, add_channels=None, start=None, stop=None):
"""Estimate sources given the unmixing matrix
This method will return the sources in the container format passed.
Typical usecases:
1. pass Raw object to use `raw.plot` for ICA sources
2. pass Epochs object to compute trial-based statistics in ICA space
3. pass Evoked object to investigate time-locking in ICA space
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from and to represent sources in.
add_channels : None | list of str
Additional channels to be added. Useful to e.g. compare sources
with some reference. Defaults to None
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, the entire data will be used.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, the entire data will be used.
Returns
-------
sources : instance of Raw, Epochs or Evoked
The ICA sources time series.
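        For example, a minimal sketch assuming ``ica`` has been fitted to a
        Raw instance ``raw``::
            sources = ica.get_sources(raw)
            sources.plot()  # browse the independent components over time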
"""
if isinstance(inst, _BaseRaw):
sources = self._sources_as_raw(inst, add_channels, start, stop)
elif isinstance(inst, _BaseEpochs):
sources = self._sources_as_epochs(inst, add_channels, False)
elif isinstance(inst, Evoked):
sources = self._sources_as_evoked(inst, add_channels)
else:
raise ValueError('Data input must be of Raw, Epochs or Evoked '
'type')
return sources
def _sources_as_raw(self, raw, add_channels, start, stop):
"""Aux method
"""
# merge copied instance and picked data with sources
sources = self._transform_raw(raw, start=start, stop=stop)
if raw.preload: # get data and temporarily delete
data = raw._data
del raw._data
out = raw.copy() # copy and reappend
if raw.preload:
raw._data = data
# populate copied raw.
start, stop = _check_start_stop(raw, start, stop)
if add_channels is not None:
raw_picked = raw.copy().pick_channels(add_channels)
data_, times_ = raw_picked[:, start:stop]
data_ = np.r_[sources, data_]
else:
data_ = sources
_, times_ = raw[0, start:stop]
out._data = data_
out._times = times_
out._filenames = list()
out.preload = True
# update first and last samples
out._first_samps = np.array([raw.first_samp +
(start if start else 0)])
out._last_samps = np.array([out.first_samp + stop
if stop else raw.last_samp])
out._projector = None
self._export_info(out.info, raw, add_channels)
out._update_times()
return out
def _sources_as_epochs(self, epochs, add_channels, concatenate):
"""Aux method"""
out = epochs.copy()
sources = self._transform_epochs(epochs, concatenate)
if add_channels is not None:
picks = [epochs.ch_names.index(k) for k in add_channels]
else:
picks = []
out._data = np.concatenate([sources, epochs.get_data()[:, picks]],
axis=1) if len(picks) > 0 else sources
self._export_info(out.info, epochs, add_channels)
out.preload = True
out._raw = None
out._projector = None
return out
def _sources_as_evoked(self, evoked, add_channels):
"""Aux method
"""
if add_channels is not None:
picks = [evoked.ch_names.index(k) for k in add_channels]
else:
picks = []
sources = self._transform_evoked(evoked)
if len(picks) > 1:
data = np.r_[sources, evoked.data[picks]]
else:
data = sources
out = evoked.copy()
out.data = data
self._export_info(out.info, evoked, add_channels)
return out
def _export_info(self, info, container, add_channels):
"""Aux method
"""
# set channel names and info
ch_names = []
ch_info = info['chs'] = []
for ii in range(self.n_components_):
this_source = 'ICA %03d' % (ii + 1)
ch_names.append(this_source)
ch_info.append(dict(ch_name=this_source, cal=1,
logno=ii + 1, coil_type=FIFF.FIFFV_COIL_NONE,
kind=FIFF.FIFFV_MISC_CH,
                                coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
loc=np.array([0., 0., 0., 1.] * 3, dtype='f4'),
unit=FIFF.FIFF_UNIT_NONE,
range=1.0, scanno=ii + 1, unit_mul=0))
if add_channels is not None:
# re-append additionally picked ch_names
ch_names += add_channels
# re-append additionally picked ch_info
ch_info += [k for k in container.info['chs'] if k['ch_name'] in
add_channels]
info['bads'] = [ch_names[k] for k in self.exclude]
info['projs'] = [] # make sure projections are removed.
info._update_redundant()
info._check_consistency()
@verbose
def score_sources(self, inst, target=None, score_func='pearsonr',
start=None, stop=None, l_freq=None, h_freq=None,
verbose=None):
"""Assign score to components based on statistic or metric
Parameters
----------
inst : instance of Raw, Epochs or Evoked
The object to reconstruct the sources from.
target : array-like | ch_name | None
Signal to which the sources shall be compared. It has to be of
the same shape as the sources. If some string is supplied, a
routine will try to find a matching channel. If None, a score
function expecting only one input-array argument must be used,
for instance, scipy.stats.skew (default).
score_func : callable | str label
Callable taking as arguments either two input arrays
(e.g. Pearson correlation) or one input
array (e. g. skewness) and returns a float. For convenience the
most common score_funcs are available via string labels:
            Currently, all distance metrics from scipy.spatial and all
            functions from scipy.stats taking compatible input arguments are
            supported. These functions have been modified to support
            iteration over the rows of a 2D array.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
        l_freq : float
            Low cut-off frequency in Hz.
        h_freq : float
            High cut-off frequency in Hz.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
scores : ndarray
scores for each source as returned from score_func
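        For example, a sketch ranking components by correlation with a
        band-pass filtered EOG channel (the channel name is illustrative)::
            scores = ica.score_sources(raw, target='EOG 061',
                                       score_func='pearsonr',
                                       l_freq=1, h_freq=10)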
"""
if isinstance(inst, _BaseRaw):
sources = self._transform_raw(inst, start, stop)
elif isinstance(inst, _BaseEpochs):
sources = self._transform_epochs(inst, concatenate=True)
elif isinstance(inst, Evoked):
sources = self._transform_evoked(inst)
else:
raise ValueError('Input must be of Raw, Epochs or Evoked type')
if target is not None: # we can have univariate metrics without target
target = self._check_target(target, inst, start, stop)
if sources.shape[-1] != target.shape[-1]:
                raise ValueError('Sources and target do not have the same '
                                 'number of time slices.')
# auto target selection
if verbose is None:
verbose = self.verbose
        if isinstance(inst, _BaseRaw):
sources, target = _band_pass_filter(self, sources, target,
l_freq, h_freq, verbose)
scores = _find_sources(sources, target, score_func)
return scores
def _check_target(self, target, inst, start, stop):
"""Aux Method"""
if isinstance(inst, _BaseRaw):
start, stop = _check_start_stop(inst, start, stop)
if hasattr(target, 'ndim'):
if target.ndim < 2:
target = target.reshape(1, target.shape[-1])
if isinstance(target, string_types):
pick = _get_target_ch(inst, target)
target, _ = inst[pick, start:stop]
elif isinstance(inst, _BaseEpochs):
if isinstance(target, string_types):
pick = _get_target_ch(inst, target)
target = inst.get_data()[:, pick]
if hasattr(target, 'ndim'):
if target.ndim == 3 and min(target.shape) == 1:
target = target.ravel()
elif isinstance(inst, Evoked):
if isinstance(target, string_types):
pick = _get_target_ch(inst, target)
target = inst.data[pick]
return target
@verbose
def find_bads_ecg(self, inst, ch_name=None, threshold=None,
start=None, stop=None, l_freq=8, h_freq=16,
method='ctps', verbose=None):
"""Detect ECG related components using correlation
        Note. If no ECG channel is available, the routine attempts to create
        an artificial ECG based on cross-channel averaging.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from.
ch_name : str
The name of the channel to use for ECG peak detection.
The argument is mandatory if the dataset contains no ECG
channels.
threshold : float
The value above which a feature is classified as outlier. If
method is 'ctps', defaults to 0.25, else defaults to 3.0.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
        l_freq : float
            Low cut-off frequency in Hz.
        h_freq : float
            High cut-off frequency in Hz.
method : {'ctps', 'correlation'}
The method used for detection. If 'ctps', cross-trial phase
statistics [1] are used to detect ECG related components.
Thresholding is then based on the significance value of a Kuiper
statistic.
If 'correlation', detection is based on Pearson correlation
between the filtered data and the filtered ECG channel.
Thresholding is based on iterative z-scoring. The above
threshold components will be masked and the z-score will
be recomputed until no supra-threshold component remains.
Defaults to 'ctps'.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
ecg_idx : list of int
The indices of ECG related components.
scores : np.ndarray of float, shape (``n_components_``)
The correlation scores.
See also
--------
find_bads_eog
References
----------
[1] Dammers, J., Schiek, M., Boers, F., Silex, C., Zvyagintsev,
M., Pietrzyk, U., Mathiak, K., 2008. Integration of amplitude
and phase statistics for complete artifact removal in independent
components of neuromagnetic recordings. Biomedical
Engineering, IEEE Transactions on 55 (10), 2353-2362.
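        For example, a minimal sketch assuming a fitted ``ica`` and a Raw
        instance ``raw``::
            ecg_inds, scores = ica.find_bads_ecg(raw, method='ctps')
            ica.exclude += ecg_inds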
"""
if verbose is None:
verbose = self.verbose
idx_ecg = _get_ecg_channel_index(ch_name, inst)
if idx_ecg is None:
if verbose is not None:
verbose = self.verbose
ecg, times = _make_ecg(inst, start, stop, verbose)
ch_name = 'ECG-MAG'
else:
ecg = inst.ch_names[idx_ecg]
# some magic we need inevitably ...
if inst.ch_names != self.ch_names:
extra_picks = pick_types(inst.info, meg=False, ecg=True)
ch_names_to_pick = (self.ch_names +
[inst.ch_names[k] for k in extra_picks])
inst = inst.copy().pick_channels(ch_names_to_pick)
if method == 'ctps':
if threshold is None:
threshold = 0.25
if isinstance(inst, _BaseRaw):
sources = self.get_sources(create_ecg_epochs(inst)).get_data()
elif isinstance(inst, _BaseEpochs):
sources = self.get_sources(inst).get_data()
else:
raise ValueError('With `ctps` only Raw and Epochs input is '
'supported')
_, p_vals, _ = ctps(sources)
scores = p_vals.max(-1)
ecg_idx = np.where(scores >= threshold)[0]
elif method == 'correlation':
if threshold is None:
threshold = 3.0
scores = self.score_sources(inst, target=ecg,
score_func='pearsonr',
start=start, stop=stop,
l_freq=l_freq, h_freq=h_freq,
verbose=verbose)
ecg_idx = find_outliers(scores, threshold=threshold)
else:
raise ValueError('Method "%s" not supported.' % method)
# sort indices by scores
ecg_idx = ecg_idx[np.abs(scores[ecg_idx]).argsort()[::-1]]
if not hasattr(self, 'labels_') or self.labels_ is None:
self.labels_ = dict()
self.labels_['ecg'] = list(ecg_idx)
self.labels_['ecg/%s' % ch_name] = list(ecg_idx)
return self.labels_['ecg'], scores
@verbose
def find_bads_eog(self, inst, ch_name=None, threshold=3.0,
start=None, stop=None, l_freq=1, h_freq=10,
verbose=None):
"""Detect EOG related components using correlation
Detection is based on Pearson correlation between the
filtered data and the filtered EOG channel.
Thresholding is based on adaptive z-scoring. The above threshold
components will be masked and the z-score will be recomputed
until no supra-threshold component remains.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from.
ch_name : str
The name of the channel to use for EOG peak detection.
The argument is mandatory if the dataset contains no EOG
channels.
threshold : int | float
The value above which a feature is classified as outlier.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
        l_freq : float
            Low cut-off frequency in Hz.
        h_freq : float
            High cut-off frequency in Hz.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
eog_idx : list of int
The indices of EOG related components, sorted by score.
scores : np.ndarray of float, shape (``n_components_``) | list of array
The correlation scores.
See Also
--------
find_bads_ecg
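        For example, a minimal sketch assuming a fitted ``ica`` and a Raw
        instance ``raw`` containing at least one EOG channel::
            eog_inds, scores = ica.find_bads_eog(raw)
            ica.exclude += eog_inds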
"""
if verbose is None:
verbose = self.verbose
eog_inds = _get_eog_channel_index(ch_name, inst)
if len(eog_inds) > 2:
eog_inds = eog_inds[:1]
logger.info('Using EOG channel %s' % inst.ch_names[eog_inds[0]])
scores, eog_idx = [], []
eog_chs = [inst.ch_names[k] for k in eog_inds]
# some magic we need inevitably ...
        # get targets before equalizing
targets = [self._check_target(k, inst, start, stop) for k in eog_chs]
if inst.ch_names != self.ch_names:
inst = inst.copy().pick_channels(self.ch_names)
if not hasattr(self, 'labels_') or self.labels_ is None:
self.labels_ = dict()
for ii, (eog_ch, target) in enumerate(zip(eog_chs, targets)):
scores += [self.score_sources(inst, target=target,
score_func='pearsonr',
start=start, stop=stop,
l_freq=l_freq, h_freq=h_freq,
verbose=verbose)]
# pick last scores
this_idx = find_outliers(scores[-1], threshold=threshold)
eog_idx += [this_idx]
self.labels_[('eog/%i/' % ii) + eog_ch] = list(this_idx)
# remove duplicates but keep order by score, even across multiple
# EOG channels
scores_ = np.concatenate([scores[ii][inds]
for ii, inds in enumerate(eog_idx)])
eog_idx_ = np.concatenate(eog_idx)[np.abs(scores_).argsort()[::-1]]
eog_idx_unique = list(np.unique(eog_idx_))
eog_idx = []
for i in eog_idx_:
if i in eog_idx_unique:
eog_idx.append(i)
eog_idx_unique.remove(i)
if len(scores) == 1:
scores = scores[0]
self.labels_['eog'] = list(eog_idx)
return self.labels_['eog'], scores
def apply(self, inst, include=None, exclude=None, n_pca_components=None,
start=None, stop=None):
"""Remove selected components from the signal.
Given the unmixing matrix, transform data,
zero out components, and inverse transform the data.
This procedure will reconstruct M/EEG signals from which
the dynamics described by the excluded components is subtracted.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
The data to be processed.
        include : array_like of int
            The indices referring to columns in the unmixing matrix. The
            components to be kept.
        exclude : array_like of int
            The indices referring to columns in the unmixing matrix. The
            components to be zeroed out.
n_pca_components : int | float | None
The number of PCA components to be kept, either absolute (int)
or percentage of the explained variance (float). If None (default),
all PCA components will be used.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
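        For example, a minimal sketch removing the components stored in
        ``ica.exclude`` from a copy of a preloaded Raw instance::
            raw_clean = ica.apply(raw.copy())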
"""
if isinstance(inst, _BaseRaw):
out = self._apply_raw(raw=inst, include=include,
exclude=exclude,
n_pca_components=n_pca_components,
start=start, stop=stop)
elif isinstance(inst, _BaseEpochs):
out = self._apply_epochs(epochs=inst, include=include,
exclude=exclude,
n_pca_components=n_pca_components)
elif isinstance(inst, Evoked):
out = self._apply_evoked(evoked=inst, include=include,
exclude=exclude,
n_pca_components=n_pca_components)
else:
raise ValueError('Data input must be of Raw, Epochs or Evoked '
'type')
return out
def _apply_raw(self, raw, include, exclude, n_pca_components, start, stop):
"""Aux method"""
if not raw.preload:
raise ValueError('Raw data must be preloaded to apply ICA')
if exclude is None:
exclude = list(set(self.exclude))
else:
exclude = list(set(self.exclude + exclude))
if n_pca_components is not None:
self.n_pca_components = n_pca_components
start, stop = _check_start_stop(raw, start, stop)
picks = pick_types(raw.info, meg=False, include=self.ch_names,
exclude='bads', ref_meg=False)
data = raw[picks, start:stop][0]
data, _ = self._pre_whiten(data, raw.info, picks)
data = self._pick_sources(data, include, exclude)
raw[picks, start:stop] = data
return raw
def _apply_epochs(self, epochs, include, exclude, n_pca_components):
if not epochs.preload:
raise ValueError('Epochs must be preloaded to apply ICA')
picks = pick_types(epochs.info, meg=False, ref_meg=False,
include=self.ch_names,
exclude='bads')
# special case where epochs come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Epochs don\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Epochs compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
if n_pca_components is not None:
self.n_pca_components = n_pca_components
data = np.hstack(epochs.get_data()[:, picks])
data, _ = self._pre_whiten(data, epochs.info, picks)
data = self._pick_sources(data, include=include, exclude=exclude)
# restore epochs, channels, tsl order
epochs._data[:, picks] = np.array(np.split(data,
len(epochs.events), 1))
epochs.preload = True
return epochs
def _apply_evoked(self, evoked, include, exclude, n_pca_components):
picks = pick_types(evoked.info, meg=False, ref_meg=False,
include=self.ch_names,
exclude='bads')
# special case where evoked come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Evoked does not match fitted data: %i channels'
' fitted but %i channels supplied. \nPlease '
'provide an Evoked object that\'s compatible '
'with ica.ch_names' % (len(self.ch_names),
len(picks)))
if n_pca_components is not None:
self.n_pca_components = n_pca_components
data = evoked.data[picks]
data, _ = self._pre_whiten(data, evoked.info, picks)
data = self._pick_sources(data, include=include,
exclude=exclude)
# restore evoked
evoked.data[picks] = data
return evoked
def _pick_sources(self, data, include, exclude):
"""Aux function"""
fast_dot = _get_fast_dot()
if exclude is None:
exclude = self.exclude
else:
exclude = list(set(self.exclude + list(exclude)))
_n_pca_comp = self._check_n_pca_components(self.n_pca_components)
if not(self.n_components_ <= _n_pca_comp <= self.max_pca_components):
raise ValueError('n_pca_components must be >= '
'n_components and <= max_pca_components.')
n_components = self.n_components_
logger.info('Transforming to ICA space (%i components)' % n_components)
# Apply first PCA
if self.pca_mean_ is not None:
data -= self.pca_mean_[:, None]
sel_keep = np.arange(n_components)
if include not in (None, []):
sel_keep = np.unique(include)
elif exclude not in (None, []):
sel_keep = np.setdiff1d(np.arange(n_components), exclude)
logger.info('Zeroing out %i ICA components'
% (n_components - len(sel_keep)))
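        # Build the combined (PCA + ICA) unmixing and mixing operators. The
        # product mixing[:, sel_keep] x unmixing[sel_keep, :] computed below
        # keeps only the retained sources, so excluded components are zeroed
        # out before projecting back to the (pre-whitened) sensor space.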
unmixing = np.eye(_n_pca_comp)
unmixing[:n_components, :n_components] = self.unmixing_matrix_
unmixing = np.dot(unmixing, self.pca_components_[:_n_pca_comp])
mixing = np.eye(_n_pca_comp)
mixing[:n_components, :n_components] = self.mixing_matrix_
mixing = np.dot(self.pca_components_[:_n_pca_comp].T, mixing)
if _n_pca_comp > n_components:
sel_keep = np.concatenate(
(sel_keep, range(n_components, _n_pca_comp)))
proj_mat = np.dot(mixing[:, sel_keep], unmixing[sel_keep, :])
data = fast_dot(proj_mat, data)
if self.pca_mean_ is not None:
data += self.pca_mean_[:, None]
# restore scaling
if self.noise_cov is None: # revert standardization
data *= self._pre_whitener
else:
data = fast_dot(linalg.pinv(self._pre_whitener), data)
return data
@verbose
def save(self, fname):
"""Store ICA solution into a fiff file.
Parameters
----------
fname : str
The absolute path of the file name to save the ICA solution into.
The file name should end with -ica.fif or -ica.fif.gz.
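        For example (the file name below is illustrative)::
            ica.save('sample_audvis-ica.fif')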
"""
if self.current_fit == 'unfitted':
raise RuntimeError('No fit available. Please first fit ICA')
check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz'))
logger.info('Writing ica solution to %s...' % fname)
fid = start_file(fname)
try:
_write_ica(fid, self)
except Exception:
os.remove(fname)
raise
end_file(fid)
return self
def copy(self):
"""Copy the ICA object
Returns
-------
ica : instance of ICA
The copied object.
"""
return deepcopy(self)
def plot_components(self, picks=None, ch_type=None, res=64, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=False, title=None, show=True, outlines='head',
contours=6, image_interp='bilinear', head_pos=None):
"""Project unmixing matrix on interpolated sensor topography.
Parameters
----------
picks : int | array-like | None
The indices of the sources to be plotted.
If None all are plotted in batches of 20.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
res : int
The resolution of the topomap image (n pixels along each side).
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
vmin : float | callable
            The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
            The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
title : str | None
Title to use.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will
be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure object.
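        For example, a sketch plotting the topographies of the first ten
        components for magnetometers, assuming a fitted ``ica``::
            ica.plot_components(picks=range(10), ch_type='mag')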
"""
return plot_ica_components(self, picks=picks, ch_type=ch_type,
res=res, layout=layout, vmin=vmin,
vmax=vmax, cmap=cmap, sensors=sensors,
colorbar=colorbar, title=title, show=show,
outlines=outlines, contours=contours,
image_interp=image_interp,
head_pos=head_pos)
def plot_sources(self, inst, picks=None, exclude=None, start=None,
stop=None, title=None, show=True, block=False):
"""Plot estimated latent sources given the unmixing matrix.
Typical usecases:
        1. plot evolution of latent sources over time (Raw input)
2. plot latent source around event related time windows (Epochs input)
3. plot time-locking in ICA space (Evoked input)
Parameters
----------
inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
The object to plot the sources from.
picks : ndarray | None.
The components to be displayed. If None, plot will show the
sources in the order as fitted.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
start : int
X-axis start index. If None from the beginning.
stop : int
X-axis stop index. If None to the end.
title : str | None
The figure title. If None a default is provided.
show : bool
If True, all open plots will be shown.
block : bool
Whether to halt program execution until the figure is closed.
Useful for interactive selection of components in raw and epoch
plotter. For evoked, this parameter has no effect. Defaults to
False.
Returns
-------
fig : instance of pyplot.Figure
The figure.
Notes
-----
For raw and epoch instances, it is possible to select components for
exclusion by clicking on the line. The selected components are added to
``ica.exclude`` on close. The independent components can be viewed as
        topographies by clicking on the component name on the left of the
main axes. The topography view tries to infer the correct electrode
layout from the data. This should work at least for Neuromag data.
.. versionadded:: 0.10.0
"""
return plot_ica_sources(self, inst=inst, picks=picks, exclude=exclude,
title=title, start=start, stop=stop, show=show,
block=block)
def plot_scores(self, scores, exclude=None, labels=None, axhline=None,
title='ICA component scores', figsize=(12, 6),
show=True):
"""Plot scores related to detected components.
Use this function to assess how well your score describes outlier
sources and how well you were detecting them.
Parameters
----------
scores : array_like of float, shape (n ica components,) | list of array
Scores based on arbitrary metric to characterize ICA components.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
labels : str | list | 'ecg' | 'eog' | None
The labels to consider for the axes tests. Defaults to None.
If list, should match the outer shape of `scores`.
If 'ecg' or 'eog', the ``labels_`` attributes will be looked up.
Note that '/' is used internally for sublabels specifying ECG and
EOG channels.
axhline : float
Draw horizontal line to e.g. visualize rejection threshold.
title : str
The figure title.
figsize : tuple of int
The figure size. Defaults to (12, 6).
show : bool
If True, all open plots will be shown.
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure object.
"""
return plot_ica_scores(
ica=self, scores=scores, exclude=exclude, labels=labels,
axhline=axhline, title=title, figsize=figsize, show=show)
def plot_overlay(self, inst, exclude=None, picks=None, start=None,
stop=None, title=None, show=True):
"""Overlay of raw and cleaned signals given the unmixing matrix.
This method helps visualizing signal quality and artifact rejection.
Parameters
----------
inst : instance of mne.io.Raw or mne.Evoked
The signals to be compared given the ICA solution. If Raw input,
            the raw data are displayed before and after cleaning. In a second
            panel, the cross-channel average will be displayed. Since dipolar
            sources will be canceled out, this display is sensitive to
            artifacts. If Evoked input, butterfly plots for clean and raw
            signals will be superimposed.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
picks : array-like of int | None (default)
Indices of channels to include (if None, all channels
are used that were included on fitting).
start : int
X-axis start index. If None from the beginning.
stop : int
X-axis stop index. If None to the end.
title : str
The figure title.
show : bool
If True, all open plots will be shown.
Returns
-------
fig : instance of pyplot.Figure
The figure.
"""
return plot_ica_overlay(self, inst=inst, exclude=exclude, picks=picks,
start=start, stop=stop, title=title, show=show)
def detect_artifacts(self, raw, start_find=None, stop_find=None,
ecg_ch=None, ecg_score_func='pearsonr',
ecg_criterion=0.1, eog_ch=None,
eog_score_func='pearsonr',
eog_criterion=0.1, skew_criterion=-1,
kurt_criterion=-1, var_criterion=0,
add_nodes=None):
"""Run ICA artifacts detection workflow.
        Note. This is still experimental and will most likely change over
        the next releases. For maximum control, use the workflow exposed in
        the examples.
Hints and caveats:
- It is highly recommended to bandpass filter ECG and EOG
data and pass them instead of the channel names as ecg_ch and eog_ch
arguments.
        - Please check your results. Detection by kurtosis and variance
may be powerful but misclassification of brain signals as
noise cannot be precluded.
- Consider using shorter times for start_find and stop_find than
for start and stop. It can save you much time.
Example invocation (taking advantage of the defaults)::
            ica.detect_artifacts(ecg_ch='MEG 1531', eog_ch='EOG 061')
Parameters
----------
raw : instance of Raw
Raw object to draw sources from.
start_find : int | float | None
First sample to include for artifact search. If float, data will be
interpreted as time in seconds. If None, data will be used from the
first sample.
stop_find : int | float | None
Last sample to not include for artifact search. If float, data will
be interpreted as time in seconds. If None, data will be used to
the last sample.
ecg_ch : str | ndarray | None
The `target` argument passed to ica.find_sources_raw. Either the
name of the ECG channel or the ECG time series. If None, this step
will be skipped.
ecg_score_func : str | callable
The `score_func` argument passed to ica.find_sources_raw. Either
the name of function supported by ICA or a custom function.
ecg_criterion : float | int | list-like | slice
            The indices of the sorted ecg scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
eog_ch : list | str | ndarray | None
The `target` argument or the list of target arguments subsequently
passed to ica.find_sources_raw. Either the name of the vertical EOG
channel or the corresponding EOG time series. If None, this step
will be skipped.
eog_score_func : str | callable
The `score_func` argument passed to ica.find_sources_raw. Either
the name of function supported by ICA or a custom function.
eog_criterion : float | int | list-like | slice
            The indices of the sorted eog scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
skew_criterion : float | int | list-like | slice
The indices of the sorted skewness scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
kurt_criterion : float | int | list-like | slice
            The indices of the sorted kurtosis scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
var_criterion : float | int | list-like | slice
            The indices of the sorted variance scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
add_nodes : list of ica_nodes
            Additional list of tuples carrying the following parameters:
(name : str, target : str | array, score_func : callable,
criterion : float | int | list-like | slice). This parameter is a
generalization of the artifact specific parameters above and has
the same structure. Example:
            add_nodes=('ECG phase lock', 'ECG 01', my_phase_lock_function, 0.5)
Returns
-------
self : instance of ICA
The ica object with the detected artifact indices marked for
exclusion
"""
logger.info(' Searching for artifacts...')
_detect_artifacts(self, raw=raw, start_find=start_find,
stop_find=stop_find, ecg_ch=ecg_ch,
ecg_score_func=ecg_score_func,
ecg_criterion=ecg_criterion,
eog_ch=eog_ch, eog_score_func=eog_score_func,
eog_criterion=eog_criterion,
skew_criterion=skew_criterion,
kurt_criterion=kurt_criterion,
var_criterion=var_criterion,
add_nodes=add_nodes)
return self
@verbose
def _check_n_pca_components(self, _n_pca_comp, verbose=None):
"""Aux function"""
if isinstance(_n_pca_comp, float):
_n_pca_comp = ((self.pca_explained_variance_ /
self.pca_explained_variance_.sum()).cumsum() <=
_n_pca_comp).sum()
logger.info('Selected %i PCA components by explained '
'variance' % _n_pca_comp)
elif _n_pca_comp is None:
_n_pca_comp = self.max_pca_components
elif _n_pca_comp < self.n_components_:
_n_pca_comp = self.n_components_
return _n_pca_comp
def _check_start_stop(raw, start, stop):
"""Aux function"""
return [c if (isinstance(c, int) or c is None) else
raw.time_as_index(c)[0] for c in (start, stop)]
@verbose
def ica_find_ecg_events(raw, ecg_source, event_id=999,
tstart=0.0, l_freq=5, h_freq=35, qrs_threshold='auto',
verbose=None):
"""Find ECG peaks from one selected ICA source
Parameters
----------
raw : instance of Raw
Raw object to draw sources from.
ecg_source : ndarray
ICA source resembling ECG to find peaks from.
event_id : int
The index to assign to found events.
tstart : float
Start detection after tstart seconds. Useful when beginning
of run is noisy.
    l_freq : float
        Low cut-off frequency in Hz.
    h_freq : float
        High cut-off frequency in Hz.
qrs_threshold : float | str
Between 0 and 1. qrs detection threshold. Can also be "auto" to
automatically choose the threshold that generates a reasonable
number of heartbeats (40-160 beats / min).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
ecg_events : array
Events.
ch_ECG : string
Name of channel used.
average_pulse : float.
Estimated average pulse.
"""
logger.info('Using ICA source to identify heart beats')
# detecting QRS and generating event file
ecg_events = qrs_detector(raw.info['sfreq'], ecg_source.ravel(),
tstart=tstart, thresh_value=qrs_threshold,
l_freq=l_freq, h_freq=h_freq)
n_events = len(ecg_events)
ecg_events = np.c_[ecg_events + raw.first_samp, np.zeros(n_events),
event_id * np.ones(n_events)]
return ecg_events
@verbose
def ica_find_eog_events(raw, eog_source=None, event_id=998, l_freq=1,
h_freq=10, verbose=None):
"""Locate EOG artifacts from one selected ICA source
Parameters
----------
raw : instance of Raw
The raw data.
eog_source : ndarray
ICA source resembling EOG to find peaks from.
event_id : int
The index to assign to found events.
l_freq : float
Low cut-off frequency in Hz.
h_freq : float
High cut-off frequency in Hz.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
eog_events : array
Events
"""
eog_events = _find_eog_events(eog_source[np.newaxis], event_id=event_id,
l_freq=l_freq, h_freq=h_freq,
sampling_rate=raw.info['sfreq'],
first_samp=raw.first_samp)
return eog_events
def _get_target_ch(container, target):
"""Aux function"""
# auto target selection
picks = pick_channels(container.ch_names, include=[target])
ref_picks = pick_types(container.info, meg=False, eeg=False, ref_meg=True)
if len(ref_picks) > 0:
picks = list(set(picks) - set(ref_picks))
if len(picks) == 0:
raise ValueError('%s not in channel list (%s)' %
(target, container.ch_names))
return picks
def _find_sources(sources, target, score_func):
"""Aux function"""
if isinstance(score_func, string_types):
score_func = get_score_funcs().get(score_func, score_func)
if not callable(score_func):
raise ValueError('%s is not a valid score_func.' % score_func)
scores = (score_func(sources, target) if target is not None
else score_func(sources, 1))
return scores
def _serialize(dict_, outer_sep=';', inner_sep=':'):
"""Aux function"""
s = []
for key, value in dict_.items():
if callable(value):
value = value.__name__
elif isinstance(value, int):
value = int(value)
elif isinstance(value, dict):
# py35 json does not support numpy int64
for subkey, subvalue in value.items():
if isinstance(subvalue, list):
if len(subvalue) > 0:
if isinstance(subvalue[0], (int, np.integer)):
value[subkey] = [int(i) for i in subvalue]
for cls in (np.random.RandomState, Covariance):
if isinstance(value, cls):
value = cls.__name__
s.append(key + inner_sep + json.dumps(value))
return outer_sep.join(s)
def _deserialize(str_, outer_sep=';', inner_sep=':'):
"""Aux Function"""
out = {}
for mapping in str_.split(outer_sep):
k, v = mapping.split(inner_sep, 1)
vv = json.loads(v)
out[k] = vv if not isinstance(vv, text_type) else str(vv)
return out
def _write_ica(fid, ica):
"""Write an ICA object
Parameters
----------
    fid : file
        The file descriptor.
    ica : instance of ICA
        The instance of ICA to write.
"""
ica_init = dict(noise_cov=ica.noise_cov,
n_components=ica.n_components,
n_pca_components=ica.n_pca_components,
max_pca_components=ica.max_pca_components,
current_fit=ica.current_fit)
if ica.info is not None:
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if ica.info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, ica.info['meas_id'])
# Write measurement info
write_meas_info(fid, ica.info)
end_block(fid, FIFF.FIFFB_MEAS)
start_block(fid, FIFF.FIFFB_MNE_ICA)
# ICA interface params
write_string(fid, FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS,
_serialize(ica_init))
# Channel names
if ica.ch_names is not None:
write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, ica.ch_names)
# samples on fit
n_samples = getattr(ica, 'n_samples_', None)
ica_misc = {'n_samples_': (None if n_samples is None else int(n_samples)),
'labels_': getattr(ica, 'labels_', None),
'method': getattr(ica, 'method', None)}
write_string(fid, FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS,
_serialize(ica_init))
    # ICA misc params
write_string(fid, FIFF.FIFF_MNE_ICA_MISC_PARAMS,
_serialize(ica_misc))
# Whitener
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_WHITENER, ica._pre_whitener)
# PCA components_
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_COMPONENTS,
ica.pca_components_)
# PCA mean_
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_MEAN, ica.pca_mean_)
# PCA explained_variance_
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR,
ica.pca_explained_variance_)
# ICA unmixing
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_MATRIX, ica.unmixing_matrix_)
# Write bad components
write_int(fid, FIFF.FIFF_MNE_ICA_BADS, ica.exclude)
# Done!
end_block(fid, FIFF.FIFFB_MNE_ICA)
@verbose
def read_ica(fname):
"""Restore ICA solution from fif file.
Parameters
----------
fname : str
Absolute path to fif file containing ICA matrices.
The file name should end with -ica.fif or -ica.fif.gz.
Returns
-------
ica : instance of ICA
The ICA estimator.
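    For example (the file name below is illustrative)::
        ica = read_ica('sample_audvis-ica.fif')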
"""
check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz'))
logger.info('Reading %s ...' % fname)
fid, tree, _ = fiff_open(fname)
try:
# we used to store bads that weren't part of the info...
info, meas = read_meas_info(fid, tree, clean_bads=True)
except ValueError:
logger.info('Could not find the measurement info. \n'
'Functionality requiring the info won\'t be'
' available.')
info = None
else:
info['filename'] = fname
ica_data = dir_tree_find(tree, FIFF.FIFFB_MNE_ICA)
if len(ica_data) == 0:
ica_data = dir_tree_find(tree, 123) # Constant 123 Used before v 0.11
if len(ica_data) == 0:
fid.close()
raise ValueError('Could not find ICA data')
my_ica_data = ica_data[0]
for d in my_ica_data['directory']:
kind = d.kind
pos = d.pos
if kind == FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS:
tag = read_tag(fid, pos)
ica_init = tag.data
elif kind == FIFF.FIFF_MNE_ROW_NAMES:
tag = read_tag(fid, pos)
ch_names = tag.data
elif kind == FIFF.FIFF_MNE_ICA_WHITENER:
tag = read_tag(fid, pos)
pre_whitener = tag.data
elif kind == FIFF.FIFF_MNE_ICA_PCA_COMPONENTS:
tag = read_tag(fid, pos)
pca_components = tag.data
elif kind == FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR:
tag = read_tag(fid, pos)
pca_explained_variance = tag.data
elif kind == FIFF.FIFF_MNE_ICA_PCA_MEAN:
tag = read_tag(fid, pos)
pca_mean = tag.data
elif kind == FIFF.FIFF_MNE_ICA_MATRIX:
tag = read_tag(fid, pos)
unmixing_matrix = tag.data
elif kind == FIFF.FIFF_MNE_ICA_BADS:
tag = read_tag(fid, pos)
exclude = tag.data
elif kind == FIFF.FIFF_MNE_ICA_MISC_PARAMS:
tag = read_tag(fid, pos)
ica_misc = tag.data
fid.close()
ica_init, ica_misc = [_deserialize(k) for k in (ica_init, ica_misc)]
current_fit = ica_init.pop('current_fit')
if ica_init['noise_cov'] == Covariance.__name__:
logger.info('Reading whitener drawn from noise covariance ...')
logger.info('Now restoring ICA solution ...')
# make sure dtypes are np.float64 to satisfy fast_dot
def f(x):
return x.astype(np.float64)
ica_init = dict((k, v) for k, v in ica_init.items()
if k in _get_args(ICA.__init__))
ica = ICA(**ica_init)
ica.current_fit = current_fit
ica.ch_names = ch_names.split(':')
ica._pre_whitener = f(pre_whitener)
ica.pca_mean_ = f(pca_mean)
ica.pca_components_ = f(pca_components)
ica.n_components_ = unmixing_matrix.shape[0]
ica.pca_explained_variance_ = f(pca_explained_variance)
ica.unmixing_matrix_ = f(unmixing_matrix)
ica.mixing_matrix_ = linalg.pinv(ica.unmixing_matrix_)
ica.exclude = [] if exclude is None else list(exclude)
ica.info = info
if 'n_samples_' in ica_misc:
ica.n_samples_ = ica_misc['n_samples_']
if 'labels_' in ica_misc:
ica.labels_ = ica_misc['labels_']
if 'method' in ica_misc:
ica.method = ica_misc['method']
logger.info('Ready.')
return ica
_ica_node = namedtuple('Node', 'name target score_func criterion')
def _detect_artifacts(ica, raw, start_find, stop_find, ecg_ch, ecg_score_func,
ecg_criterion, eog_ch, eog_score_func, eog_criterion,
skew_criterion, kurt_criterion, var_criterion,
add_nodes):
"""Aux Function"""
from scipy import stats
nodes = []
if ecg_ch is not None:
nodes += [_ica_node('ECG', ecg_ch, ecg_score_func, ecg_criterion)]
if eog_ch not in [None, []]:
if not isinstance(eog_ch, list):
eog_ch = [eog_ch]
for idx, ch in enumerate(eog_ch):
nodes += [_ica_node('EOG %02d' % idx, ch, eog_score_func,
eog_criterion)]
if skew_criterion is not None:
nodes += [_ica_node('skewness', None, stats.skew, skew_criterion)]
if kurt_criterion is not None:
nodes += [_ica_node('kurtosis', None, stats.kurtosis, kurt_criterion)]
if var_criterion is not None:
nodes += [_ica_node('variance', None, np.var, var_criterion)]
if add_nodes is not None:
nodes.extend(add_nodes)
for node in nodes:
scores = ica.score_sources(raw, start=start_find, stop=stop_find,
target=node.target,
score_func=node.score_func)
if isinstance(node.criterion, float):
found = list(np.where(np.abs(scores) > node.criterion)[0])
else:
found = list(np.atleast_1d(abs(scores).argsort()[node.criterion]))
case = (len(found), 's' if len(found) > 1 else '', node.name)
logger.info(' found %s artifact%s by %s' % case)
ica.exclude += found
logger.info('Artifact indices found:\n ' + str(ica.exclude).strip('[]'))
if len(set(ica.exclude)) != len(ica.exclude):
logger.info(' Removing duplicate indices...')
ica.exclude = list(set(ica.exclude))
logger.info('Ready.')
@verbose
def run_ica(raw, n_components, max_pca_components=100,
n_pca_components=64, noise_cov=None, random_state=None,
picks=None, start=None, stop=None, start_find=None,
stop_find=None, ecg_ch=None, ecg_score_func='pearsonr',
ecg_criterion=0.1, eog_ch=None, eog_score_func='pearsonr',
eog_criterion=0.1, skew_criterion=-1, kurt_criterion=-1,
var_criterion=0, add_nodes=None, verbose=None):
"""Run ICA decomposition on raw data and identify artifact sources
This function implements an automated artifact removal work flow.
Hints and caveats:
- It is highly recommended to bandpass filter ECG and EOG
data and pass them instead of the channel names as ecg_ch and eog_ch
arguments.
- Please check your results. Detection by kurtosis and variance
can be powerful but misclassification of brain signals as
noise cannot be precluded. If you are not sure set those to None.
- Consider using shorter times for start_find and stop_find than
for start and stop. It can save you much time.
Example invocation (taking advantage of defaults)::
ica = run_ica(raw, n_components=.9, start_find=10000, stop_find=12000,
ecg_ch='MEG 1531', eog_ch='EOG 061')
Parameters
----------
raw : instance of Raw
The raw data to decompose.
n_components : int | float | None
        The number of components used for ICA decomposition. If int, it must
        be smaller than max_pca_components. If None, all PCA components will
        be used. If float between 0 and 1, components will be selected by the
        cumulative percentage of explained variance.
max_pca_components : int | None
The number of components used for PCA decomposition. If None, no
dimension reduction will be applied and max_pca_components will equal
the number of channels supplied on decomposing data.
n_pca_components : int | float
The number of PCA components used after ICA recomposition. The ensuing
attribute allows one to balance noise reduction against potential loss of
features due to dimensionality reduction. If greater than
``self.n_components_``, the next ``n_pca_components`` minus
``n_components_`` PCA components will be added before restoring the
sensor space data. The attribute is updated each time the corresponding
parameter in .pick_sources_raw or .pick_sources_epochs is changed.
noise_cov : None | instance of mne.cov.Covariance
Noise covariance used for whitening. If None, channels are just
z-scored.
random_state : None | int | instance of np.random.RandomState
np.random.RandomState to initialize the FastICA estimation.
As the estimation is non-deterministic it can be useful to
fix the seed to have reproducible results.
picks : array-like of int
Channels to be included. This selection remains throughout the
initialized ICA solution. If None only good data channels are used.
start : int | float | None
First sample to include for decomposition. If float, data will be
interpreted as time in seconds. If None, data will be used from the
first sample.
stop : int | float | None
Last sample not to include for decomposition. If float, data will be
interpreted as time in seconds. If None, data will be used to the
last sample.
start_find : int | float | None
First sample to include for artifact search. If float, data will be
interpreted as time in seconds. If None, data will be used from the
first sample.
stop_find : int | float | None
Last sample not to include for artifact search. If float, data will be
interpreted as time in seconds. If None, data will be used to the last
sample.
ecg_ch : str | ndarray | None
The ``target`` argument passed to ica.find_sources_raw. Either the
name of the ECG channel or the ECG time series. If None, this step
will be skipped.
ecg_score_func : str | callable
The ``score_func`` argument passed to ica.find_sources_raw. Either
the name of a function supported by ICA or a custom function.
ecg_criterion : float | int | list-like | slice
The indices of the sorted ECG scores. If float, sources with absolute
scores greater than the criterion will be marked for exclusion. Else,
the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
eog_ch : list | str | ndarray | None
The ``target`` argument or the list of target arguments subsequently
passed to ica.find_sources_raw. Either the name of the vertical EOG
channel or the corresponding EOG time series. If None, this step
will be skipped.
eog_score_func : str | callable
The ``score_func`` argument passed to ica.find_sources_raw. Either
the name of a function supported by ICA or a custom function.
eog_criterion : float | int | list-like | slice
The indices of the sorted EOG scores. If float, sources with absolute
scores greater than the criterion will be marked for exclusion. Else,
the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
skew_criterion : float | int | list-like | slice
The indices of the sorted skewness scores. If float, sources with absolute
scores greater than the criterion will be marked for exclusion. Else,
the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
kurt_criterion : float | int | list-like | slice
The indices of the sorted kurtosis scores. If float, sources with absolute
scores greater than the criterion will be marked for exclusion. Else,
the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
var_criterion : float | int | list-like | slice
The indices of the sorted variance scores. If float, sources with absolute
scores greater than the criterion will be marked for exclusion. Else,
the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
add_nodes : list of ica_nodes
Additional list of tuples carrying the following parameters:
(name : str, target : str | array, score_func : callable,
criterion : float | int | list-like | slice). This parameter is a
generalization of the artifact specific parameters above and has
the same structure. Example::
add_nodes=[('ECG phase lock', 'ECG 01', my_phase_lock_function, 0.5)]
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
ica : instance of ICA
The ica object with detected artifact sources marked for exclusion.
"""
ica = ICA(n_components=n_components, max_pca_components=max_pca_components,
n_pca_components=n_pca_components, noise_cov=noise_cov,
random_state=random_state, verbose=verbose)
ica.fit(raw, start=start, stop=stop, picks=picks)
logger.info('%s' % ica)
logger.info(' Now searching for artifacts...')
_detect_artifacts(ica=ica, raw=raw, start_find=start_find,
stop_find=stop_find, ecg_ch=ecg_ch,
ecg_score_func=ecg_score_func,
ecg_criterion=ecg_criterion, eog_ch=eog_ch,
eog_score_func=eog_score_func,
eog_criterion=eog_criterion,
skew_criterion=skew_criterion,
kurt_criterion=kurt_criterion,
var_criterion=var_criterion,
add_nodes=add_nodes)
return ica
@verbose
def _band_pass_filter(ica, sources, target, l_freq, h_freq, verbose=None):
if l_freq is not None and h_freq is not None:
logger.info('... filtering ICA sources')
# use fft here; a steeper filter is better for this purpose
sources = band_pass_filter(sources, ica.info['sfreq'],
l_freq, h_freq, method='fft',
verbose=verbose)
logger.info('... filtering target')
target = band_pass_filter(target, ica.info['sfreq'],
l_freq, h_freq, method='fft',
verbose=verbose)
elif l_freq is not None or h_freq is not None:
raise ValueError('Must specify both pass bands')
return sources, target
# #############################################################################
# CORRMAP
def _get_ica_map(ica, components=None):
"""Get ICA topomap for components"""
fast_dot = _get_fast_dot()
if components is None:
components = list(range(ica.n_components_))
maps = fast_dot(ica.mixing_matrix_[:, components].T,
ica.pca_components_[:ica.n_components_])
return maps
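# The array returned by _get_ica_map has shape (len(components), n_channels):
# each row is the sensor-space topography of one independent component,
# obtained by projecting its mixing weights through the retained PCA
# components.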
def _find_max_corrs(all_maps, target, threshold):
"""Compute correlations between template and target components"""
all_corrs = [compute_corr(target, subj.T) for subj in all_maps]
abs_corrs = [np.abs(a) for a in all_corrs]
corr_polarities = [np.sign(a) for a in all_corrs]
if threshold <= 1:
max_corrs = [list(np.nonzero(s_corr > threshold)[0])
for s_corr in abs_corrs]
else:
max_corrs = [list(find_outliers(s_corr, threshold=threshold))
for s_corr in abs_corrs]
am = [l[i] for l, i_s in zip(abs_corrs, max_corrs)
for i in i_s]
median_corr_with_target = np.median(am) if len(am) > 0 else 0
polarities = [l[i] for l, i_s in zip(corr_polarities, max_corrs)
for i in i_s]
maxmaps = [l[i] for l, i_s in zip(all_maps, max_corrs)
for i in i_s]
if len(maxmaps) == 0:
return [], 0, 0, []
newtarget = np.zeros(maxmaps[0].size)
std_of_maps = np.std(np.asarray(maxmaps))
mean_of_maps = np.mean(np.asarray(maxmaps))  # mean, not std, of the maps
for maxmap, polarity in zip(maxmaps, polarities):
newtarget += (maxmap / std_of_maps - mean_of_maps) * polarity
newtarget /= len(maxmaps)
newtarget *= std_of_maps
sim_i_o = np.abs(np.corrcoef(target, newtarget)[1, 0])
return newtarget, median_corr_with_target, sim_i_o, max_corrs
def _plot_corrmap(data, subjs, indices, ch_type, ica, label, show, outlines,
layout, cmap, contours, template=True):
"""Customized ica.plot_components for corrmap"""
if not template:
title = 'Detected components'
if label is not None:
title += ' of type ' + label
else:
title = "Supplied template"
picks = list(range(len(data)))
p = 20
if len(picks) > p: # plot components by sets of 20
n_components = len(picks)
figs = [_plot_corrmap(data[k:k + p], subjs[k:k + p],
indices[k:k + p], ch_type, ica, label, show,
outlines=outlines, layout=layout, cmap=cmap,
contours=contours)
for k in range(0, n_components, p)]
return figs
elif np.isscalar(picks):
picks = [picks]
data_picks, pos, merge_grads, names, _ = _prepare_topo_plot(
ica, ch_type, layout)
pos, outlines = _check_outlines(pos, outlines)
data = np.atleast_2d(data)
data = data[:, data_picks]
# prepare data for iteration
fig, axes = _prepare_trellis(len(picks), max_col=5)
fig.suptitle(title)
if merge_grads:
from ..channels.layout import _merge_grad_data
for ii, data_, ax, subject, idx in zip(picks, data, axes, subjs, indices):
if template:
ttl = 'Subj. {0}, IC {1}'.format(subject, idx)
ax.set_title(ttl, fontsize=12)
data_ = _merge_grad_data(data_) if merge_grads else data_
vmin_, vmax_ = _setup_vmin_vmax(data_, None, None)
plot_topomap(data_.flatten(), pos, vmin=vmin_, vmax=vmax_,
res=64, axes=ax, cmap=cmap, outlines=outlines,
image_mask=None, contours=contours, show=False,
image_interp='bilinear')[0]
_hide_frame(ax)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.8)
fig.canvas.draw()
plt_show(show)
return fig
@verbose
def corrmap(icas, template, threshold="auto", label=None, ch_type="eeg",
plot=True, show=True, verbose=None, outlines='head', layout=None,
sensors=True, contours=6, cmap=None):
"""Find similar Independent Components across subjects by map similarity.
Corrmap (Viola et al. 2009 Clin Neurophysiol) identifies the best group
match to a supplied template. Typically, feed it a list of fitted ICAs and
a template IC, for example, the blink for the first subject, to identify
specific ICs across subjects.
The specific procedure consists of two iterations. In a first step, the
maps best correlating with the template are identified. In the second step, the
analysis is repeated with the mean of the maps identified in the first
stage.
Run with `plot` and `show` set to `True` and `label=None` to find
good parameters. Then, run with `label` set to apply the labelling to
the ICA objects. (Running with `plot=False` and `label=None` does
nothing.)
Outputs a list of fitted ICAs with the indices of the marked ICs in a
specified field.
The original Corrmap website: www.debener.de/corrmap/corrmapplugin1.html
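Example invocation (a sketch; the template tuple and label used here are
illustrative)::
corrmap(icas, template=(0, 0), threshold='auto', label='blink',
ch_type='eeg', plot=False)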
Parameters
----------
icas : list of mne.preprocessing.ICA
A list of fitted ICA objects.
template : tuple | np.ndarray, shape (n_components,)
Either a tuple with two elements (int, int): the index of the ICA
object in the supplied list from which the template should be taken,
and the index of the template IC within that object. E.g., if
template=(1, 0), the first IC of the 2nd ICA object is used.
Or a numpy array whose size corresponds to each IC map from the
supplied maps, in which case this map is chosen as the template.
threshold : "auto" | list of float | float
Correlation threshold for identifying ICs.
If "auto", search for the best map by trying all correlations between
0.6 and 0.95. In the original proposal, lower values are considered,
but this is not yet implemented.
If list of floats, search for the best map in the specified range of
correlation strengths. As these are correlation values, they must be
between 0 and 1.
If float > 0, select ICs correlating better than this.
If float > 1, use find_outliers to identify ICs within subjects (not in
the original Corrmap).
Defaults to "auto".
label : None | str
If not None, categorised ICs are stored in a dictionary "labels_" under
the given name. Preexisting entries will be appended to (excluding
repeats), not overwritten. If None, a dry run is performed and
the supplied ICs are not changed.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
The channel type to plot. Defaults to 'eeg'.
plot : bool
Should constructed template and selected maps be plotted? Defaults
to True.
show : bool
Show figures if True.
layout : None | Layout | list of Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). Or a list of Layout if projections
are from different sensor types.
cmap : None | matplotlib colormap
Colormap for the plot. If ``None``, defaults to 'Reds_r' for norm data,
otherwise to 'RdBu_r'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
outlines : 'head' | dict | None
The outlines to be drawn. If 'head', a head scheme will be drawn. If
dict, each key refers to a tuple of x and y positions. The values in
'mask_pos' will serve as image mask. If None, nothing will be drawn.
Defaults to 'head'. If dict, the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside the
outline. Moreover, a matplotlib patch object can be passed for
advanced masking options, either directly or as a function that returns
patches (required for multi-axis plots).
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
template_fig : fig
Figure showing the template.
labelled_ics : fig
Figure showing the labelled ICs in all ICA decompositions.
"""
if not isinstance(plot, bool):
raise ValueError("`plot` must be of type `bool`")
if threshold == 'auto':
threshold = np.arange(60, 95, dtype=np.float64) / 100.
all_maps = [_get_ica_map(ica) for ica in icas]
# check if template is an index to one IC in one ICA object, or an array
if len(template) == 2:
target = all_maps[template[0]][template[1]]
is_subject = True
elif template.ndim == 1 and len(template) == all_maps[0].shape[1]:
target = template
is_subject = False
else:
raise ValueError("`template` must be a length-2 tuple or an array the "
"size of the ICA maps.")
template_fig, labelled_ics = None, None
if plot is True:
if is_subject: # plotting from an ICA object
ttl = 'Template from subj. {0}'.format(str(template[0]))
template_fig = icas[template[0]].plot_components(
picks=template[1], ch_type=ch_type, title=ttl,
outlines=outlines, cmap=cmap, contours=contours, layout=layout,
show=show)
else: # plotting an array
template_fig = _plot_corrmap([template], [0], [0], ch_type,
icas[0].copy(), "Template",
outlines=outlines, cmap=cmap,
contours=contours, layout=layout,
show=show, template=True)
template_fig.subplots_adjust(top=0.8)
template_fig.canvas.draw()
# first run: use user-selected map
if isinstance(threshold, (int, float)):
if len(all_maps) == 0:
logger.info('No component detected using find_outliers.'
' Consider using threshold="auto"')
return icas
nt, mt, s, mx = _find_max_corrs(all_maps, target, threshold)
elif len(threshold) > 1:
paths = [_find_max_corrs(all_maps, target, t) for t in threshold]
# find iteration with highest avg correlation with target
nt, mt, s, mx = paths[np.argmax([path[2] for path in paths])]
# second run: use output from first run
if isinstance(threshold, (int, float)):
if len(all_maps) == 0 or len(nt) == 0:
if threshold > 1:
logger.info('No component detected using find_outliers. '
'Consider using threshold="auto"')
return icas
nt, mt, s, mx = _find_max_corrs(all_maps, nt, threshold)
elif len(threshold) > 1:
paths = [_find_max_corrs(all_maps, nt, t) for t in threshold]
# find iteration with highest avg correlation with target
nt, mt, s, mx = paths[np.argmax([path[1] for path in paths])]
allmaps, indices, subjs, nones = [list() for _ in range(4)]
logger.info('Median correlation with constructed map: %0.3f' % mt)
if plot is True:
logger.info('Displaying selected ICs per subject.')
for ii, (ica, max_corr) in enumerate(zip(icas, mx)):
if (label is not None) and (not hasattr(ica, 'labels_')):
ica.labels_ = dict()
if len(max_corr) > 0:
if isinstance(max_corr[0], np.ndarray):
max_corr = max_corr[0]
if label is not None:
ica.labels_[label] = list(set(list(max_corr) +
ica.labels_.get(label, list())))
if plot is True:
allmaps.extend(_get_ica_map(ica, components=max_corr))
subjs.extend([ii] * len(max_corr))
indices.extend(max_corr)
else:
if (label is not None) and (label not in ica.labels_):
ica.labels_[label] = list()
nones.append(ii)
if len(nones) == 0:
logger.info('At least 1 IC detected for each subject.')
else:
logger.info('No maps selected for subject(s) ' +
', '.join([str(x) for x in nones]) +
', consider a more liberal threshold.')
if plot is True:
labelled_ics = _plot_corrmap(allmaps, subjs, indices, ch_type, ica,
label, outlines=outlines, cmap=cmap,
contours=contours, layout=layout,
show=show)
return template_fig, labelled_ics
else:
return None
| {
"content_hash": "55cb7e82d9188a8cb578aaf0b86b0109",
"timestamp": "",
"source": "github",
"line_count": 2494,
"max_line_length": 79,
"avg_line_length": 42.09542902967121,
"alnum_prop": 0.578181852818471,
"repo_name": "ARudiuk/mne-python",
"id": "65f2dfa7fca16a7a7d6e2ba069963f16c28fac7f",
"size": "105196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/preprocessing/ica.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3769"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "5086775"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pipes
import pwd
import random
import re
import string
from ansible.compat.six import iteritems, string_types
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.utils.boolean import boolean
__all__ = ['PlayContext']
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the PlayContext
# object. The dictionary values are tuples, to account for aliases
# in variable names.
MAGIC_VARIABLE_MAPPING = dict(
connection = ('ansible_connection',),
remote_addr = ('ansible_ssh_host', 'ansible_host'),
remote_user = ('ansible_ssh_user', 'ansible_user'),
port = ('ansible_ssh_port', 'ansible_port'),
ssh_executable = ('ansible_ssh_executable',),
accelerate_port = ('ansible_accelerate_port',),
password = ('ansible_ssh_pass', 'ansible_password'),
private_key_file = ('ansible_ssh_private_key_file', 'ansible_private_key_file'),
pipelining = ('ansible_ssh_pipelining', 'ansible_pipelining'),
shell = ('ansible_shell_type',),
become = ('ansible_become',),
become_method = ('ansible_become_method',),
become_user = ('ansible_become_user',),
become_pass = ('ansible_become_password','ansible_become_pass'),
become_exe = ('ansible_become_exe',),
become_flags = ('ansible_become_flags',),
ssh_common_args = ('ansible_ssh_common_args',),
docker_extra_args= ('ansible_docker_extra_args',),
sftp_extra_args = ('ansible_sftp_extra_args',),
scp_extra_args = ('ansible_scp_extra_args',),
ssh_extra_args = ('ansible_ssh_extra_args',),
sudo = ('ansible_sudo',),
sudo_user = ('ansible_sudo_user',),
sudo_pass = ('ansible_sudo_password', 'ansible_sudo_pass'),
sudo_exe = ('ansible_sudo_exe',),
sudo_flags = ('ansible_sudo_flags',),
su = ('ansible_su',),
su_user = ('ansible_su_user',),
su_pass = ('ansible_su_password', 'ansible_su_pass'),
su_exe = ('ansible_su_exe',),
su_flags = ('ansible_su_flags',),
executable = ('ansible_shell_executable',),
module_compression = ('ansible_module_compression',),
)
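# For illustration (a sketch with made-up values): given host variables
#     {'ansible_ssh_user': 'deploy', 'ansible_port': 2222}
# the mapping above leads set_task_and_variable_override() below to do the
# equivalent of
#     play_context.remote_user = 'deploy'
#     play_context.port = 2222
# For each field, the first alias found in the host's variables (in tuple
# order) is the one applied.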
b_SU_PROMPT_LOCALIZATIONS = [
to_bytes('Password'),
to_bytes('암호'),
to_bytes('パスワード'),
to_bytes('Adgangskode'),
to_bytes('Contraseña'),
to_bytes('Contrasenya'),
to_bytes('Hasło'),
to_bytes('Heslo'),
to_bytes('Jelszó'),
to_bytes('Lösenord'),
to_bytes('Mật khẩu'),
to_bytes('Mot de passe'),
to_bytes('Parola'),
to_bytes('Parool'),
to_bytes('Pasahitza'),
to_bytes('Passord'),
to_bytes('Passwort'),
to_bytes('Salasana'),
to_bytes('Sandi'),
to_bytes('Senha'),
to_bytes('Wachtwoord'),
to_bytes('ססמה'),
to_bytes('Лозинка'),
to_bytes('Парола'),
to_bytes('Пароль'),
to_bytes('गुप्तशब्द'),
to_bytes('शब्दकूट'),
to_bytes('సంకేతపదము'),
to_bytes('හස්පදය'),
to_bytes('密码'),
to_bytes('密碼'),
to_bytes('口令'),
]
TASK_ATTRIBUTE_OVERRIDES = (
'become',
'become_user',
'become_pass',
'become_method',
'become_flags',
'connection',
'docker_extra_args',
'delegate_to',
'no_log',
'remote_user',
)
RESET_VARS = (
'ansible_connection',
'ansible_docker_extra_args',
'ansible_ssh_host',
'ansible_ssh_pass',
'ansible_ssh_port',
'ansible_ssh_user',
'ansible_ssh_private_key_file',
'ansible_ssh_pipelining',
'ansible_ssh_executable',
'ansible_user',
'ansible_host',
'ansible_port',
)
class PlayContext(Base):
'''
This class is used to consolidate the connection information for
hosts in a play and child tasks, where the task may override some
connection/authentication information.
'''
# connection fields, some are inherited from Base:
# (connection, port, remote_user, environment, no_log)
_docker_extra_args = FieldAttribute(isa='string')
_remote_addr = FieldAttribute(isa='string')
_password = FieldAttribute(isa='string')
_private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
_timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
_shell = FieldAttribute(isa='string')
_ssh_args = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_ARGS)
_ssh_common_args = FieldAttribute(isa='string')
_sftp_extra_args = FieldAttribute(isa='string')
_scp_extra_args = FieldAttribute(isa='string')
_ssh_extra_args = FieldAttribute(isa='string')
_ssh_executable = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_EXECUTABLE)
_connection_lockfd= FieldAttribute(isa='int')
_pipelining = FieldAttribute(isa='bool', default=C.ANSIBLE_SSH_PIPELINING)
_accelerate = FieldAttribute(isa='bool', default=False)
_accelerate_ipv6 = FieldAttribute(isa='bool', default=False, always_post_validate=True)
_accelerate_port = FieldAttribute(isa='int', default=C.ACCELERATE_PORT, always_post_validate=True)
_executable = FieldAttribute(isa='string', default=C.DEFAULT_EXECUTABLE)
_module_compression = FieldAttribute(isa='string', default=C.DEFAULT_MODULE_COMPRESSION)
# privilege escalation fields
_become = FieldAttribute(isa='bool')
_become_method = FieldAttribute(isa='string')
_become_user = FieldAttribute(isa='string')
_become_pass = FieldAttribute(isa='string')
_become_exe = FieldAttribute(isa='string')
_become_flags = FieldAttribute(isa='string')
_prompt = FieldAttribute(isa='string')
# backwards compatibility fields for sudo/su
_sudo_exe = FieldAttribute(isa='string')
_sudo_flags = FieldAttribute(isa='string')
_sudo_pass = FieldAttribute(isa='string')
_su_exe = FieldAttribute(isa='string')
_su_flags = FieldAttribute(isa='string')
_su_pass = FieldAttribute(isa='string')
# general flags
_verbosity = FieldAttribute(isa='int', default=0)
_only_tags = FieldAttribute(isa='set', default=set())
_skip_tags = FieldAttribute(isa='set', default=set())
_check_mode = FieldAttribute(isa='bool', default=False)
_force_handlers = FieldAttribute(isa='bool', default=False)
_start_at_task = FieldAttribute(isa='string')
_step = FieldAttribute(isa='bool', default=False)
_diff = FieldAttribute(isa='bool', default=False)
def __init__(self, play=None, options=None, passwords=None, connection_lockfd=None):
super(PlayContext, self).__init__()
if passwords is None:
passwords = {}
self.password = passwords.get('conn_pass','')
self.become_pass = passwords.get('become_pass','')
self.prompt = ''
self.success_key = ''
# a file descriptor to be used during locking operations
self.connection_lockfd = connection_lockfd
# set options before play to allow play to override them
if options:
self.set_options(options)
if play:
self.set_play(play)
def set_play(self, play):
'''
Configures this connection information instance with data from
the play class.
'''
# special handling for accelerated mode, as it is set in a separate
# play option from the connection parameter
self.accelerate = play.accelerate
self.accelerate_ipv6 = play.accelerate_ipv6
self.accelerate_port = play.accelerate_port
if play.connection:
self.connection = play.connection
if play.remote_user:
self.remote_user = play.remote_user
if play.port:
self.port = int(play.port)
if play.become is not None:
self.become = play.become
if play.become_method:
self.become_method = play.become_method
if play.become_user:
self.become_user = play.become_user
if play.force_handlers is not None:
self.force_handlers = play.force_handlers
def set_options(self, options):
'''
Configures this connection information instance with data from
options specified by the user on the command line. These have a
lower precedence than those set on the play or host.
'''
# privilege escalation
self.become = options.become
self.become_method = options.become_method
self.become_user = options.become_user
self.check_mode = boolean(options.check)
# get ssh options FIXME: make these common to all connections
for flag in ['ssh_common_args', 'docker_extra_args', 'sftp_extra_args', 'scp_extra_args', 'ssh_extra_args']:
setattr(self, flag, getattr(options,flag, ''))
# general flags (should we move out?)
for flag in ['connection','remote_user', 'private_key_file', 'verbosity', 'force_handlers', 'step', 'start_at_task', 'diff']:
attribute = getattr(options, flag, False)
if attribute:
setattr(self, flag, attribute)
if hasattr(options, 'timeout') and options.timeout:
self.timeout = int(options.timeout)
# get the tag info from options, converting a comma-separated list
# of values into a proper list if need be. We check to see if the
# options have the attribute, as it is not always added via the CLI
if hasattr(options, 'tags'):
if isinstance(options.tags, list):
self.only_tags.update(options.tags)
elif isinstance(options.tags, string_types):
self.only_tags.update(options.tags.split(','))
if len(self.only_tags) == 0:
self.only_tags = set(['all'])
if hasattr(options, 'skip_tags'):
if isinstance(options.skip_tags, list):
self.skip_tags.update(options.skip_tags)
elif isinstance(options.skip_tags, string_types):
self.skip_tags.update(options.skip_tags.split(','))
def set_task_and_variable_override(self, task, variables, templar):
'''
Sets attributes from the task if they are set, which will override
those from the play.
'''
new_info = self.copy()
# loop through a subset of attributes on the task object and set
# connection fields based on their values
for attr in TASK_ATTRIBUTE_OVERRIDES:
if hasattr(task, attr):
attr_val = getattr(task, attr)
if attr_val is not None:
setattr(new_info, attr, attr_val)
# next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
# connection info object with 'magic' variables from the variable list.
# If the value 'ansible_delegated_vars' is in the variables, it means
# we have a delegated-to host, so we check there first before looking
# at the variables in general
if task.delegate_to is not None:
# In the case of a loop, the delegated_to host may have been
# templated based on the loop variable, so we try and locate
# the host name in the delegated variable dictionary here
delegated_host_name = templar.template(task.delegate_to)
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(delegated_host_name, dict())
delegated_transport = C.DEFAULT_TRANSPORT
for transport_var in MAGIC_VARIABLE_MAPPING.get('connection'):
if transport_var in delegated_vars:
delegated_transport = delegated_vars[transport_var]
break
# make sure this delegated_to host has something set for its remote
# address, otherwise we default to connecting to it by name. This
# may happen when users put an IP entry into their inventory, or if
# they rely on DNS for a non-inventory hostname
for address_var in MAGIC_VARIABLE_MAPPING.get('remote_addr'):
if address_var in delegated_vars:
break
else:
display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % delegated_host_name)
delegated_vars['ansible_host'] = delegated_host_name
# reset the port back to the default if none was specified, to prevent
# the delegated host from inheriting the original host's setting
for port_var in MAGIC_VARIABLE_MAPPING.get('port'):
if port_var in delegated_vars:
break
else:
if delegated_transport == 'winrm':
delegated_vars['ansible_port'] = 5986
else:
delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT
# and likewise for the remote user
for user_var in MAGIC_VARIABLE_MAPPING.get('remote_user'):
if user_var in delegated_vars and delegated_vars[user_var]:
break
else:
delegated_vars['ansible_user'] = task.remote_user or self.remote_user
else:
delegated_vars = dict()
# setup shell
for exe_var in MAGIC_VARIABLE_MAPPING.get('executable'):
if exe_var in variables:
setattr(new_info, 'executable', variables.get(exe_var))
attrs_considered = []
for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING):
for variable_name in variable_names:
if attr in attrs_considered:
continue
# if this is a delegated task, ONLY use the delegated host's vars and avoid the vars of the host it is delegated for
if task.delegate_to is not None:
if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
setattr(new_info, attr, delegated_vars[variable_name])
attrs_considered.append(attr)
elif variable_name in variables:
setattr(new_info, attr, variables[variable_name])
attrs_considered.append(attr)
# no else, as no other vars should be considered
# become legacy updates -- from commandline
if not new_info.become_pass:
if new_info.become_method == 'sudo' and new_info.sudo_pass:
setattr(new_info, 'become_pass', new_info.sudo_pass)
elif new_info.become_method == 'su' and new_info.su_pass:
setattr(new_info, 'become_pass', new_info.su_pass)
# become legacy updates -- from inventory file (inventory overrides
# commandline)
for become_pass_name in MAGIC_VARIABLE_MAPPING.get('become_pass'):
if become_pass_name in variables:
break
else: # This is a for-else
if new_info.become_method == 'sudo':
for sudo_pass_name in MAGIC_VARIABLE_MAPPING.get('sudo_pass'):
if sudo_pass_name in variables:
setattr(new_info, 'become_pass', variables[sudo_pass_name])
break
if new_info.become_method == 'su':
for su_pass_name in MAGIC_VARIABLE_MAPPING.get('su_pass'):
if su_pass_name in variables:
setattr(new_info, 'become_pass', variables[su_pass_name])
break
# make sure we get port defaults if needed
if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
new_info.port = int(C.DEFAULT_REMOTE_PORT)
# special overrides for the connection setting
if len(delegated_vars) > 0:
# in the event that we were using local before make sure to reset the
# connection type to the default transport for the delegated-to host,
# if not otherwise specified
for connection_type in MAGIC_VARIABLE_MAPPING.get('connection'):
if connection_type in delegated_vars:
break
else:
remote_addr_local = new_info.remote_addr in C.LOCALHOST
inv_hostname_local = delegated_vars.get('inventory_hostname') in C.LOCALHOST
if remote_addr_local and inv_hostname_local:
setattr(new_info, 'connection', 'local')
elif getattr(new_info, 'connection', None) == 'local' and (not remote_addr_local or not inv_hostname_local):
setattr(new_info, 'connection', C.DEFAULT_TRANSPORT)
# if the final connection type is local, reset the remote_user value
# to that of the currently logged in user, to ensure any become settings
# are obeyed correctly
# additionally, we need to do this check after final connection has been
# correctly set above ...
if new_info.connection == 'local':
new_info.remote_user = pwd.getpwuid(os.getuid()).pw_name
# set no_log to default if it was not previously set
if new_info.no_log is None:
new_info.no_log = C.DEFAULT_NO_LOG
# set become defaults if not previously set
task.set_become_defaults(new_info.become, new_info.become_method, new_info.become_user)
if task.always_run:
display.deprecated("always_run is deprecated. Use check_mode = no instead.", version="2.4", removed=False)
new_info.check_mode = False
# check_mode replaces always_run, overwrite always_run if both are given
if task.check_mode is not None:
new_info.check_mode = task.check_mode
return new_info
def make_become_cmd(self, cmd, executable=None):
""" helper function to create privilege escalation commands """
prompt = None
success_key = None
self.prompt = None
if self.become:
if not executable:
executable = self.executable
becomecmd = None
randbits = ''.join(random.choice(string.ascii_lowercase) for x in range(32))
success_key = 'BECOME-SUCCESS-%s' % randbits
success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))
if executable:
command = '%s -c %s' % (executable, success_cmd)
else:
command = success_cmd
# set executable to use for the privilege escalation method, with various overrides
exe = self.become_exe or \
getattr(self, '%s_exe' % self.become_method, None) or \
C.DEFAULT_BECOME_EXE or \
getattr(C, 'DEFAULT_%s_EXE' % self.become_method.upper(), None) or \
self.become_method
# set flags to use for the privilege escalation method, with various overrides
flags = self.become_flags or \
getattr(self, '%s_flags' % self.become_method, None) or \
C.DEFAULT_BECOME_FLAGS or \
getattr(C, 'DEFAULT_%s_FLAGS' % self.become_method.upper(), None) or \
''
if self.become_method == 'sudo':
# If we have a password, we run sudo with a randomly-generated
# prompt set using -p. Otherwise we run it with default -n, which makes
# it fail if it would have prompted for a password.
# Cannot rely on -n as it can be removed from defaults, which should be
# done for older versions of sudo that do not support the option.
#
# Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with pipes.quote()
# and pass the quoted string to the user's shell.
# force quick error if password is required but not supplied, should prevent sudo hangs.
if self.become_pass:
prompt = '[sudo via ansible, key=%s] password: ' % randbits
becomecmd = '%s %s -p "%s" -u %s %s' % (exe, flags.replace('-n',''), prompt, self.become_user, command)
else:
becomecmd = '%s %s -u %s %s' % (exe, flags, self.become_user, command)
elif self.become_method == 'su':
# pass a callable to examine the prompt, as a simple string comparison isn't good enough with su
def detect_su_prompt(b_data):
b_password_string = b"|".join([b'(\w+\'s )?' + x for x in b_SU_PROMPT_LOCALIZATIONS])
# Colon or unicode fullwidth colon
b_password_string = b_password_string + to_bytes(u' ?(:|:) ?')
b_SU_PROMPT_LOCALIZATIONS_RE = re.compile(b_password_string, flags=re.IGNORECASE)
return bool(b_SU_PROMPT_LOCALIZATIONS_RE.match(b_data))
prompt = detect_su_prompt
becomecmd = '%s %s %s -c %s' % (exe, flags, self.become_user, pipes.quote(command))
elif self.become_method == 'pbrun':
prompt='Password:'
becomecmd = '%s %s -u %s %s' % (exe, flags, self.become_user, success_cmd)
elif self.become_method == 'ksu':
def detect_ksu_prompt(b_data):
return re.match(b"Kerberos password for .*@.*:", b_data)
prompt = detect_ksu_prompt
becomecmd = '%s %s %s -e %s' % (exe, self.become_user, flags, command)
elif self.become_method == 'pfexec':
# No user, as it uses its own exec_attr to figure it out
becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)
elif self.become_method == 'runas':
raise AnsibleError("'runas' is not yet implemented")
#FIXME: figure out prompt
# this is not for use with the winrm plugin, but for if they ever get native ssh on windows
becomecmd = '%s %s /user:%s "%s"' % (exe, flags, self.become_user, success_cmd)
elif self.become_method == 'doas':
prompt = 'doas (%s@' % self.remote_user
exe = self.become_exe or 'doas'
if not self.become_pass:
flags += ' -n '
if self.become_user:
flags += ' -u %s ' % self.become_user
#FIXME: make shell independent
becomecmd = '%s %s echo %s && %s %s env ANSIBLE=true %s' % (exe, flags, success_key, exe, flags, cmd)
elif self.become_method == 'dzdo':
exe = self.become_exe or 'dzdo'
if self.become_pass:
prompt = '[dzdo via ansible, key=%s] password: ' % randbits
becomecmd = '%s -p %s -u %s %s' % (exe, pipes.quote(prompt), self.become_user, command)
else:
becomecmd = '%s -u %s %s' % (exe, self.become_user, command)
else:
raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)
if self.become_pass:
self.prompt = prompt
self.success_key = success_key
return becomecmd
return cmd
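    # For illustration only (a sketch with placeholder values): with
    # become_method 'sudo', a become password set and executable '/bin/sh',
    # the command assembled above looks roughly like
    #   sudo <flags> -p "[sudo via ansible, key=<randbits>] password: " -u root \
    #       /bin/sh -c 'echo BECOME-SUCCESS-<randbits>; <original command>'
    # The random key lets the connection plugin recognise both the password
    # prompt and a successful escalation in the remote output.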
def update_vars(self, variables):
'''
Adds 'magic' variables relating to connections to the variable dictionary provided.
This is a legacy from runner; it is kept in case users need to access these values from the play.
'''
for prop, var_list in MAGIC_VARIABLE_MAPPING.items():
try:
if 'become' in prop:
continue
var_val = getattr(self, prop)
for var_opt in var_list:
if var_opt not in variables and var_val is not None:
variables[var_opt] = var_val
except AttributeError:
continue
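    # Illustrative sketch: for a context with remote_addr='10.0.0.5' and
    # port=22, calling update_vars({}) leaves the dict containing, among
    # other entries,
    #   {'ansible_ssh_host': '10.0.0.5', 'ansible_host': '10.0.0.5',
    #    'ansible_ssh_port': 22, 'ansible_port': 22}
    # become-related fields are skipped and existing keys are never
    # overwritten.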
| {
"content_hash": "ee5c8c274ee3c24fda0d50705e43ab99",
"timestamp": "",
"source": "github",
"line_count": 582,
"max_line_length": 154,
"avg_line_length": 42.31958762886598,
"alnum_prop": 0.5863580998781973,
"repo_name": "abtreece/ansible",
"id": "745ea094c750c9efda67e8c125fd126b01610420",
"size": "25521",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable-2.2",
"path": "lib/ansible/playbook/play_context.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import copy
import uuid
import mox
from heat.engine import environment
from heat.tests.v1_1 import fakes
from heat.common import exception
from heat.common import template_format
from heat.engine import clients
from heat.engine import parser
from heat.engine import resource
from heat.engine import scheduler
from heat.engine.resources import nova_utils
from heat.engine.resources import server as servers
from heat.openstack.common import uuidutils
from heat.openstack.common.gettextutils import _
from heat.tests.common import HeatTestCase
from heat.tests import utils
from novaclient import exceptions
wp_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Parameters" : {
"key_name" : {
"Description" : "key_name",
"Type" : "String",
"Default" : "test"
}
},
"Resources" : {
"WebServer": {
"Type": "OS::Nova::Server",
"Properties": {
"image" : "F17-x86_64-gold",
"flavor" : "m1.large",
"key_name" : "test",
"user_data" : "wordpress"
}
}
}
}
'''
class ServersTest(HeatTestCase):
def setUp(self):
super(ServersTest, self).setUp()
self.fc = fakes.FakeClient()
utils.setup_dummy_db()
self.limits = self.m.CreateMockAnything()
self.limits.absolute = self._limits_absolute()
def _limits_absolute(self):
max_personality = self.m.CreateMockAnything()
max_personality.name = 'maxPersonality'
max_personality.value = 5
max_personality_size = self.m.CreateMockAnything()
max_personality_size.name = 'maxPersonalitySize'
max_personality_size.value = 10240
max_server_meta = self.m.CreateMockAnything()
max_server_meta.name = 'maxServerMeta'
max_server_meta.value = 3
yield max_personality
yield max_personality_size
yield max_server_meta
def _setup_test_stack(self, stack_name):
t = template_format.parse(wp_template)
template = parser.Template(t)
stack = parser.Stack(utils.dummy_context(), stack_name, template,
environment.Environment({'key_name': 'test'}),
stack_id=str(uuid.uuid4()))
return (t, stack)
def _setup_test_server(self, return_server, name, image_id=None,
override_name=False, stub_create=True):
stack_name = '%s_s' % name
(t, stack) = self._setup_test_stack(stack_name)
t['Resources']['WebServer']['Properties']['image'] = \
image_id or 'CentOS 5.2'
t['Resources']['WebServer']['Properties']['flavor'] = \
'256 MB Server'
server_name = '%s' % name
if override_name:
t['Resources']['WebServer']['Properties']['name'] = \
server_name
server = servers.Server(server_name,
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
server.t = server.stack.resolve_runtime_data(server.t)
if stub_create:
self.m.StubOutWithMock(self.fc.servers, 'create')
self.fc.servers.create(
image=1, flavor=1, key_name='test',
name=override_name and server.name or utils.PhysName(
stack_name, server.name),
security_groups=[],
userdata=mox.IgnoreArg(), scheduler_hints=None,
meta=None, nics=None, availability_zone=None,
block_device_mapping=None, config_drive=None,
disk_config=None, reservation_id=None, files={}).AndReturn(
return_server)
return server
def _create_test_server(self, return_server, name, override_name=False,
stub_create=True):
server = self._setup_test_server(return_server, name,
stub_create=stub_create)
self.m.ReplayAll()
scheduler.TaskRunner(server.create)()
return server
def test_server_create(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'test_server_create')
# this makes sure the auto increment worked on server creation
self.assertTrue(server.id > 0)
public_ip = return_server.networks['public'][0]
self.assertEqual(public_ip,
server.FnGetAtt('addresses')['public'][0]['addr'])
self.assertEqual(public_ip,
server.FnGetAtt('networks')['public'][0])
private_ip = return_server.networks['private'][0]
self.assertEqual(private_ip,
server.FnGetAtt('addresses')['private'][0]['addr'])
self.assertEqual(private_ip,
server.FnGetAtt('networks')['private'][0])
self.assertIn(
server.FnGetAtt('first_address'), (private_ip, public_ip))
self.assertEqual(return_server._info, server.FnGetAtt('show'))
self.assertEqual('sample-server2', server.FnGetAtt('instance_name'))
self.assertEqual('192.0.2.0', server.FnGetAtt('accessIPv4'))
self.assertEqual('::babe:4317:0A83', server.FnGetAtt('accessIPv6'))
self.m.VerifyAll()
def _test_server_error_during_create(self, exception):
return_server = self.fc.servers.list()[0]
server = self._setup_test_server(return_server, 'test_create_500')
server.resource_id = 1234
# Override the get_servers_1234 handler
d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d2 = copy.deepcopy(d1)
d1['server']['status'] = 'BUILD'
d2['server']['status'] = 'ACTIVE'
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d1))
get().AndReturn((200, d1))
get().AndRaise(exception)
get().AndReturn((200, d2))
self.m.ReplayAll()
scheduler.TaskRunner(server.create)()
self.assertEqual('CREATE', server.action)
self.assertEqual('COMPLETE', server.status)
self.m.VerifyAll()
def test_server_create_500_error(self):
msg = ("ClientException: The server has either erred or is "
"incapable of performing the requested operation.")
exc = clients.novaclient.exceptions.ClientException(500, msg)
self._test_server_error_during_create(exc)
def _test_server_error_during_suspend(self, exception):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server, 'test_suspend_500')
server.resource_id = 1234
# Override the get_servers_1234 handler
d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d2 = copy.deepcopy(d1)
d1['server']['status'] = 'ACTIVE'
d2['server']['status'] = 'SUSPENDED'
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d1))
get().AndReturn((200, d1))
get().AndRaise(exception)
get().AndReturn((200, d2))
self.m.ReplayAll()
scheduler.TaskRunner(server.suspend)()
self.assertEqual('SUSPEND', server.action)
self.assertEqual('COMPLETE', server.status)
self.m.VerifyAll()
def test_server_suspend_500_error(self):
msg = ("ClientException: The server has either erred or is "
"incapable of performing the requested operation.")
exc = clients.novaclient.exceptions.ClientException(500, msg)
self._test_server_error_during_suspend(exc)
def test_server_create_metadata(self):
return_server = self.fc.servers.list()[1]
stack_name = 'create_metadata_test_stack'
(t, stack) = self._setup_test_stack(stack_name)
t['Resources']['WebServer']['Properties']['metadata'] = \
{'a': 1}
server = servers.Server('create_metadata_test_server',
t['Resources']['WebServer'], stack)
server.t = server.stack.resolve_runtime_data(server.t)
instance_meta = {'a': "1"}
self.m.StubOutWithMock(self.fc.servers, 'create')
self.fc.servers.create(
image=mox.IgnoreArg(), flavor=mox.IgnoreArg(), key_name='test',
name=mox.IgnoreArg(), security_groups=[],
userdata=mox.IgnoreArg(), scheduler_hints=None,
meta=instance_meta, nics=None, availability_zone=None,
block_device_mapping=None, config_drive=None,
disk_config=None, reservation_id=None, files={}).AndReturn(
return_server)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
scheduler.TaskRunner(server.create)()
self.m.VerifyAll()
def test_server_create_with_image_id(self):
return_server = self.fc.servers.list()[1]
server = self._setup_test_server(return_server,
'test_server_create_image_id',
image_id='1',
override_name=True)
self.m.StubOutWithMock(uuidutils, "is_uuid_like")
uuidutils.is_uuid_like('1').AndReturn(True)
self.m.ReplayAll()
scheduler.TaskRunner(server.create)()
# this makes sure the auto increment worked on server creation
self.assertTrue(server.id > 0)
public_ip = return_server.networks['public'][0]
self.assertEqual(
server.FnGetAtt('addresses')['public'][0]['addr'], public_ip)
self.assertEqual(
server.FnGetAtt('networks')['public'][0], public_ip)
private_ip = return_server.networks['private'][0]
self.assertEqual(
server.FnGetAtt('addresses')['private'][0]['addr'], private_ip)
self.assertEqual(
server.FnGetAtt('networks')['private'][0], private_ip)
self.assertIn(
server.FnGetAtt('first_address'), (private_ip, public_ip))
self.m.VerifyAll()
def test_server_create_image_name_err(self):
stack_name = 'img_name_err'
(t, stack) = self._setup_test_stack(stack_name)
# create a server with a nonexistent image name
t['Resources']['WebServer']['Properties']['image'] = 'Slackware'
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
self.assertRaises(exception.ImageNotFound, server.handle_create)
self.m.VerifyAll()
def test_server_create_duplicate_image_name_err(self):
stack_name = 'img_dup_err'
(t, stack) = self._setup_test_stack(stack_name)
# create a server with a non-unique image name
t['Resources']['WebServer']['Properties']['image'] = 'CentOS 5.2'
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.StubOutWithMock(self.fc.client, "get_images_detail")
self.fc.client.get_images_detail().AndReturn((
200, {'images': [{'id': 1, 'name': 'CentOS 5.2'},
{'id': 4, 'name': 'CentOS 5.2'}]}))
self.m.ReplayAll()
self.assertRaises(exception.PhysicalResourceNameAmbiguity,
server.handle_create)
self.m.VerifyAll()
def test_server_create_image_id_err(self):
stack_name = 'img_id_err'
(t, stack) = self._setup_test_stack(stack_name)
# create a server with a nonexistent image Id
t['Resources']['WebServer']['Properties']['image'] = '1'
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.StubOutWithMock(uuidutils, "is_uuid_like")
uuidutils.is_uuid_like('1').AndReturn(True)
self.m.StubOutWithMock(self.fc.client, "get_images_1")
self.fc.client.get_images_1().AndRaise(
servers.clients.novaclient.exceptions.NotFound(404))
self.m.ReplayAll()
self.assertRaises(exception.ImageNotFound, server.handle_create)
self.m.VerifyAll()
def test_server_create_unexpected_status(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'cr_unexp_sts')
return_server.get = lambda: None
return_server.status = 'BOGUS'
self.assertRaises(exception.Error,
server.check_create_complete,
return_server)
def test_server_create_error_status(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'cr_err_sts')
return_server.status = 'ERROR'
return_server.fault = {
'message': 'NoValidHost',
'code': 500,
'created': '2013-08-14T03:12:10Z'
}
self.m.StubOutWithMock(return_server, 'get')
return_server.get()
self.m.ReplayAll()
self.assertRaises(exception.Error,
server.check_create_complete,
return_server)
self.m.VerifyAll()
def test_server_create_raw_userdata(self):
return_server = self.fc.servers.list()[1]
stack_name = 'raw_userdata_s'
(t, stack) = self._setup_test_stack(stack_name)
t['Resources']['WebServer']['Properties']['user_data_format'] = \
'RAW'
server = servers.Server('WebServer',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
server.t = server.stack.resolve_runtime_data(server.t)
self.m.StubOutWithMock(self.fc.servers, 'create')
self.fc.servers.create(
image=744, flavor=3, key_name='test',
name=utils.PhysName(stack_name, server.name),
security_groups=[],
userdata='wordpress', scheduler_hints=None,
meta=None, nics=None, availability_zone=None,
block_device_mapping=None, config_drive=None,
disk_config=None, reservation_id=None, files={}).AndReturn(
return_server)
self.m.ReplayAll()
scheduler.TaskRunner(server.create)()
self.m.VerifyAll()
def test_server_validate(self):
stack_name = 'srv_val'
(t, stack) = self._setup_test_stack(stack_name)
# create a server with an image Id
t['Resources']['WebServer']['Properties']['image'] = '1'
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.StubOutWithMock(uuidutils, "is_uuid_like")
uuidutils.is_uuid_like('1').AndReturn(True)
self.m.ReplayAll()
self.assertIsNone(server.validate())
self.m.VerifyAll()
def test_server_validate_with_bootable_vol(self):
stack_name = 'srv_val_bootvol'
(t, stack) = self._setup_test_stack(stack_name)
# create a server with a bootable volume
web_server = t['Resources']['WebServer']
del web_server['Properties']['image']
def create_server(device_name):
web_server['Properties']['block_device_mapping'] = [{
"device_name": device_name,
"volume_id": "5d7e27da-6703-4f7e-9f94-1f67abef734c",
"delete_on_termination": False
}]
server = servers.Server('server_with_bootable_volume',
web_server, stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
return server
server = create_server(u'vda')
self.assertIsNone(server.validate())
server = create_server('vda')
self.assertIsNone(server.validate())
server = create_server('vdb')
ex = self.assertRaises(exception.StackValidationFailed,
server.validate)
self.assertEqual('Neither image nor bootable volume is specified for '
'instance server_with_bootable_volume', str(ex))
self.m.VerifyAll()
def test_server_validate_with_nova_keypair_resource(self):
stack_name = 'srv_val_test'
nova_keypair_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Resources" : {
"WebServer": {
"Type": "OS::Nova::Server",
"Properties": {
"image" : "F17-x86_64-gold",
"flavor" : "m1.large",
"key_name" : { "Ref": "SSHKey" },
"user_data" : "wordpress"
}
},
"SSHKey": {
"Type": "OS::Nova::KeyPair",
"Properties": {
"name": "my_key"
}
}
}
}
'''
t = template_format.parse(nova_keypair_template)
template = parser.Template(t)
stack = parser.Stack(utils.dummy_context(), stack_name, template,
stack_id=str(uuid.uuid4()))
server = servers.Server('server_validate_test',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
self.assertIsNone(server.validate())
self.m.VerifyAll()
def test_server_validate_with_invalid_ssh_key(self):
stack_name = 'srv_val_test'
(t, stack) = self._setup_test_stack(stack_name)
web_server = t['Resources']['WebServer']
# Make the ssh key have an invalid name
web_server['Properties']['key_name'] = 'test2'
server = servers.Server('server_validate_test',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
ex = self.assertRaises(exception.UserKeyPairMissing,
server.validate)
self.assertIn("The Key (test2) could not be found.", str(ex))
self.m.VerifyAll()
def test_server_validate_delete_policy(self):
stack_name = 'srv_val_delpol'
(t, stack) = self._setup_test_stack(stack_name)
# create a server with an invalid DeletionPolicy
t['Resources']['WebServer']['DeletionPolicy'] = 'SelfDestruct'
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.ReplayAll()
ex = self.assertRaises(exception.StackValidationFailed,
server.validate)
self.assertEqual('Invalid DeletionPolicy SelfDestruct',
str(ex))
self.m.VerifyAll()
def test_server_validate_with_networks(self):
stack_name = 'srv_net'
(t, stack) = self._setup_test_stack(stack_name)
network_name = 'public'
# create a server with both 'uuid' and 'network' properties
t['Resources']['WebServer']['Properties']['networks'] = (
[{'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'network': network_name}])
server = servers.Server('server_validate_with_networks',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
ex = self.assertRaises(exception.StackValidationFailed,
server.validate)
self.assertIn(_('Properties "uuid" and "network" are both set to '
'the network "%(network)s" for the server '
'"%(server)s". The "uuid" property is deprecated. '
'Use only "network" property.'
'') % dict(network=network_name, server=server.name),
str(ex))
self.m.VerifyAll()
def test_server_delete(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'create_delete')
server.resource_id = 1234
# this makes sure the auto increment worked on server creation
self.assertTrue(server.id > 0)
server_get = self.fc.client.get_servers_1234()
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn(server_get)
get().AndRaise(servers.clients.novaclient.exceptions.NotFound(404))
mox.Replay(get)
self.m.ReplayAll()
scheduler.TaskRunner(server.delete)()
self.assertIsNone(server.resource_id)
self.assertEqual((server.DELETE, server.COMPLETE), server.state)
self.m.VerifyAll()
def test_server_delete_notfound(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'create_delete2')
server.resource_id = 1234
# this makes sure the auto increment worked on server creation
self.assertTrue(server.id > 0)
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndRaise(servers.clients.novaclient.exceptions.NotFound(404))
mox.Replay(get)
scheduler.TaskRunner(server.delete)()
self.assertIsNone(server.resource_id)
self.assertEqual((server.DELETE, server.COMPLETE), server.state)
self.m.VerifyAll()
server.state_set(server.CREATE, server.COMPLETE, 'to delete again')
scheduler.TaskRunner(server.delete)()
self.assertEqual((server.DELETE, server.COMPLETE), server.state)
self.m.VerifyAll()
def test_server_update_metadata(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'md_update')
update_template = copy.deepcopy(server.t)
update_template['Metadata'] = {'test': 123}
scheduler.TaskRunner(server.update, update_template)()
self.assertEqual({'test': 123}, server.metadata)
server.t['Metadata'] = {'test': 456}
server.metadata_update()
self.assertEqual({'test': 456}, server.metadata)
def test_server_update_nova_metadata(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'md_update')
new_meta = {'test': 123}
self.m.StubOutWithMock(self.fc.servers, 'set_meta')
self.fc.servers.set_meta(return_server,
nova_utils.meta_serialize(
new_meta)).AndReturn(None)
self.m.ReplayAll()
update_template = copy.deepcopy(server.t)
update_template['Properties']['metadata'] = new_meta
scheduler.TaskRunner(server.update, update_template)()
self.assertEqual((server.UPDATE, server.COMPLETE), server.state)
self.m.VerifyAll()
def test_server_update_nova_metadata_complex(self):
"""
Test that complex metadata values are correctly serialized
to JSON when sent to Nova.
"""
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'md_update')
new_meta = {'test': {'testkey': 'testvalue'}}
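        # A sketch of the expected behaviour (assumption about meta_serialize):
        # the nested value is sent to Nova as a JSON string, i.e. roughly
        #   {'test': '{"testkey": "testvalue"}'}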
self.m.StubOutWithMock(self.fc.servers, 'set_meta')
# If we're going to call set_meta() directly we
# need to handle the serialization ourselves.
self.fc.servers.set_meta(return_server,
nova_utils.meta_serialize(
new_meta)).AndReturn(None)
self.m.ReplayAll()
update_template = copy.deepcopy(server.t)
update_template['Properties']['metadata'] = new_meta
scheduler.TaskRunner(server.update, update_template)()
self.assertEqual((server.UPDATE, server.COMPLETE), server.state)
self.m.VerifyAll()
def test_server_update_nova_metadata_with_delete(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'md_update')
# part one, add some metadata
new_meta = {'test': '123', 'this': 'that'}
self.m.StubOutWithMock(self.fc.servers, 'set_meta')
self.fc.servers.set_meta(return_server,
new_meta).AndReturn(None)
self.m.ReplayAll()
update_template = copy.deepcopy(server.t)
update_template['Properties']['metadata'] = new_meta
scheduler.TaskRunner(server.update, update_template)()
self.assertEqual((server.UPDATE, server.COMPLETE), server.state)
self.m.VerifyAll()
self.m.UnsetStubs()
        # part two, change the metadata (test removing the old key)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
new_meta = {'new_key': 'yeah'}
self.m.StubOutWithMock(self.fc.servers, 'delete_meta')
new_return_server = self.fc.servers.list()[5]
self.fc.servers.delete_meta(new_return_server,
['test', 'this']).AndReturn(None)
self.m.StubOutWithMock(self.fc.servers, 'set_meta')
self.fc.servers.set_meta(new_return_server,
new_meta).AndReturn(None)
self.m.ReplayAll()
update_template = copy.deepcopy(server.t)
update_template['Properties']['metadata'] = new_meta
# new fake with the correct metadata
server.resource_id = '56789'
scheduler.TaskRunner(server.update, update_template)()
self.assertEqual((server.UPDATE, server.COMPLETE), server.state)
self.m.VerifyAll()
def test_server_update_server_flavor(self):
"""
        Server.handle_update supports changing the flavor, and applies
        the change by making a resize API call against Nova.
"""
return_server = self.fc.servers.list()[1]
return_server.id = 1234
server = self._create_test_server(return_server,
'srv_update')
update_template = copy.deepcopy(server.t)
update_template['Properties']['flavor'] = 'm1.small'
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(1234).AndReturn(return_server)
def activate_status(server):
server.status = 'VERIFY_RESIZE'
return_server.get = activate_status.__get__(return_server)
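        # __get__ binds the plain function as a method of return_server, so a
        # later return_server.get() call simply updates the fake server's
        # status instead of hitting Nova.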
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.client.post_servers_1234_action(
body={'resize': {'flavorRef': 2}}).AndReturn((202, None))
self.fc.client.post_servers_1234_action(
body={'confirmResize': None}).AndReturn((202, None))
self.m.ReplayAll()
scheduler.TaskRunner(server.update, update_template)()
self.assertEqual((server.UPDATE, server.COMPLETE), server.state)
self.m.VerifyAll()
def test_server_update_server_flavor_failed(self):
"""
If the status after a resize is not VERIFY_RESIZE, it means the resize
call failed, so we raise an explicit error.
"""
return_server = self.fc.servers.list()[1]
return_server.id = 1234
server = self._create_test_server(return_server,
'srv_update2')
update_template = copy.deepcopy(server.t)
update_template['Properties']['flavor'] = 'm1.small'
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(1234).AndReturn(return_server)
def activate_status(server):
server.status = 'ACTIVE'
return_server.get = activate_status.__get__(return_server)
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.client.post_servers_1234_action(
body={'resize': {'flavorRef': 2}}).AndReturn((202, None))
self.m.ReplayAll()
updater = scheduler.TaskRunner(server.update, update_template)
error = self.assertRaises(exception.ResourceFailure, updater)
self.assertEqual(
"Error: Resizing to 'm1.small' failed, status 'ACTIVE'",
str(error))
self.assertEqual((server.UPDATE, server.FAILED), server.state)
self.m.VerifyAll()
def test_server_update_server_flavor_replace(self):
stack_name = 'update_flvrep'
(t, stack) = self._setup_test_stack(stack_name)
t['Resources']['WebServer']['Properties'][
'flavor_update_policy'] = 'REPLACE'
server = servers.Server('server_server_update_flavor_replace',
t['Resources']['WebServer'], stack)
update_template = copy.deepcopy(server.t)
update_template['Properties']['flavor'] = 'm1.smigish'
updater = scheduler.TaskRunner(server.update, update_template)
self.assertRaises(resource.UpdateReplace, updater)
def test_server_update_server_flavor_policy_update(self):
stack_name = 'update_flvpol'
(t, stack) = self._setup_test_stack(stack_name)
server = servers.Server('server_server_update_flavor_replace',
t['Resources']['WebServer'], stack)
update_template = copy.deepcopy(server.t)
# confirm that when flavor_update_policy is changed during
# the update then the updated policy is followed for a flavor
# update
update_template['Properties']['flavor_update_policy'] = 'REPLACE'
update_template['Properties']['flavor'] = 'm1.smigish'
updater = scheduler.TaskRunner(server.update, update_template)
self.assertRaises(resource.UpdateReplace, updater)
def test_server_update_image_replace(self):
stack_name = 'update_imgrep'
(t, stack) = self._setup_test_stack(stack_name)
t['Resources']['WebServer']['Properties'][
'image_update_policy'] = 'REPLACE'
server = servers.Server('server_update_image_replace',
t['Resources']['WebServer'], stack)
update_template = copy.deepcopy(server.t)
update_template['Properties']['image'] = self.getUniqueString()
updater = scheduler.TaskRunner(server.update, update_template)
self.assertRaises(resource.UpdateReplace, updater)
def _test_server_update_image_rebuild(self, status, policy='REBUILD'):
        # Server.handle_update supports changing the image, and applies
        # the change by making a rebuild API call against Nova.
return_server = self.fc.servers.list()[1]
return_server.id = 1234
server = self._create_test_server(return_server,
'srv_updimgrbld')
new_image = 'F17-x86_64-gold'
update_template = copy.deepcopy(server.t)
update_template['Properties']['image'] = new_image
server.t['Properties']['image_update_policy'] = policy
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(1234).MultipleTimes().AndReturn(return_server)
self.m.StubOutWithMock(self.fc.servers, 'rebuild')
# 744 is a static lookup from the fake images list
if 'REBUILD' == policy:
self.fc.servers.rebuild(
return_server, 744, password=None, preserve_ephemeral=False)
else:
self.fc.servers.rebuild(
return_server, 744, password=None, preserve_ephemeral=True)
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
for stat in status:
def activate_status(serv):
serv.status = stat
return_server.get = activate_status.__get__(return_server)
self.m.ReplayAll()
scheduler.TaskRunner(server.update, update_template)()
self.assertEqual((server.UPDATE, server.COMPLETE), server.state)
self.m.VerifyAll()
def test_server_update_image_rebuild_status_rebuild(self):
        # Normally we will see 'REBUILD' first and then 'ACTIVE'.
self._test_server_update_image_rebuild(status=('REBUILD', 'ACTIVE'))
def test_server_update_image_rebuild_status_active(self):
# It is possible for us to miss the REBUILD status.
self._test_server_update_image_rebuild(status=('ACTIVE',))
def test_server_update_image_rebuild_status_rebuild_keep_ephemeral(self):
        # Normally we will see 'REBUILD' first and then 'ACTIVE'.
self._test_server_update_image_rebuild(
policy='REBUILD_PRESERVE_EPHEMERAL', status=('REBUILD', 'ACTIVE'))
def test_server_update_image_rebuild_status_active_keep_ephemeral(self):
# It is possible for us to miss the REBUILD status.
self._test_server_update_image_rebuild(
            policy='REBUILD_PRESERVE_EPHEMERAL', status=('ACTIVE',))
def test_server_update_image_rebuild_failed(self):
# If the status after a rebuild is not REBUILD or ACTIVE, it means the
# rebuild call failed, so we raise an explicit error.
return_server = self.fc.servers.list()[1]
return_server.id = 1234
server = self._create_test_server(return_server,
'srv_updrbldfail')
new_image = 'F17-x86_64-gold'
update_template = copy.deepcopy(server.t)
update_template['Properties']['image'] = new_image
server.t['Properties']['image_update_policy'] = 'REBUILD'
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(1234).MultipleTimes().AndReturn(return_server)
self.m.StubOutWithMock(self.fc.servers, 'rebuild')
# 744 is a static lookup from the fake images list
self.fc.servers.rebuild(
return_server, 744, password=None, preserve_ephemeral=False)
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
def activate_status(server):
server.status = 'REBUILD'
return_server.get = activate_status.__get__(return_server)
def activate_status2(server):
server.status = 'ERROR'
return_server.get = activate_status2.__get__(return_server)
self.m.ReplayAll()
updater = scheduler.TaskRunner(server.update, update_template)
error = self.assertRaises(exception.ResourceFailure, updater)
self.assertEqual(
"Error: Rebuilding server failed, status 'ERROR'",
str(error))
self.assertEqual((server.UPDATE, server.FAILED), server.state)
self.m.VerifyAll()
def test_server_update_attr_replace(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'update_rep')
update_template = copy.deepcopy(server.t)
update_template['UpdatePolicy'] = {'test': 123}
updater = scheduler.TaskRunner(server.update, update_template)
self.assertRaises(resource.UpdateReplace, updater)
def test_server_update_properties(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'update_prop')
update_template = copy.deepcopy(server.t)
update_template['Properties']['key_name'] = 'mustreplace'
updater = scheduler.TaskRunner(server.update, update_template)
self.assertRaises(resource.UpdateReplace, updater)
def test_server_status_build(self):
return_server = self.fc.servers.list()[0]
server = self._setup_test_server(return_server,
'sts_build')
server.resource_id = 1234
# Bind fake get method which Server.check_create_complete will call
def activate_status(server):
server.status = 'ACTIVE'
return_server.get = activate_status.__get__(return_server)
self.m.ReplayAll()
scheduler.TaskRunner(server.create)()
self.assertEqual((server.CREATE, server.COMPLETE), server.state)
def test_server_status_suspend_no_resource_id(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'srv_sus1')
server.resource_id = None
self.m.ReplayAll()
ex = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(server.suspend))
self.assertEqual('Error: Cannot suspend srv_sus1, '
'resource_id not set',
str(ex))
self.assertEqual((server.SUSPEND, server.FAILED), server.state)
self.m.VerifyAll()
def test_server_status_suspend_not_found(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'srv_sus2')
server.resource_id = 1234
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndRaise(servers.clients.novaclient.exceptions.NotFound(404))
mox.Replay(get)
self.m.ReplayAll()
ex = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(server.suspend))
self.assertEqual('NotFound: Failed to find server 1234',
str(ex))
self.assertEqual((server.SUSPEND, server.FAILED), server.state)
self.m.VerifyAll()
def test_server_status_suspend_immediate(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'srv_suspend3')
server.resource_id = 1234
self.m.ReplayAll()
# Override the get_servers_1234 handler status to SUSPENDED
d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d['server']['status'] = 'SUSPENDED'
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d))
mox.Replay(get)
scheduler.TaskRunner(server.suspend)()
self.assertEqual((server.SUSPEND, server.COMPLETE), server.state)
self.m.VerifyAll()
def test_server_status_resume_immediate(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'srv_resume1')
server.resource_id = 1234
self.m.ReplayAll()
        # Override the get_servers_1234 handler status to ACTIVE
d = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d['server']['status'] = 'ACTIVE'
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d))
mox.Replay(get)
server.state_set(server.SUSPEND, server.COMPLETE)
scheduler.TaskRunner(server.resume)()
self.assertEqual((server.RESUME, server.COMPLETE), server.state)
self.m.VerifyAll()
def test_server_status_suspend_wait(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'srv_susp_w')
server.resource_id = 1234
self.m.ReplayAll()
# Override the get_servers_1234 handler status to SUSPENDED, but
# return the ACTIVE state first (twice, so we sleep)
d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d2 = copy.deepcopy(d1)
d1['server']['status'] = 'ACTIVE'
d2['server']['status'] = 'SUSPENDED'
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d1))
get().AndReturn((200, d1))
get().AndReturn((200, d2))
self.m.ReplayAll()
scheduler.TaskRunner(server.suspend)()
self.assertEqual((server.SUSPEND, server.COMPLETE), server.state)
self.m.VerifyAll()
def test_server_status_suspend_unknown_status(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'srv_susp_uk')
server.resource_id = 1234
self.m.ReplayAll()
        # Override the get_servers_1234 handler status to an unknown value,
        # but return the ACTIVE state first (twice, so we sleep)
d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d2 = copy.deepcopy(d1)
d1['server']['status'] = 'ACTIVE'
d2['server']['status'] = 'TRANSMOGRIFIED'
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d1))
get().AndReturn((200, d1))
get().AndReturn((200, d2))
self.m.ReplayAll()
ex = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(server.suspend))
self.assertEqual('Error: Suspend of server sample-server failed '
'with unknown status: TRANSMOGRIFIED',
str(ex))
self.assertEqual((server.SUSPEND, server.FAILED), server.state)
self.m.VerifyAll()
def test_server_status_resume_wait(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'srv_res_w')
server.resource_id = 1234
self.m.ReplayAll()
# Override the get_servers_1234 handler status to ACTIVE, but
# return the SUSPENDED state first (twice, so we sleep)
d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d2 = copy.deepcopy(d1)
d1['server']['status'] = 'SUSPENDED'
d2['server']['status'] = 'ACTIVE'
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndReturn((200, d1))
get().AndReturn((200, d1))
get().AndReturn((200, d2))
self.m.ReplayAll()
server.state_set(server.SUSPEND, server.COMPLETE)
scheduler.TaskRunner(server.resume)()
self.assertEqual((server.RESUME, server.COMPLETE), server.state)
self.m.VerifyAll()
def test_server_status_resume_no_resource_id(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'srv_susp_norid')
server.resource_id = None
self.m.ReplayAll()
server.state_set(server.SUSPEND, server.COMPLETE)
ex = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(server.resume))
self.assertEqual('Error: Cannot resume srv_susp_norid, '
'resource_id not set',
str(ex))
self.assertEqual((server.RESUME, server.FAILED), server.state)
self.m.VerifyAll()
def test_server_status_resume_not_found(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'srv_res_nf')
server.resource_id = 1234
self.m.ReplayAll()
        # Stub the get_servers_1234 handler so the lookup raises NotFound
self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
get = self.fc.client.get_servers_1234
get().AndRaise(servers.clients.novaclient.exceptions.NotFound(404))
self.m.ReplayAll()
server.state_set(server.SUSPEND, server.COMPLETE)
ex = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(server.resume))
self.assertEqual('NotFound: Failed to find server 1234',
str(ex))
self.assertEqual((server.RESUME, server.FAILED), server.state)
self.m.VerifyAll()
def test_server_status_build_spawning(self):
self._test_server_status_not_build_active('BUILD(SPAWNING)')
def test_server_status_hard_reboot(self):
self._test_server_status_not_build_active('HARD_REBOOT')
def test_server_status_password(self):
self._test_server_status_not_build_active('PASSWORD')
def test_server_status_reboot(self):
self._test_server_status_not_build_active('REBOOT')
def test_server_status_rescue(self):
self._test_server_status_not_build_active('RESCUE')
def test_server_status_resize(self):
self._test_server_status_not_build_active('RESIZE')
def test_server_status_revert_resize(self):
self._test_server_status_not_build_active('REVERT_RESIZE')
def test_server_status_shutoff(self):
self._test_server_status_not_build_active('SHUTOFF')
def test_server_status_suspended(self):
self._test_server_status_not_build_active('SUSPENDED')
def test_server_status_verify_resize(self):
self._test_server_status_not_build_active('VERIFY_RESIZE')
def _test_server_status_not_build_active(self, uncommon_status):
return_server = self.fc.servers.list()[0]
server = self._setup_test_server(return_server,
'srv_sts_bld')
server.resource_id = 1234
check_iterations = [0]
# Bind fake get method which Server.check_create_complete will call
def activate_status(server):
check_iterations[0] += 1
if check_iterations[0] == 1:
server.status = uncommon_status
if check_iterations[0] > 2:
server.status = 'ACTIVE'
return_server.get = activate_status.__get__(return_server)
self.m.ReplayAll()
scheduler.TaskRunner(server.create)()
self.assertEqual((server.CREATE, server.COMPLETE), server.state)
self.m.VerifyAll()
def test_build_nics(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
'test_server_create')
self.assertIsNone(server._build_nics([]))
self.assertIsNone(server._build_nics(None))
self.assertEqual([{'port-id': 'aaaabbbb'},
{'v4-fixed-ip': '192.0.2.0'}],
server._build_nics([{'port': 'aaaabbbb'},
{'fixed_ip': '192.0.2.0'}]))
self.assertEqual([{'net-id': '1234abcd'}],
server._build_nics([{'uuid': '1234abcd'}]))
self.assertEqual([{'net-id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}],
server._build_nics(
[{'network':
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}]
))
self.assertEqual([{'net-id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}],
server._build_nics([{'network': 'public'}]))
self.assertRaises(exceptions.NoUniqueMatch, server._build_nics,
([{'network': 'foo'}]))
self.assertRaises(exceptions.NotFound, server._build_nics,
([{'network': 'bar'}]))
def test_server_without_ip_address(self):
return_server = self.fc.servers.list()[3]
server = self._create_test_server(return_server,
'wo_ipaddr')
self.assertEqual({'empty_net': []}, server.FnGetAtt('addresses'))
self.assertEqual({'empty_net': []}, server.FnGetAtt('networks'))
self.assertEqual('', server.FnGetAtt('first_address'))
def test_build_block_device_mapping(self):
self.assertIsNone(servers.Server._build_block_device_mapping([]))
self.assertIsNone(servers.Server._build_block_device_mapping(None))
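        # The expected values below show the colon-separated mapping form
        # "<id>:<'snap' for snapshots>:<volume_size>:<delete_on_termination>",
        # with unset trailing fields left empty (e.g. 'vda': '1234:').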
self.assertEqual({
'vda': '1234:',
'vdb': '1234:snap',
}, servers.Server._build_block_device_mapping([
{'device_name': 'vda', 'volume_id': '1234'},
{'device_name': 'vdb', 'snapshot_id': '1234'},
]))
self.assertEqual({
'vdc': '1234::10',
'vdd': '1234:snap:0:True'
}, servers.Server._build_block_device_mapping([
{
'device_name': 'vdc',
'volume_id': '1234',
'volume_size': 10
},
{
'device_name': 'vdd',
'snapshot_id': '1234',
'delete_on_termination': True
}
]))
def test_validate_block_device_mapping_volume_size_valid_int(self):
stack_name = 'val_vsize_valid'
t, stack = self._setup_test_stack(stack_name)
bdm = [{'device_name': 'vda', 'volume_id': '1234',
'volume_size': 10}]
t['Resources']['WebServer']['Properties']['block_device_mapping'] = bdm
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
self.assertIsNone(server.validate())
self.m.VerifyAll()
def test_validate_block_device_mapping_volume_size_valid_str(self):
stack_name = 'val_vsize_valid'
t, stack = self._setup_test_stack(stack_name)
bdm = [{'device_name': 'vda', 'volume_id': '1234',
'volume_size': '10'}]
t['Resources']['WebServer']['Properties']['block_device_mapping'] = bdm
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
self.assertIsNone(server.validate())
self.m.VerifyAll()
def test_validate_block_device_mapping_volume_size_invalid_str(self):
stack_name = 'val_vsize_invalid'
t, stack = self._setup_test_stack(stack_name)
bdm = [{'device_name': 'vda', 'volume_id': '1234',
'volume_size': '10a'}]
t['Resources']['WebServer']['Properties']['block_device_mapping'] = bdm
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
exc = self.assertRaises(exception.StackValidationFailed,
server.validate)
self.assertIn("Value '10a' is not an integer", str(exc))
def test_validate_conflict_block_device_mapping_props(self):
stack_name = 'val_blkdev1'
(t, stack) = self._setup_test_stack(stack_name)
bdm = [{'device_name': 'vdb', 'snapshot_id': '1234',
'volume_id': '1234'}]
t['Resources']['WebServer']['Properties']['block_device_mapping'] = bdm
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
self.assertRaises(exception.ResourcePropertyConflict, server.validate)
self.m.VerifyAll()
def test_validate_insufficient_block_device_mapping_props(self):
stack_name = 'val_blkdev2'
(t, stack) = self._setup_test_stack(stack_name)
bdm = [{'device_name': 'vdb', 'volume_size': 1,
'delete_on_termination': True}]
t['Resources']['WebServer']['Properties']['block_device_mapping'] = bdm
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
ex = self.assertRaises(exception.StackValidationFailed,
server.validate)
msg = 'Either volume_id or snapshot_id must be specified for device' +\
' mapping vdb'
self.assertEqual(msg, str(ex))
self.m.VerifyAll()
def test_validate_without_image_or_bootable_volume(self):
stack_name = 'val_imgvol'
(t, stack) = self._setup_test_stack(stack_name)
del t['Resources']['WebServer']['Properties']['image']
bdm = [{'device_name': 'vdb', 'volume_id': '1234'}]
t['Resources']['WebServer']['Properties']['block_device_mapping'] = bdm
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
ex = self.assertRaises(exception.StackValidationFailed,
server.validate)
msg = 'Neither image nor bootable volume is specified for instance %s'\
% server.name
self.assertEqual(msg, str(ex))
self.m.VerifyAll()
def test_validate_metadata_too_many(self):
stack_name = 'srv_val_metadata'
(t, stack) = self._setup_test_stack(stack_name)
t['Resources']['WebServer']['Properties']['metadata'] = {'a': 1,
'b': 2,
'c': 3,
'd': 4}
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(self.fc.limits, 'get')
self.fc.limits.get().MultipleTimes().AndReturn(self.limits)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
ex = self.assertRaises(exception.StackValidationFailed,
server.validate)
self.assertIn('Instance metadata must not contain greater than 3 '
'entries', str(ex))
self.m.VerifyAll()
def test_validate_metadata_okay(self):
stack_name = 'srv_val_metadata'
(t, stack) = self._setup_test_stack(stack_name)
t['Resources']['WebServer']['Properties']['metadata'] = {'a': 1,
'b': 2,
'c': 3}
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(self.fc.limits, 'get')
self.fc.limits.get().MultipleTimes().AndReturn(self.limits)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
self.assertIsNone(server.validate())
self.m.VerifyAll()
def test_server_validate_too_many_personality(self):
stack_name = 'srv_val'
(t, stack) = self._setup_test_stack(stack_name)
t['Resources']['WebServer']['Properties']['personality'] = \
{"/fake/path1": "fake contents1",
"/fake/path2": "fake_contents2",
"/fake/path3": "fake_contents3",
"/fake/path4": "fake_contents4",
"/fake/path5": "fake_contents5",
"/fake/path6": "fake_contents6"}
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(self.fc.limits, 'get')
self.fc.limits.get().MultipleTimes().AndReturn(self.limits)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
exc = self.assertRaises(exception.StackValidationFailed,
server.validate)
self.assertEqual("The personality property may not contain "
"greater than 5 entries.", str(exc))
self.m.VerifyAll()
def test_server_validate_personality_okay(self):
stack_name = 'srv_val'
(t, stack) = self._setup_test_stack(stack_name)
t['Resources']['WebServer']['Properties']['personality'] = \
{"/fake/path1": "fake contents1",
"/fake/path2": "fake_contents2",
"/fake/path3": "fake_contents3",
"/fake/path4": "fake_contents4",
"/fake/path5": "fake_contents5"}
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(self.fc.limits, 'get')
self.fc.limits.get().MultipleTimes().AndReturn(self.limits)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
self.assertIsNone(server.validate())
self.m.VerifyAll()
def test_server_validate_personality_file_size_okay(self):
stack_name = 'srv_val'
(t, stack) = self._setup_test_stack(stack_name)
t['Resources']['WebServer']['Properties']['personality'] = \
{"/fake/path1": "a" * 10240}
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(self.fc.limits, 'get')
self.fc.limits.get().MultipleTimes().AndReturn(self.limits)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
self.assertIsNone(server.validate())
self.m.VerifyAll()
def test_server_validate_personality_file_size_too_big(self):
stack_name = 'srv_val'
(t, stack) = self._setup_test_stack(stack_name)
t['Resources']['WebServer']['Properties']['personality'] = \
{"/fake/path1": "a" * 10241}
server = servers.Server('server_create_image_err',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(self.fc.limits, 'get')
self.fc.limits.get().MultipleTimes().AndReturn(self.limits)
self.m.StubOutWithMock(server, 'nova')
server.nova().MultipleTimes().AndReturn(self.fc)
self.m.ReplayAll()
exc = self.assertRaises(exception.StackValidationFailed,
server.validate)
self.assertEqual("The contents of personality file \"/fake/path1\" "
"is larger than the maximum allowed personality "
"file size (10240 bytes).", str(exc))
self.m.VerifyAll()
| {
"content_hash": "22d18aaed2b8c196fb696eff2b3a9c17",
"timestamp": "",
"source": "github",
"line_count": 1488,
"max_line_length": 79,
"avg_line_length": 40.55040322580645,
"alnum_prop": 0.5816138815691344,
"repo_name": "ntt-sic/heat",
"id": "60ae9ada02df63e2e2cf3f2d99681610a53d8b25",
"size": "60957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/test_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3336181"
},
{
"name": "Shell",
"bytes": "22168"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.core import mail
from django_dynamic_fixture import get
from django.utils.translation import ugettext_lazy as _
from oscar.test.testcases import WebTestCase
from oscar.apps.dashboard.users.views import IndexView
from oscar.core.compat import get_user_model
from webtest import AppError
User = get_user_model()
class IndexViewTests(WebTestCase):
is_staff = True
active_users_ids = []
inactive_users_ids = []
csrf_checks = False
def setUp(self):
super(IndexViewTests, self).setUp()
for i in range(1, 25):
get(User, is_active=True)
for i in range(1, 25):
get(User, is_active=False)
user_queryset = User.objects.all()
self.active_users_ids = user_queryset.filter(is_active=True).values_list('id', flat=True)
self.inactive_users_ids = user_queryset.filter(is_active=False).values_list('id', flat=True)
def test_user_list_view(self):
response = self.get(reverse('dashboard:users-index'))
self.assertInContext(response, 'user_list')
self.assertEqual(len(response.context['user_list']), IndexView.paginate_by)
def test_make_active(self):
params = {'action': 'make_active',
'selected_user': self.inactive_users_ids}
response = self.post(reverse('dashboard:users-index'), params=params)
ex_inactive = User.objects.get(id=self.inactive_users_ids[10])
self.assertIsRedirect(response)
self.assertTrue(ex_inactive.is_active)
def test_make_inactive(self):
params = {'action': 'make_inactive',
'selected_user': self.active_users_ids}
response = self.post(reverse('dashboard:users-index'), params=params)
ex_active = User.objects.get(id=self.active_users_ids[10])
self.assertIsRedirect(response)
self.assertFalse(ex_active.is_active)
class DetailViewTests(WebTestCase):
is_staff = True
def test_user_detail_view(self):
response = self.get(reverse('dashboard:user-detail', kwargs={'pk': 1}))
self.assertInContext(response, 'user')
self.assertIsOk(response)
class TestDetailViewForStaffUser(WebTestCase):
is_staff = True
def setUp(self):
self.customer = get(User, username='jane',
email='[email protected]',
password='password')
super(TestDetailViewForStaffUser, self).setUp()
def test_password_reset_url_only_available_via_post(self):
try:
reset_url = reverse(
'dashboard:user-password-reset',
kwargs={'pk': self.customer.id}
)
self.get(reset_url)
except AppError as e:
            self.assertIn('405', e.args[0])
        else:
            self.fail('Expected a 405 response for a GET on the reset URL')
def test_admin_can_reset_user_passwords(self):
customer_page_url = reverse(
'dashboard:user-detail',
kwargs={'pk': self.customer.id}
)
customer_page = self.get(customer_page_url)
reset_form = customer_page.forms['password_reset_form']
response = reset_form.submit()
# Check that the staff user is redirected back to the customer page
self.assertRedirects(response, customer_page_url)
# Check that the reset email has been sent
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Resetting your password", mail.outbox[0].subject)
# Check that success message shows up
self.assertContains(
response.follow(),
_("A password reset email has been sent")
)
| {
"content_hash": "97f449e3b8abf81879641ff7aa271cc2",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 100,
"avg_line_length": 34.875,
"alnum_prop": 0.6346843121036669,
"repo_name": "marcoantoniooliveira/labweb",
"id": "83d66151b8e9907935174637786a7b0517bd40d3",
"size": "3627",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/functional/dashboard/user_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "1534157"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "JavaScript",
"bytes": "2968822"
},
{
"name": "LiveScript",
"bytes": "6103"
},
{
"name": "Puppet",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "30402832"
},
{
"name": "Shell",
"bytes": "10782"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
} |
from django import template
from django.templatetags.static import static
register = template.Library()
@register.simple_tag
def foundation_js(js_name=None):
js_path = "foundation/js/foundation/foundation.{0}.js".format(js_name)
if js_name is None:
js_path = "foundation/js/foundation.min.js".format(js_name)
return '<script src="{0}"></script>'.format(static(js_path))
@register.simple_tag
def foundation_vendor(vendor_name):
vendor_path = "foundation/js/vendor/{0}.js".format(vendor_name)
return '<script src="{0}"></script>'.format(static(vendor_path))
@register.simple_tag
def foundation_css(css_name):
css_path = "foundation/css/{0}.css".format(css_name)
return '<link rel="stylesheet" href="{0}"/>'.format(static(css_path))
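# Example template usage (the tag arguments here are illustrative only):
#   {% load foundation_tags %}
#   {% foundation_css "foundation.min" %}
#   {% foundation_vendor "jquery" %}
#   {% foundation_js %}            -> emits foundation.min.js
#   {% foundation_js "alert" %}    -> emits foundation/foundation.alert.js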
| {
"content_hash": "f666e0b945d55c2f717b3a315d1a5903",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 74,
"avg_line_length": 35,
"alnum_prop": 0.7038961038961039,
"repo_name": "bop/bauhaus",
"id": "0885ed292424afd18943f6035c211e03a56bad15",
"size": "770",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/foundation/templatetags/foundation_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "145210"
},
{
"name": "Groff",
"bytes": "22"
},
{
"name": "HTML",
"bytes": "1013469"
},
{
"name": "JavaScript",
"bytes": "267371"
},
{
"name": "Python",
"bytes": "6660999"
},
{
"name": "Shell",
"bytes": "4317"
}
],
"symlink_target": ""
} |
'''
Copyright 2015 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os.path
from PySide import QtGui
from opencmiss.neon.ui.problems.base import BaseProblem
from opencmiss.neon.core.problems.constants import RespirationConstants
from opencmiss.neon.ui.problems.ui_ventilationwidget import Ui_VentilationWidget
from opencmiss.neon.core.problems.ventilation import getExecutableForPlatform
class Ventilation(BaseProblem):
def __init__(self, shared_opengl_widget, parent=None):
super(Ventilation, self).__init__(parent)
self._ui = Ui_VentilationWidget()
self._ui.setupUi(self)
self._setupUi()
self._location = None
self._createMaps()
self._makeConnections()
def _setupUi(self):
enums = []
self._ui.tabWidget.setCurrentIndex(2)
self._ui.comboBoxExpirationType.clear()
class_attributes = RespirationConstants.ExpirationType.__dict__
for a in class_attributes:
if not a.startswith('__'):
enums.append((class_attributes[a], a))
enums = sorted(enums)
self._map_expiration_index_to_string = {e[0]: e[1].lower() for e in enums}
self._map_string_to_expiration_index = {e[1].lower(): e[0] for e in enums}
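        # These two dicts translate between the combo-box row index and the
        # lower-cased ExpirationType constant name, so the selection can be
        # stored as a plain string in the problem definition.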
self._ui.comboBoxExpirationType.addItems([e[1] for e in enums])
def _createMaps(self):
self._map_keys_to_ui = {}
self._map_ui_to_keys = {}
self._map_chooser_to_line_edit = {}
# The executable ui is not added to the map, it is dealt with separately
# File input outputs
self._map_keys_to_ui['tree_inbuilt'] = self._ui.checkBoxInBuiltTree
self._map_ui_to_keys[self._ui.checkBoxInBuiltTree] = 'tree_inbuilt'
self._map_keys_to_ui['tree_ipelem'] = self._ui.lineEditIpElem
self._map_ui_to_keys[self._ui.lineEditIpElem] = 'tree_ipelem'
self._map_keys_to_ui['tree_ipnode'] = self._ui.lineEditIpNode
self._map_ui_to_keys[self._ui.lineEditIpNode] = 'tree_ipnode'
self._map_keys_to_ui['tree_ipfield'] = self._ui.lineEditIpField
self._map_ui_to_keys[self._ui.lineEditIpField] = 'tree_ipfield'
self._map_keys_to_ui['tree_ipmesh'] = self._ui.lineEditIpMesh
self._map_ui_to_keys[self._ui.lineEditIpMesh] = 'tree_ipmesh'
self._map_keys_to_ui['flow_inbuilt'] = self._ui.checkBoxInBuiltFlow
self._map_ui_to_keys[self._ui.checkBoxInBuiltFlow] = 'flow_inbuilt'
self._map_keys_to_ui['flow_exelem'] = self._ui.lineEditFlow
self._map_ui_to_keys[self._ui.lineEditFlow] = 'flow_exelem'
self._map_keys_to_ui['terminal_exnode'] = self._ui.lineEditTerminalExNode
self._map_ui_to_keys[self._ui.lineEditTerminalExNode] = 'terminal_exnode'
self._map_keys_to_ui['tree_exnode'] = self._ui.lineEditTreeExNode
self._map_ui_to_keys[self._ui.lineEditTreeExNode] = 'tree_exnode'
self._map_keys_to_ui['tree_exelem'] = self._ui.lineEditTreeExElem
self._map_ui_to_keys[self._ui.lineEditTreeExElem] = 'tree_exelem'
self._map_keys_to_ui['ventilation_exelem'] = self._ui.lineEditVentilationExElem
self._map_ui_to_keys[self._ui.lineEditVentilationExElem] = 'ventilation_exelem'
self._map_keys_to_ui['radius_exelem'] = self._ui.lineEditRadiusExElem
self._map_ui_to_keys[self._ui.lineEditRadiusExElem] = 'radius_exelem'
# Main parameters
self._map_keys_to_ui['dt'] = self._ui.doubleSpinBoxTimeStep
self._map_ui_to_keys[self._ui.doubleSpinBoxTimeStep] = 'dt'
self._map_keys_to_ui['num_itns'] = self._ui.spinBoxNumberOfIterations
self._map_ui_to_keys[self._ui.spinBoxNumberOfIterations] = 'num_itns'
self._map_keys_to_ui['num_brths'] = self._ui.spinBoxNumberOfBreaths
self._map_ui_to_keys[self._ui.spinBoxNumberOfBreaths] = 'num_brths'
self._map_keys_to_ui['err_tol'] = self._ui.doubleSpinBoxErrorTolerance
self._map_ui_to_keys[self._ui.doubleSpinBoxErrorTolerance] = 'err_tol'
# Flow parameters
self._map_keys_to_ui['FRC'] = self._ui.doubleSpinBoxFRC
self._map_ui_to_keys[self._ui.doubleSpinBoxFRC] = 'FRC'
self._map_keys_to_ui['constrict'] = self._ui.doubleSpinBoxConstrict
self._map_ui_to_keys[self._ui.doubleSpinBoxConstrict] = 'constrict'
self._map_keys_to_ui['T_interval'] = self._ui.doubleSpinBoxTInterval
self._map_ui_to_keys[self._ui.doubleSpinBoxTInterval] = 'T_interval'
self._map_keys_to_ui['Gdirn'] = self._ui.spinBoxGdirn
self._map_ui_to_keys[self._ui.spinBoxGdirn] = 'Gdirn'
self._map_keys_to_ui['press_in'] = self._ui.doubleSpinBoxPressIn
self._map_ui_to_keys[self._ui.doubleSpinBoxPressIn] = 'press_in'
self._map_keys_to_ui['COV'] = self._ui.doubleSpinBoxCOV
self._map_ui_to_keys[self._ui.doubleSpinBoxCOV] = 'COV'
self._map_keys_to_ui['RMaxMean'] = self._ui.doubleSpinBoxRMaxMean
self._map_ui_to_keys[self._ui.doubleSpinBoxRMaxMean] = 'RMaxMean'
self._map_keys_to_ui['RMinMean'] = self._ui.doubleSpinBoxRMinMean
self._map_ui_to_keys[self._ui.doubleSpinBoxRMinMean] = 'RMinMean'
self._map_keys_to_ui['i_to_e_ratio'] = self._ui.doubleSpinBoxIERatio
self._map_ui_to_keys[self._ui.doubleSpinBoxIERatio] = 'i_to_e_ratio'
self._map_keys_to_ui['refvol'] = self._ui.doubleSpinBoxRefVolume
self._map_ui_to_keys[self._ui.doubleSpinBoxRefVolume] = 'refvol'
self._map_keys_to_ui['volume_target'] = self._ui.doubleSpinBoxVolumeTarget
self._map_ui_to_keys[self._ui.doubleSpinBoxVolumeTarget] = 'volume_target'
self._map_keys_to_ui['pmus_step'] = self._ui.doubleSpinBoxPMusStep
self._map_ui_to_keys[self._ui.doubleSpinBoxPMusStep] = 'pmus_step'
self._map_keys_to_ui['expiration_type'] = self._ui.comboBoxExpirationType
self._map_ui_to_keys[self._ui.comboBoxExpirationType] = 'expiration_type'
self._map_keys_to_ui['chest_wall_compliance'] = self._ui.doubleSpinBoxChestWallCompliance
self._map_ui_to_keys[self._ui.doubleSpinBoxChestWallCompliance] = 'chest_wall_compliance'
# Chooser button buddies
self._map_chooser_to_line_edit[self._ui.pushButtonChooseExecutable] = self._ui.lineEditExecutable
self._map_chooser_to_line_edit[self._ui.pushButtonChooseTreeExElem] = self._ui.lineEditTreeExElem
self._map_chooser_to_line_edit[self._ui.pushButtonChooseTreeExNode] = self._ui.lineEditTreeExNode
self._map_chooser_to_line_edit[self._ui.pushButtonChooseFlow] = self._ui.lineEditFlow
self._map_chooser_to_line_edit[self._ui.pushButtonChooseIpElem] = self._ui.lineEditIpElem
self._map_chooser_to_line_edit[self._ui.pushButtonChooseIpField] = self._ui.lineEditIpField
self._map_chooser_to_line_edit[self._ui.pushButtonChooseIpNode] = self._ui.lineEditIpNode
self._map_chooser_to_line_edit[self._ui.pushButtonChooseIpMesh] = self._ui.lineEditIpMesh
self._map_chooser_to_line_edit[self._ui.pushButtonChooseTerminalExNode] = self._ui.lineEditTerminalExNode
self._map_chooser_to_line_edit[self._ui.pushButtonChooseVentilationExElem] = self._ui.lineEditVentilationExElem
self._map_chooser_to_line_edit[self._ui.pushButtonChooseRadiusExElem] = self._ui.lineEditRadiusExElem
def _makeConnections(self):
self._ui.pushButtonChooseExecutable.clicked.connect(self._executableChooserClicked)
self._ui.pushButtonChooseTreeExElem.clicked.connect(self._chooserClicked)
self._ui.pushButtonChooseTreeExNode.clicked.connect(self._chooserClicked)
self._ui.pushButtonChooseFlow.clicked.connect(self._chooserClicked)
self._ui.pushButtonChooseIpElem.clicked.connect(self._chooserClicked)
self._ui.pushButtonChooseIpField.clicked.connect(self._chooserClicked)
self._ui.pushButtonChooseIpNode.clicked.connect(self._chooserClicked)
self._ui.pushButtonChooseIpMesh.clicked.connect(self._chooserClicked)
self._ui.pushButtonChooseTerminalExNode.clicked.connect(self._chooserClicked)
self._ui.pushButtonChooseVentilationExElem.clicked.connect(self._chooserClicked)
self._ui.pushButtonChooseRadiusExElem.clicked.connect(self._chooserClicked)
self._ui.checkBoxInBuiltExecutable.clicked.connect(self._inBuiltExecutableClicked)
self._ui.lineEditExecutable.editingFinished.connect(self._executableLocationChanged)
self._ui.checkBoxInBuiltFlow.clicked.connect(self._inBuiltFlowClicked)
self._ui.checkBoxInBuiltTree.clicked.connect(self._inBuiltTreeClicked)
# Main parameters
self._ui.doubleSpinBoxTimeStep.valueChanged.connect(self._updateMainParameterValue)
self._ui.spinBoxNumberOfIterations.valueChanged.connect(self._updateMainParameterValue)
self._ui.spinBoxNumberOfBreaths.valueChanged.connect(self._updateMainParameterValue)
self._ui.doubleSpinBoxErrorTolerance.valueChanged.connect(self._updateMainParameterValue)
# Flow parameters
self._ui.doubleSpinBoxFRC.valueChanged.connect(self._updateFlowParameterValue)
self._ui.doubleSpinBoxConstrict.valueChanged.connect(self._updateFlowParameterValue)
self._ui.doubleSpinBoxTInterval.valueChanged.connect(self._updateFlowParameterValue)
self._ui.spinBoxGdirn.valueChanged.connect(self._updateFlowParameterValue)
self._ui.doubleSpinBoxPressIn.valueChanged.connect(self._updateFlowParameterValue)
self._ui.doubleSpinBoxCOV.valueChanged.connect(self._updateFlowParameterValue)
self._ui.doubleSpinBoxRMaxMean.valueChanged.connect(self._updateFlowParameterValue)
self._ui.doubleSpinBoxRMinMean.valueChanged.connect(self._updateFlowParameterValue)
self._ui.doubleSpinBoxIERatio.valueChanged.connect(self._updateFlowParameterValue)
self._ui.doubleSpinBoxRefVolume.valueChanged.connect(self._updateFlowParameterValue)
self._ui.doubleSpinBoxVolumeTarget.valueChanged.connect(self._updateFlowParameterValue)
self._ui.doubleSpinBoxPMusStep.valueChanged.connect(self._updateFlowParameterValue)
self._ui.comboBoxExpirationType.currentIndexChanged.connect(self._updateFlowParameterValue)
self._ui.doubleSpinBoxChestWallCompliance.valueChanged.connect(self._updateFlowParameterValue)
def _inBuiltExecutableClicked(self):
state = self._ui.checkBoxInBuiltExecutable.isChecked()
self._ui.lineEditExecutable.setEnabled(not state)
self._ui.pushButtonChooseExecutable.setEnabled(not state)
if state:
self._ui.lineEditExecutable.clear()
self._problem.setInBuiltExecutable(getExecutableForPlatform())
def _inBuiltFlowClicked(self):
state = self._ui.checkBoxInBuiltFlow.isChecked()
key = self._map_ui_to_keys[self._ui.checkBoxInBuiltFlow]
self._problem.updateFileInputOutputs({key: state})
self._ui.lineEditFlow.setEnabled(not state)
self._ui.pushButtonChooseFlow.setEnabled(not state)
def _inBuiltTreeClicked(self):
state = self._ui.checkBoxInBuiltTree.isChecked()
key = self._map_ui_to_keys[self._ui.checkBoxInBuiltTree]
self._problem.updateFileInputOutputs({key: state})
self._ui.lineEditIpElem.setEnabled(not state)
self._ui.pushButtonChooseIpElem.setEnabled(not state)
self._ui.lineEditIpField.setEnabled(not state)
self._ui.pushButtonChooseIpField.setEnabled(not state)
self._ui.lineEditIpNode.setEnabled(not state)
self._ui.pushButtonChooseIpNode.setEnabled(not state)
def _isEnumParameter(self, parameter):
enum_parameters = ['expiration_type']
return parameter in enum_parameters
def _isCheckBox(self, key):
check_boxes = ['tree_inbuilt', 'flow_inbuilt']
return key in check_boxes
def _isOutputFile(self, key):
output_files = ['terminal_exnode', 'tree_exnode', 'tree_exelem', 'ventilation_exelem', 'radius_exelem']
return key in output_files
def _updateExecutableParameters(self):
state = self._problem.isInBuiltExecutable()
self._ui.checkBoxInBuiltExecutable.setChecked(state)
self._inBuiltExecutableClicked()
if not state:
self._ui.lineEditExecutable.setText(self._problem.getExecutable())
def _updateFileInputOutputs(self):
p = self._problem.getFileInputOutputs()
for k in p:
ui = self._map_keys_to_ui[k]
if self._isCheckBox(k):
ui.setChecked(p[k])
else:
ui.setText(p[k])
self._inBuiltFlowClicked()
self._inBuiltTreeClicked()
def _updateMainParameters(self):
p = self._problem.getMainParameters()
for k in p:
ui = self._map_keys_to_ui[k]
ui.setValue(p[k])
def _updateFlowParameters(self):
p = self._problem.getFlowParameters()
for k in p:
ui = self._map_keys_to_ui[k]
if self._isEnumParameter(k):
ui.setCurrentIndex(self._map_string_to_expiration_index[p[k]])
else:
ui.setValue(p[k])
def _executableLocationChanged(self):
self._problem.setExecutable(self._ui.lineEditExecutable.text())
def _executableChooserClicked(self):
sender = self.sender()
line_edit = self._map_chooser_to_line_edit[sender]
text = line_edit.text()
location = os.path.dirname(text) if text else self._location if self._location is not None else os.path.expanduser("~")
filename, _ = QtGui.QFileDialog.getOpenFileName(self, caption='Choose executable ...', dir=location,
filter="Executable (*.exe *);;All (*.* *)")
if filename:
self._location = os.path.dirname(filename)
self._problem.setExecutable(filename)
line_edit.setText(filename)
def _chooserClicked(self):
sender = self.sender()
line_edit = self._map_chooser_to_line_edit[sender]
key = self._map_ui_to_keys[line_edit]
text = line_edit.text()
location = os.path.dirname(text) if text else self._location if self._location is not None else os.path.expanduser("~")
if self._isOutputFile(key):
filename, _ = QtGui.QFileDialog.getSaveFileName(self, caption='Choose file ...', dir=location,
filter="Iron, Zinc Files (*.exnode *.exelem);;All (*.* *)")
else:
filename, _ = QtGui.QFileDialog.getOpenFileName(self, caption='Choose file ...', dir=location,
filter="Iron, Zinc Files (*.exnode *.exelem *.ipelem *.ipnode *.ipfiel);;All (*.* *)")
if filename:
self._location = os.path.dirname(filename)
self._problem.updateFileInputOutputs({key: filename})
line_edit.setText(filename)
def _updateMainParameterValue(self):
sender = self.sender()
key = self._map_ui_to_keys[sender]
self._problem.updateMainParameters({key: sender.value()})
def _updateFlowParameterValue(self):
sender = self.sender()
key = self._map_ui_to_keys[sender]
if self._isEnumParameter(key):
self._problem.updateFlowParameters({key: self._map_expiration_index_to_string[sender.currentIndex()]})
else:
self._problem.updateFlowParameters({key: sender.value()})
def serialize(self):
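        # Persist the widget state as a JSON string of the rough shape
        # {"location": ..., "active_tab": <tab index>, "problem": {...}},
        # where the "problem" payload comes from the problem's own serialize().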
d = {}
d['location'] = self._location
d['active_tab'] = self._ui.tabWidget.currentIndex()
d['problem'] = self._problem.serialize()
return json.dumps(d)
def deserialize(self, string):
d = json.loads(string)
self._location = d['location'] if 'location' in d else None
self._ui.tabWidget.setCurrentIndex(d['active_tab'] if 'active_tab' in d else 2)
if 'problem' in d:
self._problem.deserialize(d['problem'])
self.updateUi()
def updateUi(self):
self._updateExecutableParameters()
self._updateFileInputOutputs()
self._updateMainParameters()
self._updateFlowParameters()
| {
"content_hash": "6cf74eac10395d5ef055509838e88c71",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 146,
"avg_line_length": 52.040247678018574,
"alnum_prop": 0.6770182640252246,
"repo_name": "alan-wu/neon",
"id": "9fae6ae2e2bc5aca2c605bd4dfcc77fcf55ac0f6",
"size": "16809",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/opencmiss/neon/ui/problems/ventilation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "NSIS",
"bytes": "29534"
},
{
"name": "Python",
"bytes": "1449198"
}
],
"symlink_target": ""
} |
from conans.client.output import Color
from conans.model.ref import PackageReference
from conans.model.ref import ConanFileReference
from collections import OrderedDict
class Printer(object):
""" Print some specific information """
INDENT_COLOR = {0: Color.BRIGHT_CYAN,
1: Color.BRIGHT_RED,
2: Color.BRIGHT_GREEN,
3: Color.BRIGHT_YELLOW,
4: Color.BRIGHT_MAGENTA}
INDENT_SPACES = 4
def __init__(self, out):
self._out = out
def print_graph(self, deps_graph, registry):
""" Simple pretty printing of a deps graph, can be improved
with options, info like licenses, etc
"""
self._out.writeln("Requirements", Color.BRIGHT_YELLOW)
for node in sorted(deps_graph.nodes):
ref, _ = node
if not ref:
continue
remote = registry.get_ref(ref)
from_text = "from local" if not remote else "from %s" % remote.name
self._out.writeln(" %s %s" % (repr(ref), from_text), Color.BRIGHT_CYAN)
self._out.writeln("Packages", Color.BRIGHT_YELLOW)
for node in sorted(deps_graph.nodes):
ref, conanfile = node
if not ref:
continue
ref = PackageReference(ref, conanfile.info.package_id())
self._out.writeln(" %s" % repr(ref), Color.BRIGHT_CYAN)
self._out.writeln("")
def print_info(self, deps_graph, project_reference, _info, registry, graph_updates_info=None,
remote=None):
""" Print the dependency information for a conan file
Attributes:
deps_graph: the dependency graph of conan file references to print
            project_reference: the conan file reference that represents the conan
                               file for a project on the path. This may be None,
                               in which case the project itself will not be part
                               of the printed dependencies.
            _info: either True to show every field, or a comma-separated list
                   of the field names to show.
remote: Remote specified in install command.
Could be different from the registry one.
"""
def show(field):
if _info is True:
return True
if field in [s.lower() for s in _info.split(",")]:
return True
return False
graph_updates_info = graph_updates_info or {}
for node in sorted(deps_graph.nodes):
ref, conan = node
if not ref:
# ref is only None iff info is being printed for a project directory, and
# not a passed in reference
if project_reference is None:
continue
else:
ref = project_reference
self._out.writeln("%s" % str(ref), Color.BRIGHT_CYAN)
reg_remote = registry.get_ref(ref)
# Excludes PROJECT fake reference
remote_name = remote
if reg_remote and not remote:
remote_name = reg_remote.name
if isinstance(ref, ConanFileReference) and show("remote"):
if reg_remote:
self._out.writeln(" Remote: %s=%s" % (reg_remote.name, reg_remote.url),
Color.BRIGHT_GREEN)
else:
self._out.writeln(" Remote: None", Color.BRIGHT_GREEN)
url = getattr(conan, "url", None)
license_ = getattr(conan, "license", None)
author = getattr(conan, "author", None)
if url and show("url"):
self._out.writeln(" URL: %s" % url, Color.BRIGHT_GREEN)
if license_ and show("license"):
if isinstance(license_, (list, tuple, set)):
self._out.writeln(" Licenses: %s" % ", ".join(license_), Color.BRIGHT_GREEN)
else:
self._out.writeln(" License: %s" % license_, Color.BRIGHT_GREEN)
if author and show("author"):
self._out.writeln(" Author: %s" % author, Color.BRIGHT_GREEN)
if isinstance(ref, ConanFileReference) and show("update"): # Excludes PROJECT
update = graph_updates_info.get(ref)
update_messages = {
None: ("Version not checked", Color.WHITE),
0: ("You have the latest version (%s)" % remote_name, Color.BRIGHT_GREEN),
1: ("There is a newer version (%s)" % remote_name, Color.BRIGHT_YELLOW),
-1: ("The local file is newer than remote's one (%s)" % remote_name,
Color.BRIGHT_RED)
}
self._out.writeln(" Updates: %s" % update_messages[update][0],
update_messages[update][1])
dependants = deps_graph.inverse_neighbors(node)
if isinstance(ref, ConanFileReference) and show("required"): # Excludes
self._out.writeln(" Required by:", Color.BRIGHT_GREEN)
for d in dependants:
ref = repr(d.conan_ref) if d.conan_ref else project_reference
self._out.writeln(" %s" % ref, Color.BRIGHT_YELLOW)
if show("requires"):
depends = deps_graph.neighbors(node)
if depends:
self._out.writeln(" Requires:", Color.BRIGHT_GREEN)
for d in depends:
self._out.writeln(" %s" % repr(d.conan_ref), Color.BRIGHT_YELLOW)
def print_search_recipes(self, references, pattern):
""" Print all the exported conans information
param pattern: wildcards, e.g., "opencv/*"
"""
if not references:
warn_msg = "There are no packages"
pattern_msg = " matching the %s pattern" % pattern
self._out.info(warn_msg + pattern_msg if pattern else warn_msg)
return
self._out.info("Existing package recipes:\n")
for conan_ref in sorted(references):
self._print_colored_line(str(conan_ref), indent=0)
def print_search_packages(self, packages_props, reference, packages_query):
if not packages_props:
if packages_query:
warn_msg = "There are no packages for reference '%s' matching the query '%s'" % (str(reference), packages_query)
else:
warn_msg = "There are no packages for pattern '%s'" % str(reference)
self._out.info(warn_msg)
return
self._out.info("Existing packages for recipe %s:\n" % str(reference))
# Each package
for package_id, properties in sorted(packages_props.items()):
self._print_colored_line("Package_ID", package_id, 1)
for section in ("options", "settings", "full_requires"):
attrs = properties.get(section, [])
if attrs:
section_name = {"full_requires": "requires"}.get(section, section)
self._print_colored_line("[%s]" % section_name, indent=2)
if isinstance(attrs, dict): # options, settings
attrs = OrderedDict(sorted(attrs.items()))
for key, value in attrs.items():
self._print_colored_line(key, value=value, indent=3)
elif isinstance(attrs, list): # full requires
for key in sorted(attrs):
self._print_colored_line(key, indent=3)
self._out.writeln("")
def _print_colored_line(self, text, value=None, indent=0):
""" Print a colored line depending on its indentation level
Attributes:
            text: string line (used as the key when a value is given)
            value: optional value, printed in white after the text
            indent: indentation level; selects the colour and leading spaces
"""
text = text.strip()
if not text:
return
text_color = Printer.INDENT_COLOR.get(indent, Color.BRIGHT_WHITE)
indent_text = ' ' * Printer.INDENT_SPACES * indent
if value is not None:
value_color = Color.BRIGHT_WHITE
self._out.write('%s%s: ' % (indent_text, text), text_color)
self._out.writeln(value, value_color)
else:
self._out.writeln('%s%s' % (indent_text, text), text_color)
| {
"content_hash": "b720b48b4fa049fab442d58287b7d8d7",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 128,
"avg_line_length": 46.391304347826086,
"alnum_prop": 0.5346766635426429,
"repo_name": "dragly/conan",
"id": "387841aa34aac7483bab50d8d752c183ccd639a0",
"size": "8536",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/client/printer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1383611"
},
{
"name": "Shell",
"bytes": "1132"
}
],
"symlink_target": ""
} |
from nose.tools import *
from hawkweed.functional.logical import *
def test_all():
assert_true(all(lambda x: x > 0, range(1,100)))
assert_false(all(lambda x: x > 0, range(100,-1,-1)))
def test_any():
assert_true(any(lambda x: x > 0, [0, 0, 0, 1, 0]))
assert_false(any(lambda x: x > 0, [0, 0, 0, 0, -1]))
def test_complement():
assert_false(complement(all, lambda x: x > 0, range(1, 3)))
assert_true(complement(all, lambda x: x > 0, [0]))
def test_false():
assert_false(false())
def test_true():
assert_true(true())
def test_and():
assert_true(and_(True)(True))
assert_false(and_(False)(True))
def test_or():
assert_true(or_(True)(True))
assert_false(or_(False)(False))
| {
"content_hash": "16ac4a8b030eadd32273f04492e54a46",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 63,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.6104972375690608,
"repo_name": "hellerve/hawkweed",
"id": "37c5aa16d6d2f7e998925993f666b7c28c2f7170",
"size": "724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_logical.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "339784"
},
{
"name": "Shell",
"bytes": "144"
}
],
"symlink_target": ""
} |
import urllib2
import base64
import urllib
from lib.multipartform import MultiPartForm
#===============================================================================
#Using urllib2 to create a package in Databank
url = "http://databank-vm1.oerc.ox.ac.uk/test/datasets"
req = urllib2.Request(url)
USER = "admin"
PASS = "test"
identifier = "TestSubmission"
auth = 'Basic ' + base64.urlsafe_b64encode("%s:%s" % (USER, PASS))
req.add_header('Authorization', auth)
req.add_header('Accept', 'application/JSON')
req.add_data(urllib.urlencode({'id': identifier}))
# To verify the method is POST
req.get_method()
ans = urllib2.urlopen(req)
ans.read()
ans.msg
ans.code
#===============================================================================
#Using urllib2 to post a file in Databank
#Add a file
form = MultiPartForm()
filename = "solrconfig.xml"
filepath = "data/unicode07.xml"
form.add_file('file', filename, fileHandle=open(filepath))
# Build the request
url2 = "http://databank-vm1.oerc.ox.ac.uk/test/datasets/TestSubmission"
req2 = urllib2.Request(url2)
auth = 'Basic ' + base64.urlsafe_b64encode("admin:test")
req2.add_header('Authorization', auth)
req2.add_header('Accept', 'application/JSON')
body = str(form)
req2.add_header('Content-type', form.get_content_type())
req2.add_header('Content-length', len(body))
req2.add_data(body)
print
print 'OUTGOING DATA:'
print req2.get_data()
ans2 = urllib2.urlopen(req2)
print
print 'SERVER RESPONSE:'
ans2.read()
| {
"content_hash": "056ff45fbaf17890c9f26887f60796f6",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 80,
"avg_line_length": 28.754716981132077,
"alnum_prop": 0.634514435695538,
"repo_name": "fcrepo4-archive/RDFDatabank",
"id": "4519576f6ec776300014718db66af93602e5e025",
"size": "1544",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/using_databank_api/DatabankDemo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8"
},
{
"name": "Perl",
"bytes": "442"
},
{
"name": "Python",
"bytes": "1743311"
},
{
"name": "Shell",
"bytes": "201"
}
],
"symlink_target": ""
} |
import logging
import util
log = logging.getLogger(util.LOGNAME)
_DEBUG_PREFIX = ""
def DEBUG_PUSH():
global _DEBUG_PREFIX
_DEBUG_PREFIX += " "
def DEBUG_POP():
global _DEBUG_PREFIX
_DEBUG_PREFIX = _DEBUG_PREFIX[:-2]
def DEBUG(s):
log.debug("{}{}".format(_DEBUG_PREFIX, str(s)))
def WARN(s):
log.warn("{}{}".format(_DEBUG_PREFIX, str(s)))
def ERROR(s):
log.error("{}{}".format(_DEBUG_PREFIX, str(s)))
| {
"content_hash": "068ac003949dd1e1f23a11828ba36ece",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 49,
"avg_line_length": 16.423076923076923,
"alnum_prop": 0.6252927400468384,
"repo_name": "chubbymaggie/mcsema",
"id": "9362184ed38e6a5b3ab9c2ef2e4fd5a6a8507224",
"size": "1015",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/mcsema_disass/binja/debug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Ada",
"bytes": "3325"
},
{
"name": "C",
"bytes": "12718"
},
{
"name": "C++",
"bytes": "331832"
},
{
"name": "CMake",
"bytes": "33794"
},
{
"name": "GDB",
"bytes": "6207"
},
{
"name": "Python",
"bytes": "42216"
},
{
"name": "Shell",
"bytes": "8622"
}
],
"symlink_target": ""
} |
''' CSV-to-Appliance configuration script for AppNeta 9.x appliances '''
''' Run './csv_to_appliance.py appliances.csv' to configure from CSV file '''
''' Run './csv_to_appliance.py template' to generate blank csv template '''
''' To use, put the current IP of the appliance in the dhcp_ip column, '''
''' and then populate the other columns with desired config settings. '''
''' Check 'csv_to_appliance_example.csv' for an example of how the .csv '''
''' file should be populated. '''
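''' A hypothetical row for the CSV, matching the column order parsed below '''
''' (values are invented for illustration and shown wrapped here; it would '''
''' be a single line in the real file):                                    '''
''' 10.0.1.50,changeme,S3cret!,appliance-nyc-01,10.0.1.60,10.0.1.1,255.255.255.0, '''
''' 8.8.8.8,example.com,pool.ntp.org,America/New_York,pvc.example.com,ABC123,443,tcp,True,, '''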
import json
import sys
import os
import pandas
import logging
from theappnetas.appliance import Appliance
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
log.addHandler(sh)
def parse_csv(csv_file):
df = pandas.read_csv(csv_file, names=[
'dhcp_ip',
'password_old',
'password_new',
'hostname',
'ip_address',
'ip_gateway',
'ip_netmask',
'dns_servers',
'dns_search',
'ntp_servers',
'timezone',
'nis_address',
'nis_sitekey',
'nis_port',
'nis_protocol',
'nis_ssl',
'nis_relay',
'nis_tunnel'
], header=0, dtype=None)
df = df.where((pandas.notnull(df)), None)
q = []
for index, row in df.iterrows():
dhcp_ip = row['dhcp_ip']
password_old = row['password_old']
password_new = row['password_new']
hostname = row['hostname']
ip_address = row['ip_address']
ip_gateway = row['ip_gateway']
ip_netmask = row['ip_netmask']
dns_servers = row['dns_servers']
dns_search = row['dns_search']
ntp_servers = row['ntp_servers']
timezone = row['timezone']
nis_address = row['nis_address']
nis_sitekey = row['nis_sitekey']
nis_port = row['nis_port']
nis_protocol = row['nis_protocol']
nis_ssl = row['nis_ssl']
nis_relay = row['nis_relay']
nis_tunnel = row['nis_tunnel']
q.append({
'dhcp_ip': dhcp_ip,
'password_old' : password_old,
'password_new': password_new,
'hostname': hostname,
'ip_address': ip_address,
'ip_gateway': ip_gateway,
'ip_netmask': ip_netmask,
'dns_servers': dns_servers,
'dns_search': dns_search,
'ntp_servers': ntp_servers,
'timezone': timezone,
'nis_address': nis_address,
'nis_sitekey': nis_sitekey,
'nis_port': nis_port,
'nis_protocol': nis_protocol,
'nis_ssl': nis_ssl,
'nis_relay': nis_relay,
'nis_tunnel': nis_tunnel
})
return q
def create_template_file(filename):
f = open(filename,'w')
f.write('dhcp_ip,password_old,password_new,hostname,ip_address,ip_gateway,ip_netmask,dns_servers,dns_search,ntp_servers,timezone,nis_address,nis_sitekey,nis_port,nis_protocol,nis_ssl,nis_relay,nis_tunnel')
f.close()
def apply_config(appliance):
result = {}
a = Appliance(host=appliance['dhcp_ip'], username='admin', password=appliance['password_old'])
if a.put_password(appliance['password_new']):
log.info('{} - Password updated successfully'.format(appliance['hostname']))
result['password'] = True
else:
log.error('{} - Password did not update'.format(appliance['hostname']))
result['password'] = False
a = Appliance(host=appliance['dhcp_ip'], username='admin', password=appliance['password_new'])
if a.put_hostname(appliance['hostname']):
log.info('{} - Hostname updated successfully'.format(appliance['hostname']))
result['hostname'] = True
else:
log.error('{} - Hostname did not update'.format(appliance['hostname']))
result['hostname'] = False
if appliance['ip_address'] == 'dhcp':
if a.post_interface(
name='eth0',
method='dhcp'
):
log.info('{} - {} updated successfully'.format(appliance['hostname'], 'eth0'))
result['interface'] = True
else:
log.error('{} - {} did not update'.format(appliance['hostname'], 'eth0'))
result['interface'] = False
else:
if a.post_interface(
name='eth0',
method='static',
address=appliance['ip_address'],
netmask=appliance['ip_netmask'],
gateway=appliance['ip_gateway']
):
log.info('{} - {} updated successfully'.format(appliance['hostname'], 'eth0'))
result['interface'] = True
else:
log.error('{} - {} did not update'.format(appliance['hostname'], 'eth0'))
result['interface'] = False
if a.post_dns_servers(
interface='eth0',
servers=appliance['dns_servers']
):
log.info('{} - {} dns servers updated successfully'.format(appliance['hostname'], 'eth0'))
result['dns_servers'] = True
else:
log.error('{} - {} dns servers did not update'.format(appliance['hostname'], 'eth0'))
result['dns_servers'] = False
if a.post_dns_search(
interface='eth0',
servers=appliance['dns_search']
):
log.info('{} - {} dns search domain(s) updated successfully'.format(appliance['hostname'], 'eth0'))
result['dns_search'] = True
else:
log.error('{} - {} dns search domain(s) did not update'.format(appliance['hostname'], 'eth0'))
result['dns_search'] = False
if a.put_ntp_servers(servers=appliance['ntp_servers']):
log.info('{} - ntp servers updated successfully'.format(appliance['hostname']))
result['ntp_servers'] = True
else:
log.error('{} - ntp servers did not update'.format(appliance['hostname']))
result['ntp_servers'] = False
if a.put_timezone(timezone=appliance['timezone']):
log.info('{} - timezone updated successfully'.format(appliance['hostname']))
result['timezone'] = True
else:
log.error('{} - timezone did not update'.format(appliance['hostname']))
result['timezone'] = False
if a.post_nis(
address=appliance['nis_address'],
site_key=appliance['nis_sitekey'],
ports=str(appliance['nis_port']),
relay_addresses=appliance['nis_relay'],
ssl=str(appliance['nis_ssl']),
protocol=appliance['nis_protocol'],
tunnel_url=appliance['nis_tunnel']):
log.info('{} - nis config applied successfully'.format(appliance['hostname']))
result['nis_config'] = True
else:
log.error('{} - nis config was not applied'.format(appliance['hostname']))
result['nis_config'] = False
if a.put_appliance(action='reboot'):
log.info('{} - appliance rebooted successfully'.format(appliance['hostname']))
result['appliance_reboot'] = True
else:
log.error('{} - appliance did not reboot'.format(appliance['hostname']))
result['appliance_reboot'] = False
return result
if __name__ == "__main__":
if len(sys.argv) > 1:
if sys.argv[1] == 'template':
if os.path.isfile('template.csv'):
sys.exit('\'template.csv\' already exists. Exiting.')
else:
create_template_file('template.csv')
sys.exit('\'template.csv\' created.')
else:
csv_file = sys.argv[1]
else:
print 'usage:'
print '\'./csv_to_appliance.py appliances.csv\''
print '\'./csv_to_appliance.py template\' - creates a blank csv template'
sys.exit()
appliances = parse_csv(csv_file)
results = {}
for appliance in appliances:
result = apply_config(appliance)
results[appliance['hostname']] = result
print results
| {
"content_hash": "fef1320e1decb91042ed65a433f188b0",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 206,
"avg_line_length": 32.57013574660633,
"alnum_prop": 0.6386496248958043,
"repo_name": "mikemorgan15/theappnetas",
"id": "6843ec79e70946467bf80ae45ebad0a17ae2d89a",
"size": "7217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csv_to_appliance.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29639"
}
],
"symlink_target": ""
} |
"""High-level tests."""
import pytest
from httpie.compat import is_windows
from httpie.status import ExitStatus
from .fixtures import FILE_PATH_ARG, FILE_CONTENT
from .utils import http, HTTP_OK
from .utils.matching import assert_output_matches, Expect, ExpectSequence
# <https://developer.mozilla.org/en-US/docs/Web/HTTP/Redirections>
REDIRECTS_WITH_METHOD_BODY_PRESERVED = [307, 308]
def test_follow_all_redirects_shown(httpbin):
r = http('--follow', '--all', httpbin.url + '/redirect/2')
assert r.count('HTTP/1.1') == 3
assert r.count('HTTP/1.1 302 FOUND') == 2
assert HTTP_OK in r
@pytest.mark.parametrize('follow_flag', ['--follow', '-F'])
def test_follow_without_all_redirects_hidden(httpbin, follow_flag):
r = http(follow_flag, httpbin.url + '/redirect/2')
assert r.count('HTTP/1.1') == 1
assert HTTP_OK in r
@pytest.mark.xfail(True, reason="https://github.com/httpie/httpie/issues/1082")
def test_follow_output_options_used_for_redirects(httpbin):
r = http('--follow', '--print=H', httpbin.url + '/redirect/2')
assert r.count('GET /') == 1
assert HTTP_OK not in r
def test_follow_all_output_options_used_for_redirects(httpbin):
r = http('--check-status',
'--follow',
'--all',
'--print=H',
httpbin.url + '/redirect/2')
assert r.count('GET /') == 3
assert HTTP_OK not in r
#
# def test_follow_redirect_output_options(httpbin):
# r = http('--check-status',
# '--follow',
# '--all',
# '--print=h',
# '--history-print=H',
# httpbin.url + '/redirect/2')
# assert r.count('GET /') == 2
# assert 'HTTP/1.1 302 FOUND' not in r
# assert HTTP_OK in r
#
def test_max_redirects(httpbin):
r = http(
'--max-redirects=1',
'--follow',
httpbin.url + '/redirect/3',
tolerate_error_exit_status=True,
)
assert r.exit_status == ExitStatus.ERROR_TOO_MANY_REDIRECTS
@pytest.mark.skipif(is_windows, reason='occasionally fails w/ ConnectionError for no apparent reason')
@pytest.mark.parametrize('status_code', REDIRECTS_WITH_METHOD_BODY_PRESERVED)
def test_follow_redirect_with_repost(httpbin, status_code):
r = http(
'--follow',
httpbin.url + '/redirect-to',
'A:A',
'A:B',
'B:B',
f'url=={httpbin.url}/post',
f'status_code=={status_code}',
'@' + FILE_PATH_ARG,
)
assert HTTP_OK in r
assert FILE_CONTENT in r
assert r.json['headers']['A'] == 'A,B'
assert r.json['headers']['B'] == 'B'
@pytest.mark.skipif(is_windows, reason='occasionally fails w/ ConnectionError for no apparent reason')
@pytest.mark.parametrize('status_code', REDIRECTS_WITH_METHOD_BODY_PRESERVED)
def test_verbose_follow_redirect_with_repost(httpbin, status_code):
r = http(
'--follow',
'--verbose',
httpbin.url + '/redirect-to',
'A:A',
'A:B',
'B:B',
f'url=={httpbin.url}/post',
f'status_code=={status_code}',
'@' + FILE_PATH_ARG,
)
assert f'HTTP/1.1 {status_code}' in r
assert 'A: A' in r
assert 'A: B' in r
assert 'B: B' in r
assert r.count('POST /redirect-to') == 1
assert r.count('POST /post') == 1
assert r.count(FILE_CONTENT) == 3 # two requests + final response contain it
assert HTTP_OK in r
assert_output_matches(r, [
*ExpectSequence.TERMINAL_REQUEST,
Expect.RESPONSE_HEADERS,
Expect.SEPARATOR,
*ExpectSequence.TERMINAL_EXCHANGE,
])
| {
"content_hash": "7cab6834f605f5c39d777f1d49344a62",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 102,
"avg_line_length": 30.862068965517242,
"alnum_prop": 0.6044692737430167,
"repo_name": "PKRoma/httpie",
"id": "81dcb2befd4da9ebdf39a5b8372d2bfc12a875e5",
"size": "3580",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_redirects.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1148"
},
{
"name": "Makefile",
"bytes": "6658"
},
{
"name": "Python",
"bytes": "571783"
},
{
"name": "Roff",
"bytes": "31811"
},
{
"name": "Shell",
"bytes": "7629"
}
],
"symlink_target": ""
} |
'''
*****************************************
Author: zhlinh
Email: [email protected]
Version: 0.0.1
Created Time: 2016-05-10
Last_modify: 2016-05-10
******************************************
'''
'''
Given an array nums, write a function to move all 0's to the end of it
while maintaining the relative order of the non-zero elements.
For example, given nums = [0, 1, 0, 3, 12], after calling your function,
nums should be [1, 3, 12, 0, 0].
Note:
You must do this in-place without making a copy of the array.
Minimize the total number of operations.
Credits:
Special thanks to @jianchao.li.fighter for adding this problem and
creating all test cases.
'''
class Solution(object):
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
if not nums:
return
count = 0
n = len(nums)
for i in range(n):
if nums[i] != 0:
nums[count] = nums[i]
count += 1
for i in range(count, n):
nums[i] = 0
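# Quick illustration of the in-place behaviour, using the example from the
# problem statement above (not part of the original solution file):
#
#   nums = [0, 1, 0, 3, 12]
#   Solution().moveZeroes(nums)
#   print(nums)   # -> [1, 3, 12, 0, 0]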
| {
"content_hash": "f702403b9e4c938a120bc324b00ea3f1",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 74,
"avg_line_length": 27.634146341463413,
"alnum_prop": 0.5463371579876434,
"repo_name": "zhlinh/leetcode",
"id": "12c4dac4c4b4442b52bf53a3d6cf007ae4413339",
"size": "1179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0283.Move Zeroes/solution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "478111"
}
],
"symlink_target": ""
} |
from rest_framework import generics, permissions, status
from rest_framework.decorators import api_view
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.test import APIRequestFactory, APITestCase
# User class from django
from django.contrib.auth.models import User, UserManager
# Models and Serializers
from main.serializers import UserSerializer, TopicSerializer, FeedSerializer, PostSerializer
from main.models import Topic, Feed, Post
from django.forms.models import model_to_dict
## Transaction Management
from django.db import transaction
# Python built-ins required for tests
import time
import datetime
import pytz
import traceback
# class UserTests(APITestCase):
# @classmethod
# def setUpClass(self):
# self.user = User(username="shakespeare", password="shakespeare")
# self.user.save() #user creation isn't tested in iteration 1, it is assumed a user exists.
# self.u_id = self.user.id
# self.u_uncat_id = self.user.topics.get(name="Uncategorized").id
# self.model_u = User(username="eecummings")
# self.u = UserSerializer(self.model_u)
#
# @classmethod
# def tearDownClass(cls):
# cls.client.logout()
# cls.user.delete()
#
# def setUp(self):
# # Log the client in as the User
# self.client.login(username="shakespeare", password="shakespeare")
#
# def test_userlist_exists(self):
# # A userlist can only have 1 user in iteration 1
# """Check that UserList is alive and well"""
# response = self.client.get('/user/')
# self.assertEqual(response.status_code, 200)
# self.assertEqual(response.data, [{'id': self.u_id, 'username': u'shakespeare', 'first_name': u'', 'last_name': u'', 'topics': [self.u_uncat_id]}])
#
# def test_username_change(self):
# """Trying to change the username should fail"""
# url = '/user/'
# response = self.client.post(url, {'username':u'marlowe'}, format = 'json')
# self.assertEqual(response.status_code, 405)
#
# def test_userdetail_exists(self):
# """Check that UserDetail is accurate"""
# response = self.client.get('/user/')
# self.assertEqual(response.status_code, 200)
# self.assertItemsEqual(response.data, {'id': self.u_id, 'username':u'shakespeare', 'first_name': u'', 'last_name': u'', 'topics': [self.u_uncat_id]})
#
# # def test_user_cannot_be_created(self): #there is one user in iteration 1, so a second one cannot be created
# # """Iteration 1 should have one user, so users cannot be made"""
# # response = self.client.put('/users/', self.u.data, format='json')
# # self.assertEqual(response.status_code, 405)
#
# def test_uncategorized_exists(self):
# """Uncategorized should be a default Topic for a newly created User"""
# response = self.client.get('/topics/')
# self.assertEqual(response.status_code, 200)
# self.assertItemsEqual(response.data, [{u'id':self.u_uncat_id,'name':u'Uncategorized', 'user':self.u_id, 'feeds':[]}])
# class TopicAddTests(APITestCase):
# @classmethod
# def setUpClass(cls):
# # Create User
# cls.user = User(username="shakespeare")
# cls.user.save()
# cls.u_id = cls.user.id
#
# # Grab Uncategorized Topic
# cls.user_uncat = cls.user.topics.get(name="Uncategorized")
#
# # Create other topics
# cls.t1_m = Topic(name = "sonnets", user = cls.user)
# cls.t2_m = Topic(name = "tragedies", user = cls.user)
# cls.evil_t1_m = Topic(name = "tragedies", user=cls.user)
#
# # Turn topics into JSON objects
# cls.t1_data = model_to_dict(cls.t1_m)
# cls.t2_data = model_to_dict(cls.t2_m)
# cls.evil_t1_data = model_to_dict(cls.evil_t1_m)
#
# @classmethod
# def tearDownClass(cls):
# cls.user.delete()
#
# def test_add_topic(cls):
# """Tests that Topic can be added"""
# response = cls.client.post('/topics/create', cls.t1_data, format='json')
# cls.assertEqual(response.status_code, 201)
# t1Server = Topic.objects.get(name=cls.t1_data["name"])
# t1_id = t1Server.id
#
# response = cls.client.get('/topics/')
# cls.assertEqual(response.status_code, 200)
# # We don't care about the order the server returns things in
# cls.assertItemsEqual(response.data, [{u'id':cls.user_uncat.id,'name':u'Uncategorized', 'user':cls.u_id, 'feeds':[]},
# {u'id': t1_id, 'name': u'sonnets', 'user': cls.u_id, 'feeds': []}])
#
# response = cls.client.post('/topics/create', cls.t2_data, format = 'json')
# cls.assertEqual(response.status_code, 201)
# t2Server = Topic.objects.get(name=cls.t2_data["name"])
# t2_id = t2Server.id
#
# response = cls.client.get('/topics/')
# cls.assertEqual(response.status_code, 200)
# # We don't care about the order the server returns things in
# cls.assertItemsEqual(response.data, [{u'id':cls.user_uncat.id,'name':u'Uncategorized', 'user':cls.u_id, 'feeds':[]},
# {u'id':t1_id, 'name':u'sonnets','user':cls.u_id, 'feeds':[]},
# {u'id':t2_id, 'name':u'tragedies','user':cls.u_id, 'feeds':[]}])
# # Cleanup topics on server
# t1Server.delete()
# t2Server.delete()
#
# def test_add_repeat_topic(cls):
# """Adding a Topic with the same name as an existent Topic will fail"""
# cls.t2_m.save()
# with transaction.atomic():
# response = cls.client.post('/topics/create', cls.evil_t1_data, format='json')
# cls.assertEqual(response.status_code, 409)
# cls.t2_m.delete()
#
# class TopicTests(APITestCase):
# @classmethod
# def setUpClass(cls):
# # Create User
# cls.user = User(username="shakespeare")
# cls.user.save()
# cls.u_id = cls.user.id
#
# # Grab Uncategorized Topic
# cls.user_uncat = cls.user.topics.get(name="Uncategorized")
#
# # Create other topics
# cls.t1_m = Topic(name = "sonnets", user = cls.user)
# cls.t1_m.save()
# cls.t1_id = cls.t1_m.id
#
# cls.t2_m = Topic(name = "tragedies", user = cls.user)
# cls.t2_m.save()
# cls.t2_id = cls.t2_m.id
#
# cls.evil_t1_m = Topic(name = "tragedies", user=cls.user)
# cls.evil_t1_id = 154 # shakespeare wrote this many sonnets! <- Be more subtle Lucia, let the reader figure it out
#
# # Turn topics into JSON objects
# cls.evil_t1_data = model_to_dict(cls.evil_t1_m)
#
# @classmethod
# def tearDownClass(cls):
# cls.t1_m.delete()
# cls.t2_m.delete()
# cls.user.delete()
#
# def test_rename_topic(cls):
# """Tests that Topic can be renamed"""
# url = "/topics/%d" % (cls.t2_id,)
# response = cls.client.patch(url, {'name':u'comedies'}, format='json')
# cls.assertEqual(response.status_code, 200)
# response = cls.client.get(url)
# cls.assertEqual(response.status_code, 200)
# cls.assertEqual(response.data, {u'id':cls.t2_id, 'name':u'comedies', 'user':cls.u_id,'feeds':[]})
#
# # Set it back for further tests
# resetTopic = Topic.objects.get(name="comedies")
# resetTopic.name="tragedies"
# resetTopic.save()
#
# def test_rename_repeat_topic(cls):
# """Tests that Topic renamed with another Topic's name fails"""
# url = '/topics/%d' % (cls.t2_id,)
# response = cls.client.patch(url, {'name':u'sonnets'}, format='json')
# cls.assertEqual(response.status_code, 400)
#
# def test_rename_nameless_topic(cls):
# """A Test cannot be renamed without a name"""
# url = '/topics/%d' % (cls.t2_id,)
# response = cls.client.patch(url, {'name':u''}, format='json')
# cls.assertEqual(response.status_code, 400)
#
# def test_rename_uncategorized(cls):
# """The Uncategorized Topic cannot be renamed"""
# response = cls.client.post("/topics/rename", {"index" : cls.user_uncat.id, 'name':u'tragedies'}, format='json')
# cls.assertEqual(response.status_code, 400)
# response = cls.client.get("/topics/%d" % cls.user_uncat.id)
# cls.assertEqual(response.status_code, 200)
# cls.assertItemsEqual(response.data, {u'id':cls.user_uncat.id, 'name':u'Uncategorized', 'user':cls.u_id,'feeds':[]})
#
# def test_delete_topic(cls):
# """Tests that Topic can be deleted"""
# response = cls.client.post("/topics/delete", {"index" : cls.t2_id})
# cls.assertEqual(response.status_code, 204)
#
# response = cls.client.get('/topics/')
# cls.assertEqual(response.status_code, 200)
# cls.assertItemsEqual(response.data, [{u'id':cls.user_uncat.id,'name':u'Uncategorized', 'user':cls.u_id, 'feeds':[]}, {u'id': cls.t1_id, 'name': u'sonnets', 'user': cls.u_id, 'feeds': []}])
#
# response = cls.client.post("/topics/delete", {"index" : cls.t1_id})
# cls.assertEqual(response.status_code, 204)
#
# response = cls.client.get('/topics/')
# cls.assertEqual(response.status_code, 200)
# cls.assertItemsEqual(response.data, [{u'id':cls.user_uncat.id,'name':u'Uncategorized', 'user':cls.u_id, 'feeds':[]}])
#
# def test_delete_nonexistent_topic(cls):
# """A Topic that does not exist should fail upon attempted deletion"""
# url = '/topics/delete'
# response = cls.client.post(url, {"index" : cls.evil_t1_id})
# cls.assertEqual(response.status_code, 400)
#
#
# def test_delete_uncategorized(cls):
# """The Uncategorized Topic cannot be removed"""
# response = cls.client.post('/topics/delete', {"index" : cls.user_uncat.id})
# cls.assertEqual(response.status_code, 400)
#
# class FeedCreateTests(APITestCase):
# @classmethod
# def setUpClass(cls):
# cls.user = User(username="FeedTests")
# cls.user.save()
# cls.f1_url = "http://home.uchicago.edu/~jharriman/example-rss.xml"
# cls.f1_id = None
# cls.f1 = None
# cls.f1_post_list = [
# {
# "id": 6,
# "feedURL": "http://www.nytimes.com/services/xml/rss/nyt/US.xml",
# "author": "By KATIE HAFNER",
# "category": [],
# "rights": "",
# "title": "Bracing for the Falls of an Aging Nation",
# "subtitle": "",
# "content": "As Americans live longer, fall-related injuries and deaths are rising, and homes for the elderly are tackling the problem in ways large and small \u2014 even by changing the color of their carpeting and toilet seats.<img border=\"0\" height=\"1\" src=\"http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/mf.gif\" width=\"1\" /><br clear=\"all\" />",
# "generator": "",
# "guid": "http://www.nytimes.com/interactive/2014/11/03/health/bracing-for-the-falls-of-an-aging-nation.html",
# "url": "http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/l/0L0Snytimes0N0Cinteractive0C20A140C110C0A30Chealth0Cbracing0Efor0Ethe0Efalls0Eof0Ean0Eaging0Enation0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm",
# "contributor": "",
# "pubDate": "2014-11-02T13:43:10Z",
# "updated": "2014-11-02T13:43:10Z",
# "ackDate": 1415855355.56354,
# "feed": 2
# },
# {
# "id": 5,
# "feedURL": "http://www.nytimes.com/services/xml/rss/nyt/US.xml",
# "author": "By LYNN VAVRECK",
# "category": ["Elections, Senate","United States Politics and Government","Elections, House of Representatives", "Voting and Voters", "Midterm Elections (2014)"],
# "rights": "",
# "title": "Midterm Calculus: The Economy Elects Presidents. Presidents Elect Congress.",
# "subtitle": "",
# "content": "While presidential elections are shaped largely by economic performance, the largest factor in midterm elections is the president.",
# "generator": "",
# "guid": "http://www.nytimes.com/2014/11/03/upshot/the-economy-elects-presidents-presidents-elect-congress.html",
# "url": "http://rss.nytimes.com/c/34625/f/642562/s/40134217/sc/1/l/0L0Snytimes0N0C20A140C110C0A30Cupshot0Cthe0Eeconomy0Eelects0Epresidents0Epresidents0Eelect0Econgress0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm",
# "contributor": "",
# "pubDate": "2014-11-02T14:00:22Z",
# "updated": "2014-11-02T14:00:22Z",
# "ackDate": 1415855355.55587,
# "feed": 2
# }]
#
# cls.f1_details = {
# "id": cls.f1_id,
# "author": "",
# "category": "",
# "contributor": "",
# "description": "US",
# "docURL": "",
# "editorAddr": "",
# "generator": "",
# "guid": "",
# "language": "en-us",
# "logo": "http://graphics8.nytimes.com/images/misc/NYT_logo_rss_250x40.png",
# "rights": "Copyright 2014 The New York Times Company",
# "subtitle": "US",
# "title": "NYT > U.S.",
# "webmaster": "",
# "URL": "http://www.nytimes.com/services/xml/rss/nyt/US.xml",
# "ttl": 5,
# "skipDays": None,
# "skipHours": None,
# "pubDate": "2014-11-02T16:13:02Z",
# "updated": "2014-11-06T01:00:31Z",
# "posts": [2,1]
# }
#
# @classmethod
# def tearDownClass(cls):
# cls.user.topics.get(name="Uncategorized").delete()
# cls.user.delete()
# # Make sure to delete the feed so we don't run into other tests
#
# def test_create_feed(cls):
# """Test that Feed can be created by URL"""
# response = cls.client.post('/feeds/create', {"url" : cls.f1_url})
# cls.assertEqual(response.status_code, 200)
#
# response = cls.client.get('/feeds/')
# cls.assertEqual(response.status_code, 200)
# cls.assertEqual(response.data, [{'id': 1, 'author': u'', 'category': u'',
# 'contributor': u'', 'description': u'US',
# 'docURL': u'http://www.nytimes.com/pages/national/index.html?partner=rss&emc=rss',
# 'editorAddr': u'', 'generator': u'', 'guid': u'',
# 'language': u'en-us',
# 'logo': u'http://graphics8.nytimes.com/images/misc/NYT_logo_rss_250x40.png',
# 'rights': u'Copyright 2014 The New York Times Company',
# 'subtitle': u'US', 'title': u'NYT > U.S.', 'webmaster': u'',
# 'URL': u'http://home.uchicago.edu/~jharriman/example-rss.xml',
# 'ttl': 5, 'skipDays': None, 'skipHours': None,
# 'pubDate': datetime.datetime(2014, 11, 2, 16, 13, 2, tzinfo=pytz.UTC),
# 'updated': datetime.datetime(2014, 11, 6, 1, 0, 31, tzinfo=pytz.UTC),
# 'posts': [2, 1]}])
#
# #gets newly created feed object and its id
# cls.f1 = Feed.objects.get(id=response.data[0]["id"])
# cls.f1_id = cls.f1.id
# cls.f1.delete()
#
#
# class FeedTests(APITestCase):
# @classmethod
# def setUpClass(cls):
# cls.user = User(username="FeedTests")
# cls.user.save()
# cls.f1_url = "http://home.uchicago.edu/~jharriman/example-rss.xml"
# cls.f1_id = None
# cls.f1 = None
# cls.f1_post_list = [
# {
# "feedURL": u"http://www.nytimes.com/services/xml/rss/nyt/US.xml",
# "author": u"By KATIE HAFNER",
# "category": [],
# "rights": u"",
# "title": u"Bracing for the Falls of an Aging Nation",
# "subtitle": u"",
# "content": u"As Americans live longer, fall-related injuries and deaths are rising, and homes for the elderly are tackling the problem in ways large and small \u2014 even by changing the color of their carpeting and toilet seats.<img border=\"0\" height=\"1\" src=\"http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/mf.gif\" width=\"1\" /><br clear=\"all\" />",
# "generator": u"",
# "guid": u"http://www.nytimes.com/interactive/2014/11/03/health/bracing-for-the-falls-of-an-aging-nation.html",
# "url": u"http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/l/0L0Snytimes0N0Cinteractive0C20A140C110C0A30Chealth0Cbracing0Efor0Ethe0Efalls0Eof0Ean0Eaging0Enation0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm",
# "contributor": u"",
# "pubDate": u"2014-11-02T13:43:10Z",
# "updated": u"2014-11-02T13:43:10Z",
# "ackDate": 1415855355.56354,
# "feed": 2
# },
# {
# "feedURL": u"http://www.nytimes.com/services/xml/rss/nyt/US.xml",
# "author": u"By LYNN VAVRECK",
# "category": ["Elections, Senate","United States Politics and Government","Elections, House of Representatives", "Voting and Voters", "Midterm Elections (2014)"],
# "rights": u"",
# "title": u"Midterm Calculus: The Economy Elects Presidents. Presidents Elect Congress.",
# "subtitle": u"",
# "content": u"While presidential elections are shaped largely by economic performance, the largest factor in midterm elections is the president.",
# "generator": u"",
# "guid": u"http://www.nytimes.com/2014/11/03/upshot/the-economy-elects-presidents-presidents-elect-congress.html",
# "url": u"http://rss.nytimes.com/c/34625/f/642562/s/40134217/sc/1/l/0L0Snytimes0N0C20A140C110C0A30Cupshot0Cthe0Eeconomy0Eelects0Epresidents0Epresidents0Eelect0Econgress0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm",
# "contributor": u"",
# "pubDate": u"2014-11-02T14:00:22Z",
# "updated": u"2014-11-02T14:00:22Z",
# "ackDate": 1415855355.55587,
# "feed": 2
# }]
#
# cls.f1 = Feed.createByURL(cls.f1_url)
# cls.p1 = Post.objects.get(guid="http://www.nytimes.com/interactive/2014/11/03/health/bracing-for-the-falls-of-an-aging-nation.html")
# cls.p2 = Post.objects.get(guid="http://www.nytimes.com/2014/11/03/upshot/the-economy-elects-presidents-presidents-elect-congress.html")
# cls.f1_details = {
# "id": cls.f1_id,
# "author": u"",
# "category": u"",
# "contributor": u"",
# "description": u"US",
# "docURL": u"",
# "editorAddr": u"",
# "generator": u"",
# "guid": u"",
# "language": u"en-us",
# "logo": u"http://graphics8.nytimes.com/images/misc/NYT_logo_rss_250x40.png",
# "rights": u"Copyright 2014 The New York Times Company",
# "subtitle": u"US",
# "title": u"NYT > U.S.",
# "webmaster": u"",
# "URL": u"http://www.nytimes.com/services/xml/rss/nyt/US.xml",
# "ttl": 5,
# "skipDays": None,
# "skipHours": None,
# "pubDate" : datetime.datetime(2014, 11, 2, 16, 13, 2, tzinfo=pytz.UTC),
# "updated": datetime.datetime(2014, 11, 6, 1, 0, 31, tzinfo=pytz.UTC),
# "posts": [cls.p1.id,cls.p2.id]
# }
# cls.f1_id = cls.f1.id
#
# @classmethod
# def tearDownClass(cls):
# cls.user.topics.get(name="Uncategorized").delete()
# cls.user.delete()
# cls.f1.delete()
# # Make sure to delete the feed so we don't run into other tests
#
# def test_feed_detail_exists(cls):
# """Test accuracy of feed details"""
# response = cls.client.get("/feeds/%d" % (cls.f1_id, ))
# cls.assertEqual(response.status_code, 200)
# cls.assertItemsEqual(response.data, cls.f1_details)
#
# def test_post_list_exists(cls):
# """Test accuracy of post list"""
# response = cls.client.get("/feeds/%d/posts" % (cls.f1_id, ))
# cls.assertEqual(response.status_code, 200)
# # Delete the ids, since they are added by the server and not really relevant to checking correctness
# for post in response.data:
# del post["id"]
# for res, exp in response.data, cls.f1_post_list:
# cls.assertItemsEqual(res, exp)
#
# def test_delete_feed(cls):
# """Test feed deletion"""
# response = cls.client.delete("/feeds/%d" % (cls.f1_id,))
# cls.assertEqual(response.status_code, 204)
# response = cls.client.get("/feeds/")
# cls.assertEqual(response.status_code, 200)
# cls.assertEqual(response.data, [])
#
# class PostTests(APITestCase):
# @classmethod
# def setUpClass(cls):
# cls.f1 = Feed.createByURL("http://home.uchicago.edu/~jharriman/example-rss.xml")
# cls.f1.save()
# cls.f1_id = cls.f1.id
# cls.p1_id = cls.f1.posts.all()[0].id
# cls.p1_data = {
# u'id': cls.p1_id,
# 'feedURL': u'http://www.nytimes.com/services/xml/rss/nyt/US.xml',
# 'author': u'By KATIE HAFNER',
# 'category': [],
# 'rights': u'',
# 'title': u'Bracing for the Falls of an Aging Nation',
# 'subtitle': u'',
# 'content': u'As Americans live longer, fall-related injuries and deaths are rising, and homes for the elderly are tackling the problem in ways large and small \u2014 even by changing the color of their carpeting and toilet seats.<img border="0" height="1" src="http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/mf.gif" width="1" /><br clear="all" />',
# 'generator': u'',
# 'guid': u'http://www.nytimes.com/interactive/2014/11/03/health/bracing-for-the-falls-of-an-aging-nation.html',
# 'url': u'http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/l/0L0Snytimes0N0Cinteractive0C20A140C110C0A30Chealth0Cbracing0Efor0Ethe0Efalls0Eof0Ean0Eaging0Enation0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm',
# 'contributor': u'',
# 'pubDate': datetime.datetime(2014, 11, 2, 13, 43, 10, tzinfo=pytz.UTC),
# 'updated': datetime.datetime(2014, 11, 2, 13, 43, 10, tzinfo=pytz.UTC),
# 'ackDate': 1415858199.31228,
# 'feed': cls.f1_id
# }
#
# @classmethod
# def tearDownClass(cls):
# cls.f1.delete()
#
# def test_post_detail_exists(cls):
# """Test accuracy of post"""
# response = cls.client.get('/posts/%d' % (cls.p1_id, ))
# cls.assertEqual(response.status_code, 200)
# cls.assertItemsEqual(response.data, cls.p1_data)
| {
"content_hash": "94085b5cd3e8169ef88a2a1da43283cc",
"timestamp": "",
"source": "github",
"line_count": 471,
"max_line_length": 382,
"avg_line_length": 49.552016985138,
"alnum_prop": 0.5740177385492095,
"repo_name": "CombustibleLemons/rss-reader",
"id": "0796440dd8207484962fd48c1e47f068223b5c56",
"size": "23356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rss_reader/main/tests/tests_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6483"
},
{
"name": "JavaScript",
"bytes": "91192"
},
{
"name": "Python",
"bytes": "154076"
},
{
"name": "Shell",
"bytes": "381"
}
],
"symlink_target": ""
} |
import click
from do_cli.contexts import CTX
from do_cli.formatters import format_json
from do_cli.settings import get_env
DO_VARS = [
'api_key', 'client_id', 'api_token', 'ssh_key_id', 'size_id', 'region_id',
'image_id', 'wait_timeout', 'redis_host', 'redis_port', 'redis_db'
]
@click.command('env')
@CTX
def cli(ctx):
"""
Display DigitalOcean environment variables
"""
envdict = dict()
for varname in DO_VARS:
envdict['do_{}'.format(varname)] = get_env(varname)
envdict['ctx'] = ctx.all()
click.echo(format_json(envdict))
if ctx.verbose:
click.echo('---- cmd_env done ----')
| {
"content_hash": "049ef89b777e88a1cca967bb667cf68a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 22.857142857142858,
"alnum_prop": 0.6203125,
"repo_name": "meganlkm/do-cli",
"id": "82e4fa4afcc8334a5c0252248f7f5a268dcdaa74",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "do_cli/commands/cmd_env.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27071"
},
{
"name": "Shell",
"bytes": "289"
}
],
"symlink_target": ""
} |
"""
Technical Analysis Factors
--------------------------
"""
from __future__ import division
from numbers import Number
from numpy import (
abs,
arange,
average,
clip,
diff,
dstack,
exp,
fmax,
full,
inf,
isnan,
log,
NINF,
sqrt,
sum as np_sum,
)
from numexpr import evaluate
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.mixins import SingleInputMixin
from zipline.utils.input_validation import expect_bounded, expect_types
from zipline.utils.math_utils import (
nanargmax,
nanargmin,
nanmax,
nanmean,
nanstd,
nansum,
nanmin,
)
from zipline.utils.numpy_utils import (
float64_dtype,
ignore_nanwarnings,
rolling_window,
)
from .factor import CustomFactor
class Returns(CustomFactor):
"""
Calculates the percent change in close price over the given window_length.
**Default Inputs**: [USEquityPricing.close]
"""
inputs = [USEquityPricing.close]
window_safe = True
def _validate(self):
super(Returns, self)._validate()
if self.window_length < 2:
raise ValueError(
"'Returns' expected a window length of at least 2, but was "
"given {window_length}. For daily returns, use a window "
"length of 2.".format(window_length=self.window_length)
)
def compute(self, today, assets, out, close):
out[:] = (close[-1] - close[0]) / close[0]
class RSI(CustomFactor, SingleInputMixin):
"""
Relative Strength Index
**Default Inputs**: [USEquityPricing.close]
**Default Window Length**: 15
"""
window_length = 15
inputs = (USEquityPricing.close,)
window_safe = True
def compute(self, today, assets, out, closes):
diffs = diff(closes, axis=0)
ups = nanmean(clip(diffs, 0, inf), axis=0)
downs = abs(nanmean(clip(diffs, -inf, 0), axis=0))
return evaluate(
"100 - (100 / (1 + (ups / downs)))",
local_dict={'ups': ups, 'downs': downs},
global_dict={},
out=out,
)
class SimpleMovingAverage(CustomFactor, SingleInputMixin):
"""
Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
# nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
out[:] = nanmean(data, axis=0)
class WeightedAverageValue(CustomFactor):
"""
Helper for VWAP-like computations.
**Default Inputs:** None
**Default Window Length:** None
"""
def compute(self, today, assets, out, base, weight):
out[:] = nansum(base * weight, axis=0) / nansum(weight, axis=0)
class VWAP(WeightedAverageValue):
"""
Volume Weighted Average Price
**Default Inputs:** [USEquityPricing.close, USEquityPricing.volume]
**Default Window Length:** None
"""
inputs = (USEquityPricing.close, USEquityPricing.volume)
class MaxDrawdown(CustomFactor, SingleInputMixin):
"""
Max Drawdown
**Default Inputs:** None
**Default Window Length:** None
"""
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
drawdowns = fmax.accumulate(data, axis=0) - data
drawdowns[isnan(drawdowns)] = NINF
drawdown_ends = nanargmax(drawdowns, axis=0)
# TODO: Accelerate this loop in Cython or Numba.
for i, end in enumerate(drawdown_ends):
peak = nanmax(data[:end + 1, i])
out[i] = (peak - data[end, i]) / data[end, i]
class AverageDollarVolume(CustomFactor):
"""
Average Daily Dollar Volume
**Default Inputs:** [USEquityPricing.close, USEquityPricing.volume]
**Default Window Length:** None
"""
inputs = [USEquityPricing.close, USEquityPricing.volume]
def compute(self, today, assets, out, close, volume):
out[:] = nansum(close * volume, axis=0) / len(close)
def exponential_weights(length, decay_rate):
"""
Build a weight vector for an exponentially-weighted statistic.
The resulting ndarray is of the form::
[decay_rate ** (length + 1), ..., decay_rate ** 3, decay_rate ** 2]
Parameters
----------
length : int
The length of the desired weight vector.
decay_rate : float
The rate at which entries in the weight vector increase or decrease.
Returns
-------
weights : ndarray[float64]
"""
return full(length, decay_rate, float64_dtype) ** arange(length + 1, 1, -1)
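# Worked example of the helper above (values computed from the expression as
# written; not part of the original file):
#
#   exponential_weights(3, 0.5)
#   # -> array([0.0625, 0.125, 0.25])   i.e. 0.5 ** [4, 3, 2]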
class _ExponentialWeightedFactor(SingleInputMixin, CustomFactor):
"""
Base class for factors implementing exponential-weighted operations.
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list or tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Methods
-------
weights
from_span
from_halflife
from_center_of_mass
"""
params = ('decay_rate',)
@classmethod
@expect_types(span=Number)
def from_span(cls, inputs, window_length, span, **kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[USEquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if span <= 1:
raise ValueError(
"`span` must be greater than 1. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
)
@classmethod
@expect_types(halflife=Number)
def from_halflife(cls, inputs, window_length, halflife, **kwargs):
"""
Convenience constructor for passing ``decay_rate`` in terms of half
life.
Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides
the behavior equivalent to passing `halflife` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=np.exp(np.log(0.5) / 15),
# )
my_ewma = EWMA.from_halflife(
inputs=[USEquityPricing.close],
window_length=30,
halflife=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if halflife <= 0:
raise ValueError(
"`halflife` must be a positive number. %s was passed." % halflife
)
decay_rate = exp(log(.5) / halflife)
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
)
@classmethod
def from_center_of_mass(cls,
inputs,
window_length,
center_of_mass,
**kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of center of
mass.
Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This provides
behavior equivalent to passing `center_of_mass` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=(1 - (1 / 15.0)),
# )
my_ewma = EWMA.from_center_of_mass(
inputs=[USEquityPricing.close],
window_length=30,
center_of_mass=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))),
**kwargs
)
class ExponentialWeightedMovingAverage(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Average
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMA``.
See Also
--------
:func:`pandas.ewma`
"""
def compute(self, today, assets, out, data, decay_rate):
out[:] = average(
data,
axis=0,
weights=exponential_weights(len(data), decay_rate),
)
class LinearWeightedMovingAverage(CustomFactor, SingleInputMixin):
"""
Weighted Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
# nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
ndays = data.shape[0]
# Initialize weights array
weights = arange(1, ndays + 1, dtype=float64_dtype).reshape(ndays, 1)
# Compute normalizer
normalizer = (ndays * (ndays + 1)) / 2
# Weight the data
weighted_data = data * weights
# Compute weighted averages
out[:] = nansum(weighted_data, axis=0) / normalizer
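# Worked example of the weighting above (illustrative, not part of the
# original file): with window_length = 5 the weights are [1, 2, 3, 4, 5] and
# the normalizer is 5 * 6 / 2 = 15, so the newest observation carries weight
# 5/15 and the oldest 1/15.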
class ExponentialWeightedMovingStdDev(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Standard Deviation
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMSTD``.
See Also
--------
:func:`pandas.ewmstd`
"""
def compute(self, today, assets, out, data, decay_rate):
weights = exponential_weights(len(data), decay_rate)
mean = average(data, axis=0, weights=weights)
variance = average((data - mean) ** 2, axis=0, weights=weights)
squared_weight_sum = (np_sum(weights) ** 2)
bias_correction = (
squared_weight_sum / (squared_weight_sum - np_sum(weights ** 2))
)
out[:] = sqrt(variance * bias_correction)
class BollingerBands(CustomFactor):
"""
Bollinger Bands technical indicator.
https://en.wikipedia.org/wiki/Bollinger_Bands
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.close`
Parameters
----------
inputs : length-1 iterable[BoundColumn]
The expression over which to compute bollinger bands.
window_length : int > 0
Length of the lookback window over which to compute the bollinger
bands.
k : float
The number of standard deviations to add or subtract to create the
upper and lower bands.
"""
params = ('k',)
inputs = (USEquityPricing.close,)
outputs = 'lower', 'middle', 'upper'
def compute(self, today, assets, out, close, k):
difference = k * nanstd(close, axis=0)
out.middle = middle = nanmean(close, axis=0)
out.upper = middle + difference
out.lower = middle - difference
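# A minimal construction sketch (assumed usage inside a Pipeline definition;
# the 20-day / 2-sigma parameters are the textbook defaults, not values taken
# from this file):
#
#   bbands = BollingerBands(window_length=20, k=2)
#   lower, middle, upper = bbands.lower, bbands.middle, bbands.upper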
class Aroon(CustomFactor):
"""
Aroon technical indicator.
https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/aroon-indicator # noqa
**Defaults Inputs:** USEquityPricing.low, USEquityPricing.high
Parameters
----------
window_length : int > 0
Length of the lookback window over which to compute the Aroon
indicator.
"""
inputs = (USEquityPricing.low, USEquityPricing.high)
outputs = ('down', 'up')
def compute(self, today, assets, out, lows, highs):
wl = self.window_length
high_date_index = nanargmax(highs, axis=0)
low_date_index = nanargmin(lows, axis=0)
evaluate(
'(100 * high_date_index) / (wl - 1)',
local_dict={
'high_date_index': high_date_index,
'wl': wl,
},
out=out.up,
)
evaluate(
'(100 * low_date_index) / (wl - 1)',
local_dict={
'low_date_index': low_date_index,
'wl': wl,
},
out=out.down,
)
class FastStochasticOscillator(CustomFactor):
"""
Fast Stochastic Oscillator Indicator [%K, Momentum Indicator]
https://wiki.timetotrade.eu/Stochastic
This stochastic is considered volatile, and varies a lot when used in
market analysis. It is recommended to use the slow stochastic oscillator
or a moving average of the %K [%D].
**Default Inputs:** :data: `zipline.pipeline.data.USEquityPricing.close`
:data: `zipline.pipeline.data.USEquityPricing.low`
:data: `zipline.pipeline.data.USEquityPricing.high`
**Default Window Length:** 14
Returns
-------
out: %K oscillator
"""
inputs = (USEquityPricing.close, USEquityPricing.low, USEquityPricing.high)
window_safe = True
window_length = 14
def compute(self, today, assets, out, closes, lows, highs):
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
today_closes = closes[-1]
evaluate(
'((tc - ll) / (hh - ll)) * 100',
local_dict={
'tc': today_closes,
'll': lowest_lows,
'hh': highest_highs,
},
global_dict={},
out=out,
)
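# The docstring above suggests smoothing %K into a slow %D line; a minimal
# sketch using only factors defined in this module (illustrative, not part of
# the original file):
#
#   fast_k = FastStochasticOscillator()
#   slow_d = SimpleMovingAverage(inputs=[fast_k], window_length=3)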
class IchimokuKinkoHyo(CustomFactor):
"""Compute the various metrics for the Ichimoku Kinko Hyo (Ichimoku Cloud).
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud # noqa
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.high`
:data:`zipline.pipeline.data.USEquityPricing.low`
:data:`zipline.pipeline.data.USEquityPricing.close`
**Default Window Length:** 52
Parameters
----------
window_length : int > 0
The length of the window for the senkou span b.
tenkan_sen_length : int >= 0, <= window_length
The length of the window for the tenkan-sen.
kijun_sen_length : int >= 0, <= window_length
The length of the window for the kijun-sen.
chikou_span_length : int >= 0, <= window_length
The lag for the chikou span.
"""
params = {
'tenkan_sen_length': 9,
'kijun_sen_length': 26,
'chikou_span_length': 26,
}
inputs = (USEquityPricing.high, USEquityPricing.low, USEquityPricing.close)
outputs = (
'tenkan_sen',
'kijun_sen',
'senkou_span_a',
'senkou_span_b',
'chikou_span',
)
window_length = 52
def _validate(self):
super(IchimokuKinkoHyo, self)._validate()
for k, v in self.params.items():
if v > self.window_length:
raise ValueError(
'%s must be <= the window_length: %s > %s' % (
k, v, self.window_length,
),
)
def compute(self,
today,
assets,
out,
high,
low,
close,
tenkan_sen_length,
kijun_sen_length,
chikou_span_length):
out.tenkan_sen = tenkan_sen = (
high[-tenkan_sen_length:].max(axis=0) +
low[-tenkan_sen_length:].min(axis=0)
) / 2
out.kijun_sen = kijun_sen = (
high[-kijun_sen_length:].max(axis=0) +
low[-kijun_sen_length:].min(axis=0)
) / 2
out.senkou_span_a = (tenkan_sen + kijun_sen) / 2
out.senkou_span_b = (high.max(axis=0) + low.min(axis=0)) / 2
out.chikou_span = close[chikou_span_length]
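# Illustrative construction with the documented defaults (a sketch, not part
# of the original file; each named output is exposed as an attribute of the
# factor instance):
#
#   ichimoku = IchimokuKinkoHyo()   # window_length=52, tenkan_sen_length=9, ...
#   span_a, span_b = ichimoku.senkou_span_a, ichimoku.senkou_span_b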
class RateOfChangePercentage(CustomFactor):
"""
Rate of change Percentage
ROC measures the percentage change in price from one period to the next.
The ROC calculation compares the current price with the price `n`
periods ago.
Formula for calculation: ((price - prevPrice) / prevPrice) * 100
price - the current price
prevPrice - the price n days ago, equals window length
"""
def compute(self, today, assets, out, close):
today_close = close[-1]
prev_close = close[0]
evaluate('((tc - pc) / pc) * 100',
local_dict={
'tc': today_close,
'pc': prev_close
},
global_dict={},
out=out,
)
class TrueRange(CustomFactor):
"""
True Range
A technical indicator originally developed by J. Welles Wilder, Jr.
Indicates the true degree of daily price change in an underlying.
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.high`
:data:`zipline.pipeline.data.USEquityPricing.low`
:data:`zipline.pipeline.data.USEquityPricing.close`
**Default Window Length:** 2
"""
inputs = (
USEquityPricing.high,
USEquityPricing.low,
USEquityPricing.close,
)
window_length = 2
def compute(self, today, assets, out, highs, lows, closes):
high_to_low = highs[1:] - lows[1:]
high_to_prev_close = abs(highs[1:] - closes[:-1])
low_to_prev_close = abs(lows[1:] - closes[:-1])
out[:] = nanmax(
dstack((
high_to_low,
high_to_prev_close,
low_to_prev_close,
)),
2
)
class MovingAverageConvergenceDivergenceSignal(CustomFactor):
"""
Moving Average Convergence/Divergence (MACD) Signal line
https://en.wikipedia.org/wiki/MACD
A technical indicator originally developed by Gerald Appel in the late
1970's. MACD shows the relationship between two moving averages and
reveals changes in the strength, direction, momentum, and duration of a
trend in a stock's price.
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.close`
Parameters
----------
fast_period : int > 0, optional
The window length for the "fast" EWMA. Default is 12.
slow_period : int > 0, > fast_period, optional
The window length for the "slow" EWMA. Default is 26.
signal_period : int > 0, < fast_period, optional
The window length for the signal line. Default is 9.
Notes
-----
Unlike most pipeline expressions, this factor does not accept a
``window_length`` parameter. ``window_length`` is inferred from
``slow_period`` and ``signal_period``.
"""
inputs = (USEquityPricing.close,)
# We don't use the default form of `params` here because we want to
# dynamically calculate `window_length` from the period lengths in our
# __new__.
params = ('fast_period', 'slow_period', 'signal_period')
@expect_bounded(
__funcname='MACDSignal',
fast_period=(1, None), # These must all be >= 1.
slow_period=(1, None),
signal_period=(1, None),
)
def __new__(cls,
fast_period=12,
slow_period=26,
signal_period=9,
*args,
**kwargs):
if slow_period <= fast_period:
raise ValueError(
"'slow_period' must be greater than 'fast_period', but got\n"
"slow_period={slow}, fast_period={fast}".format(
slow=slow_period,
fast=fast_period,
)
)
return super(MovingAverageConvergenceDivergenceSignal, cls).__new__(
cls,
fast_period=fast_period,
slow_period=slow_period,
signal_period=signal_period,
window_length=slow_period + signal_period - 1,
*args, **kwargs
)
def _ewma(self, data, length):
decay_rate = 1.0 - (2.0 / (1.0 + length))
return average(
data,
axis=1,
weights=exponential_weights(length, decay_rate)
)
def compute(self, today, assets, out, close, fast_period, slow_period,
signal_period):
slow_EWMA = self._ewma(
rolling_window(close, slow_period),
slow_period
)
fast_EWMA = self._ewma(
rolling_window(close, fast_period)[-signal_period:],
fast_period
)
macd = fast_EWMA - slow_EWMA
out[:] = self._ewma(macd.T, signal_period)
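# Illustrative construction (a sketch, not part of the original file): with
# the documented defaults the inferred lookback is slow_period + signal_period - 1.
#
#   macd_signal = MovingAverageConvergenceDivergenceSignal()   # 12 / 26 / 9
#   # macd_signal.window_length == 26 + 9 - 1 == 34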
class AnnualizedVolatility(CustomFactor):
"""
Volatility. The degree of variation of a series over time as measured by
the standard deviation of daily returns.
https://en.wikipedia.org/wiki/Volatility_(finance)
**Default Inputs:** :data:`zipline.pipeline.factors.Returns(window_length=2)` # noqa
Parameters
----------
annualization_factor : float, optional
The number of time units per year. Default is 252, the number of NYSE
trading days in a normal year.
"""
inputs = [Returns(window_length=2)]
params = {'annualization_factor': 252.0}
window_length = 252
def compute(self, today, assets, out, returns, annualization_factor):
out[:] = nanstd(returns, axis=0) * (annualization_factor ** .5)
# Convenience aliases.
EWMA = ExponentialWeightedMovingAverage
EWMSTD = ExponentialWeightedMovingStdDev
MACDSignal = MovingAverageConvergenceDivergenceSignal
| {
"content_hash": "83dc8cf4f24b4915b8f744220769e5f1",
"timestamp": "",
"source": "github",
"line_count": 813,
"max_line_length": 131,
"avg_line_length": 29.731857318573187,
"alnum_prop": 0.5767416845937449,
"repo_name": "alphaBenj/zipline",
"id": "c5fe6804b7aaf14e7806bd7f3e0bd73191a3197a",
"size": "24172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zipline/pipeline/factors/technical.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Python",
"bytes": "1317560"
},
{
"name": "Shell",
"bytes": "4065"
}
],
"symlink_target": ""
} |
r"""A library of utilities."""
import os
import re
import numpy as np
import soundfile as sf
def read_wav(wav_f, always_2d=False):
data, samplerate = sf.read(wav_f, always_2d=always_2d)
return data, samplerate
# Use subtype = 'FLOAT' to write float wavs.
def write_wav(wav_f, wav_data, samplerate, subtype='PCM_16'):
sf.write(wav_f, wav_data, samplerate, format='WAV', subtype=subtype)
def make_example_dict_from_folder(
folder_sources, subset='all', ss_regex=re.compile(r'example.*_sources'),
pattern='_sources', subfolder_events=('background', 'foreground')):
"""Returns a dictionary which maps subfolder -> example -> source wavs list.
Returns a hierarchical dict of relative source file paths when given a
folder produced by scaper.
Args:
folder_sources: Main path to a sources folder which contains
      train, validation, and eval subfolders.
subset: A subdirectory name or 'all' for all subdirectories.
ss_regex: A regex that matches source folder names.
pattern: The pattern that is assumed to be added after the base filename
of the mixed file to get the source folder name.
subfolder_events: Source/event subfolders under source folder, if any.
Returns:
A hierarchical dictionary as described above.
"""
if subset == 'all':
subfolders = ['train', 'validation', 'eval']
else:
subfolders = [subset]
sources_for_mix = {}
for subfolder in subfolders:
sources_for_mix[subfolder] = {}
src_sub = os.path.join(folder_sources, subfolder)
if os.path.isdir(src_sub):
src_entries = os.listdir(src_sub)
src_selected = sorted(list(filter(ss_regex.search, src_entries)))
for src_example in src_selected:
        # Strip the trailing pattern (e.g. '_sources') as a suffix; rstrip
        # would strip a *set of characters*, not the suffix itself.
        src_example_base = (src_example[:-len(pattern)]
                            if src_example.endswith(pattern) else src_example)
src_example_wav = src_example_base + '.wav'
if not os.path.isfile(os.path.join(src_sub, src_example_wav)):
raise ValueError('In {}, no mixed file {} but there is a folder '
'of sources {}'.format(
subfolder, src_example_wav, src_example))
src_example_rel = os.path.join(subfolder, src_example_wav)
sources_for_mix[subfolder][src_example_rel] = []
if subfolder_events is not None:
for ex_sub in subfolder_events:
src_wav_dir = os.path.join(src_sub, src_example, ex_sub)
if os.path.isdir(src_wav_dir):
src_wavs = sorted(list(filter(re.compile(r'.*\.wav').search,
os.listdir(src_wav_dir))))
for src_wav in src_wavs:
src_wav_f = os.path.join(src_wav_dir, src_wav)
src_wav_f_rel = os.path.relpath(src_wav_f, folder_sources)
sources_for_mix[subfolder][src_example_rel].append(
src_wav_f_rel)
else:
src_wav_dir = os.path.join(src_sub, src_example)
if os.path.isdir(src_wav_dir):
src_wavs = sorted(list(filter(re.compile(r'.*\.wav').search,
os.listdir(src_wav_dir))))
for src_wav in src_wavs:
src_wav_f = os.path.join(src_wav_dir, src_wav)
src_wav_f_rel = os.path.relpath(src_wav_f, folder_sources)
sources_for_mix[subfolder][src_example_rel].append(src_wav_f_rel)
return sources_for_mix
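# Editor's illustrative sketch (hypothetical file names, not from the original
# source). For a scaper output tree such as
#
#   <folder_sources>/train/example1.wav
#   <folder_sources>/train/example1_sources/background/bg0.wav
#   <folder_sources>/train/example1_sources/foreground/fg0.wav
#
# the function above returns something like
#
#   {'train': {'train/example1.wav':
#                 ['train/example1_sources/background/bg0.wav',
#                  'train/example1_sources/foreground/fg0.wav']},
#    'validation': {}, 'eval': {}}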
def make_example_list_from_folder(
folder_sources, subset='all', ss_regex=re.compile(r'example.*_sources'),
pattern='_sources', subfolder_events=('background', 'foreground')):
"""Makes a tab separated list of examples from a top folder."""
example_dict = make_example_dict_from_folder(
folder_sources, subset=subset, ss_regex=ss_regex, pattern=pattern,
subfolder_events=subfolder_events)
example_list = []
for subset in example_dict:
for example in example_dict[subset]:
example_list.append('\t'.join([example] + example_dict[subset][example]))
return example_list
def check_and_correct_example(example, root_dir,
check_length, fix_length,
check_mix, fix_mix,
sample_rate=16000, duration=10.0,
chat=False):
"""Checks and possibly corrects a scaper produced example."""
# Earlier versions of scaper had a tendency to make mistakes every
# once in a while.
# This has most likely been fixed in the latest scaper release, at least
# for the parameter settings we are using, but this test and correction
# can serve to catch failures that may be introduced by using the wrong
# scaper version, or by using parameters in scaper that do not maintain
# mixture consistency. For example, at the time of this coding,
# using scaper reverb breaks mixture consistency.
# Enforce dependencies between flags.
if fix_mix:
check_mix = True
if check_mix:
fix_length = True
if fix_length:
check_length = True
length_problem = 0
fixed_length = 0
mix_problem = 0
fixed_mix = 0
files = example.split('\t')
mixfile = files[0]
if chat:
print('Checking {}'.format(mixfile))
components = files[1:]
def resize_audio(audio, length):
in_length = audio.shape[0]
new_audio = np.zeros((length, audio.shape[1]), dtype=audio.dtype)
new_audio[0:min(length, in_length), :] = \
audio[0:min(length, in_length), :]
return new_audio
expected_samples = int(duration * sample_rate)
if check_length:
for myfile in files:
file_abs = os.path.join(root_dir, myfile)
file_info = sf.info(file_abs)
num_samples = int(file_info.duration * file_info.samplerate)
if num_samples != expected_samples:
length_problem += 1
print('Warning: scaper output on {:s} is {:d} samples; '
'expected {:d}'.format(file_abs, num_samples, expected_samples))
audio, _ = read_wav(file_abs, always_2d=True)
num_samples, num_channels = audio.shape
audio = resize_audio(audio, expected_samples)
if fix_length:
# rewrite corrected source
print('Adjusting length of {:s}'.format(file_abs))
write_wav(file_abs, audio, sample_rate, subtype=file_info.subtype)
fixed_length += 1
def check_mixture(mixed_data, remixed_data, mixfile):
if not np.allclose(mixed_data, remixed_data, rtol=1e-4, atol=1e-5):
mixed_norm = np.linalg.norm(mixed_data)
err_norm = np.linalg.norm(mixed_data - remixed_data)
normalized_err = err_norm / mixed_norm
print('WARNING: Mismatched mixed data found {}. '
'Normalized error {}, mixed signal norm {}'.format(
mixfile, normalized_err, mixed_norm))
return False
return True
if check_mix:
mixfile_abs = os.path.join(root_dir, mixfile)
mix_info = sf.info(mixfile_abs)
mixture, _ = read_wav(mixfile_abs, always_2d=True)
num_samples, num_channels = mixture.shape
source_sum = np.zeros((expected_samples, num_channels),
dtype=mixture.dtype)
for srcfile in components:
srcfile_abs = os.path.join(root_dir, srcfile)
source, _ = read_wav(srcfile_abs, always_2d=True)
# sum up sources
source_sum += source
if not check_mixture(mixture, source_sum, mixfile):
mix_problem += 1
if fix_mix:
print('Rewriting corrected mixture file {}.'.format(mixfile_abs))
# write new mixture
# note we are not doing anything about clipping here,
# so if mismatch is due to clipping it will clip again on writing.
mixture = source_sum
write_wav(mixfile_abs, mixture, sample_rate, subtype=mix_info.subtype)
fixed_mix += 1
return length_problem, fixed_length, mix_problem, fixed_mix
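# --- Editor's usage sketch (hypothetical path; not part of the original module) ---
if __name__ == '__main__':
  # Read-only pass over a scaper output tree: list the training examples and
  # check their lengths without rewriting anything on disk.
  # 'path/to/fuss_sources' is a placeholder, not from the original source.
  root = 'path/to/fuss_sources'
  for line in make_example_list_from_folder(root, subset='train'):
    counts = check_and_correct_example(
        line, root, check_length=True, fix_length=False,
        check_mix=False, fix_mix=False, chat=True)
    print('(length_problem, fixed_length, mix_problem, fixed_mix) = {}'.format(counts))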
| {
"content_hash": "3de3c22533fa3c3179d98ece14b71f72",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 79,
"avg_line_length": 40.06185567010309,
"alnum_prop": 0.633170355120947,
"repo_name": "google-research/sound-separation",
"id": "523096ca8be1792f6a5f8e01647d382e615ef5f5",
"size": "8347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datasets/fuss/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6308"
},
{
"name": "HTML",
"bytes": "69934"
},
{
"name": "Python",
"bytes": "525147"
},
{
"name": "Shell",
"bytes": "39143"
}
],
"symlink_target": ""
} |
import unittest
from lib import bst
from algo import print_level_order, sorted_array_to_bst
class TestPrintLevelOrder(unittest.TestCase):
def setUp(self):
self.bst = bst.BST()
def test_add(self):
vals = range(7)
sorted_array_to_bst.convert(vals, 0, len(vals)-1, self.bst)
print_level_order.print_tree(self.bst.root)
| {
"content_hash": "644af0c89b440de787a3b73d14689d0e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 67,
"avg_line_length": 26.285714285714285,
"alnum_prop": 0.657608695652174,
"repo_name": "gsathya/dsalgo",
"id": "44ff62a5edfaf2ef1d108b6c49a2ecc996441a88",
"size": "368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/print_level_order_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "287"
},
{
"name": "JavaScript",
"bytes": "3282"
},
{
"name": "Python",
"bytes": "52980"
}
],
"symlink_target": ""
} |
import smbus
import time
# version 2 uses port 1
bus = smbus.SMBus(1)
DEV_ADRS = 0x5C # Sensor/LED Slave board address
DEV_STATUS_LED = 0x00 # Status LED command
LED_ON = 0x01
LED_OFF = 0x00
#out_val = [DEV_STATUS_LED, DSL_ON, 7]
out_block = [LED_OFF]
# Write out a BYTE to DEV_STATUS_LED register
bus.write_byte_data(DEV_ADRS, DEV_STATUS_LED, LED_ON)
# Does SDA_W REG then SDA_R DATA.
#print(hex(bus.read_byte_data(DEV_ADRS, 1)))
#print(hex(bus.read_byte_data(DEV_ADRS, 2)))
#print(hex(bus.read_byte_data(DEV_ADRS, 3)))
#print(hex(bus.read_byte_data(DEV_ADRS, 4)))
#print(hex(bus.read_byte_data(DEV_ADRS, 2)))
#print(hex(bus.read_byte_data(DEV_ADRS, 4)))
#print(hex(bus.read_byte_data(DEV_ADRS, 1)))
wt = time.clock() + 0.5
for c in range(5):
# print(bus.read_i2c_block_data(DEV_ADRS, 1, 4)) # worked once or twice.
# print(hex(bus.read_byte_data(DEV_ADRS, 2))) # works once
# print(hex(bus.read_byte(DEV_ADRS))) # NAK after read, so can't read again.
# print(bus.read_block_data(DEV_ADRS, 4)) # LOCKS UP SYSTEM
# Multi stage command # works a couple of times
# bus.write_byte(DEV_ADRS, 2)
# bus.read_byte(DEV_ADRS)
    # Issue seems to be the RPi not responding to the Slave clock stretch...trying a 20MHz Slave.
while( wt > time.clock() ):
out_block[0] = 9
wt += 0.5
print(bus.read_i2c_block_data(DEV_ADRS, 1, 4)) # worked once or twice.
print(c, 'Again\n')
## END of FOR loop
bus.write_byte_data(DEV_ADRS, DEV_STATUS_LED, LED_OFF)
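# Editor's note: a possible (untested) workaround sketch for the clock-stretch
# issue noted above -- retry the block read a few times with a short delay
# instead of polling continuously:
#
#for attempt in range(3):
#    try:
#        print(bus.read_i2c_block_data(DEV_ADRS, 1, 4))
#        break
#    except IOError:
#        time.sleep(0.1)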
| {
"content_hash": "69d3db9beb7f84b37f4313bd77e7ff03",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 94,
"avg_line_length": 31.897959183673468,
"alnum_prop": 0.6289187460012796,
"repo_name": "CmdrZin/chips_avr_examples",
"id": "a99f9801faf64166d3fed3b89fa9d60755c77f8c",
"size": "1781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RC_Interface/Python/i2cTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "388500"
},
{
"name": "Batchfile",
"bytes": "5016"
},
{
"name": "C",
"bytes": "1096172"
},
{
"name": "C#",
"bytes": "5170"
},
{
"name": "C++",
"bytes": "163567"
},
{
"name": "CSS",
"bytes": "7205"
},
{
"name": "HTML",
"bytes": "117827"
},
{
"name": "Makefile",
"bytes": "133535"
},
{
"name": "Objective-C",
"bytes": "4970"
},
{
"name": "Processing",
"bytes": "43022"
},
{
"name": "Python",
"bytes": "3845"
}
],
"symlink_target": ""
} |
"""
Module contains Record class that allows you to create SQLAlchemy like classes.
"""
from collections import OrderedDict
from prettyrecord.name_utils import mangle_name, unmangle_name
from prettyrecord.fields import Field, RecordField
__all__ = ("Record",)
class MetaRecord(type):
@classmethod
def __prepare__(mcs, name, bases, **kwargs):
return OrderedDict()
@classmethod
def _prepare_field(mcs, field):
if isinstance(field, Record):
return RecordField(record_type=type(field))
else:
return field
def __new__(mcs, name, bases, attrs, **kwargs):
slots = []
try:
for attr_name, attr in attrs.items():
if isinstance(attr, (Field, Record)):
field = attrs[attr_name] = mcs._prepare_field(attr)
field.name = field.name or mangle_name(name, attr_name)
slots.append(field.name)
except NameError:
pass
for option, value in kwargs.items():
attrs['__{}__'.format(option)] = value
attrs['__slots__'] = tuple(slots)
return super(MetaRecord, mcs).__new__(mcs, name, bases, attrs)
def __init__(cls, name, bases, attrs, **kwargs):
super(MetaRecord, cls).__init__(name, bases, attrs)
def __setattr__(self, key, value):
old_value = getattr(self, key)
if (old_value is None) or (type(old_value) is type(value)):
super(MetaRecord, self).__setattr__(key, value)
else:
raise AttributeError("can't modify {}'s {} attribute".format(self.__name__, key))
def __delattr__(self, key):
raise AttributeError("can't remove {}'s {} attribute".format(self.__name__, key))
class Record(metaclass=MetaRecord):
"""
Record allows you to create SQLAlchemy like classes.
You have only subclass it and fill with descriptors based on Field class.
"""
def __init__(self, **kwargs):
for _, field in self.iter_fields():
field.__set__(self, field.default_value)
for key, value in kwargs.items():
setattr(self, key, value)
@classmethod
def iter_fields(cls):
for owner in reversed(cls.__mro__):
if issubclass(owner, Record) and owner is not Record:
for mangled_name in owner.__slots__:
yield owner, getattr(owner, unmangle_name(mangled_name))
def __eq__(self, other):
if self.__class__ is not other.__class__:
return False
return all(attr.__get__(self, None) == attr.__get__(other, None)
for _, attr in self.iter_fields())
def __repr__(self):
args = ['{}={}'.format(unmangle_name(attr.name), repr(attr.__get__(self, None)))
for klass, attr in self.iter_fields() if klass is self.__class__]
return '{}({})'.format(self.__class__.__name__, ', '.join(args))
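# Editor's usage sketch (assumption: `Field` from prettyrecord.fields can be
# instantiated with no arguments and exposes the `name` / `default_value`
# attributes relied on above -- check its real signature before copying this):
#
#   class Point(Record):
#       x = Field()
#       y = Field()
#
#   p = Point(x=1, y=2)
#   repr(p)               # -> "Point(x=1, y=2)"
#   p == Point(x=1, y=2)  # -> True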
| {
"content_hash": "930685fca2699b6ad674eaa4de914c60",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 93,
"avg_line_length": 32.8,
"alnum_prop": 0.573170731707317,
"repo_name": "skorczan/prettyrecord",
"id": "f9f8706b89f117d64b2103a628cc2ec476b428dc",
"size": "2952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prettyrecord/record.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20806"
}
],
"symlink_target": ""
} |
from autopyfactory.interfaces import SchedInterface
import logging
class WeightedActivated(SchedInterface):
id = 'weightedactivated'
def __init__(self, apfqueue, config, section):
try:
self.apfqueue = apfqueue
self.log = logging.getLogger('autopyfactory.sched.%s' %apfqueue.apfqname)
# --- weights ---
self.activated_w = self.apfqueue.qcl.generic_get(self.apfqueue.apfqname, 'sched.weightedactivated.activated', 'getfloat', default_value=1.0)
self.pending_w = self.apfqueue.qcl.generic_get(self.apfqueue.apfqname, 'sched.weightedactivated.pending', 'getfloat', default_value=1.0)
self.log.debug("SchedPlugin: weight values are activated_w=%s, pending_w=%s." %(self.activated_w, self.pending_w))
self.log.debug("SchedPlugin: Object initialized.")
except Exception, ex:
self.log.error("SchedPlugin object initialization failed. Raising exception")
raise ex
def calcSubmitNum(self, n=0):
"""
        It returns the number of activated jobs minus the number of pending pilots.
        Before making that calculation, it applies a scaling factor (weight)
        to both values: activated and pending.
"""
self.log.debug('Starting.')
self.wmsinfo = self.apfqueue.wmsstatus_plugin.getInfo()
self.batchinfo = self.apfqueue.batchstatus_plugin.getInfo()
if self.wmsinfo is None:
self.log.warning("wsinfo is None!")
out = self.default
msg = "WeightedActivated:comment=no wmsinfo,in=%s,ret=%s" %(n, out)
elif self.batchinfo is None:
self.log.warning("self.batchinfo is None!")
out = self.default
msg = "WeightedActivated:comment=no batchinfo,in=%,ret=%s" %(n, out)
        elif not (self.wmsinfo.valid() and self.batchinfo.valid()):
out = self.default
msg = "WeightedActivated:comment=no wms/batchinfo,in=%s,ret=%s" %(n, out)
self.log.warn('a status is not valid, returning default = %s' %out)
else:
# Carefully get wmsinfo, activated.
self.wmsqueue = self.apfqueue.wmsqueue
self.log.debug("wmsqueue is %s" % self.wmsqueue)
(out, msg) = self._calc(n)
return (out, msg)
def _calc(self, n):
# initial default values.
activated_jobs = 0
pending_pilots = 0
jobsinfo = self.wmsinfo.jobs
self.log.debug("jobsinfo class is %s" % jobsinfo.__class__ )
try:
sitedict = jobsinfo[self.wmsqueue]
self.log.debug("sitedict class is %s" % sitedict.__class__ )
activated_jobs = sitedict.ready
except KeyError:
# This is OK--it just means no jobs in any state at the wmsqueue.
self.log.error("wmsqueue %s not present in jobs info from WMS" % self.wmsqueue)
activated_jobs = 0
try:
pending_pilots = self.batchinfo[self.apfqueue.apfqname].pending # using the new info objects
except KeyError:
# This is OK--it just means no jobs.
pass
# correct values based on weights
activated_jobs_w = int(activated_jobs * self.activated_w)
pending_pilots_w = int(pending_pilots * self.pending_w)
out = max(0, activated_jobs_w - pending_pilots_w)
msg = "WeightedActivated:in=%s,activated=%s,weightedactivated=%s,pending=%s,weightedpending=%s,ret=%s" %(n, activated_jobs, activated_jobs_w, pending_pilots, pending_pilots_w, out)
self.log.info(msg)
return (out, msg)
| {
"content_hash": "b88baddcecb1d60c67fcf36e4d2eb645",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 188,
"avg_line_length": 41.67777777777778,
"alnum_prop": 0.6057051452945881,
"repo_name": "btovar/autopyfactory",
"id": "dbcbea741a4dae9af5c8953d2ede7a0ae5bb8694",
"size": "3777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autopyfactory/plugins/queue/sched/WeightedActivated.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "827948"
},
{
"name": "Shell",
"bytes": "97872"
}
],
"symlink_target": ""
} |
from djangoappengine.settings_base import *
import os
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
# Activate django-dbindexer for the default database
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
AUTOLOAD_SITECONF = 'indexes'
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'djangotoolbox',
'autoload',
'dbindexer',
'app1',
'django.contrib.messages',
'django.contrib.staticfiles',
# djangoappengine should come last, so it can override a few manage.py commands
'djangoappengine',
)
MIDDLEWARE_CLASSES = (
# This loads the index definitions, so it has to come first
'autoload.middleware.AutoloadMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'app1.middleware.LastSiteUrl',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.media',
)
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
STATIC_ROOT = ''
STATIC_URL = '/static/'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
ROOT_URLCONF = 'urls'
AUTH_PROFILE_MODULE = 'app1.Client'
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Sets whether the session cookie is deleted when the browser is closed.
SESSION_COOKIE_AGE = 15 * 60 # Sets the cookie expiry time in seconds, hence 15 minutes * 60 seconds.
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
'/home/andrei/Projectes/Django/ecoop/static/',
)
| {
"content_hash": "d395f111ce31c447ded0ac3208e503ca",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 141,
"avg_line_length": 31.720588235294116,
"alnum_prop": 0.7273991655076495,
"repo_name": "ecopro/ecoop",
"id": "3e962cac42c1f63111fcdd2892ec2155ff7fbded",
"size": "2347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "25455"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
} |
import mock
from rally.plugins.openstack.scenarios.heat import stacks
from tests.unit import test
HEAT_STACKS = "rally.plugins.openstack.scenarios.heat.stacks.HeatStacks"
class HeatStacksTestCase(test.ScenarioTestCase):
def setUp(self):
super(HeatStacksTestCase, self).setUp()
self.default_template = "heat_template_version: 2013-05-23"
self.default_parameters = {"dummy_param": "dummy_key"}
self.default_files = ["dummy_file.yaml"]
self.default_environment = {"env": "dummy_env"}
@mock.patch(HEAT_STACKS + ".generate_random_name")
@mock.patch(HEAT_STACKS + "._list_stacks")
@mock.patch(HEAT_STACKS + "._create_stack")
def test_create_and_list_stack(self, mock__create_stack, mock__list_stacks,
mock_generate_random_name):
heat_scenario = stacks.HeatStacks(self.context)
mock_generate_random_name.return_value = "test-rally-stack"
heat_scenario.create_and_list_stack(
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment
)
mock__create_stack.assert_called_once_with(
self.default_template, self.default_parameters, self.default_files,
self.default_environment)
mock__list_stacks.assert_called_once_with()
@mock.patch(HEAT_STACKS + "._list_stacks")
def test_list_stack_and_resources(self, mock__list_stacks):
stack = mock.Mock()
mock__list_stacks.return_value = [stack]
heat_scenario = stacks.HeatStacks(self.context)
heat_scenario.list_stacks_and_resources()
self.clients("heat").resources.list.assert_called_once_with(stack.id)
self._test_atomic_action_timer(
heat_scenario.atomic_actions(), "heat.list_resources_of_1_stacks")
@mock.patch(HEAT_STACKS + "._list_stacks")
def test_list_stack_and_events(self, mock__list_stacks):
stack = mock.Mock()
mock__list_stacks.return_value = [stack]
heat_scenario = stacks.HeatStacks(self.context)
heat_scenario.list_stacks_and_events()
self.clients("heat").events.list.assert_called_once_with(stack.id)
self._test_atomic_action_timer(
heat_scenario.atomic_actions(), "heat.list_events_of_1_stacks")
@mock.patch(HEAT_STACKS + ".generate_random_name")
@mock.patch(HEAT_STACKS + "._delete_stack")
@mock.patch(HEAT_STACKS + "._create_stack")
def test_create_and_delete_stack(
self, mock__create_stack, mock__delete_stack,
mock_generate_random_name):
heat_scenario = stacks.HeatStacks(self.context)
fake_stack = object()
mock__create_stack.return_value = fake_stack
mock_generate_random_name.return_value = "test-rally-stack"
heat_scenario.create_and_delete_stack(
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment
)
mock__create_stack.assert_called_once_with(
self.default_template,
self.default_parameters,
self.default_files,
self.default_environment)
mock__delete_stack.assert_called_once_with(fake_stack)
@mock.patch(HEAT_STACKS + "._delete_stack")
@mock.patch(HEAT_STACKS + "._check_stack")
@mock.patch(HEAT_STACKS + "._create_stack")
def test_create_check_delete_stack(
self, mock__create_stack, mock__check_stack, mock__delete_stack):
heat_scenario = stacks.HeatStacks(self.context)
heat_scenario.create_check_delete_stack(
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment
)
mock__create_stack.assert_called_once_with(
self.default_template, self.default_parameters, self.default_files,
self.default_environment)
mock__check_stack.assert_called_once_with(
mock__create_stack.return_value)
mock__delete_stack.assert_called_once_with(
mock__create_stack.return_value)
@mock.patch(HEAT_STACKS + ".generate_random_name")
@mock.patch(HEAT_STACKS + "._delete_stack")
@mock.patch(HEAT_STACKS + "._update_stack")
@mock.patch(HEAT_STACKS + "._create_stack")
def test_create_update_delete_stack(
self, mock__create_stack, mock__update_stack, mock__delete_stack,
mock_generate_random_name):
heat_scenario = stacks.HeatStacks(self.context)
fake_stack = object()
mock__create_stack.return_value = fake_stack
mock_generate_random_name.return_value = "test-rally-stack"
heat_scenario.create_update_delete_stack(
template_path=self.default_template,
parameters=self.default_parameters,
updated_template_path=self.default_template,
files=self.default_files,
environment=self.default_environment
)
mock__create_stack.assert_called_once_with(
self.default_template,
self.default_parameters,
self.default_files,
self.default_environment)
mock__update_stack.assert_called_once_with(
fake_stack, self.default_template,
self.default_parameters,
self.default_files,
self.default_environment)
mock__delete_stack.assert_called_once_with(fake_stack)
def test_create_stack_and_scale(self):
heat_scenario = stacks.HeatStacks(self.context)
stack = mock.Mock()
heat_scenario._create_stack = mock.Mock(return_value=stack)
heat_scenario._scale_stack = mock.Mock()
heat_scenario.create_stack_and_scale(
self.default_template, "key", -1,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
heat_scenario._create_stack.assert_called_once_with(
self.default_template,
self.default_parameters,
self.default_files,
self.default_environment)
heat_scenario._scale_stack.assert_called_once_with(
stack, "key", -1)
@mock.patch(HEAT_STACKS + "._delete_stack")
@mock.patch(HEAT_STACKS + "._resume_stack")
@mock.patch(HEAT_STACKS + "._suspend_stack")
@mock.patch(HEAT_STACKS + "._create_stack")
def test_create_suspend_resume_delete_stack(
self, mock__create_stack, mock__suspend_stack, mock__resume_stack,
mock__delete_stack):
heat_scenario = stacks.HeatStacks(self.context)
heat_scenario.create_suspend_resume_delete_stack(
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment
)
mock__create_stack.assert_called_once_with(
self.default_template,
self.default_parameters,
self.default_files,
self.default_environment
)
mock__suspend_stack.assert_called_once_with(
mock__create_stack.return_value)
mock__resume_stack.assert_called_once_with(
mock__create_stack.return_value)
mock__delete_stack.assert_called_once_with(
mock__create_stack.return_value
)
@mock.patch(HEAT_STACKS + "._delete_stack")
@mock.patch(HEAT_STACKS + "._restore_stack")
@mock.patch(HEAT_STACKS + "._snapshot_stack")
@mock.patch(HEAT_STACKS + "._create_stack")
def test_create_snapshot_restore_delete_stack(
self, mock__create_stack, mock__snapshot_stack,
mock__restore_stack, mock__delete_stack):
heat_scenario = stacks.HeatStacks(self.context)
mock__snapshot_stack.return_value = {"id": "dummy_id"}
heat_scenario.create_snapshot_restore_delete_stack(
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment
)
mock__create_stack.assert_called_once_with(
self.default_template, self.default_parameters,
self.default_files, self.default_environment)
mock__snapshot_stack.assert_called_once_with(
mock__create_stack.return_value)
mock__restore_stack.assert_called_once_with(
mock__create_stack.return_value, "dummy_id")
mock__delete_stack.assert_called_once_with(
mock__create_stack.return_value)
| {
"content_hash": "cfa7a09f6c75afd57e1c6e057f4b3a66",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 79,
"avg_line_length": 43.23152709359606,
"alnum_prop": 0.6340018231540565,
"repo_name": "vishnu-kumar/PeformanceFramework",
"id": "02e24f12cd3208a9b2559eb7cedee22a3b7cac49",
"size": "9406",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/openstack/scenarios/heat/test_stacks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "75364"
},
{
"name": "Mako",
"bytes": "87863"
},
{
"name": "Python",
"bytes": "4081695"
},
{
"name": "Shell",
"bytes": "47535"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import sys, os
version = '0.0.1'
setup(name='sheepherding',
version=version,
description="Sheepherding AI",
long_description="""\
An AI for simulated sheepherding""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='ai',
author='Sven Schmit',
author_email='[email protected]',
url='www.stanford.edu/~schmit',
license='MIT License',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
| {
"content_hash": "6baced14b99d771bf59e42685b3d5ac4",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 95,
"avg_line_length": 29.192307692307693,
"alnum_prop": 0.6047430830039525,
"repo_name": "schmit/sheepherding",
"id": "38dc5e5f84430b7bc26185fcb20c85fad4988a5c",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43915"
}
],
"symlink_target": ""
} |
"""A module for Shoptimizer API that fixes identifierExists values.
Reference: https://support.google.com/merchants/answer/6324478
and https://support.google.com/merchants/answer/9464748
Products that have a brand, mpn, or gtin set and identifierExists as "false"
could cause disapproval, so this optimizer will delete the identifierExists
value in these cases, which defaults the value to true in Content API.
"""
import logging
from typing import Any, Dict
from models import optimization_result_counts
from optimizers_abstract import base_optimizer
from util import optimization_util
class IdentifierExistsOptimizer(base_optimizer.BaseOptimizer):
""""An optimizer that fixes invalid identifierExists values."""
_OPTIMIZER_PARAMETER = 'identifier-exists-optimizer'
def _optimize(
self, product_batch: Dict[str, Any], language: str, country: str,
currency: str) -> optimization_result_counts.OptimizationResultCounts:
"""Runs the optimization.
Removes invalid identifierExists fields.
See above for the definition of an invalid identifierExists field.
Args:
product_batch: A batch of product data.
language: The language to use for this optimizer.
country: The country to use for this optimizer.
currency: The currency to use for this optimizer.
Returns:
The number of products affected by this optimization.
"""
num_of_products_optimized = 0
num_of_products_excluded = 0
for entry in product_batch['entries']:
if (optimization_util.optimization_exclusion_specified(
entry, self._OPTIMIZER_PARAMETER)):
num_of_products_excluded += 1
continue
product = entry['product']
identifier_exists = product.get('identifierExists', True)
brand = product.get('brand', '')
gtin = product.get('gtin', '')
mpn = product.get('mpn', '')
if not identifier_exists and (brand or gtin or mpn):
item_id = product.get('offerId', '')
logging.info(
'Modified item %s: Clearing identifierExists '
'to prevent disapproval', item_id)
# Delete field from the request which defaults it to true.
del product['identifierExists']
num_of_products_optimized += 1
base_optimizer.set_optimization_tracking(product,
base_optimizer.SANITIZED)
return optimization_result_counts.OptimizationResultCounts(
num_of_products_optimized, num_of_products_excluded)
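# Editor's illustrative sketch (hypothetical product data, not from the
# original source). Given a batch entry whose product looks like
#
#   {'offerId': '1234', 'brand': 'ACME', 'gtin': '0012345678905',
#    'identifierExists': False}
#
# the optimizer above deletes 'identifierExists' (so Content API defaults it
# to true) and counts the product as optimized; a product with
# 'identifierExists': False and no brand/gtin/mpn is left untouched.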
| {
"content_hash": "d1feebd41a556e527bbd393016da62ac",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 76,
"avg_line_length": 38.13636363636363,
"alnum_prop": 0.6968613428684942,
"repo_name": "google/shoptimizer",
"id": "460343a283b0459a454a89b8f27fe5823729e37e",
"size": "3108",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "shoptimizer_api/optimizers_builtin/identifier_exists_optimizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1390"
},
{
"name": "Python",
"bytes": "558972"
},
{
"name": "Shell",
"bytes": "4697"
}
],
"symlink_target": ""
} |
import unittest
from tests.st.utils.docker_host import DockerHost
from test_base import TestBase
class TestEndpointCommands(TestBase):
@unittest.skip("Libnetwork doesn't support multi-host yet.")
def test_endpoint_commands_mainline(self):
"""
Run a mainline multi-host test using endpoint commands.
This test uses the "endpoint profile set" command to assign
endpoints to profiles according to the following topology:
Host1: [workload_A, workload_B, workload_C]
Host2: [workload_D, workload_E]
Creates a profile that connects A, C, & E
Creates an additional isolated profile for B.
Creates an additional isolated profile for D.
IP Connectivity is then tested to ensure that only workloads
in the same profile can ping one another
"""
host1 = DockerHost('host1')
host2 = DockerHost('host2')
ip_a = "192.168.1.1"
ip_b = "192.168.1.2"
ip_c = "192.168.1.3"
ip_d = "192.168.1.4"
ip_e = "192.168.1.5"
workload_a = host1.create_workload("workload_a", ip_a)
workload_b = host1.create_workload("workload_b", ip_b)
workload_c = host1.create_workload("workload_c", ip_c)
workload_d = host2.create_workload("workload_d", ip_d)
workload_e = host2.create_workload("workload_e", ip_e)
host1.calicoctl("profile add PROF_1_3_5")
host1.calicoctl("profile add PROF_2")
host1.calicoctl("profile add PROF_4")
workload_a_endpoint_id = host1.calicoctl("container workload_a endpoint-id show").strip()
workload_b_endpoint_id = host1.calicoctl("container workload_b endpoint-id show").strip()
workload_c_endpoint_id = host1.calicoctl("container workload_c endpoint-id show").strip()
workload_d_endpoint_id = host2.calicoctl("container workload_d endpoint-id show").strip()
workload_e_endpoint_id = host2.calicoctl("container workload_e endpoint-id show").strip()
host1.calicoctl("endpoint %s profile set PROF_1_3_5" % workload_a_endpoint_id)
host1.calicoctl("endpoint %s profile set PROF_2" % workload_b_endpoint_id)
host1.calicoctl("endpoint %s profile set PROF_1_3_5" % workload_c_endpoint_id)
host2.calicoctl("endpoint %s profile set PROF_4" % workload_d_endpoint_id)
host2.calicoctl("endpoint %s profile set PROF_1_3_5" % workload_e_endpoint_id)
self.assert_connectivity(pass_list=[workload_a, workload_c, workload_e],
fail_list=[workload_b, workload_d])
self.assert_connectivity(pass_list=[workload_b],
fail_list=[workload_a, workload_c, workload_d, workload_e])
self.assert_connectivity(pass_list=[workload_d],
fail_list=[workload_a, workload_b, workload_c, workload_e])
@unittest.skip("Libnetwork doesn't support multi-host yet.")
def test_endpoint_commands(self):
"""
Run a mainline multi-host test using endpoint commands
Performs more complicated endpoint profile assignments to test
the append, set, and remove commands in situations where the commands
specify multiple profiles at once.
"""
host1 = DockerHost('host1')
host2 = DockerHost('host2')
ip_main = "192.168.1.1"
ip_a = "192.168.1.2"
ip_b = "192.168.1.3"
ip_c = "192.168.1.4"
workload_main = host1.create_workload("workload_main", ip_main)
host2.create_workload("workload_a", ip_a)
host2.create_workload("workload_b", ip_b)
host2.create_workload("workload_c", ip_c)
workload_main_endpoint_id = host1.calicoctl("container workload_main endpoint-id show").strip()
workload_a_endpoint_id = host2.calicoctl("container workload_a endpoint-id show").strip()
workload_b_endpoint_id = host2.calicoctl("container workload_b endpoint-id show").strip()
workload_c_endpoint_id = host2.calicoctl("container workload_c endpoint-id show").strip()
host1.calicoctl("profile add PROF_A")
host1.calicoctl("profile add PROF_B")
host1.calicoctl("profile add PROF_C")
host2.calicoctl("endpoint %s profile set PROF_A" % workload_a_endpoint_id)
host2.calicoctl("endpoint %s profile set PROF_B" % workload_b_endpoint_id)
host2.calicoctl("endpoint %s profile set PROF_C" % workload_c_endpoint_id)
# Test set single profile
host1.calicoctl("endpoint %s profile set PROF_A" % workload_main_endpoint_id)
workload_main.assert_can_ping(ip_a, retries=4)
workload_main.assert_cant_ping(ip_b)
workload_main.assert_cant_ping(ip_c)
# Test set multiple profiles (note: PROF_A should now be removed)
host1.calicoctl("endpoint %s profile set PROF_B PROF_C" % workload_main_endpoint_id)
workload_main.assert_cant_ping(ip_a, retries=4)
workload_main.assert_can_ping(ip_b)
workload_main.assert_can_ping(ip_c)
# Test set profile to None
host1.calicoctl("endpoint %s profile set" % workload_main_endpoint_id)
workload_main.assert_cant_ping(ip_a, retries=4)
workload_main.assert_cant_ping(ip_b)
workload_main.assert_cant_ping(ip_c)
# Append a single profile
host1.calicoctl("endpoint %s profile append PROF_A" % workload_main_endpoint_id)
workload_main.assert_can_ping(ip_a, retries=4)
workload_main.assert_cant_ping(ip_b)
workload_main.assert_cant_ping(ip_c)
# Append two profiles at once
host1.calicoctl("endpoint %s profile append PROF_B PROF_C" % workload_main_endpoint_id)
workload_main.assert_can_ping(ip_a, retries=4)
workload_main.assert_can_ping(ip_b)
workload_main.assert_can_ping(ip_c)
# Remove a single profile
host1.calicoctl("endpoint %s profile remove PROF_C" % workload_main_endpoint_id)
workload_main.assert_can_ping(ip_a, retries=4)
workload_main.assert_can_ping(ip_b)
workload_main.assert_cant_ping(ip_c)
# Remove two profiles at once
host1.calicoctl("endpoint %s profile remove PROF_A PROF_B" % workload_main_endpoint_id)
workload_main.assert_cant_ping(ip_a, retries=4)
workload_main.assert_cant_ping(ip_b)
workload_main.assert_cant_ping(ip_c)
| {
"content_hash": "7cc372748c4737fc3a148fdac64d609d",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 103,
"avg_line_length": 46.61594202898551,
"alnum_prop": 0.6479092180942018,
"repo_name": "L-MA/calico-docker",
"id": "288f8a25461511f5653f4aa35511d1fdd296bb66",
"size": "6433",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "calico_containers/tests/st/test_endpoint_commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4549"
},
{
"name": "Python",
"bytes": "294215"
},
{
"name": "Shell",
"bytes": "4849"
}
],
"symlink_target": ""
} |
import datetime
from django import http
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from django.utils.timezone import utc, make_naive
from django.db import transaction
from django.conf import settings
import requests
import pytz
from funfactory.urlresolvers import reverse
from slugify import slugify
from jsonview.decorators import json_view
from airmozilla.main.models import (
SuggestedEvent,
Event,
Channel,
SuggestedEventComment,
Location
)
from airmozilla.uploads.models import Upload
from airmozilla.comments.models import SuggestedDiscussion
from airmozilla.base.utils import tz_apply
from . import utils
from . import forms
from . import sending
def _increment_slug_if_exists(slug):
base = slug
count = 2
def exists(slug):
return (
Event.objects.filter(slug__iexact=slug)
or
SuggestedEvent.objects.filter(slug__iexact=slug)
)
while exists(slug):
slug = base + '-%s' % count
count += 1
return slug
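# Editor's note -- e.g. if "office-hours" and "office-hours-2" are already
# taken by an Event or SuggestedEvent, _increment_slug_if_exists('office-hours')
# returns 'office-hours-3'.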
@login_required
@transaction.commit_on_success
def start(request):
data = {}
if request.method == 'POST':
form = forms.StartForm(request.POST, user=request.user)
if form.is_valid():
slug = slugify(form.cleaned_data['title']).lower()
slug = _increment_slug_if_exists(slug)
upcoming = False
event_type = form.cleaned_data['event_type']
if event_type == 'upcoming':
upcoming = True
event = SuggestedEvent.objects.create(
user=request.user,
title=form.cleaned_data['title'],
upcoming=upcoming,
slug=slug,
)
# Enable discussion on by default.
# https://bugzilla.mozilla.org/show_bug.cgi?id=1135822
SuggestedDiscussion.objects.create(
event=event,
enabled=True,
notify_all=True,
)
if not event.upcoming:
location, __ = Location.objects.get_or_create(
name=settings.DEFAULT_PRERECORDED_LOCATION[0],
timezone=settings.DEFAULT_PRERECORDED_LOCATION[1]
)
event.location = location
now = datetime.datetime.utcnow().replace(tzinfo=utc)
event.start_time = now
event.save()
event.channels.add(
Channel.objects.get(slug=settings.DEFAULT_CHANNEL_SLUG)
)
# XXX use next_url() instead?
if event.upcoming:
url = reverse('suggest:description', args=(event.pk,))
elif event_type == 'popcorn':
# this is a hack but it works well
event.popcorn_url = 'https://'
event.save()
url = reverse('suggest:popcorn', args=(event.pk,))
else:
request.session['active_suggested_event'] = event.pk
if request.session.get('active_event'):
del request.session['active_event']
url = reverse('uploads:upload')
return redirect(url)
else:
initial = {
'event_type': 'upcoming'
}
if request.GET.get('upload'):
try:
upload = Upload.objects.get(
pk=request.GET['upload'],
user=request.user
)
# is that upload used by some other suggested event
# in progress?
try:
suggested_event = SuggestedEvent.objects.get(
upload=upload
)
# that's bad!
messages.warning(
request,
'The file upload you selected belongs to a requested '
'event with the title: %s' % suggested_event.title
)
return redirect('uploads:home')
except SuggestedEvent.DoesNotExist:
pass
initial['event_type'] = 'pre-recorded'
request.session['active_upload'] = upload.pk
except Upload.DoesNotExist:
pass
form = forms.StartForm(user=request.user, initial=initial)
data['suggestions'] = (
SuggestedEvent.objects
.filter(user=request.user)
.order_by('modified')
)
data['form'] = form
data['event'] = None
return render(request, 'suggest/start.html', data)
@login_required
@transaction.commit_on_success
def title(request, id):
event = get_object_or_404(SuggestedEvent, pk=id)
if event.user != request.user:
return http.HttpResponseBadRequest('Not your event')
if request.method == 'POST':
form = forms.TitleForm(request.POST, instance=event)
if form.is_valid():
event = form.save()
# XXX use next_url() instead?
url = reverse('suggest:description', args=(event.pk,))
return redirect(url)
else:
form = forms.TitleForm(instance=event)
data = {'form': form, 'event': event}
return render(request, 'suggest/title.html', data)
@login_required
@transaction.commit_on_success
def choose_file(request, id):
event = get_object_or_404(SuggestedEvent, pk=id)
if event.user != request.user:
return http.HttpResponseBadRequest('Not your event')
if event.upcoming:
return redirect(reverse('suggest:description', args=(event.pk,)))
if request.method == 'POST':
form = forms.ChooseFileForm(
request.POST,
user=request.user,
instance=event
)
if form.is_valid():
event = form.save()
event.upload.suggested_event = event
event.upload.save()
# did any *other* upload belong to this suggested event?
other_uploads = (
Upload.objects
.filter(suggested_event=event)
.exclude(pk=event.upload.pk)
)
for upload in other_uploads:
upload.suggested_event = None
upload.save()
if request.session.get('active_suggested_event'):
del request.session['active_suggested_event']
# XXX use next_url() instead?
url = reverse('suggest:description', args=(event.pk,))
return redirect(url)
else:
initial = {}
if request.GET.get('upload'):
try:
upload = Upload.objects.get(
pk=request.GET['upload'],
user=request.user
)
initial['upload'] = upload.pk
except Upload.DoesNotExist:
pass
form = forms.ChooseFileForm(
user=request.user,
instance=event,
initial=initial
)
data = {'form': form, 'event': event}
return render(request, 'suggest/file.html', data)
@login_required
@transaction.commit_on_success
def popcorn(request, id):
event = get_object_or_404(SuggestedEvent, pk=id)
if event.user != request.user:
return http.HttpResponseBadRequest('Not your event')
if event.upcoming:
return redirect(reverse('suggest:description', args=(event.pk,)))
if request.method == 'POST':
form = forms.PopcornForm(
request.POST,
instance=event
)
if form.is_valid():
event = form.save()
image_url = utils.find_open_graph_image_url(event.popcorn_url)
if image_url:
from django.core.files.uploadedfile import InMemoryUploadedFile
import os
from StringIO import StringIO
image_content = requests.get(image_url).content
buf = StringIO(image_content)
# Seek to the end of the stream, so we can get its
# length with `buf.tell()`
buf.seek(0, 2)
file = InMemoryUploadedFile(
buf,
"image",
os.path.basename(image_url),
None,
buf.tell(),
None
)
event.placeholder_img = file
event.save()
# XXX use next_url() instead?
url = reverse('suggest:description', args=(event.pk,))
return redirect(url)
else:
initial = {}
form = forms.PopcornForm(
instance=event,
initial=initial
)
data = {'form': form, 'event': event}
return render(request, 'suggest/popcorn.html', data)
@login_required
@transaction.commit_on_success
def description(request, id):
event = get_object_or_404(SuggestedEvent, pk=id)
if event.user != request.user:
return http.HttpResponseBadRequest('Not your event')
if request.method == 'POST':
form = forms.DescriptionForm(request.POST, instance=event)
if form.is_valid():
form.save()
# XXX use next_url() instead?
url = reverse('suggest:details', args=(event.pk,))
return redirect(url)
else:
form = forms.DescriptionForm(instance=event)
data = {'form': form, 'event': event}
return render(request, 'suggest/description.html', data)
@login_required
@transaction.commit_on_success
def details(request, id):
event = get_object_or_404(SuggestedEvent, pk=id)
if event.user != request.user:
return http.HttpResponseBadRequest('Not your event')
try:
discussion = SuggestedDiscussion.objects.get(event=event)
except SuggestedDiscussion.DoesNotExist:
discussion = None
if request.method == 'POST':
form = forms.DetailsForm(request.POST, instance=event)
if form.is_valid():
event = form.save()
# the start_time comes to us as a string, e.g. '2014-01-01
# 12:00:00' and that'll be converted into '2014-01-01
# 12:00:00 tzinfo=UTC' automatically. But that's not what we want
# so we change it first.
event.start_time = tz_apply(
event.start_time,
pytz.timezone(event.location.timezone)
)
event.save()
next_url = reverse('suggest:placeholder', args=(event.pk,))
if form.cleaned_data['enable_discussion']:
if discussion:
# make sure it's enabled
discussion.enabled = True
# discussion.moderate_all = (
# event.privacy != Event.PRIVACY_COMPANY
# )
discussion.save()
else:
discussion = SuggestedDiscussion.objects.create(
event=event,
enabled=True,
notify_all=True,
# moderate_all=event.privacy != Event.PRIVACY_COMPANY
)
if request.user not in discussion.moderators.all():
discussion.moderators.add(request.user)
next_url = reverse('suggest:discussion', args=(event.pk,))
elif SuggestedDiscussion.objects.filter(event=event):
discussion = SuggestedDiscussion.objects.get(event=event)
discussion.enabled = False
discussion.save()
return redirect(next_url)
else:
if event.location and event.start_time:
# Because the modelform is going present our user
# without input widgets' that are datetimes in
# naive format, when it does this is does so using the
# settings.TIME_ZONE and when saved it applies the
# settings.TIME_ZONE back again.
# Normally in Django templates, this is solved with
# {% timezone "Europe/Paris" %}
# {{ form.as_p }}
# {% endtimezone %}
# But that's not going to work when working with jinja
# so we do it manually from the view code.
event.start_time = make_naive(
event.start_time,
pytz.timezone(event.location.timezone)
)
initial = {'enable_discussion': not (event and not discussion)}
form = forms.DetailsForm(instance=event, initial=initial)
data = {'form': form, 'event': event}
return render(request, 'suggest/details.html', data)
@login_required
@transaction.commit_on_success
def discussion(request, id):
event = get_object_or_404(SuggestedEvent, pk=id)
if event.user != request.user:
return http.HttpResponseBadRequest('Not your event')
discussion = SuggestedDiscussion.objects.get(event=event)
if request.method == 'POST':
form = forms.DiscussionForm(request.POST, instance=discussion)
if form.is_valid():
discussion = form.save()
discussion.moderators.clear()
for email in form.cleaned_data['emails']:
try:
user = User.objects.get(email__iexact=email)
except User.DoesNotExist:
user = User.objects.create(
username=email.split('@')[0],
email=email
)
user.set_unusable_password()
user.save()
discussion.moderators.add(user)
url = reverse('suggest:placeholder', args=(event.pk,))
return redirect(url)
else:
emails = []
for moderator in discussion.moderators.all():
if moderator.email not in emails:
emails.append(moderator.email)
if not emails:
emails.append(request.user.email)
initial = {'emails': ', '.join(emails)}
form = forms.DiscussionForm(instance=discussion, initial=initial)
context = {'event': event, 'form': form, 'discussion': discussion}
return render(request, 'suggest/discussion.html', context)
@login_required
@json_view
def autocomplete_emails(request):
if 'q' not in request.GET:
return http.HttpResponseBadRequest('Missing q')
q = request.GET.get('q', '').strip()
emails = []
if len(q) > 1:
users = (
User.objects
.filter(email__istartswith=q)
.exclude(email__isnull=True)
)
for user in users.order_by('email'):
if user.email not in emails:
emails.append(user.email)
if not emails:
if utils.is_valid_email(q):
emails.append(q)
elif utils.is_valid_email('%[email protected]' % q):
emails.append('%[email protected]' % q)
return {'emails': emails}
@login_required
@transaction.commit_on_success
def placeholder(request, id):
event = get_object_or_404(SuggestedEvent, pk=id)
if event.user != request.user:
return http.HttpResponseBadRequest('Not your event')
if request.method == 'POST':
form = forms.PlaceholderForm(
request.POST,
request.FILES,
instance=event
)
if form.is_valid():
event = form.save()
if form['placeholder_img'].value() != event.placeholder_img:
# User selected a new placeholder image. Clear gallery select.
event.picture = None
event.save()
# XXX use next_url() instead?
url = reverse('suggest:summary', args=(event.pk,))
return redirect(url)
else:
form = forms.PlaceholderForm()
if event.picture:
form.fields['picture'].initial = event.picture.id
data = {'form': form, 'event': event}
return render(request, 'suggest/placeholder.html', data)
@login_required
@transaction.commit_on_success
def summary(request, id):
event = get_object_or_404(SuggestedEvent, pk=id)
if event.user != request.user:
# it's ok if it's submitted and you have the 'add_event'
# permission
if request.user.has_perm('main.add_event'):
if not event.submitted:
return http.HttpResponseBadRequest('Not submitted')
else:
return http.HttpResponseBadRequest('Not your event')
comment_form = forms.SuggestedEventCommentForm()
if request.method == 'POST':
if request.POST.get('save_comment'):
comment_form = forms.SuggestedEventCommentForm(data=request.POST)
if comment_form.is_valid():
comment = SuggestedEventComment.objects.create(
comment=comment_form.cleaned_data['comment'].strip(),
user=request.user,
suggested_event=event
)
if event.submitted:
sending.email_about_suggested_event_comment(
comment,
request
)
messages.info(
request,
'Comment added and producers notified by email.'
)
else:
messages.info(
request,
'Comment added but not emailed to producers because '
'the event is not submitted.'
)
return redirect('suggest:summary', event.pk)
else:
if event.submitted:
event.status = SuggestedEvent.STATUS_RETRACTED
event.submitted = None
event.save()
else:
now = datetime.datetime.utcnow().replace(tzinfo=utc)
event.submitted = now
if not event.first_submitted:
event.status = SuggestedEvent.STATUS_SUBMITTED
event.first_submitted = now
else:
# it was only resubmitted if it was previously rejected
if event.status == SuggestedEvent.STATUS_REJECTED:
event.status = SuggestedEvent.STATUS_RESUBMITTED
else:
event.status = SuggestedEvent.STATUS_SUBMITTED
event.save()
sending.email_about_suggested_event(event, request)
url = reverse('suggest:summary', args=(event.pk,))
return redirect(url)
# we don't need the label for this form layout
comment_form.fields['comment'].label = ''
comments = (
SuggestedEventComment.objects
.filter(suggested_event=event)
        .select_related('user')
.order_by('created')
)
discussion = None
for each in SuggestedDiscussion.objects.filter(event=event):
discussion = each
context = {
'event': event,
'comment_form': comment_form,
'comments': comments,
'discussion': discussion,
}
return render(request, 'suggest/summary.html', context)
@csrf_exempt
@require_POST
@login_required
def delete(request, id):
event = get_object_or_404(SuggestedEvent, pk=id)
if event.user != request.user:
return http.HttpResponseBadRequest('Not your event')
event.delete()
return redirect('suggest:start')
| {
"content_hash": "d871c7240c9fa8207a64d199ba72e0df",
"timestamp": "",
"source": "github",
"line_count": 565,
"max_line_length": 79,
"avg_line_length": 35.00353982300885,
"alnum_prop": 0.5556960105172676,
"repo_name": "bugzPDX/airmozilla",
"id": "0177a1347ae971c4c1457909a0139898854a61cf",
"size": "19777",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airmozilla/suggest/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Brightscript",
"bytes": "67473"
},
{
"name": "CSS",
"bytes": "132588"
},
{
"name": "Groovy",
"bytes": "458"
},
{
"name": "HTML",
"bytes": "249557"
},
{
"name": "JavaScript",
"bytes": "541080"
},
{
"name": "Makefile",
"bytes": "11608"
},
{
"name": "Puppet",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "2656213"
},
{
"name": "Shell",
"bytes": "8175"
},
{
"name": "Smarty",
"bytes": "1638"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
import scipy.io.wavfile as wavfile
# Good Luck!
#
# INFO:
# Samples = Observations. Each audio file is a single sample
# in our dataset.
#
# Audio Samples = https://en.wikipedia.org/wiki/Sampling_(signal_processing)
# Each .wav file is actually just a bunch of numeric samples, "sampled"
# from the analog signal. Sampling is a type of discretization. When we
# mention 'samples', we mean observations. When we mention 'audio samples',
# we mean the actually "features" of the audio file.
#
#
# The goal of this lab is to use multi-target, linear regression to generate
# by extrapolation, the missing portion of the test audio file.
#
# Each missing audio_sample feature will be the output of an equation,
# which is a function of the provided portion of the audio_samples:
#
# missing_samples = f(provided_samples)
#
# You can experiment with how much of the audio you want to chop off
# and have the computer generate using the Provided_Portion parameter.
#
# TODO: Play with this. This is how much of the audio file will
# be provided, in percent. The remaining percent of the file will
# be generated via linear extrapolation.
Provided_Portion = 0.25
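# For example (editor's note, illustrative numbers): if the shortest clip has
# 4,000 audio samples after the hard chop below, Provided_Portion = 0.25 means
# the model is given the first ~1,000 samples and must extrapolate the
# remaining ~3,000.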
# INFO: You have to download the dataset (audio files) from the website:
# https://github.com/Jakobovski/free-spoken-digit-dataset
#
# TODO: Create a regular ol' Python List called `zero`
#
zero = []
sample_rates = []
#
# TODO: Loop through the dataset and load up all 50 of the 0_jackson*.wav
# files using the wavfile.read() method: https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.io.wavfile.read.html
# Be careful! .read() returns a tuple and you're only interested in the audio
# data, and not sample_rate at this point. Inside your for loop, simply
# append the loaded audio data into your Python list `zero`:
#
import os
dir = 'Datasets/spoken-digit-jackson-0/'
for f in os.listdir(dir):
    rate, data = wavfile.read(dir + f)
    sample_rates.append(rate)
    zero.append(data)
#
# TODO: Just for a second, convert zero into a DataFrame. When you do
# so, set the dtype to np.int16, since the input audio files are 16
# bits per sample. If you don't know how to do this, read up on the docs
# here:
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
#
# Since these audio clips are unfortunately not length-normalized,
# we're going to have to just hard chop them to all be the same length.
# Since Pandas would have inserted NANs at any spot to make zero a
# perfectly rectangular [n_observed_samples, n_audio_samples] array,
# do a dropna on the Y axis here. Then, convert one back into an
# NDArray using yourarrayname.values
#
zero = pd.DataFrame(zero, dtype=np.int16)
zero = zero.dropna(axis=1)
zero = zero.values
#
# TODO: It's important to know how many audio samples long the
# data is now. 'zero' is currently shaped [n_samples, n_audio_samples],
# so get the n_audio_samples count and store it in a variable called
# n_audio_samples
#
n_audio_samples = zero.shape[1]
#
# TODO: Create your linear regression model here and store it in a
# variable called 'model'. Don't actually train or do anything else
# with it yet:
#
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor(max_depth=10)
#
# INFO: There are 50 takes of each clip. You want to pull out just one
# of them, randomly, and that one will NOT be used in the training of
# your model. In other words, the one file we'll be testing / scoring
# on will be an unseen sample, independent to the rest of your
# training set:
from sklearn.utils.validation import check_random_state
rng = check_random_state(7) # Leave this alone until you've submitted your lab
random_idx = rng.randint(zero.shape[0])
test = zero[random_idx]
sample_rate = sample_rates[random_idx]
train = np.delete(zero, [random_idx], axis=0)
#
# TODO: Print out the shape of train, and the shape of test
# train will be shaped: [n_samples, n_audio_samples], where
# n_audio_samples are the 'features' of the audio file
# test will be shaped [n_audio_samples], since it is a single
# sample (audio file, e.g. observation).
#
print train.shape
print test.shape
#
# INFO: The test data will have two parts, X_test and y_test. X_test is
# going to be the first portion of the test audio file, which we will
# be providing the computer as input. y_test, the "label" if you will,
# is going to be the remaining portion of the audio file. Like such,
# the computer will use linear regression to derive the missing
# portion of the sound file based off of the training data its received!
#
# Save the original 'test' clip, the one you're about to delete
# half of, so that you can compare it to the 'patched' clip once
# you've generated it. HINT: you should have got the sample_rate
# when you were loading up the .wav files:
wavfile.write('Original Test Clip.wav', sample_rate, test)
#
# TODO: Prepare the TEST data by creating a slice called X_test. It
# should have Provided_Portion * n_audio_samples audio sample features,
# taken from your test audio file, currently stored in the variable
# 'test'. In other words, grab the FIRST Provided_Portion *
# n_audio_samples audio features from test and store it in X_test. This
# should be accomplished using indexing.
#
X_test = test[:int(Provided_Portion * n_audio_samples)]
#
# TODO: If the first Provided_Portion * n_audio_samples features were
# stored in X_test, then we need to also grab the *remaining* audio
# features and store it in y_test. With the remaining features stored
# in there, we will be able to R^2 "score" how well our algorithm did
# in completing the sound file.
#
y_test = test[int(Provided_Portion * n_audio_samples):]
#
# TODO: Duplicate the same process for X_train, y_train. The only
# differences being: 1) You will be getting your audio data from
# 'train' instead of from 'test', 2) Remember the shape of train that
# you printed out earlier? You want to do this slicing but for ALL
# samples (observations). For each observation, you want to slice
# the first Provided_Portion * n_audio_samples audio features into
# X_train, and the remaining go into y_train. All of this should be
# accomplishable using regular indexing in two lines of code.
#
X_train = train[:, :int(Provided_Portion * n_audio_samples)]
y_train = train[:, int(Provided_Portion * n_audio_samples):]
#
# TODO: SciKit-Learn gets mad if you don't supply your training
# data in the form of a 2D array: [n_samples, n_features].
#
# So if you only have one SAMPLE, such as is our case with X_test,
# and y_test, then by calling .reshape(1, -1), you can turn
# [n_features] into [1, n_features].
#
# On the other hand, if you only have one FEATURE, which currently
# doesn't apply, you can call .reshape(-1, 1) on your data to turn
# [n_samples] into [n_samples, 1]:
#
X_test = X_test.reshape(1, -1)
y_test = y_test.reshape(1, -1)
#
# TODO: Fit your model using your training data and label:
#
model.fit(X_train, y_train)
#
# TODO: Use your model to predict the 'label' of X_test. Store the
# resulting prediction in a variable called y_test_prediction
#
y_test_prediction = model.predict(X_test)
# INFO: SciKit-Learn will use float64 to generate your predictions
# so let's take those values back to int16:
y_test_prediction = y_test_prediction.astype(dtype=np.int16)
#
# TODO: Score how well your prediction would do for some good laughs,
# by passing in your test data and test label (y_test).
#
score = model.score(X_test, y_test)
print "Extrapolation R^2 Score: ", score
#
# First, take the first Provided_Portion portion of the test clip, the
# part you fed into your linear regression model. Then, stitch that
# together with the abomination the predictor model generated for you,
# and then save the completed audio clip:
completed_clip = np.hstack((X_test, y_test_prediction))
wavfile.write('Extrapolated Clip.wav', sample_rate, completed_clip[0])
#
# INFO: Congrats on making it to the end of this crazy lab =) !
#
| {
"content_hash": "e0cae66c7f8c3ec76fe3bd82d7ae2e89",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 127,
"avg_line_length": 35.109243697478995,
"alnum_prop": 0.7143370033508856,
"repo_name": "mr3bn/DAT210x",
"id": "3cc5a1c0c9842eec88f334a5ac89ef8834dabc8f",
"size": "8356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Module5/assignment10.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132926"
}
],
"symlink_target": ""
} |
import unittest
from unittest.mock import patch
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from datetime import datetime
import meerkat_abacus
from meerkat_abacus.config import config
from meerkat_abacus import model
from meerkat_abacus.pipeline_worker.process_steps import quality_control
from meerkat_abacus.consumer.database_setup import create_db
# TODO: Test deviceid and exclusion list
class TestQualityControl(unittest.TestCase):
def setUp(self):
create_db(config.DATABASE_URL, drop=True)
engine = create_engine(config.DATABASE_URL)
model.Base.metadata.create_all(engine)
self.engine = create_engine(config.DATABASE_URL)
Session = sessionmaker(bind=self.engine)
self.session = Session()
def test_quality_control(self):
variables = [
model.AggregationVariables(
id="qul_1",
type="import",
form="demo_case",
db_column="results./bmi_height",
method="between",
calculation="results./bmi_height",
condition="50,220"
),
model.AggregationVariables(
id="qul_2",
type="import",
form="demo_case",
db_column="pt./visit_date",
method="between",
category=["discard"],
calculation='Variable.to_date(pt./visit_date)',
condition="1388527200,2019679200"
),
model.AggregationVariables(
id="qul_3",
type="import",
form="demo_case",
db_column="pt./visit_date2",
method="match",
category=["replace:SubmissionDate"],
condition="15-Apr-2018"
)
]
config.country_config["quality_control"] = ["demo_case"]
self.session.query(model.AggregationVariables).delete()
self.session.commit()
for v in variables:
self.session.add(v)
self.session.commit()
qc = quality_control.QualityControl(
config,
self.session
)
data = {
"meta/instanceID": 1,
"deviceid": "1",
"SubmissionDate": "2016-04-17T02:43:31.306860",
"pt./visit_date": "2016-04-17",
"results./bmi_height": 60,
"intro./visit": "new"
}
result = qc.run("demo_case", data)[0]
self.assertEqual(result["data"]["results./bmi_height"], 60)
data["results./bmi_height"] = 20
result = qc.run("demo_case", data)[0]
self.assertEqual(result["data"]["results./bmi_height"], None)
data["result./bmi_height"] = 220
result = qc.run("demo_case", data)[0]
self.assertEqual(result["data"]["results./bmi_height"], None)
data["pt./visit_date"] = "15-Apr-2010"
result = qc.run("demo_case", data)
self.assertEqual(result, [])
data["pt./visit_date"] = "15-Apr-2016"
data["pt./visit_date2"] = "15-Apr-2019"
result = qc.run("demo_case", data)[0]
self.assertEqual(result["data"]["pt./visit_date2"],
"2016-04-17T02:43:31.306860")
class ValidateDateToEpiWeekConversionTest(unittest.TestCase):
test_data_types_list = [{"date": "date_column"}]
@patch.object(quality_control.data_types, 'data_types_for_form_name', return_value=test_data_types_list)
def test_validates_proper_date(self, mock):
test_row = {"date_column": "2017-01-01"}
self.assertTrue(quality_control._validate_date_to_epi_week_convertion("test_form",
test_row,
config))
@patch.object(quality_control.data_types, 'data_types_for_form_name', return_value=test_data_types_list)
def test_bypass_for_missing_date(self, mock):
test_row = {"date_column": ''}
self.assertFalse(quality_control._validate_date_to_epi_week_convertion("test_form",
test_row,
config))
@patch.object(quality_control.data_types, 'data_types_for_form_name', return_value=test_data_types_list)
def test_bypass_and_logs_incorrect_date(self, mock):
test_row = {"deviceid": "fake_me", "date_column": '31 Feb 2011'}
with self.assertLogs(logger=meerkat_abacus.logger, level='DEBUG') as logs:
quality_control._validate_date_to_epi_week_convertion("test_form", test_row,
config)
self.assertTrue(len(logs.output))
self.assertIn("Failed to process date column for row with device_id: fake_me", logs.output[0])
multiple_data_types_single_date = [
{
"db_column": "condition1",
"condition": "valid",
"date": "same_date"
},
{
"date": "same_date"
}
]
@patch.object(quality_control.data_types, 'data_types_for_form_name', return_value=multiple_data_types_single_date)
def test_dates_should_be_tested_once(self, mock):
test_row = {
"condition1": "valid",
"same_date": "June 14, 2015"
}
with patch.object(quality_control, 'epi_week_for_date') as mock:
quality_control._validate_date_to_epi_week_convertion("test_form", test_row,
param_config=config)
mock.assert_called_once()
mock.assert_called_with(datetime(2015, 6, 14), param_config=config.country_config)
test_epi_config = ({2015: datetime(2015, 3, 5)},)
@patch.object(quality_control.data_types, 'data_types_for_form_name', return_value=test_data_types_list)
def test_bypass_if_date_out_of_custom_epi_config(self, data_types_mock):
test_row = {"deviceid": "fake_me", "date_column": "03-05-2014"}
config.country_config["epi_week"] = self.test_epi_config[0]
with self.assertLogs(logger=meerkat_abacus.logger, level='DEBUG') as logs:
quality_control._validate_date_to_epi_week_convertion("test_form", test_row,
param_config=config)
self.assertTrue(len(logs.output))
print(logs)
self.assertIn("Failed to process date column for row with device_id: fake_me", logs.output[0])
test_multiple_data_types = [
{
"db_column": "condition1",
"condition": "valid",
"date": "first_date"
},
{
"db_column": "condition2",
"condition": "valid",
"date": "second_date"
}
]
@patch.object(quality_control.data_types, 'data_types_for_form_name', return_value=test_multiple_data_types)
def test_multiple_data_types_with_valid_dates(self, mock):
test_row = {
"condition1": "valid",
"first_date": "May 5,2015",
"condition2": "valid",
"second_date": "June 14, 2015"
}
config.country_config["epi_week"] = self.test_epi_config[0]
self.assertTrue(quality_control._validate_date_to_epi_week_convertion("test_form",
test_row,
param_config=config))
@patch.object(quality_control.data_types, 'data_types_for_form_name', return_value=test_multiple_data_types)
def test_multiple_data_types_fails_if_single_date_invalid(self, mock):
test_row = {
"condition1": "valid",
"first_date": "May 5,2015",
"condition2": "valid",
"second_date": "June 14, 2014"
}
config.country_config["epi_week"] = self.test_epi_config[0]
self.assertFalse(quality_control._validate_date_to_epi_week_convertion("test_form",
test_row,
param_config=config))
data_types_mixed_condition = [
{
"db_column": "condition1",
"condition": "valid",
"date": "first_date"
},
{
"date": "second_date"
}
]
@patch('meerkat_abacus.util.epi_week.epi_year_start_date.__defaults__', new=test_epi_config)
@patch('meerkat_abacus.util.epi_week.epi_year_by_date.__defaults__', new=test_epi_config)
@patch.object(quality_control.data_types, 'data_types_for_form_name', return_value=data_types_mixed_condition)
def test_multiple_data_types_passes_for_mixed_conditions(self, mock):
test_row = {
"condition1": "valid",
"first_date": "May 5,2015",
"second_date": "June 14, 2015"
}
self.assertTrue(quality_control._validate_date_to_epi_week_convertion("test_form", test_row,
config))
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "a42d8ae8cb0d724009da7a17c43eb2ce",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 119,
"avg_line_length": 40.05531914893617,
"alnum_prop": 0.5336237118878148,
"repo_name": "meerkat-code/meerkat_abacus",
"id": "12128b85e9cbbf1ba94787e00807c5095caf27fd",
"size": "9413",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "meerkat_abacus/pipeline_worker/tests/test_quality_control.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "269668"
},
{
"name": "Shell",
"bytes": "933"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/furniture/all/shared_frn_all_lamp_tatt_s01_lit.iff"
result.attribute_template_id = 6
result.stfName("frn_n","frn_lamp_table_tatt_s01")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "498d82c031875dbc549fdf0184a33fef",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 87,
"avg_line_length": 25.076923076923077,
"alnum_prop": 0.696319018404908,
"repo_name": "anhstudios/swganh",
"id": "d4ceea2d757392879bcb437b695499d62c76af9e",
"size": "471",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/furniture/all/shared_frn_all_lamp_tatt_s01_lit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import datetime
from database.event_query import TeamYearEventsQuery
from database.match_query import TeamEventMatchesQuery
from helpers.event_helper import EventHelper
from helpers.event_team_status_helper import EventTeamStatusHelper
from helpers.match_helper import MatchHelper
from models.event_team import EventTeam
from models.team import Team
class APIAIHelper(object):
ACTION_MAP = {
'getteam.generic': '_getteam_generic',
'getteam.fallback': '_getteam_fallback',
'getteam.location': '_getteam_location',
'getteam.rookieyear': '_getteam_rookieyear',
'getteam.status': '_getteam_status',
'getteam.nextmatch': '_getteam_nextmatch',
}
@classmethod
def process_request(cls, request):
action = request['result']['action']
return getattr(APIAIHelper, cls.ACTION_MAP.get(action, '_unknown_action'))(request)
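    # Illustrative (assumed) shape of an incoming API.AI request handled above:
    #   {'result': {'action': 'getteam.location',
    #               'parameters': {'team_number': '254'},
    #               'contexts': [...]}}
    # process_request() looks up 'action' in ACTION_MAP and dispatches to the
    # matching classmethod below.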
@classmethod
def _team_number_tts(cls, team_number):
if not team_number.isdigit(): # For handling invalid team numbers
return team_number
team_number = int(team_number)
if team_number < 10:
return team_number
if team_number % 100 == 0:
return team_number
team_number_str = str(team_number)
if len(team_number_str) % 2 == 0:
tts = ''
start_idx = 0
else:
tts = '{} '.format(team_number_str[0])
start_idx = 1
return tts + ' '.join([team_number_str[i:i+2] for i in range(start_idx, len(team_number_str), 2)])
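    # Illustrative outputs of the digit-pairing above: 7 -> 7, 254 -> '2 54',
    # 1114 -> '11 14', 100 -> 100 (single digits and multiples of 100 are
    # returned as-is), so a TTS engine reads e.g. 'eleven fourteen' rather
    # than 'one thousand one hundred fourteen'.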
@classmethod
def _create_simple_response(cls, display_text, tts=None):
return [{
'type': 0,
'speech': display_text,
},
{
'type': 'simple_response',
'platform': 'google',
'displayText': display_text,
'textToSpeech': tts if tts else display_text,
}]
# Currently Unused
# @classmethod
# def _create_basic_card(cls, title, subtitle, buttons):
# return [{
# 'type': 'basic_card',
# 'platform': 'google',
# 'title': title,
# 'subtitle': subtitle,
# 'formattedText': text, # Only required field
# 'image': {
# 'url': image_url,
# },
# 'buttons': [
# {
# 'title': link_title,
# 'openUrlAction': {
# 'url': link_url,
# }
# }
# ],
# }]
@classmethod
def _create_suggestion_chips(cls, suggestions):
return [{
'type': 'suggestion_chips',
'platform': 'google',
'suggestions': [{'title': suggestion} for suggestion in suggestions]
}]
@classmethod
def _create_link_chip(cls, text, url):
return [{
'type': 'link_out_chip',
'platform': 'google',
'destinationName': text,
'url': url,
}]
@classmethod
def _unknown_action(cls, request):
text = 'Whoops, something went wrong. Please ask me something else.'
return {
'speech': text,
'messages': cls._create_simple_response(text)
}
@classmethod
def _getteam_generic(cls, request):
team_number = request['result']['parameters']['team_number']
team = Team.get_by_id('frc{}'.format(team_number))
if team:
fmt = 'What would you like to know about Team {0}? I can tell you about their next match, how they are currently doing, or generic information like their location or rookie year.'
add_messages = cls._create_suggestion_chips([
'Next match',
'Current status',
'Location',
'Rookie year',
])
else:
fmt = 'Team {0} does not exist. Please ask about another team.'
add_messages = []
text = fmt.format(team_number)
tts = fmt.format(cls._team_number_tts(team_number))
return {
'speech': text,
'messages': cls._create_simple_response(text, tts=tts) + add_messages
}
@classmethod
def _getteam_fallback(cls, request):
team_number = None
for context in request['result']['contexts']:
if context['name'] == 'getteam':
team_number = context['parameters']['team_number']
break
team = Team.get_by_id('frc{}'.format(team_number))
if team:
fmt = 'Sorry, I don\'t understand your question about Team {0}. Try asking about their next match, status, location, or rookie year.'
else:
fmt = 'Team {0} does not exist. Please ask about another team.'
text = fmt.format(team_number)
tts = fmt.format(cls._team_number_tts(team_number))
return {
'speech': text,
'messages': cls._create_simple_response(text, tts=tts) +
cls._create_suggestion_chips([
'Next match',
'Current status',
'Location',
'Rookie year',
])
}
@classmethod
def _getteam_location(cls, request):
team_number = request['result']['parameters']['team_number']
team = Team.get_by_id('frc{}'.format(team_number))
if team:
fmt = 'Team {0} is from {1}. Would you like to know more about {0} or another team?'
text = fmt.format(
team_number, team.city_state_country)
tts = fmt.format(
cls._team_number_tts(team_number), team.city_state_country)
messages = cls._create_simple_response(text, tts=tts) + \
cls._create_suggestion_chips([
'Next match',
'Current status',
'Rookie year',
'Another team',
'No thanks',
])
else:
fmt = 'Team {0} does not exist. Please ask about another team.'
text = fmt.format(team_number)
tts = fmt.format(cls._team_number_tts(team_number))
messages = cls._create_simple_response(text, tts=tts)
return {
'speech': text,
'messages': messages,
}
@classmethod
def _getteam_rookieyear(cls, request):
team_number = request['result']['parameters']['team_number']
team = Team.get_by_id('frc{}'.format(team_number))
if team:
fmt = 'Team {0} first competed in {1}. Would you like to know more about {0} or another team?'
text = fmt.format(
team_number, team.rookie_year)
tts = fmt.format(
cls._team_number_tts(team_number), team.rookie_year)
messages = cls._create_simple_response(text, tts=tts) + \
cls._create_suggestion_chips([
'Next match',
'Current status',
'Location',
'Another team',
'No thanks',
])
else:
fmt = 'Team {0} does not exist. Please ask about another team.'
text = fmt.format(team_number)
tts = fmt.format(cls._team_number_tts(team_number))
messages = cls._create_simple_response(text, tts=tts)
return {
'speech': text,
'messages': messages,
}
@classmethod
def _getteam_status(cls, request):
team_number = request['result']['parameters']['team_number']
team_key = 'frc{}'.format(team_number)
team = Team.get_by_id(team_key)
if team:
events = TeamYearEventsQuery(team_key, datetime.datetime.now().year).fetch()
current_event = None
for event in events:
if event.now:
current_event = event
if current_event:
event_team = EventTeam.get_by_id('{}_{}'.format(current_event.key.id(), team_key))
text = EventTeamStatusHelper.generate_team_at_event_status_string(
team_key, event_team.status, formatting=False, event=current_event)
tts = 'Team {} {}'.format(
cls._team_number_tts(team_number),
EventTeamStatusHelper.generate_team_at_event_status_string(
team_key, event_team.status, formatting=False, event=current_event, include_team=False, verbose=True))
additional_prompt = ' Would you like to know more about {} or another team?'.format(team_number)
text += additional_prompt
tts += additional_prompt
messages = cls._create_simple_response(text, tts=tts) +\
cls._create_link_chip(current_event.display_name, 'https://www.thebluealliance.com/event/{}'.format(current_event.key.id()))
else:
fmt = 'Team {0} is not currently competing. Would you like to know more about {0} or another team?'
text = fmt.format(
team_number)
tts = fmt.format(
cls._team_number_tts(team_number))
messages = cls._create_simple_response(text, tts=tts)
messages += cls._create_suggestion_chips([
'Next match',
'Location',
'Rookie year',
'Another team',
'No thanks',
])
else:
fmt = 'Team {0} does not exist. Please ask about another team.'
text = fmt.format(team_number)
tts = fmt.format(cls._team_number_tts(team_number))
messages = cls._create_simple_response(text, tts=tts)
return {
'speech': text,
'messages': messages,
}
@classmethod
def _getteam_nextmatch(cls, request):
team_number = request['result']['parameters']['team_number']
team_key = 'frc{}'.format(team_number)
team = Team.get_by_id(team_key)
if team:
events = TeamYearEventsQuery(team_key, datetime.datetime.now().year).fetch()
EventHelper.sort_events(events)
# Find first current or future event
for event in events:
if event.now:
matches = TeamEventMatchesQuery(team_key, event.key.id()).fetch()
matches = MatchHelper.play_order_sort_matches(matches)
if matches:
next_match = None
for match in matches:
if not match.has_been_played:
next_match = match
break
if next_match is not None:
if match.predicted_time:
eta = match.predicted_time - datetime.datetime.now()
eta_str = None
if eta < datetime.timedelta(minutes=5):
fmt = 'Team {0} will be playing in {1} soon at the {3}.'
else:
eta_str = ''
days = eta.days
hours, rem = divmod(eta.seconds, 3600)
minutes, _ = divmod(rem, 60)
if days:
eta_str += ' {} day{}'.format(days, '' if days == 1 else 's')
if hours:
eta_str += ' {} hour{}'.format(hours, '' if hours == 1 else 's')
if minutes:
eta_str += ' {} minute{}'.format(minutes, '' if minutes == 1 else 's')
fmt = 'Team {0} will be playing in {1} in about{2} at the {3}.'
text = fmt.format(team_number, match.verbose_name, eta_str, event.normalized_name)
tts = fmt.format(cls._team_number_tts(team_number), match.verbose_name, eta_str, event.normalized_name)
else:
fmt = 'Team {0} will be playing in {1} at the {2}.'
text = fmt.format(team_number, match.verbose_name, event.normalized_name)
tts = fmt.format(cls._team_number_tts(team_number), match.verbose_name, event.normalized_name)
add_messages = cls._create_link_chip(
match.verbose_name,
'https://www.thebluealliance.com/match/{}'.format(match.key.id()))
else:
fmt = 'Team {0} has no more scheduled matches at the {1}.'
text = fmt.format(team_number, event.normalized_name)
tts = fmt.format(cls._team_number_tts(team_number), event.normalized_name)
add_messages = []
else:
fmt = 'Team {0} has no scheduled matches at the {1}.'
text = fmt.format(team_number, event.normalized_name)
tts = fmt.format(cls._team_number_tts(team_number), event.normalized_name)
add_messages = []
break
elif event.future:
fmt = 'Team {0} will be competing at the {1} which begins on {2}.'
event_date = event.start_date.strftime("%B %d")
text = fmt.format(team_number, event.normalized_name, event_date)
tts = fmt.format(cls._team_number_tts(team_number), event.normalized_name, event_date)
add_messages = cls._create_link_chip(
'event page', 'https://www.thebluealliance.com/event/{}'.format(event.key.id()))
break
else:
fmt = 'Team {0} is not registered for any more events this season.'
text = fmt.format(team_number)
tts = fmt.format(cls._team_number_tts(team_number))
add_messages = []
fmt = ' Would you like to know more about {} or another team?'
text += fmt.format(team_number)
tts += fmt.format(cls._team_number_tts(team_number))
add_messages += cls._create_suggestion_chips([
'Current status',
'Location',
'Rookie year',
'Another team',
'No thanks',
])
else:
fmt = 'Team {0} does not exist. Please ask about another team.'
text = fmt.format(team_number)
tts = fmt.format(cls._team_number_tts(team_number))
add_messages = []
return {
'speech': text,
'messages': cls._create_simple_response(text, tts=tts) + add_messages,
}
| {
"content_hash": "107a135c007f428395d92ad062568766",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 191,
"avg_line_length": 42.285318559556785,
"alnum_prop": 0.49754339993449065,
"repo_name": "phil-lopreiato/the-blue-alliance",
"id": "34c87f0e8bf461a0791c491383eb52297faf9822",
"size": "15265",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "helpers/apiai_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "342115"
},
{
"name": "Dockerfile",
"bytes": "1806"
},
{
"name": "HTML",
"bytes": "923112"
},
{
"name": "JavaScript",
"bytes": "519596"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Python",
"bytes": "2829552"
},
{
"name": "Ruby",
"bytes": "3494"
},
{
"name": "Shell",
"bytes": "15899"
}
],
"symlink_target": ""
} |
from kolla.tests import base
class ConfigTest(base.TestCase):
config_file = 'default.conf'
def test_debug_opt(self):
self.assertTrue(self.conf.debug)
| {
"content_hash": "b2033df76f3f7a47c5214a62166fd25b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 40,
"avg_line_length": 21.125,
"alnum_prop": 0.6982248520710059,
"repo_name": "limamauricio/mykolla",
"id": "28a9c8d8de1c177c5c932261d3232aec67af2382",
"size": "714",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "kolla/tests/common/test_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "114463"
},
{
"name": "Ruby",
"bytes": "8268"
},
{
"name": "Shell",
"bytes": "30048"
}
],
"symlink_target": ""
} |
"""
WSGI config for my_website project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_website.settings")
application = get_wsgi_application()
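# For illustration only: a WSGI server such as gunicorn would typically be
# pointed at this callable, e.g.
#   gunicorn my_website.wsgi:application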
| {
"content_hash": "132d9ac67adf6c29305e2717dc2afa54",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.8125,
"alnum_prop": 0.7682619647355163,
"repo_name": "meslater1030/my-website",
"id": "44a4db7ae855e5bfb93e5d7e58148699060c9691",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my_website/my_website/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1085"
},
{
"name": "HTML",
"bytes": "4266"
},
{
"name": "Python",
"bytes": "5563"
}
],
"symlink_target": ""
} |
import hashlib
import json
import os
import shutil
import tempfile
import unittest
from unittest.mock import MagicMock, patch, mock_open, call
import pytest
import requests
import synapseclient.core.constants.concrete_types as concrete_types
import synapseclient.core.multithread_download as multithread_download
from synapseclient import File, Synapse
from synapseclient.core import sts_transfer
from synapseclient import client
from synapseclient.core import utils
from synapseclient.core.exceptions import SynapseHTTPError, SynapseMd5MismatchError, SynapseError, \
SynapseFileNotFoundError
# a callable that mocks the requests.get function
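# (each call returns the next canned response from the list, in order)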
class MockRequestGetFunction(object):
def __init__(self, responses):
self.responses = responses
self.i = 0
def __call__(self, *args, **kwargs):
response = self.responses[self.i]
self.i += 1
return response
# a class to iterate bogus content
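# It yields the contents as utf-8 chunks of at most buffer_size bytes and,
# if partial_end is set, raises ChunkedEncodingError part-way through to
# simulate a dropped connection.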
class IterateContents(object):
def __init__(self, contents, buffer_size, partial_start=0, partial_end=None):
self.contents = contents
self.buffer_size = buffer_size
self.i = partial_start
self.partial_end = partial_end
self.bytes_iterated = 0
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
if self.i >= len(self.contents):
raise StopIteration()
if self.partial_end and self.i >= self.partial_end:
raise requests.exceptions.ChunkedEncodingError("Simulated partial download! Connection reset by peer!")
start = self.i
end = min(self.i + self.buffer_size, len(self.contents))
if self.partial_end:
end = min(end, self.partial_end)
self.i = end
data = self.contents[start:end].encode('utf-8')
self.bytes_iterated += len(data)
return data
def total_bytes_iterated(self):
return self.bytes_iterated
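# fabricate a minimal mocked requests.Response: "redirect" (301 plus a
# location header), "error" (a status code plus a JSON reason), "stream"
# (an octet-stream backed by IterateContents), or a plain JSON body by default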
def create_mock_response(url, response_type, **kwargs):
response = MagicMock()
response.request.url = url
response.request.method = kwargs.get('method', 'GET')
response.request.headers = {}
response.request.body = None
if response_type == "redirect":
response.status_code = 301
response.headers = {'location': kwargs['location']}
elif response_type == "error":
response.status_code = kwargs.get('status_code', 500)
response.reason = kwargs.get('reason', 'fake reason')
response.text = '{{"reason":"{}"}}'.format(kwargs.get('reason', 'fake reason'))
response.json = lambda: json.loads(response.text)
elif response_type == "stream":
response.status_code = kwargs.get('status_code', 200)
response.headers = {
'content-disposition': 'attachment; filename="fname.ext"',
'content-type': 'application/octet-stream',
'content-length': len(response.text)
}
def _create_iterator(buffer_size):
response._content_iterator = IterateContents(kwargs['contents'],
kwargs['buffer_size'],
kwargs.get('partial_start', 0),
kwargs.get('partial_end', None))
return response._content_iterator
response.iter_content = _create_iterator
response.raw.tell = lambda: response._content_iterator.total_bytes_iterated()
else:
response.status_code = 200
response.text = kwargs['text']
response.json = lambda: json.loads(response.text)
response.headers = {
'content-type': 'application/json',
'content-length': len(response.text)
}
return response
def mock_generate_headers(self, headers=None):
return {}
def test_mock_download(syn):
temp_dir = tempfile.gettempdir()
fileHandleId = "42"
objectId = "syn789"
objectType = "FileEntity"
# make bogus content
contents = "\n".join(str(i) for i in range(1000))
# compute MD5 of contents
m = hashlib.md5()
m.update(contents.encode('utf-8'))
contents_md5 = m.hexdigest()
url = "https://repo-prod.prod.sagebase.org/repo/v1/entity/syn6403467/file"
# 1. No redirects
mock_requests_get = MockRequestGetFunction([
create_mock_response(url, "stream", contents=contents, buffer_size=1024)
])
# patch requests.get and also the method that generates signed
# headers (to avoid having to be logged in to Synapse)
with patch.object(syn._requests_session, 'get', side_effect=mock_requests_get), \
patch.object(Synapse, '_generate_headers', side_effect=mock_generate_headers):
syn._download_from_URL(url, destination=temp_dir, fileHandleId=12345, expected_md5=contents_md5)
# 2. Multiple redirects
mock_requests_get = MockRequestGetFunction([
create_mock_response(url, "redirect", location="https://fakeurl.com/asdf"),
create_mock_response(url, "redirect", location="https://fakeurl.com/qwer"),
create_mock_response(url, "stream", contents=contents, buffer_size=1024)
])
# patch requests.get and also the method that generates signed
# headers (to avoid having to be logged in to Synapse)
with patch.object(syn._requests_session, 'get', side_effect=mock_requests_get), \
patch.object(Synapse, '_generate_headers', side_effect=mock_generate_headers):
syn._download_from_URL(url, destination=temp_dir, fileHandleId=12345, expected_md5=contents_md5)
# 3. recover from partial download
mock_requests_get = MockRequestGetFunction([
create_mock_response(url, "redirect", location="https://fakeurl.com/asdf"),
create_mock_response(url, "stream", contents=contents, buffer_size=1024, partial_end=len(contents) // 7 * 3,
status_code=200),
create_mock_response(url, "stream", contents=contents, buffer_size=1024, partial_start=len(contents) // 7 * 3,
partial_end=len(contents) // 7 * 5, status_code=206),
create_mock_response(url, "stream", contents=contents, buffer_size=1024, partial_start=len(contents) // 7 * 5,
status_code=206)
])
_getFileHandleDownload_return_value = {'preSignedURL': url,
'fileHandle': {'id': 12345, 'contentMd5': contents_md5,
'concreteType': concrete_types.S3_FILE_HANDLE}}
# patch requests.get and also the method that generates signed
# headers (to avoid having to be logged in to Synapse)
with patch.object(syn._requests_session, 'get', side_effect=mock_requests_get), \
patch.object(Synapse, '_generate_headers', side_effect=mock_generate_headers), \
patch.object(Synapse, '_getFileHandleDownload', return_value=_getFileHandleDownload_return_value), \
patch.object(sts_transfer, "is_storage_location_sts_enabled", return_value=False):
syn._downloadFileHandle(fileHandleId, objectId, objectType, destination=temp_dir)
# 4. as long as we're making progress, keep trying
responses = [
create_mock_response(url, "redirect", location="https://fakeurl.com/asdf"),
create_mock_response(url, "stream", contents=contents, buffer_size=1024, partial_start=0,
partial_end=len(contents) // 11, status_code=200)
]
for i in range(1, 12):
responses.append(
create_mock_response(url, "stream", contents=contents, buffer_size=1024,
partial_start=len(contents) // 11 * i,
partial_end=len(contents) // 11 * (i + 1), status_code=206))
mock_requests_get = MockRequestGetFunction(responses)
# patch requests.get and also the method that generates signed
# headers (to avoid having to be logged in to Synapse)
with patch.object(syn._requests_session, 'get', side_effect=mock_requests_get), \
patch.object(Synapse, '_generate_headers', side_effect=mock_generate_headers), \
patch.object(Synapse, '_getFileHandleDownload', return_value=_getFileHandleDownload_return_value), \
patch.object(sts_transfer, "is_storage_location_sts_enabled", return_value=False):
syn._downloadFileHandle(fileHandleId, objectId, objectType, destination=temp_dir)
# 5. don't recover, a partial download that never completes
# should eventually throw an exception
responses = [
create_mock_response(url, "redirect", location="https://fakeurl.com/asdf"),
create_mock_response(url, "stream", contents=contents, buffer_size=1024, partial_start=0,
partial_end=len(contents) // 11, status_code=200),
]
for i in range(1, 10):
responses.append(
create_mock_response(url, "stream", contents=contents, buffer_size=1024, partial_start=len(contents) // 11,
partial_end=len(contents) // 11, status_code=200))
mock_requests_get = MockRequestGetFunction(responses)
# patch requests.get and also the method that generates signed
# headers (to avoid having to be logged in to Synapse)
with patch.object(syn._requests_session, 'get', side_effect=mock_requests_get), \
patch.object(Synapse, '_generate_headers', side_effect=mock_generate_headers), \
patch.object(Synapse, '_getFileHandleDownload', return_value=_getFileHandleDownload_return_value):
pytest.raises(Exception,
syn._downloadFileHandle, fileHandleId, objectId, objectType, destination=temp_dir)
# 6. 206 Range header not supported, respond with 200 and full file
mock_requests_get = MockRequestGetFunction([
create_mock_response(url, "redirect", location="https://fakeurl.com/asdf"),
create_mock_response(url, "stream", contents=contents, buffer_size=1024, partial=len(contents) // 7 * 3,
status_code=200),
create_mock_response(url, "stream", contents=contents, buffer_size=1024, status_code=200)
])
# patch requests.get and also the method that generates signed
# headers (to avoid having to be logged in to Synapse)
with patch.object(syn._requests_session, 'get', side_effect=mock_requests_get), \
patch.object(Synapse, '_generate_headers', side_effect=mock_generate_headers), \
patch.object(Synapse, '_getFileHandleDownload', return_value=_getFileHandleDownload_return_value), \
patch.object(sts_transfer, "is_storage_location_sts_enabled", return_value=False):
syn._downloadFileHandle(fileHandleId, objectId, objectType, destination=temp_dir)
# 7. Too many redirects
mock_requests_get = MockRequestGetFunction([
create_mock_response(url, "redirect", location="https://fakeurl.com/asdf") for i in range(100)])
# patch requests.get and also the method that generates signed
# headers (to avoid having to be logged in to Synapse)
with patch.object(syn._requests_session, 'get', side_effect=mock_requests_get), \
patch.object(Synapse, '_generate_headers', side_effect=mock_generate_headers), \
patch.object(Synapse, '_getFileHandleDownload', return_value=_getFileHandleDownload_return_value), \
patch.object(sts_transfer, "is_storage_location_sts_enabled", return_value=False):
pytest.raises(SynapseHTTPError, syn._downloadFileHandle, fileHandleId, objectId, objectType,
destination=temp_dir)
class Test__downloadFileHandle(unittest.TestCase):
@pytest.fixture(autouse=True, scope='function')
def init_syn(self, syn):
self.syn = syn
def tearDown(self) -> None:
self.syn.multi_threaded = False
def test_multithread_true__S3_fileHandle(self):
with patch.object(os, "makedirs"), \
patch.object(self.syn, "_getFileHandleDownload") as mock_getFileHandleDownload, \
patch.object(self.syn, "_download_from_url_multi_threaded") as mock_multi_thread_download, \
patch.object(self.syn, "cache"):
mock_getFileHandleDownload.return_value = {
'fileHandle': {
'id': '123',
'concreteType': concrete_types.S3_FILE_HANDLE,
'contentMd5': 'someMD5',
'contentSize': multithread_download.SYNAPSE_DEFAULT_DOWNLOAD_PART_SIZE + 1,
}
}
self.syn.multi_threaded = True
self.syn._downloadFileHandle(
fileHandleId=123,
objectId=456,
objectType="FileEntity",
destination="/myfakepath",
)
mock_multi_thread_download.assert_called_once_with(123, 456, "FileEntity", "/myfakepath",
expected_md5="someMD5")
def _multithread_not_applicable(self, file_handle):
with patch.object(os, "makedirs"), \
patch.object(self.syn, "_getFileHandleDownload") as mock_getFileHandleDownload, \
patch.object(self.syn, "_download_from_URL") as mock_download_from_URL, \
patch.object(self.syn, "cache"), \
patch.object(sts_transfer, "is_storage_location_sts_enabled", return_value=False):
mock_getFileHandleDownload.return_value = {
'fileHandle': file_handle,
'preSignedURL': 'asdf.com'
}
# multi_threaded/max_threads will have effect
self.syn.multi_threaded = True
self.syn._downloadFileHandle(
fileHandleId=123,
objectId=456,
objectType="FileEntity",
destination="/myfakepath"
)
mock_download_from_URL.assert_called_once_with("asdf.com", "/myfakepath", "123", expected_md5="someMD5")
def test_multithread_True__other_file_handle_type(self):
"""Verify that even if multithreaded is enabled we won't use it for unsupported file types"""
file_handle = {
'id': '123',
'concreteType': "someFakeConcreteType",
'contentMd5': 'someMD5'
}
self._multithread_not_applicable(file_handle)
def test_multithread_false__S3_fileHandle__small_file(self):
"""Verify that even if multithreaded is enabled we still won't use a multithreaded
download if the file is not large enough to make it worthwhile"""
file_handle = {
'id': '123',
'concreteType': concrete_types.S3_FILE_HANDLE,
'contentMd5': 'someMD5',
'contentSize': multithread_download.SYNAPSE_DEFAULT_DOWNLOAD_PART_SIZE - 1
}
self._multithread_not_applicable(file_handle)
def test_multithread_false__S3_fileHandle(self):
with patch.object(os, "makedirs"), \
patch.object(self.syn, "_getFileHandleDownload") as mock_getFileHandleDownload, \
patch.object(self.syn, "_download_from_URL") as mock_download_from_URL, \
patch.object(self.syn, "cache"), \
patch.object(sts_transfer, "is_storage_location_sts_enabled", return_value=False):
mock_getFileHandleDownload.return_value = {
'fileHandle': {
'id': '123',
'concreteType': concrete_types.S3_FILE_HANDLE,
'contentMd5': 'someMD5'
},
'preSignedURL': 'asdf.com'
}
self.syn.multi_threaded = False
self.syn._downloadFileHandle(
fileHandleId=123,
objectId=456,
objectType="FileEntity",
destination="/myfakepath"
)
mock_download_from_URL.assert_called_once_with("asdf.com", "/myfakepath", "123", expected_md5="someMD5")
class Test_download_from_url_multi_threaded:
@pytest.fixture(autouse=True, scope='function')
def init_syn(self, syn):
self.syn = syn
def test_md5_mismatch(self):
with patch.object(multithread_download, "download_file"), \
patch.object(utils, "md5_for_file") as mock_md5_for_file, \
patch.object(os, "remove") as mock_os_remove, \
patch.object(shutil, "move") as mock_move:
path = os.path.abspath("/myfakepath")
mock_md5_for_file.return_value.hexdigest.return_value = "unexpetedMd5"
pytest.raises(SynapseMd5MismatchError, self.syn._download_from_url_multi_threaded, file_handle_id=123,
object_id=456, object_type="FileEntity",
destination=path, expected_md5="myExpectedMd5")
mock_os_remove.assert_called_once_with(utils.temp_download_filename(path, 123))
mock_move.assert_not_called()
def test_md5_match(self):
with patch.object(multithread_download, "download_file"), \
patch.object(utils, "md5_for_file") as mock_md5_for_file, \
patch.object(os, "remove") as mock_os_remove, \
patch.object(shutil, "move") as mock_move:
path = os.path.abspath("/myfakepath")
expected_md5 = "myExpectedMd5"
mock_md5_for_file.return_value.hexdigest.return_value = expected_md5
self.syn._download_from_url_multi_threaded(
file_handle_id=123,
object_id=456,
object_type="FileEntity",
destination=path,
expected_md5=expected_md5,
)
mock_os_remove.assert_not_called()
mock_move.assert_called_once_with(utils.temp_download_filename(path, 123), path)
def test_download_end_early_retry(syn):
"""
-------Test to ensure download retry even if connection ends early--------
"""
url = "http://www.ayy.lmao/filerino.txt"
contents = "\n".join(str(i) for i in range(1000))
destination = os.path.normpath(os.path.expanduser("~/fake/path/filerino.txt"))
temp_destination = os.path.normpath(os.path.expanduser("~/fake/path/filerino.txt.temp"))
partial_content_break = len(contents) // 7 * 3
mock_requests_get = MockRequestGetFunction([
create_mock_response(url, "stream", contents=contents[:partial_content_break], buffer_size=1024,
partial_end=len(contents),
status_code=200),
create_mock_response(url, "stream", contents=contents, buffer_size=1024, partial_start=partial_content_break,
status_code=206)
])
# make the first response's 'content-type' header say it will transfer the full content even though it
# is only partially doing so
mock_requests_get.responses[0].headers['content-length'] = len(contents)
mock_requests_get.responses[1].headers['content-length'] = len(contents[partial_content_break:])
with patch.object(syn._requests_session, 'get', side_effect=mock_requests_get), \
patch.object(Synapse, '_generate_headers', side_effect=mock_generate_headers), \
patch.object(utils, 'temp_download_filename', return_value=temp_destination) as mocked_temp_dest, \
patch.object(client, 'open', new_callable=mock_open(), create=True) as mocked_open, \
patch.object(os.path, 'exists', side_effect=[False, True]) as mocked_exists, \
patch.object(os.path, 'getsize', return_value=partial_content_break) as mocked_getsize, \
patch.object(utils, 'md5_for_file'), \
patch.object(shutil, 'move') as mocked_move:
# function under test
syn._download_from_URL(url, destination)
# assert temp_download_filename() called 2 times with same parameters
assert [call(destination, None)] * 2 == mocked_temp_dest.call_args_list
# assert exists called 2 times
assert [call(temp_destination)] * 2 == mocked_exists.call_args_list
# assert open() called 2 times with different parameters
assert [call(temp_destination, 'wb'), call(temp_destination, 'ab')] == mocked_open.call_args_list
# assert getsize() called 2 times
# once because exists()=True and another time because response status code = 206
assert [call(temp_destination)] * 2 == mocked_getsize.call_args_list
# assert shutil.move() called 1 time
mocked_move.assert_called_once_with(temp_destination, destination)
def test_download_md5_mismatch__not_local_file(syn):
"""
--------Test to ensure file gets removed on md5 mismatch--------
"""
url = "http://www.ayy.lmao/filerino.txt"
contents = "\n".join(str(i) for i in range(1000))
destination = os.path.normpath(os.path.expanduser("~/fake/path/filerino.txt"))
temp_destination = os.path.normpath(os.path.expanduser("~/fake/path/filerino.txt.temp"))
mock_requests_get = MockRequestGetFunction([
create_mock_response(url, "stream", contents=contents, buffer_size=1024, partial_end=len(contents),
status_code=200)
])
with patch.object(syn._requests_session, 'get', side_effect=mock_requests_get), \
patch.object(Synapse, '_generate_headers', side_effect=mock_generate_headers), \
patch.object(utils, 'temp_download_filename', return_value=temp_destination) as mocked_temp_dest, \
patch.object(client, 'open', new_callable=mock_open(), create=True) as mocked_open, \
patch.object(os.path, 'exists', side_effect=[False, True]) as mocked_exists, \
patch.object(shutil, 'move') as mocked_move, \
patch.object(os, 'remove') as mocked_remove:
# function under test
pytest.raises(SynapseMd5MismatchError, syn._download_from_URL, url, destination,
expected_md5="fake md5 is fake")
# assert temp_download_filename() called once
mocked_temp_dest.assert_called_once_with(destination, None)
# assert exists called 2 times
assert [call(temp_destination), call(destination)] == mocked_exists.call_args_list
# assert open() called once
mocked_open.assert_called_once_with(temp_destination, 'wb')
# assert shutil.move() called once
mocked_move.assert_called_once_with(temp_destination, destination)
# assert file was removed
mocked_remove.assert_called_once_with(destination)
def test_download_md5_mismatch_local_file(syn):
"""
--------Test to ensure file gets removed on md5 mismatch--------
"""
url = "file:///some/file/path.txt"
destination = os.path.normpath(os.path.expanduser("~/fake/path/filerino.txt"))
with patch.object(Synapse, '_generate_headers', side_effect=mock_generate_headers), \
patch.object(utils, 'file_url_to_path', return_value=destination) as mocked_file_url_to_path, \
patch.object(utils, 'md5_for_file', return_value=hashlib.md5()) as mocked_md5_for_file, \
patch('os.remove') as mocked_remove:
# function under test
pytest.raises(SynapseMd5MismatchError, syn._download_from_URL, url, destination,
expected_md5="fake md5 is fake")
mocked_file_url_to_path.assert_called_once_with(url, verify_exists=True)
mocked_md5_for_file.assert_called_once_with(destination)
# assert file was NOT removed
assert not mocked_remove.called
def test_download_file_entity__correct_local_state(syn):
mock_cache_path = utils.normalize_path("/i/will/show/you/the/path/yi.txt")
file_entity = File(parentId="syn123")
file_entity.dataFileHandleId = 123
with patch.object(syn.cache, 'get', return_value=mock_cache_path):
syn._download_file_entity(downloadLocation=None, entity=file_entity, ifcollision="overwrite.local",
submission=None)
assert mock_cache_path == utils.normalize_path(file_entity.path)
assert os.path.dirname(mock_cache_path) == file_entity.cacheDir
assert 1 == len(file_entity.files)
assert os.path.basename(mock_cache_path) == file_entity.files[0]
def test_getFileHandleDownload__error_UNAUTHORIZED(syn):
ret_val = {'requestedFiles': [{'failureCode': 'UNAUTHORIZED', }]}
with patch.object(syn, "restPOST", return_value=ret_val):
pytest.raises(SynapseError, syn._getFileHandleDownload, '123', 'syn456')
def test_getFileHandleDownload__error_NOT_FOUND(syn):
ret_val = {'requestedFiles': [{'failureCode': 'NOT_FOUND', }]}
with patch.object(syn, "restPOST", return_value=ret_val):
pytest.raises(SynapseFileNotFoundError, syn._getFileHandleDownload, '123', 'syn456')
| {
"content_hash": "b410e888e632b94150bafe81678dcd04",
"timestamp": "",
"source": "github",
"line_count": 538,
"max_line_length": 119,
"avg_line_length": 46.353159851301115,
"alnum_prop": 0.6310048921324886,
"repo_name": "thomasyu888/synapsePythonClient",
"id": "f94c950a8c046268a8640209d8c8f6b1b75c22f3",
"size": "24938",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/unit/synapseclient/core/unit_test_download.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "420"
},
{
"name": "Python",
"bytes": "1573386"
}
],
"symlink_target": ""
} |
from models import WorkItem, ProcessInstance
class ActivityState:
blocked = 0
inactive = 0
active = 0
suspended = 0
fallout = 0
complete = 0
total = 0
def __init__(self, activity):
        wis = WorkItem.objects.filter(activity=activity)
self.total = wis.count()
self.blocked = wis.filter(status='blocked').count()
self.inactive = wis.filter(status='inactive').count()
self.active = wis.filter(status='active').count()
self.fallout = wis.filter(status='fallout').count()
self.complete = wis.filter(status='complete').count()
class ProcessState:
initiated = 0
running = 0
active = 0
complete = 0
terminated = 0
suspended = 0
total = 0
def __init__(self, process):
insts = ProcessInstance.objects.filter(process=process)
self.total = insts.count()
self.initiated = insts.filter(status='initiated').count()
self.running = insts.filter(status='running').count()
self.active = insts.filter(status='active').count()
self.complete = insts.filter(status='complete').count()
self.terminated = insts.filter(status='terminated').count()
self.suspended = insts.filter(status='suspended').count()
class ActivityStats:
number = 0
time_min = None
time_max = None
time_mean = None
def __init__(self, activity, user=None, year=None, month=None, day=None, datetime_interval=None):
pass
| {
"content_hash": "895255f909712cb1367e20fa00e41c36",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 101,
"avg_line_length": 33.54545454545455,
"alnum_prop": 0.6321138211382114,
"repo_name": "glasslion/djflow",
"id": "73886f0af7600663fce4b3297d563de336a20a6d",
"size": "1524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djflow/runtime/reporting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "179"
},
{
"name": "CSS",
"bytes": "2056"
},
{
"name": "HTML",
"bytes": "88891"
},
{
"name": "JavaScript",
"bytes": "80177"
},
{
"name": "Python",
"bytes": "134076"
},
{
"name": "Shell",
"bytes": "65"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2017 LeanIX GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
import sys
import os
from models import *
class IfacesApi(object):
def __init__(self, apiClient):
self.apiClient = apiClient
def getIfaces(self, **kwargs):
"""
Read all Interface
Args:
relations, bool: If set to true, all relations of the Fact Sheet are fetched as well. Fetching all relations can be slower. Default: false. (optional)
filter, str: Full-text filter (optional)
Returns: Array[Iface]
"""
allParams = ['relations', 'filter']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getIfaces" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('relations' in params):
queryParams['relations'] = self.apiClient.toPathValue(params['relations'])
if ('filter' in params):
queryParams['filter'] = self.apiClient.toPathValue(params['filter'])
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[Iface]')
return responseObject
def createIface(self, **kwargs):
"""
Create a new Interface
Args:
body, Iface: Message-Body (optional)
Returns: Iface
"""
allParams = ['body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createIface" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
if ('' in params):
bodyParam = params['']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Iface')
return responseObject
def getIface(self, ID, **kwargs):
"""
Read a Interface by a given ID
Args:
ID, str: Unique ID (required)
relations, bool: If set to true, all relations of the Fact Sheet are fetched as well. Fetching all relations can be slower. Default: false. (optional)
Returns: Iface
"""
allParams = ['ID', 'relations']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getIface" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('relations' in params):
queryParams['relations'] = self.apiClient.toPathValue(params['relations'])
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Iface')
return responseObject
def updateIface(self, ID, **kwargs):
"""
Update a Interface by a given ID
Args:
ID, str: Unique ID (required)
body, Iface: Message-Body (optional)
Returns: Iface
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateIface" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
if ('' in params):
bodyParam = params['']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Iface')
return responseObject
def deleteIface(self, ID, **kwargs):
"""
Delete a Interface by a given ID
Args:
ID, str: Unique ID (required)
Returns:
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteIface" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getFactSheetHasParents(self, ID, **kwargs):
"""
Read all of relation
Args:
ID, str: Unique ID (required)
Returns: Array[FactSheetHasParent]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasParents" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasParents'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[FactSheetHasParent]')
return responseObject
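    # Hypothetical usage sketch (assumes an `api` instance of this client):
    #
    #   parents = api.getFactSheetHasParents('1234')  # -> Array[FactSheetHasParent]
    #
    # An empty or falsy HTTP response yields None, so callers should guard
    # before iterating over the result.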
def createFactSheetHasParent(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, FactSheetHasParent: Message-Body (optional)
Returns: FactSheetHasParent
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createFactSheetHasParent" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasParents'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasParent')
return responseObject
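    # Hypothetical usage sketch (assumes an `api` instance and a
    # FactSheetHasParent model `relation` built elsewhere):
    #
    #   created = api.createFactSheetHasParent('1234', body=relation)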
def getFactSheetHasParent(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: FactSheetHasParent
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasParent" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasParents/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasParent')
return responseObject
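    # Hypothetical usage sketch (assumes an `api` instance): both path
    # parameters are substituted into /ifaces/{ID}/factSheetHasParents/{relationID}.
    #
    #   rel = api.getFactSheetHasParent('1234', '5678')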
def updateFactSheetHasParent(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, FactSheetHasParent: Message-Body (optional)
Returns: FactSheetHasParent
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateFactSheetHasParent" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasParents/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasParent')
return responseObject
def deleteFactSheetHasParent(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteFactSheetHasParent" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasParents/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getFactSheetHasChildren(self, ID, **kwargs):
"""
        Read all relations for the given ID
Args:
ID, str: Unique ID (required)
Returns: Array[FactSheetHasChild]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasChildren" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasChildren'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[FactSheetHasChild]')
return responseObject
def createFactSheetHasChild(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, FactSheetHasChild: Message-Body (optional)
Returns: FactSheetHasChild
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createFactSheetHasChild" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasChildren'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasChild')
return responseObject
def getFactSheetHasChild(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: FactSheetHasChild
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasChild" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasChildren/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasChild')
return responseObject
def updateFactSheetHasChild(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, FactSheetHasChild: Message-Body (optional)
Returns: FactSheetHasChild
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateFactSheetHasChild" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasChildren/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasChild')
return responseObject
def deleteFactSheetHasChild(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteFactSheetHasChild" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasChildren/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getFactSheetHasDocuments(self, ID, **kwargs):
"""
        Read all relations for the given ID
Args:
ID, str: Unique ID (required)
Returns: Array[FactSheetHasDocument]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasDocuments" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasDocuments'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[FactSheetHasDocument]')
return responseObject
def createFactSheetHasDocument(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, FactSheetHasDocument: Message-Body (optional)
Returns: FactSheetHasDocument
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createFactSheetHasDocument" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasDocuments'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasDocument')
return responseObject
def getFactSheetHasDocument(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: FactSheetHasDocument
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasDocument" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasDocuments/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasDocument')
return responseObject
def updateFactSheetHasDocument(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, FactSheetHasDocument: Message-Body (optional)
Returns: FactSheetHasDocument
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateFactSheetHasDocument" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasDocuments/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasDocument')
return responseObject
def deleteFactSheetHasDocument(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteFactSheetHasDocument" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasDocuments/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getFactSheetHasLifecycles(self, ID, **kwargs):
"""
        Read all relations for the given ID
Args:
ID, str: Unique ID (required)
Returns: Array[FactSheetHasLifecycle]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasLifecycles" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasLifecycles'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[FactSheetHasLifecycle]')
return responseObject
def createFactSheetHasLifecycle(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, FactSheetHasLifecycle: Message-Body (optional)
Returns: FactSheetHasLifecycle
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createFactSheetHasLifecycle" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasLifecycles'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasLifecycle')
return responseObject
def getFactSheetHasLifecycle(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: FactSheetHasLifecycle
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasLifecycle" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasLifecycles/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasLifecycle')
return responseObject
def updateFactSheetHasLifecycle(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, FactSheetHasLifecycle: Message-Body (optional)
Returns: FactSheetHasLifecycle
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateFactSheetHasLifecycle" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasLifecycles/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasLifecycle')
return responseObject
def deleteFactSheetHasLifecycle(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteFactSheetHasLifecycle" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasLifecycles/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getUserSubscriptions(self, ID, **kwargs):
"""
        Read all relations for the given ID
Args:
ID, str: Unique ID (required)
Returns: Array[UserSubscription]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getUserSubscriptions" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/userSubscriptions'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[UserSubscription]')
return responseObject
def createUserSubscription(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, UserSubscription: Message-Body (optional)
Returns: UserSubscription
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createUserSubscription" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/userSubscriptions'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'UserSubscription')
return responseObject
def getUserSubscription(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: UserSubscription
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getUserSubscription" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/userSubscriptions/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'UserSubscription')
return responseObject
def updateUserSubscription(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, UserSubscription: Message-Body (optional)
Returns: UserSubscription
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateUserSubscription" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/userSubscriptions/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'UserSubscription')
return responseObject
def deleteUserSubscription(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteUserSubscription" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/userSubscriptions/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getFactSheetHasPredecessors(self, ID, **kwargs):
"""
        Read all relations for the given ID
Args:
ID, str: Unique ID (required)
Returns: Array[FactSheetHasPredecessor]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasPredecessors" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasPredecessors'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[FactSheetHasPredecessor]')
return responseObject
def createFactSheetHasPredecessor(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, FactSheetHasPredecessor: Message-Body (optional)
Returns: FactSheetHasPredecessor
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createFactSheetHasPredecessor" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasPredecessors'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasPredecessor')
return responseObject
def getFactSheetHasPredecessor(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: FactSheetHasPredecessor
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasPredecessor" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasPredecessors/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasPredecessor')
return responseObject
def updateFactSheetHasPredecessor(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, FactSheetHasPredecessor: Message-Body (optional)
Returns: FactSheetHasPredecessor
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateFactSheetHasPredecessor" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasPredecessors/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasPredecessor')
return responseObject
def deleteFactSheetHasPredecessor(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteFactSheetHasPredecessor" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasPredecessors/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getFactSheetHasSuccessors(self, ID, **kwargs):
"""
        Read all relations for the given ID
Args:
ID, str: Unique ID (required)
Returns: Array[FactSheetHasSuccessor]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasSuccessors" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasSuccessors'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[FactSheetHasSuccessor]')
return responseObject
def createFactSheetHasSuccessor(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, FactSheetHasSuccessor: Message-Body (optional)
Returns: FactSheetHasSuccessor
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createFactSheetHasSuccessor" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasSuccessors'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasSuccessor')
return responseObject
def getFactSheetHasSuccessor(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: FactSheetHasSuccessor
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasSuccessor" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasSuccessors/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasSuccessor')
return responseObject
def updateFactSheetHasSuccessor(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, FactSheetHasSuccessor: Message-Body (optional)
Returns: FactSheetHasSuccessor
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateFactSheetHasSuccessor" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasSuccessors/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasSuccessor')
return responseObject
def deleteFactSheetHasSuccessor(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteFactSheetHasSuccessor" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasSuccessors/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getFactSheetHasRequiresAll(self, ID, **kwargs):
"""
        Read all relations for the given ID
Args:
ID, str: Unique ID (required)
Returns: Array[FactSheetHasRequires]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasRequiresAll" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasRequires'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[FactSheetHasRequires]')
return responseObject
def createFactSheetHasRequires(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, FactSheetHasRequires: Message-Body (optional)
Returns: FactSheetHasRequires
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createFactSheetHasRequires" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasRequires'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasRequires')
return responseObject
def getFactSheetHasRequires(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: FactSheetHasRequires
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasRequires" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasRequires/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasRequires')
return responseObject
def updateFactSheetHasRequires(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, FactSheetHasRequires: Message-Body (optional)
Returns: FactSheetHasRequires
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateFactSheetHasRequires" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasRequires/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasRequires')
return responseObject
def deleteFactSheetHasRequires(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteFactSheetHasRequires" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasRequires/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getFactSheetHasRequiredByAll(self, ID, **kwargs):
"""
        Read all relations for the given ID
Args:
ID, str: Unique ID (required)
Returns: Array[FactSheetHasRequiredby]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasRequiredByAll" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasRequiredby'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[FactSheetHasRequiredby]')
return responseObject
def createFactSheetHasRequiredby(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, FactSheetHasRequiredby: Message-Body (optional)
Returns: FactSheetHasRequiredby
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createFactSheetHasRequiredby" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasRequiredby'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasRequiredby')
return responseObject
def getFactSheetHasRequiredby(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: FactSheetHasRequiredby
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasRequiredby" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasRequiredby/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasRequiredby')
return responseObject
def updateFactSheetHasRequiredby(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, FactSheetHasRequiredby: Message-Body (optional)
Returns: FactSheetHasRequiredby
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateFactSheetHasRequiredby" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasRequiredby/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasRequiredby')
return responseObject
def deleteFactSheetHasRequiredby(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteFactSheetHasRequiredby" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasRequiredby/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getFactSheetHasIfaceProviders(self, ID, **kwargs):
"""
        Read all relations
Args:
ID, str: Unique ID (required)
Returns: Array[FactSheetHasIfaceProvider]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasIfaceProviders" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasIfaceProviders'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[FactSheetHasIfaceProvider]')
return responseObject
def createFactSheetHasIfaceProvider(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, FactSheetHasIfaceProvider: Message-Body (optional)
Returns: FactSheetHasIfaceProvider
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createFactSheetHasIfaceProvider" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasIfaceProviders'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasIfaceProvider')
return responseObject
def getFactSheetHasIfaceProvider(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: FactSheetHasIfaceProvider
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasIfaceProvider" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasIfaceProviders/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasIfaceProvider')
return responseObject
def updateFactSheetHasIfaceProvider(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, FactSheetHasIfaceProvider: Message-Body (optional)
Returns: FactSheetHasIfaceProvider
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateFactSheetHasIfaceProvider" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasIfaceProviders/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasIfaceProvider')
return responseObject
def deleteFactSheetHasIfaceProvider(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteFactSheetHasIfaceProvider" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasIfaceProviders/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getFactSheetHasIfaceConsumers(self, ID, **kwargs):
"""
        Read all relations
Args:
ID, str: Unique ID (required)
Returns: Array[FactSheetHasIfaceConsumer]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasIfaceConsumers" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasIfaceConsumers'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[FactSheetHasIfaceConsumer]')
return responseObject
def createFactSheetHasIfaceConsumer(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, FactSheetHasIfaceConsumer: Message-Body (optional)
Returns: FactSheetHasIfaceConsumer
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createFactSheetHasIfaceConsumer" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasIfaceConsumers'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasIfaceConsumer')
return responseObject
def getFactSheetHasIfaceConsumer(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: FactSheetHasIfaceConsumer
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getFactSheetHasIfaceConsumer" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasIfaceConsumers/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasIfaceConsumer')
return responseObject
def updateFactSheetHasIfaceConsumer(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, FactSheetHasIfaceConsumer: Message-Body (optional)
Returns: FactSheetHasIfaceConsumer
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateFactSheetHasIfaceConsumer" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasIfaceConsumers/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FactSheetHasIfaceConsumer')
return responseObject
def deleteFactSheetHasIfaceConsumer(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteFactSheetHasIfaceConsumer" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/factSheetHasIfaceConsumers/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getIfaceHasBusinessObjects(self, ID, **kwargs):
"""
        Read all relations
Args:
ID, str: Unique ID (required)
Returns: Array[IfaceHasBusinessObject]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getIfaceHasBusinessObjects" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/ifaceHasBusinessObjects'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[IfaceHasBusinessObject]')
return responseObject
def createIfaceHasBusinessObject(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, IfaceHasBusinessObject: Message-Body (optional)
Returns: IfaceHasBusinessObject
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createIfaceHasBusinessObject" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/ifaceHasBusinessObjects'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'IfaceHasBusinessObject')
return responseObject
def getIfaceHasBusinessObject(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: IfaceHasBusinessObject
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getIfaceHasBusinessObject" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/ifaceHasBusinessObjects/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'IfaceHasBusinessObject')
return responseObject
def updateIfaceHasBusinessObject(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, IfaceHasBusinessObject: Message-Body (optional)
Returns: IfaceHasBusinessObject
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateIfaceHasBusinessObject" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/ifaceHasBusinessObjects/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'IfaceHasBusinessObject')
return responseObject
def deleteIfaceHasBusinessObject(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteIfaceHasBusinessObject" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/ifaceHasBusinessObjects/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def getIfaceHasResources(self, ID, **kwargs):
"""
        Read all relations
Args:
ID, str: Unique ID (required)
Returns: Array[IfaceHasResource]
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getIfaceHasResources" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/ifaceHasResources'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[IfaceHasResource]')
return responseObject
def createIfaceHasResource(self, ID, **kwargs):
"""
Create a new relation
Args:
ID, str: Unique ID (required)
body, IfaceHasResource: Message-Body (optional)
Returns: IfaceHasResource
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createIfaceHasResource" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/ifaceHasResources'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'IfaceHasResource')
return responseObject
def getIfaceHasResource(self, ID, relationID, **kwargs):
"""
Read by relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns: IfaceHasResource
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getIfaceHasResource" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/ifaceHasResources/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'IfaceHasResource')
return responseObject
def updateIfaceHasResource(self, ID, relationID, **kwargs):
"""
Update relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
body, IfaceHasResource: Message-Body (optional)
Returns: IfaceHasResource
"""
allParams = ['ID', 'relationID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateIfaceHasResource" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/ifaceHasResources/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
        if ('body' in params):
            bodyParam = params['body']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'IfaceHasResource')
return responseObject
def deleteIfaceHasResource(self, ID, relationID, **kwargs):
"""
Delete relation by a given relationID
Args:
ID, str: Unique ID (required)
relationID, str: Unique ID of the Relation (required)
Returns:
"""
allParams = ['ID', 'relationID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteIfaceHasResource" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ifaces/{ID}/ifaceHasResources/{relationID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if ('relationID' in params):
replacement = str(self.apiClient.toPathValue(params['relationID']))
resourcePath = resourcePath.replace('{' + 'relationID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
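    # --- Illustrative usage sketch (added for clarity; not part of the generated SDK) ---
    # The wiring below is an assumption: judging by the file path this class is
    # IfacesApi and it stores a configured ApiClient as self.apiClient. All
    # identifiers, IDs and the payload object are placeholders for demonstration only.
    #
    #   api = IfacesApi(api_client)                                    # hypothetical constructor
    #   rel = api.createFactSheetHasRequiredby('123', body=payload)    # POST a new relation
    #   rel = api.getFactSheetHasRequiredby('123', relation_id)        # GET it back
    #   api.updateFactSheetHasRequiredby('123', relation_id, body=payload)
    #   api.deleteFactSheetHasRequiredby('123', relation_id)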
| {
"content_hash": "2ad669cd61e6e4ab916949c3e5be148c",
"timestamp": "",
"source": "github",
"line_count": 3958,
"max_line_length": 162,
"avg_line_length": 32.29636179888833,
"alnum_prop": 0.5477082665122938,
"repo_name": "leanix/leanix-sdk-python",
"id": "d50a69d4ac09e09ad16da99af8a7b23dc1570858",
"size": "127851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/leanix/IfacesApi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1555496"
},
{
"name": "Scala",
"bytes": "1911"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import absolute_import
from .. import unittest
import mock
import docker
from docker.utils import LogConfig
from compose.service import Service
from compose.container import Container
from compose.const import LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
from compose.service import (
ConfigError,
NeedsBuildError,
NoSuchImageError,
build_port_bindings,
build_volume_binding,
get_container_data_volumes,
merge_volume_bindings,
parse_repository_tag,
parse_volume_spec,
split_port,
)
class ServiceTest(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.Client)
def test_name_validations(self):
self.assertRaises(ConfigError, lambda: Service(name='', image='foo'))
self.assertRaises(ConfigError, lambda: Service(name=' ', image='foo'))
self.assertRaises(ConfigError, lambda: Service(name='/', image='foo'))
self.assertRaises(ConfigError, lambda: Service(name='!', image='foo'))
self.assertRaises(ConfigError, lambda: Service(name='\xe2', image='foo'))
Service('a', image='foo')
Service('foo', image='foo')
Service('foo-bar', image='foo')
Service('foo.bar', image='foo')
Service('foo_bar', image='foo')
Service('_', image='foo')
Service('___', image='foo')
Service('-', image='foo')
Service('--', image='foo')
Service('.__.', image='foo')
def test_project_validation(self):
self.assertRaises(ConfigError, lambda: Service('bar'))
self.assertRaises(ConfigError, lambda: Service(name='foo', project='>', image='foo'))
Service(name='foo', project='bar.bar__', image='foo')
def test_containers(self):
service = Service('db', self.mock_client, 'myproject', image='foo')
self.mock_client.containers.return_value = []
self.assertEqual(service.containers(), [])
def test_containers_with_containers(self):
self.mock_client.containers.return_value = [
dict(Name=str(i), Image='foo', Id=i) for i in range(3)
]
service = Service('db', self.mock_client, 'myproject', image='foo')
self.assertEqual([c.id for c in service.containers()], range(3))
expected_labels = [
'{0}=myproject'.format(LABEL_PROJECT),
'{0}=db'.format(LABEL_SERVICE),
'{0}=False'.format(LABEL_ONE_OFF),
]
self.mock_client.containers.assert_called_once_with(
all=False,
filters={'label': expected_labels})
def test_container_without_name(self):
self.mock_client.containers.return_value = [
{'Image': 'foo', 'Id': '1', 'Name': '1'},
{'Image': 'foo', 'Id': '2', 'Name': None},
{'Image': 'foo', 'Id': '3'},
]
service = Service('db', self.mock_client, 'myproject', image='foo')
self.assertEqual([c.id for c in service.containers()], ['1'])
self.assertEqual(service._next_container_number(), 2)
self.assertEqual(service.get_container(1).id, '1')
def test_get_volumes_from_container(self):
container_id = 'aabbccddee'
service = Service(
'test',
image='foo',
volumes_from=[mock.Mock(id=container_id, spec=Container)])
self.assertEqual(service._get_volumes_from(), [container_id])
def test_get_volumes_from_service_container_exists(self):
container_ids = ['aabbccddee', '12345']
from_service = mock.create_autospec(Service)
from_service.containers.return_value = [
mock.Mock(id=container_id, spec=Container)
for container_id in container_ids
]
service = Service('test', volumes_from=[from_service], image='foo')
self.assertEqual(service._get_volumes_from(), container_ids)
def test_get_volumes_from_service_no_container(self):
container_id = 'abababab'
from_service = mock.create_autospec(Service)
from_service.containers.return_value = []
from_service.create_container.return_value = mock.Mock(
id=container_id,
spec=Container)
service = Service('test', image='foo', volumes_from=[from_service])
self.assertEqual(service._get_volumes_from(), [container_id])
from_service.create_container.assert_called_once_with()
def test_split_port_with_host_ip(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000")
self.assertEqual(internal_port, "2000")
self.assertEqual(external_port, ("127.0.0.1", "1000"))
def test_split_port_with_protocol(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000/udp")
self.assertEqual(internal_port, "2000/udp")
self.assertEqual(external_port, ("127.0.0.1", "1000"))
def test_split_port_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000")
self.assertEqual(internal_port, "2000")
self.assertEqual(external_port, ("127.0.0.1", None))
def test_split_port_with_host_port(self):
internal_port, external_port = split_port("1000:2000")
self.assertEqual(internal_port, "2000")
self.assertEqual(external_port, "1000")
def test_split_port_no_host_port(self):
internal_port, external_port = split_port("2000")
self.assertEqual(internal_port, "2000")
self.assertEqual(external_port, None)
def test_split_port_invalid(self):
with self.assertRaises(ConfigError):
split_port("0.0.0.0:1000:2000:tcp")
def test_build_port_bindings_with_one_port(self):
port_bindings = build_port_bindings(["127.0.0.1:1000:1000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
def test_build_port_bindings_with_matching_internal_ports(self):
port_bindings = build_port_bindings(["127.0.0.1:1000:1000", "127.0.0.1:2000:1000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000"), ("127.0.0.1", "2000")])
def test_build_port_bindings_with_nonmatching_internal_ports(self):
port_bindings = build_port_bindings(["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")])
def test_split_domainname_none(self):
service = Service('foo', image='foo', hostname='name', client=self.mock_client)
self.mock_client.containers.return_value = []
opts = service._get_container_create_options({'image': 'foo'}, 1)
self.assertEqual(opts['hostname'], 'name', 'hostname')
self.assertFalse('domainname' in opts, 'domainname')
def test_memory_swap_limit(self):
service = Service(name='foo', image='foo', hostname='name', client=self.mock_client, mem_limit=1000000000, memswap_limit=2000000000)
self.mock_client.containers.return_value = []
opts = service._get_container_create_options({'some': 'overrides'}, 1)
self.assertEqual(opts['host_config']['MemorySwap'], 2000000000)
self.assertEqual(opts['host_config']['Memory'], 1000000000)
def test_log_opt(self):
log_opt = {'address': 'tcp://192.168.0.42:123'}
service = Service(name='foo', image='foo', hostname='name', client=self.mock_client, log_driver='syslog', log_opt=log_opt)
self.mock_client.containers.return_value = []
opts = service._get_container_create_options({'some': 'overrides'}, 1)
self.assertIsInstance(opts['host_config']['LogConfig'], LogConfig)
self.assertEqual(opts['host_config']['LogConfig'].type, 'syslog')
self.assertEqual(opts['host_config']['LogConfig'].config, log_opt)
def test_split_domainname_fqdn(self):
service = Service(
'foo',
hostname='name.domain.tld',
image='foo',
client=self.mock_client)
self.mock_client.containers.return_value = []
opts = service._get_container_create_options({'image': 'foo'}, 1)
self.assertEqual(opts['hostname'], 'name', 'hostname')
self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
def test_split_domainname_both(self):
service = Service(
'foo',
hostname='name',
image='foo',
domainname='domain.tld',
client=self.mock_client)
self.mock_client.containers.return_value = []
opts = service._get_container_create_options({'image': 'foo'}, 1)
self.assertEqual(opts['hostname'], 'name', 'hostname')
self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
def test_split_domainname_weird(self):
service = Service(
'foo',
hostname='name.sub',
domainname='domain.tld',
image='foo',
client=self.mock_client)
self.mock_client.containers.return_value = []
opts = service._get_container_create_options({'image': 'foo'}, 1)
self.assertEqual(opts['hostname'], 'name.sub', 'hostname')
self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
def test_get_container_not_found(self):
self.mock_client.containers.return_value = []
service = Service('foo', client=self.mock_client, image='foo')
self.assertRaises(ValueError, service.get_container)
@mock.patch('compose.service.Container', autospec=True)
def test_get_container(self, mock_container_class):
container_dict = dict(Name='default_foo_2')
self.mock_client.containers.return_value = [container_dict]
service = Service('foo', image='foo', client=self.mock_client)
container = service.get_container(number=2)
self.assertEqual(container, mock_container_class.from_ps.return_value)
mock_container_class.from_ps.assert_called_once_with(
self.mock_client, container_dict)
@mock.patch('compose.service.log', autospec=True)
def test_pull_image(self, mock_log):
service = Service('foo', client=self.mock_client, image='someimage:sometag')
service.pull()
self.mock_client.pull.assert_called_once_with(
'someimage',
tag='sometag',
stream=True)
mock_log.info.assert_called_once_with('Pulling foo (someimage:sometag)...')
def test_pull_image_no_tag(self):
service = Service('foo', client=self.mock_client, image='ababab')
service.pull()
self.mock_client.pull.assert_called_once_with(
'ababab',
tag='latest',
stream=True)
@mock.patch('compose.service.Container', autospec=True)
def test_recreate_container(self, _):
mock_container = mock.create_autospec(Container)
service = Service('foo', client=self.mock_client, image='someimage')
service.image = lambda: {'Id': 'abc123'}
new_container = service.recreate_container(mock_container)
mock_container.stop.assert_called_once_with(timeout=10)
self.mock_client.rename.assert_called_once_with(
mock_container.id,
'%s_%s' % (mock_container.short_id, mock_container.name))
new_container.start.assert_called_once_with()
mock_container.remove.assert_called_once_with()
@mock.patch('compose.service.Container', autospec=True)
def test_recreate_container_with_timeout(self, _):
mock_container = mock.create_autospec(Container)
self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
service = Service('foo', client=self.mock_client, image='someimage')
service.recreate_container(mock_container, timeout=1)
mock_container.stop.assert_called_once_with(timeout=1)
def test_parse_repository_tag(self):
self.assertEqual(parse_repository_tag("root"), ("root", ""))
self.assertEqual(parse_repository_tag("root:tag"), ("root", "tag"))
self.assertEqual(parse_repository_tag("user/repo"), ("user/repo", ""))
self.assertEqual(parse_repository_tag("user/repo:tag"), ("user/repo", "tag"))
self.assertEqual(parse_repository_tag("url:5000/repo"), ("url:5000/repo", ""))
self.assertEqual(parse_repository_tag("url:5000/repo:tag"), ("url:5000/repo", "tag"))
@mock.patch('compose.service.Container', autospec=True)
def test_create_container_latest_is_used_when_no_tag_specified(self, mock_container):
service = Service('foo', client=self.mock_client, image='someimage')
images = []
def pull(repo, tag=None, **kwargs):
self.assertEqual('someimage', repo)
self.assertEqual('latest', tag)
images.append({'Id': 'abc123'})
return []
service.image = lambda *args, **kwargs: mock_get_image(images)
self.mock_client.pull = pull
service.create_container()
self.assertEqual(1, len(images))
def test_create_container_with_build(self):
service = Service('foo', client=self.mock_client, build='.')
images = []
service.image = lambda *args, **kwargs: mock_get_image(images)
service.build = lambda: images.append({'Id': 'abc123'})
service.create_container(do_build=True)
self.assertEqual(1, len(images))
def test_create_container_no_build(self):
service = Service('foo', client=self.mock_client, build='.')
service.image = lambda: {'Id': 'abc123'}
service.create_container(do_build=False)
self.assertFalse(self.mock_client.build.called)
def test_create_container_no_build_but_needs_build(self):
service = Service('foo', client=self.mock_client, build='.')
service.image = lambda *args, **kwargs: mock_get_image([])
with self.assertRaises(NeedsBuildError):
service.create_container(do_build=False)
def test_build_does_not_pull(self):
self.mock_client.build.return_value = [
'{"stream": "Successfully built 12345"}',
]
service = Service('foo', client=self.mock_client, build='.')
service.build()
self.assertEqual(self.mock_client.build.call_count, 1)
self.assertFalse(self.mock_client.build.call_args[1]['pull'])
def mock_get_image(images):
if images:
return images[0]
else:
raise NoSuchImageError()
class ServiceVolumesTest(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.Client)
def test_parse_volume_spec_only_one_path(self):
spec = parse_volume_spec('/the/volume')
self.assertEqual(spec, (None, '/the/volume', 'rw'))
def test_parse_volume_spec_internal_and_external(self):
spec = parse_volume_spec('external:interval')
self.assertEqual(spec, ('external', 'interval', 'rw'))
def test_parse_volume_spec_with_mode(self):
spec = parse_volume_spec('external:interval:ro')
self.assertEqual(spec, ('external', 'interval', 'ro'))
spec = parse_volume_spec('external:interval:z')
self.assertEqual(spec, ('external', 'interval', 'z'))
def test_parse_volume_spec_too_many_parts(self):
with self.assertRaises(ConfigError):
parse_volume_spec('one:two:three:four')
def test_build_volume_binding(self):
binding = build_volume_binding(parse_volume_spec('/outside:/inside'))
self.assertEqual(binding, ('/inside', '/outside:/inside:rw'))
def test_get_container_data_volumes(self):
options = [
'/host/volume:/host/volume:ro',
'/new/volume',
'/existing/volume',
]
self.mock_client.inspect_image.return_value = {
'ContainerConfig': {
'Volumes': {
'/mnt/image/data': {},
}
}
}
container = Container(self.mock_client, {
'Image': 'ababab',
'Volumes': {
'/host/volume': '/host/volume',
'/existing/volume': '/var/lib/docker/aaaaaaaa',
'/removed/volume': '/var/lib/docker/bbbbbbbb',
'/mnt/image/data': '/var/lib/docker/cccccccc',
},
}, has_been_inspected=True)
expected = {
'/existing/volume': '/var/lib/docker/aaaaaaaa:/existing/volume:rw',
'/mnt/image/data': '/var/lib/docker/cccccccc:/mnt/image/data:rw',
}
binds = get_container_data_volumes(container, options)
self.assertEqual(binds, expected)
def test_merge_volume_bindings(self):
options = [
'/host/volume:/host/volume:ro',
'/host/rw/volume:/host/rw/volume',
'/new/volume',
'/existing/volume',
]
self.mock_client.inspect_image.return_value = {
'ContainerConfig': {'Volumes': {}}
}
intermediate_container = Container(self.mock_client, {
'Image': 'ababab',
'Volumes': {'/existing/volume': '/var/lib/docker/aaaaaaaa'},
}, has_been_inspected=True)
expected = [
'/host/volume:/host/volume:ro',
'/host/rw/volume:/host/rw/volume:rw',
'/var/lib/docker/aaaaaaaa:/existing/volume:rw',
]
binds = merge_volume_bindings(options, intermediate_container)
self.assertEqual(set(binds), set(expected))
def test_mount_same_host_path_to_two_volumes(self):
service = Service(
'web',
image='busybox',
volumes=[
'/host/path:/data1',
'/host/path:/data2',
],
client=self.mock_client,
)
self.mock_client.inspect_image.return_value = {
'Id': 'ababab',
'ContainerConfig': {
'Volumes': {}
}
}
create_options = service._get_container_create_options(
override_options={},
number=1,
)
self.assertEqual(
set(create_options['host_config']['Binds']),
set([
'/host/path:/data1:rw',
'/host/path:/data2:rw',
]),
)
def test_different_host_path_in_container_json(self):
service = Service(
'web',
image='busybox',
volumes=['/host/path:/data'],
client=self.mock_client,
)
self.mock_client.inspect_image.return_value = {
'Id': 'ababab',
'ContainerConfig': {
'Volumes': {
'/data': {},
}
}
}
self.mock_client.inspect_container.return_value = {
'Id': '123123123',
'Image': 'ababab',
'Volumes': {
'/data': '/mnt/sda1/host/path',
},
}
create_options = service._get_container_create_options(
override_options={},
number=1,
previous_container=Container(self.mock_client, {'Id': '123123123'}),
)
self.assertEqual(
create_options['host_config']['Binds'],
['/mnt/sda1/host/path:/data:rw'],
)
def test_create_with_special_volume_mode(self):
self.mock_client.inspect_image.return_value = {'Id': 'imageid'}
create_calls = []
def create_container(*args, **kwargs):
create_calls.append((args, kwargs))
return {'Id': 'containerid'}
self.mock_client.create_container = create_container
volumes = ['/tmp:/foo:z']
Service(
'web',
client=self.mock_client,
image='busybox',
volumes=volumes,
).create_container()
self.assertEqual(len(create_calls), 1)
self.assertEqual(create_calls[0][1]['host_config']['Binds'], volumes)
| {
"content_hash": "1e2aeb4d64be9a1198d94c7967529429",
"timestamp": "",
"source": "github",
"line_count": 525,
"max_line_length": 140,
"avg_line_length": 38.076190476190476,
"alnum_prop": 0.5970985492746373,
"repo_name": "mindaugasrukas/compose",
"id": "7e5266dd79af16333cf8ea154205f53c8ef3f8d1",
"size": "19990",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/service_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "295818"
},
{
"name": "Shell",
"bytes": "16006"
}
],
"symlink_target": ""
} |
import os
import platform
import time
import random
global fishPoss
def bankList():
# slots 1 - 50
print("bankList")
def loadingScreen():
print(topBar)
time.sleep(0.3)
print(coderList)
time.sleep(0.3)
print(betaTesters)
time.sleep(0.3)
oloop=6
while(oloop > 0):
print('o', end="")
time.sleep(0.3)
oloop-=1
def inventoryList():
# Will store item IDs
# Format item:variation:uses:addons(fire, poison, enchantments, etc)
# Example: 1:0:100:2,5,3
slot1a = ""
slot1b = ""
slot1c = ""
slot2a = ""
slot2b = ""
slot2c = ""
slot3a = ""
slot3b = ""
slot3c = ""
slot4a = ""
slot4b = ""
slot4c = ""
slot5a = ""
slot5b = ""
slot5c = ""
slot6a = ""
slot6b = ""
slot6c = ""
slot7a = ""
slot7b = ""
slot7c = ""
slot8a = ""
slot8b = ""
slot8c = ""
slot9a = ""
slot9b = ""
slot9c = ""
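# --- Illustrative sketch (added for clarity, not in the original file) ---
# Parses the item string format documented in inventoryList above
# ("item:variation:uses:addons", e.g. "1:0:100:2,5,3"). The field names are
# assumptions taken from that comment.
def parseItemString(itemString):
    item, variation, uses, addons = itemString.split(":")
    return {
        "item": int(item),
        "variation": int(variation),
        "uses": int(uses),
        "addons": [int(a) for a in addons.split(",")] if addons else [],
    }
# Example: parseItemString("1:0:100:2,5,3")
# -> {'item': 1, 'variation': 0, 'uses': 100, 'addons': [2, 5, 3]}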
# Game info
topBar = "<>-<>-<> || TextScapeRPG by S0Ndustries || <>-<>-<>"
coderList = "Coded by Evan Young & Elijah Keane"
betaTesters = "Evan, Elijah"
#
# User stats
money = 0
experience = 0
inventorySlotRows=9
bankSlotRows=50
memberStatus=0
#
# User skills
attackSkill = 1
strengthSkill = 1
defenceSkill = 1
rangedSkill = 1
magicSkill = 1
healthSkill = 1
craftingSkill = 1
miningSkill = 1
fishingSkill = 1
cookingSkill = 1
woodcuttingSkill = 1
agilitySkill = 1
herbloreSkill = 1
farmingSkill = 1
huntingSkill = 1
summoningSkill = 1
#
def loadingBar():
    # Draws an 11-step text progress bar, filling one segment per step
    loading = 0
    while(loading < 11):
        print("[" + "=" * loading + " " * (10 - loading) + "]")
        loading += 1
        time.sleep(0.1)
class fishChoose:
def init():
fishChoose.arrays()
if(fishingSkill < 5):
return
elif(fishingSkill < 10):
fishChoose.level5()
elif(fishingSkill < 15):
fishChoose.level10()
elif(fishingSkill < 16):
fishChoose.level15()
elif(fishingSkill < 20):
fishChoose.level16()
elif(fishingSkill < 23):
fishChoose.level20()
elif(fishingSkill < 25):
fishChoose.level23()
else:
fishChoose.level25()
def arrays():
global fishPoss
global fishingSkill5
global fishingSkill10
global fishingSkill15
global fishingSkill16
global fishingSkill20
global fishingSkill23
global fishingSkill25
fishPoss=["Shrimp", "Crayfish", "Minnow"]
fishingSkill5=["Karabwanji", "Sardine"]
fishingSkill10=["Herring"]
fishingSkill15=["Anchovies"]
fishingSkill16=["Mackeral"]
fishingSkill20=["Trout"]
fishingSkill23=["Cod"]
fishingSkill25=["Pike"]
def level5():
global fishPoss
fishPoss.extend(fishingSkill5)
def level10():
global fishPoss
fishChoose.level5()
fishPoss.extend(fishingSkill10)
def level15():
global fishPoss
fishChoose.level10()
fishPoss.extend(fishingSkill15)
def level16():
global fishPoss
fishChoose.level15()
fishPoss.extend(fishingSkill16)
def level20():
global fishPoss
fishChoose.level16()
fishPoss.extend(fishingSkill20)
def level23():
global fishPoss
fishChoose.level20()
fishPoss.extend(fishingSkill23)
def level25():
global fishPoss
fishChoose.level23()
fishPoss.extend(fishingSkill25)
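# Illustrative note (added for clarity): the level methods chain, so the fish
# pool is cumulative. With fishingSkill = 12, init() calls level10(), which
# calls level5(), giving:
#   ['Shrimp', 'Crayfish', 'Minnow', 'Karabwanji', 'Sardine', 'Herring']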
fishChoose.init()
print(fishPoss)
| {
"content_hash": "fe71b634548669dd67f458a520ea1729",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 68,
"avg_line_length": 20.122093023255815,
"alnum_prop": 0.5908696908407974,
"repo_name": "S0Ndustries/TextScapeRPG",
"id": "453833badd78f20b4dacaa64a84f5b393f3f8e4b",
"size": "3461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unaddedCode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "136571"
}
],
"symlink_target": ""
} |
""" 'editor' hooks for common editors that work well with ipython
They should honor the line number argument, at least.
Contributions are *very* welcome.
"""
from __future__ import print_function
import os
import pipes
import subprocess
from IPython import get_ipython
from IPython.core.error import TryNext
from IPython.utils import py3compat
def install_editor(template, wait=False):
"""Installs the editor that is called by IPython for the %edit magic.
This overrides the default editor, which is generally set by your EDITOR
environment variable or is notepad (windows) or vi (linux). By supplying a
    template string `template`, you can control how the editor is invoked
    by IPython (e.g. the format in which it accepts command line options).
Parameters
----------
template : basestring
        `template` acts as a template for how your editor is invoked by
        the shell. It should contain '{filename}', which will be replaced on
        invocation with the file name, and '{line}', which will be replaced
        with the line number (or 0) at which to open the file.
wait : bool
If `wait` is true, wait until the user presses enter before returning,
to facilitate non-blocking editors that exit immediately after
the call.
"""
# not all editors support $line, so we'll leave out this check
# for substitution in ['$file', '$line']:
# if not substitution in run_template:
# raise ValueError(('run_template should contain %s'
# ' for string substitution. You supplied "%s"' % (substitution,
# run_template)))
def call_editor(self, filename, line=0):
if line is None:
line = 0
cmd = template.format(filename=pipes.quote(filename), line=line)
print(">", cmd)
proc = subprocess.Popen(cmd, shell=True)
if wait and proc.wait() != 0:
raise TryNext()
if wait:
py3compat.input("Press Enter when done editing:")
get_ipython().set_hook('editor', call_editor)
get_ipython().editor = template
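# --- Illustrative sketch (added for clarity, not part of the original module) ---
# A user-defined hook for a hypothetical editor, following the same pattern as
# the hooks below; the executable name and the '+<line>' flag syntax are assumptions.
def my_editor(exe=u'myeditor'):
    """ Example custom hook: make %edit run `myeditor +<line> <file>` """
    install_editor(exe + u' +{line} {filename}', wait=True)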
# in these, exe is always the path/name of the executable. Useful
# if you don't have the editor directory in your path
def komodo(exe=u'komodo'):
""" Activestate Komodo [Edit] """
install_editor(exe + u' -l {line} {filename}', wait=True)
def scite(exe=u"scite"):
""" SciTE or Sc1 """
install_editor(exe + u' {filename} -goto:{line}')
def notepadplusplus(exe=u'notepad++'):
""" Notepad++ http://notepad-plus.sourceforge.net """
install_editor(exe + u' -n{line} {filename}')
def jed(exe=u'jed'):
""" JED, the lightweight emacsish editor """
install_editor(exe + u' +{line} {filename}')
def idle(exe=u'idle'):
""" Idle, the editor bundled with python
Parameters
----------
exe : str, None
If none, should be pretty smart about finding the executable.
"""
if exe is None:
import idlelib
        p = os.path.dirname(idlelib.__file__)
        # I'm not sure if this actually works. Is this idle.py script
        # guaranteed to be executable?
exe = os.path.join(p, 'idle.py')
install_editor(exe + u' {filename}')
def mate(exe=u'mate'):
""" TextMate, the missing editor"""
# wait=True is not required since we're using the -w flag to mate
install_editor(exe + u' -w -l {line} {filename}')
# ##########################################
# these are untested, report any problems
# ##########################################
def emacs(exe=u'emacs'):
install_editor(exe + u' +{line} {filename}')
def gnuclient(exe=u'gnuclient'):
install_editor(exe + u' -nw +{line} {filename}')
def crimson_editor(exe=u'cedt.exe'):
install_editor(exe + u' /L:{line} {filename}')
def kate(exe=u'kate'):
install_editor(exe + u' -u -l {line} {filename}')
| {
"content_hash": "168e6ab5d6aac2bb518762dd77fafdd8",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 78,
"avg_line_length": 31.120967741935484,
"alnum_prop": 0.6278828712101581,
"repo_name": "Lightmatter/django-inlineformfield",
"id": "2237074e84328015f241adc33df96d2c033db8d0",
"size": "3859",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": ".tox/py27/lib/python2.7/site-packages/IPython/lib/editorhooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43622"
},
{
"name": "Groff",
"bytes": "3667"
},
{
"name": "HTML",
"bytes": "108126"
},
{
"name": "JavaScript",
"bytes": "853457"
},
{
"name": "Python",
"bytes": "10506732"
},
{
"name": "Shell",
"bytes": "3801"
},
{
"name": "Smarty",
"bytes": "21023"
}
],
"symlink_target": ""
} |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from os.path import join as path_join
from time import strftime
def save_plot(v,k,goal_val,sim_type,generation,score,output_path,plot_title=None,yrange=None,log_plot=False,constraints=None,name=''):
#For every measurement in results
plt.figure()
freq = v[0]
gain = v[1]
if log_plot:#Logarithmic plot
plt.semilogx(freq,gain,'g',basex=10)
plt.semilogx(freq,goal_val,'b',basex=10)
#if self.plot_weight:
# plt.semilogx(freq,weight_val,'r--',basex=10)
if constraints!=None:
plt.plot(*zip(*constraints), marker='.', color='r', ls='')
else:
plt.plot(freq,gain,'g')
plt.plot(freq,goal_val,'b')
#if self.plot_weight:
# plt.plot(freq,weight_val,'r--')
if constraints!=None:
plt.plot(*zip(*constraints), marker='.', color='r', ls='')
# update axis ranges
ax = []
ax[0:4] = plt.axis()
# check if we were given a frequency range for the plot
if yrange!=None:
plt.axis([min(freq),max(freq),yrange[0],yrange[1]])
else:
plt.axis([min(freq),max(freq),min(-0.5,-0.5+min(goal_val)),max(1.5,0.5+max(goal_val))])
if sim_type=='dc':
plt.xlabel("Input (V)")
if sim_type=='ac':
plt.xlabel("Input (Hz)")
if sim_type=='tran':
plt.xlabel("Time (s)")
if plot_title!=None:
plt.title(plot_title)
else:
plt.title(k)
plt.annotate('Generation '+str(generation),xy=(0.05,0.95),xycoords='figure fraction')
if score!=None:
plt.annotate('Score '+'{0:.2f}'.format(score),xy=(0.75,0.95),xycoords='figure fraction')
plt.grid(True)
# turn on the minor gridlines to give that awesome log-scaled look
plt.grid(True,which='minor')
if len(k)>=3 and k[1:3] == 'db':
plt.ylabel("Output (dB)")
elif k[0]=='v':
plt.ylabel("Output (V)")
elif k[0]=='i':
plt.ylabel("Output (A)")
plt.savefig(path_join(output_path,strftime("%Y-%m-%d %H:%M:%S")+'-'+k+'-'+str(name)+'.png'))
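# --- Illustrative sketch (added for clarity, not in the original file) ---
# A hand-built call with made-up data showing the expected argument shapes;
# real calls arrive through stdin via the entry point below.
def _demo_save_plot():
    freq = [10.0, 100.0, 1000.0]   # x-axis values (Hz for an 'ac' sweep)
    gain = [0.1, 0.5, 0.9]         # measured response
    goal = [0.0, 0.5, 1.0]         # target response
    save_plot((freq, gain), 'v(out)', goal, 'ac', 1, 12.3, '.',
              plot_title='demo', yrange=None, log_plot=True,
              constraints=None, name='demo')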
# Script entry point: the caller writes a tuple of save_plot arguments to
# stdin; input() evaluates that expression (Python 2) before it is unpacked.
data = input()
save_plot(*data)
| {
"content_hash": "a09a5a87515725a19973ec5924801d48",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 134,
"avg_line_length": 33.34375,
"alnum_prop": 0.5857544517338332,
"repo_name": "mekayama/evolutionary-circuits",
"id": "50761cd3fc4a129079826273fd0ea21af19ff7de",
"size": "2134",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "evolutionary/plotting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79532"
}
],
"symlink_target": ""
} |
import numpy
# based on the vicar2png module by Jessica McKellar (jesstess at mit.edu)
# substantial modifications have been made to the code. However for
# thoroughness, I am including her Copyright under the MIT License below:
'''
The MIT License (MIT)
Copyright (c) 2012-2013 Jessica McKellar
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
'''
class VICARMetadata(object):
"""
Contains VICAR metadata accessible as uppercase class attributes,
e.g.:
vicar.RECSIZE
vicar.FORMAT
"""
def __init__(self, metadata):
"""
metadata: A dictionary of VICAR label/value pairs.
"""
for key, value in metadata.iteritems():
if value.isdigit():
value = int(value)
setattr(self, key.upper(), value)
def addMetadataToDict(metadata, metadata_dict):
gettingTag = True
has_lparen = False
has_lquote = False
tag_buf = ''
for char in metadata:
if gettingTag:
if char == '=':
tag = tag_buf
tag_buf = ''
gettingTag = False
has_lparen = False
has_lquote = False
elif char != ' ':
tag_buf += char
else: # getting value
if char == "'":
has_lquote = not has_lquote
if has_lparen:
tag_buf += char
elif char == "(" and not has_lquote:
has_lparen = True
tag_buf += char
elif char == ")" and not has_lquote:
has_lparen = False
tag_buf += char
elif char == " " and tag_buf and not (has_lquote or has_lparen):
# We have a full value, save it.
value = tag_buf
metadata_dict[tag] = value
gettingTag = True
has_lparen = False
has_lquote = False
tag_buf = ""
elif char == " " and not has_lquote:
continue
else:
tag_buf += char
return metadata_dict
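# A minimal sketch of what the parser above returns for a hand-written
# label fragment (the tags and values are hypothetical). Quotes are
# stripped and every value stays a string; VICARMetadata converts digit
# strings to ints afterwards.
def _example_label_parse():
    parsed = addMetadataToDict("FORMAT='BYTE'  NL=10 NS=20 ", {})
    assert parsed == {'FORMAT': 'BYTE', 'NL': '10', 'NS': '20'}
    return parsed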
def process_metadata(metadata_fd):
# A VICAR file must start with 'LBLSIZE=<integer label size>'.
lblsize_field = metadata_fd.read(len("LBLSIZE="))
if lblsize_field.upper() != "LBLSIZE=":
raise ValueError("Malformed VICAR file: doesn't start with LBLSIZE.")
lblsize = ""
while True:
char = metadata_fd.read(1)
if char == " ":
break
else:
lblsize += char
try:
lblsize = int(lblsize)
except ValueError:
raise ValueError("Malformed VICAR file: contains non-integer LBLSIZE.")
# Read in the rest of the VICAR metadata.
metadata_fd.seek(0)
metadata = metadata_fd.read(lblsize)
metadata_dict = {}
metadata_dict = addMetadataToDict(metadata, metadata_dict)
vicar = VICARMetadata(metadata_dict)
if(hasattr(vicar, 'EOL')):
if vicar.EOL == 1:
if vicar.FORMAT == 'BYTE':
byteCount = 1
elif vicar.FORMAT == 'HALF':
byteCount = 2
elif vicar.FORMAT == 'FULL':
byteCount = 4
elif vicar.FORMAT == 'REAL':
byteCount = 4
elif vicar.FORMAT == 'DOUB':
byteCount = 8
else:
raise ValueError('Unrecognized Vicar FORMAT: %s in file: %s'%(vicar.FORMAT,metadata_fd.name))
# Read in the VICAR metadata from the end of the file
metadata_fd.seek(vicar.LBLSIZE + vicar.NLB * vicar.RECSIZE
+ byteCount*vicar.N1*vicar.N2*vicar.N3)
# A VICAR file must start with 'LBLSIZE=<integer label size>'.
lblsize_field = metadata_fd.read(len("LBLSIZE="))
if lblsize_field.upper() != "LBLSIZE=":
raise ValueError("Malformed VICAR file: EOL doesn't start with LBLSIZE.")
lblsize = ""
while True:
char = metadata_fd.read(1)
if char == " ":
break
else:
lblsize += char
try:
lblsize = int(lblsize)
except ValueError:
raise ValueError("Malformed VICAR file: contains non-integer LBLSIZE.")
metadata_fd.seek(vicar.LBLSIZE + vicar.NLB * vicar.RECSIZE
+ byteCount*vicar.N1*vicar.N2*vicar.N3)
metadata = metadata_fd.read(lblsize)
metadata_dict = addMetadataToDict(metadata, metadata_dict)
metadata_fd.close()
return VICARMetadata(metadata_dict)
def extract_image(vicar, image_fd):
image_fd.seek(vicar.LBLSIZE + vicar.NLB * vicar.RECSIZE)
if vicar.FORMAT == 'BYTE':
outType = numpy.int8
elif vicar.FORMAT == 'HALF':
outType = numpy.int16
elif vicar.FORMAT == 'FULL':
outType = numpy.int32
elif vicar.FORMAT == 'REAL':
outType = numpy.float32
elif vicar.FORMAT == 'DOUB':
outType = numpy.float64
else:
raise ValueError('Unrecognized Vicar FORMAT: %s in file: %s'%(vicar.FORMAT,image_fd.name))
if vicar.ORG != 'BSQ':
raise ValueError('Vicar ORG: %s is not supported.' % vicar.ORG)
if vicar.NB > 1:
print 'Reading only the first image of %i images in the file'%vicar.NB
nx = vicar.NS
ny = vicar.NL
image = numpy.fromfile(image_fd,dtype=outType,count=nx*ny).reshape(ny,nx)
return image
def readVicar(infile):
metadata_fd = open(infile, "r")
vicar_metadata = process_metadata(metadata_fd)
image_fd = open(infile, "rb")
image = extract_image(vicar_metadata, image_fd)
return (image,vicar_metadata)
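# A minimal usage sketch (the file name is hypothetical): readVicar returns
# the first band as a 2-D numpy array of shape (NL, NS) plus the parsed label.
#
#     image, meta = readVicar('frame.img')
#     print meta.FORMAT, image.shape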
| {
"content_hash": "8edaf5cbbeee825c67d2fd29d3e89937",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 104,
"avg_line_length": 30.602040816326532,
"alnum_prop": 0.5970323441147048,
"repo_name": "gabyx/acciv",
"id": "0c41bbb65ccf764a1f630cb4ac62c7590ba90d53",
"size": "6017",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/convertImages/Vicar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1358"
},
{
"name": "C++",
"bytes": "286340"
},
{
"name": "CMake",
"bytes": "5926"
},
{
"name": "Makefile",
"bytes": "649"
},
{
"name": "Matlab",
"bytes": "17000"
},
{
"name": "Python",
"bytes": "60835"
},
{
"name": "Shell",
"bytes": "7525"
}
],
"symlink_target": ""
} |
import pytest
from thefuck.rules.grep_arguments_order import get_new_command, match
from tests.utils import Command
stderr = 'grep: {}: No such file or directory'.format
@pytest.fixture(autouse=True)
def os_path(monkeypatch):
monkeypatch.setattr('os.path.isfile', lambda x: not x.startswith('-'))
@pytest.mark.parametrize('script, file', [
('grep test.py test', 'test'),
('grep -lir . test', 'test'),
('egrep test.py test', 'test'),
('egrep -lir . test', 'test')])
def test_match(script, file):
assert match(Command(script, stderr=stderr(file)))
@pytest.mark.parametrize('script, stderr', [
('cat test.py', stderr('test')),
('grep test test.py', ''),
('grep -lir test .', ''),
('egrep test test.py', ''),
('egrep -lir test .', '')])
def test_not_match(script, stderr):
assert not match(Command(script, stderr=stderr))
@pytest.mark.parametrize('script, stderr, result', [
('grep test.py test', stderr('test'), 'grep test test.py'),
('grep -lir . test', stderr('test'), 'grep -lir test .'),
('grep . test -lir', stderr('test'), 'grep test -lir .'),
('egrep test.py test', stderr('test'), 'egrep test test.py'),
('egrep -lir . test', stderr('test'), 'egrep -lir test .'),
('egrep . test -lir', stderr('test'), 'egrep test -lir .')])
def test_get_new_command(script, stderr, result):
assert get_new_command(Command(script, stderr=stderr)) == result
| {
"content_hash": "6e66b593da32e599e846e1084513e935",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 74,
"avg_line_length": 35.75,
"alnum_prop": 0.6272727272727273,
"repo_name": "mcarton/thefuck",
"id": "b2896493017a17f011a7db935f63554122974cbc",
"size": "1430",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/rules/test_grep_arguments_order.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "266817"
},
{
"name": "Shell",
"bytes": "1525"
}
],
"symlink_target": ""
} |
"""Public API functions and helpers for declarative."""
from ...schema import Table, MetaData
from ...orm import synonym as _orm_synonym, \
comparable_property,\
interfaces, properties, attributes
from ...orm.util import polymorphic_union
from ...orm.base import _mapper_or_none
from ...util import OrderedDict, hybridmethod, hybridproperty
from ... import util
from ... import exc
import weakref
from .base import _as_declarative, \
_declarative_constructor,\
_DeferredMapperConfig, _add_attribute
from .clsregistry import _class_resolver
def instrument_declarative(cls, registry, metadata):
"""Given a class, configure the class declaratively,
using the given registry, which can be any dictionary, and
MetaData object.
"""
if '_decl_class_registry' in cls.__dict__:
raise exc.InvalidRequestError(
"Class %r already has been "
"instrumented declaratively" % cls)
cls._decl_class_registry = registry
cls.metadata = metadata
_as_declarative(cls, cls.__name__, cls.__dict__)
def has_inherited_table(cls):
"""Given a class, return True if any of the classes it inherits from has a
mapped table, otherwise return False.
"""
for class_ in cls.__mro__[1:]:
if getattr(class_, '__table__', None) is not None:
return True
return False
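# A minimal sketch (class names hypothetical, Column/Integer imported from
# sqlalchemy): a mixin or subclass can use this helper to decide whether a
# parent already supplies a mapped table.
#
#     class Person(Base):
#         __tablename__ = 'person'
#         id = Column(Integer, primary_key=True)
#
#     class Engineer(Person):
#         pass
#
#     has_inherited_table(Engineer)   # True  - Person already has __table__
#     has_inherited_table(Person)     # False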
#
# All Base classes are generated by DeclarativeMeta.
# All subclasses of Base, i.e. the models, are likewise created through DeclarativeMeta.
#
class DeclarativeMeta(type):
def __init__(cls, classname, bases, dict_):
if '_decl_class_registry' not in cls.__dict__:
_as_declarative(cls, classname, cls.__dict__)
type.__init__(cls, classname, bases, dict_)
def __setattr__(cls, key, value):
_add_attribute(cls, key, value)
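# Because __setattr__ is routed through _add_attribute, an attribute assigned
# to an already-mapped class still participates in the mapping (names below
# are hypothetical):
#
#     User.nickname = Column(String(50))   # becomes part of User's mapping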
def synonym_for(name, map_column=False):
"""Decorator, make a Python @property a query synonym for a column.
A decorator version of :func:`~sqlalchemy.orm.synonym`. The function being
decorated is the 'descriptor', otherwise passes its arguments through to
synonym()::
@synonym_for('col')
@property
def prop(self):
return 'special sauce'
The regular ``synonym()`` is also usable directly in a declarative setting
and may be convenient for read/write properties::
prop = synonym('col', descriptor=property(_read_prop, _write_prop))
"""
def decorate(fn):
return _orm_synonym(name, map_column=map_column, descriptor=fn)
return decorate
def comparable_using(comparator_factory):
"""Decorator, allow a Python @property to be used in query criteria.
This is a decorator front end to
:func:`~sqlalchemy.orm.comparable_property` that passes
through the comparator_factory and the function being decorated::
@comparable_using(MyComparatorType)
@property
def prop(self):
return 'special sauce'
The regular ``comparable_property()`` is also usable directly in a
declarative setting and may be convenient for read/write properties::
prop = comparable_property(MyComparatorType)
"""
def decorate(fn):
return comparable_property(comparator_factory, fn)
return decorate
class declared_attr(interfaces._MappedAttribute, property):
"""Mark a class-level method as representing the definition of
a mapped property or special declarative member name.
@declared_attr turns the attribute into a scalar-like
property that can be invoked from the uninstantiated class.
Declarative treats attributes specifically marked with
@declared_attr as returning a construct that is specific
to mapping or declarative table configuration. The name
of the attribute is that of what the non-dynamic version
of the attribute would be.
@declared_attr is more often than not applicable to mixins,
to define relationships that are to be applied to different
implementors of the class::
class ProvidesUser(object):
"A mixin that adds a 'user' relationship to classes."
@declared_attr
def user(self):
return relationship("User")
It also can be applied to mapped classes, such as to provide
a "polymorphic" scheme for inheritance::
class Employee(Base):
id = Column(Integer, primary_key=True)
type = Column(String(50), nullable=False)
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
@declared_attr
def __mapper_args__(cls):
if cls.__name__ == 'Employee':
return {
"polymorphic_on":cls.type,
"polymorphic_identity":"Employee"
}
else:
return {"polymorphic_identity":cls.__name__}
.. versionchanged:: 0.8 :class:`.declared_attr` can be used with
non-ORM or extension attributes, such as user-defined attributes
or :func:`.association_proxy` objects, which will be assigned
to the class at class construction time.
"""
def __init__(self, fget, cascading=False):
super(declared_attr, self).__init__(fget)
self.__doc__ = fget.__doc__
self._cascading = cascading
def __get__(desc, self, cls):
reg = cls.__dict__.get('_sa_declared_attr_reg', None)
if reg is None:
manager = attributes.manager_of_class(cls)
if manager is None:
util.warn(
"Unmanaged access of declarative attribute %s from "
"non-mapped class %s" %
(desc.fget.__name__, cls.__name__))
return desc.fget(cls)
if reg is None:
return desc.fget(cls)
elif desc in reg:
return reg[desc]
else:
reg[desc] = obj = desc.fget(cls)
return obj
@hybridmethod
def _stateful(cls, **kw):
return _stateful_declared_attr(**kw)
@hybridproperty
def cascading(cls):
"""Mark a :class:`.declared_attr` as cascading.
This is a special-use modifier which indicates that a column
or MapperProperty-based declared attribute should be configured
distinctly per mapped subclass, within a mapped-inheritance scenario.
Below, both MyClass as well as MySubClass will have a distinct
``id`` Column object established::
class HasSomeAttribute(object):
@declared_attr.cascading
def some_id(cls):
if has_inherited_table(cls):
return Column(
ForeignKey('myclass.id'), primary_key=True)
else:
return Column(Integer, primary_key=True)
return Column('id', Integer, primary_key=True)
class MyClass(HasSomeAttribute, Base):
""
# ...
class MySubClass(MyClass):
""
# ...
The behavior of the above configuration is that ``MySubClass``
will refer to both its own ``id`` column as well as that of
``MyClass`` underneath the attribute named ``some_id``.
.. seealso::
:ref:`declarative_inheritance`
:ref:`mixin_inheritance_columns`
"""
return cls._stateful(cascading=True)
class _stateful_declared_attr(declared_attr):
def __init__(self, **kw):
self.kw = kw
def _stateful(self, **kw):
new_kw = self.kw.copy()
new_kw.update(kw)
return _stateful_declared_attr(**new_kw)
def __call__(self, fn):
return declared_attr(fn, **self.kw)
def declarative_base(bind=None, metadata=None, mapper=None, cls=object,
name='Base', constructor=_declarative_constructor,
class_registry=None,
metaclass=DeclarativeMeta):
"""Construct a base class for declarative class definitions.
The new base class will be given a metaclass that produces
appropriate :class:`~sqlalchemy.schema.Table` objects and makes
the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the
information provided declaratively in the class and any subclasses
of the class.
:param bind: An optional
:class:`~sqlalchemy.engine.Connectable`, will be assigned
the ``bind`` attribute on the :class:`~sqlalchemy.schema.MetaData`
instance.
:param metadata:
An optional :class:`~sqlalchemy.schema.MetaData` instance. All
:class:`~sqlalchemy.schema.Table` objects implicitly declared by
subclasses of the base will share this MetaData. A MetaData instance
will be created if none is provided. The
:class:`~sqlalchemy.schema.MetaData` instance will be available via the
`metadata` attribute of the generated declarative base class.
:param mapper:
An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will
be used to map subclasses to their Tables.
:param cls:
Defaults to :class:`object`. A type to use as the base for the generated
declarative base class. May be a class or tuple of classes.
:param name:
Defaults to ``Base``. The display name for the generated
class. Customizing this is not required, but can improve clarity in
tracebacks and debugging.
:param constructor:
Defaults to
:func:`~sqlalchemy.ext.declarative._declarative_constructor`, an
__init__ implementation that assigns \**kwargs for declared
fields and relationships to an instance. If ``None`` is supplied,
no __init__ will be provided and construction will fall back to
cls.__init__ by way of the normal Python semantics.
:param class_registry: optional dictionary that will serve as the
registry of class names-> mapped classes when string names
are used to identify classes inside of :func:`.relationship`
and others. Allows two or more declarative base classes
to share the same registry of class names for simplified
inter-base relationships.
:param metaclass:
Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__
compatible callable to use as the meta type of the generated
declarative base class.
.. seealso::
:func:`.as_declarative`
"""
lcl_metadata = metadata or MetaData()
if bind:
lcl_metadata.bind = bind
if class_registry is None:
class_registry = weakref.WeakValueDictionary()
bases = not isinstance(cls, tuple) and (cls,) or cls
class_dict = dict(_decl_class_registry=class_registry,
metadata=lcl_metadata)
if constructor:
class_dict['__init__'] = constructor
if mapper:
class_dict['__mapper_cls__'] = mapper
return metaclass(name, bases, class_dict)
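# A minimal usage sketch (table and column names hypothetical; Column, Integer
# and String imported from sqlalchemy, engine created elsewhere):
#
#     Base = declarative_base()
#
#     class User(Base):
#         __tablename__ = 'user'
#         id = Column(Integer, primary_key=True)
#         name = Column(String(50))
#
#     Base.metadata.create_all(engine)   # emits CREATE TABLE for mapped classes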
def as_declarative(**kw):
"""
Class decorator for :func:`.declarative_base`.
Provides a syntactical shortcut to the ``cls`` argument
sent to :func:`.declarative_base`, allowing the base class
to be converted in-place to a "declarative" base::
from sqlalchemy.ext.declarative import as_declarative
@as_declarative()
class Base(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column(Integer, primary_key=True)
class MyMappedClass(Base):
# ...
All keyword arguments passed to :func:`.as_declarative` are passed
along to :func:`.declarative_base`.
.. versionadded:: 0.8.3
.. seealso::
:func:`.declarative_base`
"""
def decorate(cls):
kw['cls'] = cls
kw['name'] = cls.__name__
return declarative_base(**kw)
return decorate
class ConcreteBase(object):
"""A helper class for 'concrete' declarative mappings.
:class:`.ConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
``__declare_last__()`` function, which is essentially
a hook for the :meth:`.after_configured` event.
:class:`.ConcreteBase` produces a mapped
table for the class itself. Compare to :class:`.AbstractConcreteBase`,
which does not.
Example::
from sqlalchemy.ext.declarative import ConcreteBase
class Employee(ConcreteBase, Base):
__tablename__ = 'employee'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee',
'concrete':True}
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
"""
@classmethod
def _create_polymorphic_union(cls, mappers):
return polymorphic_union(OrderedDict(
(mp.polymorphic_identity, mp.local_table)
for mp in mappers
), 'type', 'pjoin')
@classmethod
def __declare_first__(cls):
m = cls.__mapper__
if m.with_polymorphic:
return
mappers = list(m.self_and_descendants)
pjoin = cls._create_polymorphic_union(mappers)
m._set_with_polymorphic(("*", pjoin))
m._set_polymorphic_on(pjoin.c.type)
class AbstractConcreteBase(ConcreteBase):
"""A helper class for 'concrete' declarative mappings.
:class:`.AbstractConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
``__declare_last__()`` function, which is essentially
a hook for the :meth:`.after_configured` event.
:class:`.AbstractConcreteBase` does produce a mapped class
for the base class, however it is not persisted to any table; it
is instead mapped directly to the "polymorphic" selectable directly
and is only used for selecting. Compare to :class:`.ConcreteBase`,
which does create a persisted table for the base class.
Example::
from sqlalchemy.ext.declarative import AbstractConcreteBase
class Employee(AbstractConcreteBase, Base):
pass
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
The abstract base class is handled by declarative in a special way;
at class configuration time, it behaves like a declarative mixin
or an ``__abstract__`` base class. Once classes are configured
and mappings are produced, it then gets mapped itself, but
after all of its descendants. This is a unique system of mapping
not found in any other SQLAlchemy system.
Using this approach, we can specify columns and properties
that will take place on mapped subclasses, in the way that
we normally do as in :ref:`declarative_mixins`::
class Company(Base):
__tablename__ = 'company'
id = Column(Integer, primary_key=True)
class Employee(AbstractConcreteBase, Base):
employee_id = Column(Integer, primary_key=True)
@declared_attr
def company_id(cls):
return Column(ForeignKey('company.id'))
@declared_attr
def company(cls):
return relationship("Company")
class Manager(Employee):
__tablename__ = 'manager'
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
When we make use of our mappings however, both ``Manager`` and
``Employee`` will have an independently usable ``.company`` attribute::
session.query(Employee).filter(Employee.company.has(id=5))
.. versionchanged:: 1.0.0 - The mechanics of :class:`.AbstractConcreteBase`
have been reworked to support relationships established directly
on the abstract base, without any special configurational steps.
"""
__no_table__ = True
@classmethod
def __declare_first__(cls):
cls._sa_decl_prepare_nocascade()
@classmethod
def _sa_decl_prepare_nocascade(cls):
if getattr(cls, '__mapper__', None):
return
to_map = _DeferredMapperConfig.config_for_cls(cls)
# can't rely on 'self_and_descendants' here
# since technically an immediate subclass
# might not be mapped, but a subclass
# may be.
mappers = []
stack = list(cls.__subclasses__())
while stack:
klass = stack.pop()
stack.extend(klass.__subclasses__())
mn = _mapper_or_none(klass)
if mn is not None:
mappers.append(mn)
pjoin = cls._create_polymorphic_union(mappers)
to_map.local_table = pjoin
m_args = to_map.mapper_args_fn or dict
def mapper_args():
args = m_args()
args['polymorphic_on'] = pjoin.c.type
return args
to_map.mapper_args_fn = mapper_args
m = to_map.map()
for scls in cls.__subclasses__():
sm = _mapper_or_none(scls)
if sm and sm.concrete and cls in scls.__bases__:
sm._set_concrete_base(m)
class DeferredReflection(object):
"""A helper class for construction of mappings based on
a deferred reflection step.
Normally, declarative can be used with reflection by
setting a :class:`.Table` object using autoload=True
as the ``__table__`` attribute on a declarative class.
The caveat is that the :class:`.Table` must be fully
reflected, or at the very least have a primary key column,
at the point at which a normal declarative mapping is
constructed, meaning the :class:`.Engine` must be available
at class declaration time.
The :class:`.DeferredReflection` mixin moves the construction
of mappers to be at a later point, after a specific
method is called which first reflects all :class:`.Table`
objects created so far. Classes can define it as such::
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import DeferredReflection
Base = declarative_base()
class MyClass(DeferredReflection, Base):
__tablename__ = 'mytable'
Above, ``MyClass`` is not yet mapped. After a series of
classes have been defined in the above fashion, all tables
can be reflected and mappings created using
:meth:`.prepare`::
engine = create_engine("someengine://...")
DeferredReflection.prepare(engine)
The :class:`.DeferredReflection` mixin can be applied to individual
classes, used as the base for the declarative base itself,
or used in a custom abstract class. Using an abstract base
allows only a subset of classes to be prepared for a
particular prepare step, which is necessary for applications
that use more than one engine. For example, if an application
has two engines, you might use two bases, and prepare each
separately, e.g.::
class ReflectedOne(DeferredReflection, Base):
__abstract__ = True
class ReflectedTwo(DeferredReflection, Base):
__abstract__ = True
class MyClass(ReflectedOne):
__tablename__ = 'mytable'
class MyOtherClass(ReflectedOne):
__tablename__ = 'myothertable'
class YetAnotherClass(ReflectedTwo):
__tablename__ = 'yetanothertable'
# ... etc.
Above, the class hierarchies for ``ReflectedOne`` and
``ReflectedTwo`` can be configured separately::
ReflectedOne.prepare(engine_one)
ReflectedTwo.prepare(engine_two)
.. versionadded:: 0.8
"""
@classmethod
def prepare(cls, engine):
"""Reflect all :class:`.Table` objects for all current
:class:`.DeferredReflection` subclasses"""
to_map = _DeferredMapperConfig.classes_for_base(cls)
for thingy in to_map:
cls._sa_decl_prepare(thingy.local_table, engine)
thingy.map()
mapper = thingy.cls.__mapper__
metadata = mapper.class_.metadata
for rel in mapper._props.values():
if isinstance(rel, properties.RelationshipProperty) and \
rel.secondary is not None:
if isinstance(rel.secondary, Table):
cls._reflect_table(rel.secondary, engine)
elif isinstance(rel.secondary, _class_resolver):
rel.secondary._resolvers += (
cls._sa_deferred_table_resolver(engine, metadata),
)
@classmethod
def _sa_deferred_table_resolver(cls, engine, metadata):
def _resolve(key):
t1 = Table(key, metadata)
cls._reflect_table(t1, engine)
return t1
return _resolve
@classmethod
def _sa_decl_prepare(cls, local_table, engine):
# autoload Table, which is already
# present in the metadata. This
# will fill in db-loaded columns
# into the existing Table object.
if local_table is not None:
cls._reflect_table(local_table, engine)
@classmethod
def _reflect_table(cls, table, engine):
Table(table.name,
table.metadata,
extend_existing=True,
autoload_replace=False,
autoload=True,
autoload_with=engine,
schema=table.schema)
| {
"content_hash": "72586e69269beb4da1d25c10da8c92ef",
"timestamp": "",
"source": "github",
"line_count": 659,
"max_line_length": 79,
"avg_line_length": 33.88619119878604,
"alnum_prop": 0.6194527786485156,
"repo_name": "wfxiang08/sqlalchemy",
"id": "0c0ab45c83437257b5ffcb78b4fdfe2b3758163c",
"size": "22643",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature/wftest",
"path": "lib/sqlalchemy/ext/declarative/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46062"
},
{
"name": "Python",
"bytes": "8048336"
}
],
"symlink_target": ""
} |
"""
functions for implementing parts of the HSMs machinery in software
"""
# Copyright (c) 2012 Yubico AB
# See the file COPYING for licence statement.
import struct
import json
import os
__all__ = [
# constants
# functions
'aesCCM',
'crc16',
# classes
'SoftYHSM'
]
import pyhsm
import pyhsm.exception
from Crypto.Cipher import AES
def _xor_block(a, b):
""" XOR two blocks of equal length. """
return ''.join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)])
class _ctr_counter():
"""
An object implementation of the struct aesCtr.
"""
def __init__(self, key_handle, nonce, flags = None, value = 0):
self.flags = pyhsm.defines.YSM_CCM_CTR_SIZE - 1 if flags is None else flags
self.key_handle = key_handle
self.nonce = nonce
self.value = value
def next(self):
"""
Return the next counter value, packed into a YSM_BLOCK_SIZE byte block.
"""
self.value += 1
return self.pack()
def pack(self):
fmt = b'< B I %is BBB 2s' % (pyhsm.defines.YSM_AEAD_NONCE_SIZE)
val = struct.pack('> H', self.value)
return struct.pack(fmt,
self.flags,
self.key_handle,
self.nonce,
0, 0, 0, # rfu
val
)
class _cbc_mac():
def __init__(self, key, key_handle, nonce, data_len):
"""
Initialize CBC-MAC like the YubiHSM does.
"""
flags = (((pyhsm.defines.YSM_AEAD_MAC_SIZE - 2) / 2) << 3) | (pyhsm.defines.YSM_CCM_CTR_SIZE - 1)
t = _ctr_counter(key_handle, nonce, flags = flags, value = data_len)
t_mac = t.pack()
self.mac_aes = AES.new(key, AES.MODE_ECB)
self.mac = self.mac_aes.encrypt(t_mac)
def update(self, block):
block = block.ljust(pyhsm.defines.YSM_BLOCK_SIZE, chr(0x0))
t1 = _xor_block(self.mac, block)
t2 = self.mac_aes.encrypt(t1)
self.mac = t2
def finalize(self, block):
"""
The final step of CBC-MAC encrypts before xor.
"""
t1 = self.mac_aes.encrypt(block)
t2 = _xor_block(self.mac, t1)
self.mac = t2
def get(self):
return self.mac[: pyhsm.defines.YSM_AEAD_MAC_SIZE]
def _split_data(data, pos):
a = data[:pos]
b = data[pos:]
return (a, b,)
def aesCCM(key, key_handle, nonce, data, decrypt=False):
"""
Function implementing YubiHSM AEAD encrypt/decrypt in software.
"""
if decrypt:
(data, saved_mac) = _split_data(data, len(data) - pyhsm.defines.YSM_AEAD_MAC_SIZE)
nonce = pyhsm.util.input_validate_nonce(nonce, pad = True)
mac = _cbc_mac(key, key_handle, nonce, len(data))
counter = _ctr_counter(key_handle, nonce, value = 0)
ctr_aes = AES.new(key, AES.MODE_CTR, counter = counter.next)
out = []
while data:
(thisblock, data) = _split_data(data, pyhsm.defines.YSM_BLOCK_SIZE)
# encrypt/decrypt and CBC MAC
if decrypt:
aes_out = ctr_aes.decrypt(thisblock)
mac.update(aes_out)
else:
mac.update(thisblock)
aes_out = ctr_aes.encrypt(thisblock)
out.append(aes_out)
# Finalize MAC
counter.value = 0
mac.finalize(counter.pack())
if decrypt:
if mac.get() != saved_mac:
raise pyhsm.exception.YHSM_Error('AEAD integrity check failed')
else:
out.append(mac.get())
return ''.join(out)
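# A minimal round-trip sketch of aesCCM() with made-up key material; the
# key, key handle and nonce below are arbitrary illustration values.
def _example_aesCCM_roundtrip():
    key = 'k' * 16                      # any valid AES key length works
    key_handle = 0x1234
    nonce = '\x00' * pyhsm.defines.YSM_AEAD_NONCE_SIZE
    plaintext = 'sixteen byte str'
    aead = aesCCM(key, key_handle, nonce, plaintext, decrypt=False)
    # the AEAD is the ciphertext followed by a YSM_AEAD_MAC_SIZE byte MAC
    assert len(aead) == len(plaintext) + pyhsm.defines.YSM_AEAD_MAC_SIZE
    assert aesCCM(key, key_handle, nonce, aead, decrypt=True) == plaintext
    return aead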
def crc16(data):
"""
Calculate an ISO13239 CRC checksum of the input buffer.
"""
m_crc = 0xffff
for this in data:
m_crc ^= ord(this)
for _ in range(8):
j = m_crc & 1
m_crc >>= 1
if j:
m_crc ^= 0x8408
return m_crc
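# A minimal sketch of the residue property that validate_aead_otp below
# relies on: appending the complemented CRC, least significant byte first,
# makes the checksum of the whole buffer come out to the fixed value 0xf0b8.
# The payload is arbitrary illustration data.
def _example_crc16_residue():
    payload = 'arbitrary bytes'
    crc = crc16(payload) ^ 0xffff       # the complemented form a YubiKey stores
    buf = payload + chr(crc & 0xff) + chr((crc >> 8) & 0xff)
    assert crc16(buf) == 0xf0b8
    return crc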
class SoftYHSM(object):
def __init__(self, keys, debug=False):
self._buffer = ''
self.debug = debug
if not keys:
raise ValueError('Data contains no key handles!')
for k, v in keys.items():
if len(v) not in AES.key_size:
raise ValueError('Key for key handle %d has an unsupported length (%d bytes)' % (k, len(v)))
self.keys = keys
@classmethod
def from_file(cls, filename, debug=False):
with open(filename, 'r') as f:
return cls.from_json(f.read(), debug)
@classmethod
def from_json(cls, data, debug=False):
data = json.loads(data)
if not isinstance(data, dict):
raise ValueError('Data does not contain object as root element.')
keys = {}
for kh, aes_key_hex in data.items():
keys[int(kh)] = aes_key_hex.decode('hex')
return cls(keys, debug)
def _get_key(self, kh, cmd):
try:
return self.keys[kh]
except KeyError:
raise pyhsm.exception.YHSM_CommandFailed(
pyhsm.defines.cmd2str(cmd),
pyhsm.defines.YSM_KEY_HANDLE_INVALID)
def validate_aead_otp(self, public_id, otp, key_handle, aead):
aes_key = self._get_key(key_handle, pyhsm.defines.YSM_AEAD_YUBIKEY_OTP_DECODE)
cmd = pyhsm.validate_cmd.YHSM_Cmd_AEAD_Validate_OTP(
None, public_id, otp, key_handle, aead)
aead_pt = aesCCM(aes_key, cmd.key_handle, cmd.public_id, aead, True)
yk_key, yk_uid = aead_pt[:16], aead_pt[16:]
ecb_aes = AES.new(yk_key, AES.MODE_ECB)
otp_plain = ecb_aes.decrypt(otp)
uid = otp_plain[:6]
use_ctr, ts_low, ts_high, session_ctr, rnd, crc = struct.unpack(
'<HHBBHH', otp_plain[6:])
if uid == yk_uid and crc16(otp_plain) == 0xf0b8:
return pyhsm.validate_cmd.YHSM_ValidationResult(
cmd.public_id, use_ctr, session_ctr, ts_high, ts_low
)
raise pyhsm.exception.YHSM_CommandFailed(
pyhsm.defines.cmd2str(cmd.command), pyhsm.defines.YSM_OTP_INVALID)
def load_secret(self, secret):
self._buffer = secret.pack()
def load_random(self, num_bytes, offset = 0):
self._buffer = self._buffer[:offset] + os.urandom(num_bytes)
def generate_aead(self, nonce, key_handle):
aes_key = self._get_key(key_handle, pyhsm.defines.YSM_BUFFER_AEAD_GENERATE)
ct = pyhsm.soft_hsm.aesCCM(aes_key, key_handle, nonce, self._buffer,
False)
return pyhsm.aead_cmd.YHSM_GeneratedAEAD(nonce, key_handle, ct)
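# A minimal usage sketch (the key handle and AES key below are made up):
#
#     hsm = SoftYHSM.from_json('{"1": "000102030405060708090a0b0c0d0e0f"}')
#     hsm.load_random(22)                 # fill the buffer, e.g. key + uid bytes
#     aead = hsm.generate_aead('\x00' * 6, 1)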
| {
"content_hash": "d5b13345448438af40e0c12ac6f60505",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 105,
"avg_line_length": 30.406542056074766,
"alnum_prop": 0.5633932687874597,
"repo_name": "Yubico/python-pyhsm-dpkg",
"id": "4d6f1584f29633aa654a579e67b9ce244b4ded0b",
"size": "6507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyhsm/soft_hsm.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "292912"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.contrib.auth.models import User
from django.db import models
from kitsune.sumo.models import ModelBase
class InboxMessage(ModelBase):
"""A message in a user's private message inbox."""
to = models.ForeignKey(User, related_name='inbox')
sender = models.ForeignKey(User, null=True, blank=True)
message = models.TextField()
created = models.DateTimeField(default=datetime.now, db_index=True)
read = models.BooleanField(default=False, db_index=True)
replied = models.BooleanField(default=False)
unread = property(lambda self: not self.read)
def __unicode__(self):
s = self.message[0:30]
return u'to:%s from:%s %s' % (self.to, self.sender, s)
@property
def content_parsed(self):
from kitsune.sumo.helpers import wiki_to_html
return wiki_to_html(self.message)
class Meta:
db_table = 'messages_inboxmessage'
class OutboxMessage(ModelBase):
sender = models.ForeignKey(User, related_name='outbox')
to = models.ManyToManyField(User)
message = models.TextField()
created = models.DateTimeField(default=datetime.now, db_index=True)
def __unicode__(self):
to = u', '.join([u.username for u in self.to.all()])
return u'from:%s to:%s %s' % (self.sender, to, self.message[0:30])
@property
def content_parsed(self):
from kitsune.sumo.helpers import wiki_to_html
return wiki_to_html(self.message)
class Meta:
db_table = 'messages_outboxmessage'
| {
"content_hash": "265e42615560a17b3c3d6be9df69eac0",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 74,
"avg_line_length": 31.46938775510204,
"alnum_prop": 0.6750972762645915,
"repo_name": "rlr/kitsune",
"id": "4539915d72eae8d453f56e677eaa0dfa07abcd4f",
"size": "1542",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "kitsune/messages/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "280414"
},
{
"name": "HTML",
"bytes": "623485"
},
{
"name": "JavaScript",
"bytes": "723970"
},
{
"name": "Python",
"bytes": "2713073"
},
{
"name": "Shell",
"bytes": "10281"
},
{
"name": "Smarty",
"bytes": "2062"
}
],
"symlink_target": ""
} |
import logging
import unittest
from unittest import mock
from unittest.mock import call
import pytest
from docker.constants import DEFAULT_TIMEOUT_SECONDS
from docker.errors import APIError
from airflow.exceptions import AirflowException
try:
from docker import APIClient
from docker.types import DeviceRequest, Mount
from airflow.providers.docker.hooks.docker import DockerHook
from airflow.providers.docker.operators.docker import DockerOperator
except ImportError:
pass
TEMPDIR_MOCK_RETURN_VALUE = '/mkdtemp'
class TestDockerOperator(unittest.TestCase):
def setUp(self):
self.tempdir_patcher = mock.patch('airflow.providers.docker.operators.docker.TemporaryDirectory')
self.tempdir_mock = self.tempdir_patcher.start()
self.tempdir_mock.return_value.__enter__.return_value = TEMPDIR_MOCK_RETURN_VALUE
self.client_mock = mock.Mock(spec=APIClient)
self.client_mock.create_container.return_value = {'Id': 'some_id'}
self.client_mock.images.return_value = []
self.client_mock.pull.return_value = {"status": "pull log"}
self.client_mock.wait.return_value = {"StatusCode": 0}
self.client_mock.create_host_config.return_value = mock.Mock()
self.log_messages = ['container log 😁 ', b'byte string container log']
self.client_mock.attach.return_value = self.log_messages
# If logs() is called with tail then only return the last value, otherwise return the whole log.
self.client_mock.logs.side_effect = (
lambda **kwargs: iter(self.log_messages[-kwargs['tail'] :])
if 'tail' in kwargs
else iter(self.log_messages)
)
self.client_class_patcher = mock.patch(
'airflow.providers.docker.operators.docker.APIClient',
return_value=self.client_mock,
)
self.client_class_mock = self.client_class_patcher.start()
def tearDown(self) -> None:
self.tempdir_patcher.stop()
self.client_class_patcher.stop()
def test_execute(self):
operator = DockerOperator(
api_version='1.19',
command='env',
environment={'UNIT': 'TEST'},
private_environment={'PRIVATE': 'MESSAGE'},
image='ubuntu:latest',
network_mode='bridge',
owner='unittest',
task_id='unittest',
mounts=[Mount(source='/host/path', target='/container/path', type='bind')],
entrypoint='["sh", "-c"]',
working_dir='/container/path',
shm_size=1000,
host_tmp_dir='/host/airflow',
container_name='test_container',
tty=True,
device_requests=[DeviceRequest(count=-1, capabilities=[['gpu']])],
)
operator.execute(None)
self.client_class_mock.assert_called_once_with(
base_url='unix://var/run/docker.sock', tls=None, version='1.19', timeout=DEFAULT_TIMEOUT_SECONDS
)
self.client_mock.create_container.assert_called_once_with(
command='env',
name='test_container',
environment={'AIRFLOW_TMP_DIR': '/tmp/airflow', 'UNIT': 'TEST', 'PRIVATE': 'MESSAGE'},
host_config=self.client_mock.create_host_config.return_value,
image='ubuntu:latest',
user=None,
entrypoint=['sh', '-c'],
working_dir='/container/path',
tty=True,
)
self.client_mock.create_host_config.assert_called_once_with(
mounts=[
Mount(source='/host/path', target='/container/path', type='bind'),
Mount(source='/mkdtemp', target='/tmp/airflow', type='bind'),
],
network_mode='bridge',
shm_size=1000,
cpu_shares=1024,
mem_limit=None,
auto_remove=False,
dns=None,
dns_search=None,
cap_add=None,
extra_hosts=None,
privileged=False,
device_requests=[DeviceRequest(count=-1, capabilities=[['gpu']])],
)
self.tempdir_mock.assert_called_once_with(dir='/host/airflow', prefix='airflowtmp')
self.client_mock.images.assert_called_once_with(name='ubuntu:latest')
self.client_mock.attach.assert_called_once_with(
container='some_id', stdout=True, stderr=True, stream=True
)
self.client_mock.pull.assert_called_once_with('ubuntu:latest', stream=True, decode=True)
self.client_mock.wait.assert_called_once_with('some_id')
assert (
operator.cli.pull('ubuntu:latest', stream=True, decode=True) == self.client_mock.pull.return_value
)
def test_execute_no_temp_dir(self):
operator = DockerOperator(
api_version='1.19',
command='env',
environment={'UNIT': 'TEST'},
private_environment={'PRIVATE': 'MESSAGE'},
image='ubuntu:latest',
network_mode='bridge',
owner='unittest',
task_id='unittest',
mounts=[Mount(source='/host/path', target='/container/path', type='bind')],
mount_tmp_dir=False,
entrypoint='["sh", "-c"]',
working_dir='/container/path',
shm_size=1000,
host_tmp_dir='/host/airflow',
container_name='test_container',
tty=True,
)
operator.execute(None)
self.client_class_mock.assert_called_once_with(
base_url='unix://var/run/docker.sock', tls=None, version='1.19', timeout=DEFAULT_TIMEOUT_SECONDS
)
self.client_mock.create_container.assert_called_once_with(
command='env',
name='test_container',
environment={'UNIT': 'TEST', 'PRIVATE': 'MESSAGE'},
host_config=self.client_mock.create_host_config.return_value,
image='ubuntu:latest',
user=None,
entrypoint=['sh', '-c'],
working_dir='/container/path',
tty=True,
)
self.client_mock.create_host_config.assert_called_once_with(
mounts=[
Mount(source='/host/path', target='/container/path', type='bind'),
],
network_mode='bridge',
shm_size=1000,
cpu_shares=1024,
mem_limit=None,
auto_remove=False,
dns=None,
dns_search=None,
cap_add=None,
extra_hosts=None,
privileged=False,
device_requests=None,
)
self.tempdir_mock.assert_not_called()
self.client_mock.images.assert_called_once_with(name='ubuntu:latest')
self.client_mock.attach.assert_called_once_with(
container='some_id', stdout=True, stderr=True, stream=True
)
self.client_mock.pull.assert_called_once_with('ubuntu:latest', stream=True, decode=True)
self.client_mock.wait.assert_called_once_with('some_id')
assert (
operator.cli.pull('ubuntu:latest', stream=True, decode=True) == self.client_mock.pull.return_value
)
def test_execute_fallback_temp_dir(self):
self.client_mock.create_container.side_effect = [
APIError(message="wrong path: " + TEMPDIR_MOCK_RETURN_VALUE),
{'Id': 'some_id'},
]
operator = DockerOperator(
api_version='1.19',
command='env',
environment={'UNIT': 'TEST'},
private_environment={'PRIVATE': 'MESSAGE'},
image='ubuntu:latest',
network_mode='bridge',
owner='unittest',
task_id='unittest',
mounts=[Mount(source='/host/path', target='/container/path', type='bind')],
mount_tmp_dir=True,
entrypoint='["sh", "-c"]',
working_dir='/container/path',
shm_size=1000,
host_tmp_dir='/host/airflow',
container_name='test_container',
tty=True,
)
with self.assertLogs(operator.log, level=logging.WARNING) as captured:
operator.execute(None)
assert (
"WARNING:airflow.task.operators:Using remote engine or docker-in-docker "
"and mounting temporary volume from host is not supported" in captured.output[0]
)
self.client_class_mock.assert_called_once_with(
base_url='unix://var/run/docker.sock', tls=None, version='1.19', timeout=DEFAULT_TIMEOUT_SECONDS
)
self.client_mock.create_container.assert_has_calls(
[
call(
command='env',
name='test_container',
environment={'AIRFLOW_TMP_DIR': '/tmp/airflow', 'UNIT': 'TEST', 'PRIVATE': 'MESSAGE'},
host_config=self.client_mock.create_host_config.return_value,
image='ubuntu:latest',
user=None,
entrypoint=['sh', '-c'],
working_dir='/container/path',
tty=True,
),
call(
command='env',
name='test_container',
environment={'UNIT': 'TEST', 'PRIVATE': 'MESSAGE'},
host_config=self.client_mock.create_host_config.return_value,
image='ubuntu:latest',
user=None,
entrypoint=['sh', '-c'],
working_dir='/container/path',
tty=True,
),
]
)
self.client_mock.create_host_config.assert_has_calls(
[
call(
mounts=[
Mount(source='/host/path', target='/container/path', type='bind'),
Mount(source='/mkdtemp', target='/tmp/airflow', type='bind'),
],
network_mode='bridge',
shm_size=1000,
cpu_shares=1024,
mem_limit=None,
auto_remove=False,
dns=None,
dns_search=None,
cap_add=None,
extra_hosts=None,
privileged=False,
device_requests=None,
),
call(
mounts=[
Mount(source='/host/path', target='/container/path', type='bind'),
],
network_mode='bridge',
shm_size=1000,
cpu_shares=1024,
mem_limit=None,
auto_remove=False,
dns=None,
dns_search=None,
cap_add=None,
extra_hosts=None,
privileged=False,
device_requests=None,
),
]
)
self.tempdir_mock.assert_called_once_with(dir='/host/airflow', prefix='airflowtmp')
self.client_mock.images.assert_called_once_with(name='ubuntu:latest')
self.client_mock.attach.assert_called_once_with(
container='some_id', stdout=True, stderr=True, stream=True
)
self.client_mock.pull.assert_called_once_with('ubuntu:latest', stream=True, decode=True)
self.client_mock.wait.assert_called_once_with('some_id')
assert (
operator.cli.pull('ubuntu:latest', stream=True, decode=True) == self.client_mock.pull.return_value
)
def test_private_environment_is_private(self):
operator = DockerOperator(
private_environment={'PRIVATE': 'MESSAGE'}, image='ubuntu:latest', task_id='unittest'
)
assert operator._private_environment == {
'PRIVATE': 'MESSAGE'
}, "To keep this private, it must be an underscored attribute."
@mock.patch('airflow.providers.docker.operators.docker.tls.TLSConfig')
def test_execute_tls(self, tls_class_mock):
tls_mock = mock.Mock()
tls_class_mock.return_value = tls_mock
operator = DockerOperator(
docker_url='tcp://127.0.0.1:2376',
image='ubuntu',
owner='unittest',
task_id='unittest',
tls_client_cert='cert.pem',
tls_ca_cert='ca.pem',
tls_client_key='key.pem',
)
operator.execute(None)
tls_class_mock.assert_called_once_with(
assert_hostname=None,
ca_cert='ca.pem',
client_cert=('cert.pem', 'key.pem'),
ssl_version=None,
verify=True,
)
self.client_class_mock.assert_called_once_with(
base_url='https://127.0.0.1:2376', tls=tls_mock, version=None, timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_execute_unicode_logs(self):
self.client_mock.attach.return_value = ['unicode container log 😁']
originalRaiseExceptions = logging.raiseExceptions
logging.raiseExceptions = True
operator = DockerOperator(image='ubuntu', owner='unittest', task_id='unittest')
with mock.patch('traceback.print_exception') as print_exception_mock:
operator.execute(None)
logging.raiseExceptions = originalRaiseExceptions
print_exception_mock.assert_not_called()
def test_execute_container_fails(self):
failed_msg = {'StatusCode': 1}
log_line = ['unicode container log 😁 ', b'byte string container log']
expected_message = 'Docker container failed: {failed_msg} lines {expected_log_output}'
self.client_mock.attach.return_value = log_line
self.client_mock.wait.return_value = failed_msg
operator = DockerOperator(image='ubuntu', owner='unittest', task_id='unittest')
with pytest.raises(AirflowException) as raised_exception:
operator.execute(None)
assert str(raised_exception.value) == expected_message.format(
failed_msg=failed_msg,
expected_log_output=f'{log_line[0].strip()}\n{log_line[1].decode("utf-8")}',
)
def test_auto_remove_container_fails(self):
self.client_mock.wait.return_value = {"StatusCode": 1}
operator = DockerOperator(image='ubuntu', owner='unittest', task_id='unittest', auto_remove=True)
operator.container = {'Id': 'some_id'}
with pytest.raises(AirflowException):
operator.execute(None)
self.client_mock.remove_container.assert_called_once_with('some_id')
@staticmethod
def test_on_kill():
client_mock = mock.Mock(spec=APIClient)
operator = DockerOperator(image='ubuntu', owner='unittest', task_id='unittest')
operator.cli = client_mock
operator.container = {'Id': 'some_id'}
operator.on_kill()
client_mock.stop.assert_called_once_with('some_id')
def test_execute_no_docker_conn_id_no_hook(self):
# Create the DockerOperator
operator = DockerOperator(image='publicregistry/someimage', owner='unittest', task_id='unittest')
# Mock out the DockerHook
hook_mock = mock.Mock(name='DockerHook mock', spec=DockerHook)
hook_mock.get_conn.return_value = self.client_mock
operator.get_hook = mock.Mock(
name='DockerOperator.get_hook mock', spec=DockerOperator.get_hook, return_value=hook_mock
)
operator.execute(None)
assert operator.get_hook.call_count == 0, 'Hook called though no docker_conn_id configured'
@mock.patch('airflow.providers.docker.operators.docker.DockerHook')
def test_execute_with_docker_conn_id_use_hook(self, hook_class_mock):
# Create the DockerOperator
operator = DockerOperator(
image='publicregistry/someimage',
owner='unittest',
task_id='unittest',
docker_conn_id='some_conn_id',
)
# Mock out the DockerHook
hook_mock = mock.Mock(name='DockerHook mock', spec=DockerHook)
hook_mock.get_conn.return_value = self.client_mock
hook_class_mock.return_value = hook_mock
operator.execute(None)
assert self.client_class_mock.call_count == 0, 'Client was called on the operator instead of the hook'
assert hook_class_mock.call_count == 1, 'Hook was not called although docker_conn_id configured'
assert self.client_mock.pull.call_count == 1, 'Image was not pulled using operator client'
def test_execute_xcom_behavior(self):
self.client_mock.pull.return_value = [b'{"status":"pull log"}']
kwargs = {
'api_version': '1.19',
'command': 'env',
'environment': {'UNIT': 'TEST'},
'private_environment': {'PRIVATE': 'MESSAGE'},
'image': 'ubuntu:latest',
'network_mode': 'bridge',
'owner': 'unittest',
'task_id': 'unittest',
'mounts': [Mount(source='/host/path', target='/container/path', type='bind')],
'working_dir': '/container/path',
'shm_size': 1000,
'host_tmp_dir': '/host/airflow',
'container_name': 'test_container',
'tty': True,
}
xcom_push_operator = DockerOperator(**kwargs, do_xcom_push=True, xcom_all=False)
xcom_all_operator = DockerOperator(**kwargs, do_xcom_push=True, xcom_all=True)
no_xcom_push_operator = DockerOperator(**kwargs, do_xcom_push=False)
xcom_push_result = xcom_push_operator.execute(None)
xcom_all_result = xcom_all_operator.execute(None)
no_xcom_push_result = no_xcom_push_operator.execute(None)
assert xcom_push_result == 'byte string container log'
assert xcom_all_result == ['container log 😁', 'byte string container log']
assert no_xcom_push_result is None
def test_execute_xcom_behavior_bytes(self):
self.log_messages = [b'container log 1 ', b'container log 2']
self.client_mock.pull.return_value = [b'{"status":"pull log"}']
self.client_mock.attach.return_value = iter([b'container log 1 ', b'container log 2'])
# Make sure the logs side effect is updated after the change
self.client_mock.attach.side_effect = (
lambda **kwargs: iter(self.log_messages[-kwargs['tail'] :])
if 'tail' in kwargs
else iter(self.log_messages)
)
kwargs = {
'api_version': '1.19',
'command': 'env',
'environment': {'UNIT': 'TEST'},
'private_environment': {'PRIVATE': 'MESSAGE'},
'image': 'ubuntu:latest',
'network_mode': 'bridge',
'owner': 'unittest',
'task_id': 'unittest',
'mounts': [Mount(source='/host/path', target='/container/path', type='bind')],
'working_dir': '/container/path',
'shm_size': 1000,
'host_tmp_dir': '/host/airflow',
'container_name': 'test_container',
'tty': True,
}
xcom_push_operator = DockerOperator(**kwargs, do_xcom_push=True, xcom_all=False)
xcom_all_operator = DockerOperator(**kwargs, do_xcom_push=True, xcom_all=True)
no_xcom_push_operator = DockerOperator(**kwargs, do_xcom_push=False)
xcom_push_result = xcom_push_operator.execute(None)
xcom_all_result = xcom_all_operator.execute(None)
no_xcom_push_result = no_xcom_push_operator.execute(None)
# Those values here are different than log above as they are from setup
assert xcom_push_result == 'container log 2'
assert xcom_all_result == ['container log 1', 'container log 2']
assert no_xcom_push_result is None
def test_execute_xcom_behavior_no_result(self):
self.log_messages = []
self.client_mock.pull.return_value = [b'{"status":"pull log"}']
self.client_mock.attach.return_value = iter([])
kwargs = {
'api_version': '1.19',
'command': 'env',
'environment': {'UNIT': 'TEST'},
'private_environment': {'PRIVATE': 'MESSAGE'},
'image': 'ubuntu:latest',
'network_mode': 'bridge',
'owner': 'unittest',
'task_id': 'unittest',
'mounts': [Mount(source='/host/path', target='/container/path', type='bind')],
'working_dir': '/container/path',
'shm_size': 1000,
'host_tmp_dir': '/host/airflow',
'container_name': 'test_container',
'tty': True,
}
xcom_push_operator = DockerOperator(**kwargs, do_xcom_push=True, xcom_all=False)
xcom_all_operator = DockerOperator(**kwargs, do_xcom_push=True, xcom_all=True)
no_xcom_push_operator = DockerOperator(**kwargs, do_xcom_push=False)
xcom_push_result = xcom_push_operator.execute(None)
xcom_all_result = xcom_all_operator.execute(None)
no_xcom_push_result = no_xcom_push_operator.execute(None)
assert xcom_push_result is None
assert xcom_all_result is None
assert no_xcom_push_result is None
def test_extra_hosts(self):
hosts_obj = mock.Mock()
operator = DockerOperator(task_id='test', image='test', extra_hosts=hosts_obj)
operator.execute(None)
self.client_mock.create_container.assert_called_once()
assert 'host_config' in self.client_mock.create_container.call_args[1]
assert 'extra_hosts' in self.client_mock.create_host_config.call_args[1]
assert hosts_obj is self.client_mock.create_host_config.call_args[1]['extra_hosts']
def test_privileged(self):
privileged = mock.Mock()
operator = DockerOperator(task_id='test', image='test', privileged=privileged)
operator.execute(None)
self.client_mock.create_container.assert_called_once()
assert 'host_config' in self.client_mock.create_container.call_args[1]
assert 'privileged' in self.client_mock.create_host_config.call_args[1]
assert privileged is self.client_mock.create_host_config.call_args[1]['privileged']
| {
"content_hash": "fcd0a2b035725f37784bdeecf87cf0ed",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 110,
"avg_line_length": 41.58239700374532,
"alnum_prop": 0.5744652105381671,
"repo_name": "danielvdende/incubator-airflow",
"id": "d03839d727e47d4bccf6f715075d230b03de761f",
"size": "23004",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/providers/docker/operators/test_docker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
"""
Make pie charts of varying size - see
http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie for the docstring.
This example shows a basic pie charts with labels optional features,
like autolabeling the percentage, offsetting a slice with "explode"
and adding a shadow, in different sizes.
"""
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
# Some data
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
fracs = [15, 30, 45, 10]
explode=(0, 0.05, 0, 0)
# Make square figures and axes
the_grid = GridSpec(2, 2)
plt.subplot(the_grid[0, 0], aspect=1)
plt.pie(fracs, labels=labels, autopct='%1.1f%%', shadow=True)
plt.subplot(the_grid[0, 1], aspect=1)
plt.pie(fracs, explode=explode, labels=labels, autopct='%.0f%%', shadow=True)
plt.subplot(the_grid[1, 0], aspect=1)
patches, texts, autotexts = plt.pie(fracs, labels=labels,
autopct='%.0f%%',
shadow=True, radius=0.5)
# Make the labels on the small plot easier to read.
for t in texts:
t.set_size('smaller')
for t in autotexts:
t.set_size('x-small')
autotexts[0].set_color('y')
plt.subplot(the_grid[1, 1], aspect=1)
patches, texts, autotexts = plt.pie(fracs, explode=explode,
labels=labels, autopct='%.0f%%',
shadow=False, radius=0.5)
# Turn off shadow for tiny plot
# with exploded slice.
for t in texts:
t.set_size('smaller')
for t in autotexts:
t.set_size('x-small')
autotexts[0].set_color('y')
plt.show()
| {
"content_hash": "de6027ceac98b5c68128a6479cd83589",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 82,
"avg_line_length": 27.93103448275862,
"alnum_prop": 0.6160493827160494,
"repo_name": "RobertABT/heightmap",
"id": "c3cd16fcb37edfef36e3af900352331fa4d2205e",
"size": "1620",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "build/matplotlib/examples/pylab_examples/pie_demo2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25165856"
},
{
"name": "C++",
"bytes": "5251754"
},
{
"name": "CSS",
"bytes": "17123"
},
{
"name": "FORTRAN",
"bytes": "6353469"
},
{
"name": "JavaScript",
"bytes": "816504"
},
{
"name": "M",
"bytes": "66"
},
{
"name": "Matlab",
"bytes": "4280"
},
{
"name": "Objective-C",
"bytes": "284551"
},
{
"name": "Python",
"bytes": "13223936"
},
{
"name": "TeX",
"bytes": "37261"
}
],
"symlink_target": ""
} |
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
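        # Bootstrap sampling is implemented by re-weighting: each sample's
        # weight is multiplied by the number of times it was drawn (zero for
        # out-of-bag samples), so the tree is still fit on the full X.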
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
                Return a node indicator matrix where non-zero elements
                indicate that the samples go through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
                give the indicator value for the i-th estimator.
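        Examples
        --------
        Illustrative sketch only; ``est`` is assumed to be an already fitted
        forest and ``X`` its input matrix::
            indicator, n_nodes_ptr = est.decision_path(X)
            # columns n_nodes_ptr[i]:n_nodes_ptr[i+1] belong to the i-th tree
            first_tree = indicator[:, n_nodes_ptr[0]:n_nodes_ptr[1]]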
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity;
            # [:, np.newaxis] would not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
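    Examples
    --------
    A minimal usage sketch (illustrative only; ``make_classification`` from
    ``sklearn.datasets`` is assumed to be available):
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> X, y = make_classification(n_samples=100, n_features=4, random_state=0)
    >>> clf = RandomForestClassifier(n_estimators=10, random_state=0).fit(X, y)
    >>> clf.predict([[0, 0, 0, 0]]).shape
    (1,)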
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
    decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
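    Examples
    --------
    A minimal usage sketch (illustrative only; ``make_regression`` from
    ``sklearn.datasets`` is assumed to be available):
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.ensemble import RandomForestRegressor
    >>> X, y = make_regression(n_samples=100, n_features=4, random_state=0)
    >>> regr = RandomForestRegressor(n_estimators=10, random_state=0).fit(X, y)
    >>> regr.predict([[0, 0, 0, 0]]).shape
    (1,)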
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
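    Examples
    --------
    A minimal usage sketch (illustrative only; ``make_classification`` from
    ``sklearn.datasets`` is assumed to be available):
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.ensemble import ExtraTreesClassifier
    >>> X, y = make_classification(n_samples=100, n_features=4, random_state=0)
    >>> clf = ExtraTreesClassifier(n_estimators=10, random_state=0).fit(X, y)
    >>> clf.predict([[0, 0, 0, 0]]).shape
    (1,)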
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
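    Examples
    --------
    A minimal usage sketch (illustrative only; ``make_regression`` from
    ``sklearn.datasets`` is assumed to be available):
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.ensemble import ExtraTreesRegressor
    >>> X, y = make_regression(n_samples=100, n_features=4, random_state=0)
    >>> regr = ExtraTreesRegressor(n_estimators=10, random_state=0).fit(X, y)
    >>> regr.predict([[0, 0, 0, 0]]).shape
    (1,)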
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
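    Examples
    --------
    A minimal usage sketch (illustrative only):
    >>> from sklearn.ensemble import RandomTreesEmbedding
    >>> X = [[0, 0], [1, 1], [0, 1], [1, 0]]
    >>> hasher = RandomTreesEmbedding(n_estimators=5, random_state=0)
    >>> X_transformed = hasher.fit_transform(X)
    >>> X_transformed.shape[0]
    4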
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
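        # The supervised target is not meaningful for the embedding: a random
        # regression target is generated purely so the totally random trees
        # can be fit.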
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| {
"content_hash": "8f0a3cf0c5ef840441811f2ef89be649",
"timestamp": "",
"source": "github",
"line_count": 1708,
"max_line_length": 105,
"avg_line_length": 39.30152224824356,
"alnum_prop": 0.6072817197252969,
"repo_name": "jaidevd/scikit-learn",
"id": "1c160be7870bc6b1d656c5db6185a8298589324a",
"size": "67127",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "sklearn/ensemble/forest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "416843"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1630"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6837661"
},
{
"name": "Shell",
"bytes": "14318"
}
],
"symlink_target": ""
} |
from django.core.management import execute_manager
import sys, os
filedir = os.path.dirname(__file__)
submodules_list = os.listdir(os.path.join(filedir, 'submodules'))
for d in submodules_list:
if d == "__init__.py" or d == '.' or d == '..':
continue
sys.path.insert(1, os.path.join(filedir, 'submodules', d))
sys.path.append(os.path.join(filedir,'submodules'))
if __name__ == "__main__":
# proxy for whether we're running gunicorn with -k gevent
if "gevent" in sys.argv:
from restkit.session import set_session; set_session("gevent")
from gevent.monkey import patch_all; patch_all()
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "346985d9bbc673ebc9d16a080b213892",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 36.27272727272727,
"alnum_prop": 0.6791979949874687,
"repo_name": "gmimano/commcaretest",
"id": "6fc280d6af44a218adf67d7a3467995cc53a5575",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "282577"
},
{
"name": "JavaScript",
"bytes": "2731012"
},
{
"name": "Python",
"bytes": "4738450"
},
{
"name": "Shell",
"bytes": "22454"
}
],
"symlink_target": ""
} |
import Gaffer
import GafferScene
Gaffer.Metadata.registerNode(
GafferScene.CoordinateSystem,
"description",
"""
Produces scenes containing a coordinate system. Coordinate systems
have two main uses :
- To visualise the transform at a particular location. In this
respect they're similar to locators or nulls in other packages.
- To define a named coordinate system to be used in shaders at
render time. This is useful for defining projections or procedural
solid textures. The full path to the location of the coordinate
system should be used to refer to it within shaders.
""",
)
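# Illustrative sketch only (kept as comments): creating the node in a script
# and positioning it. The "transform" plug layout is assumed from Gaffer's
# usual ObjectSource conventions and is not confirmed by this module.
#
# import imath
# script = Gaffer.ScriptNode()
# script["coordSys"] = GafferScene.CoordinateSystem()
# script["coordSys"]["transform"]["translate"].setValue( imath.V3f( 1, 0, 0 ) )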
| {
"content_hash": "cd7febbbdee80ed1efd42ea442affa4f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 69,
"avg_line_length": 28.857142857142858,
"alnum_prop": 0.7706270627062707,
"repo_name": "andrewkaufman/gaffer",
"id": "e371ab3ed947c846de4d12b40f51db379d27879e",
"size": "2409",
"binary": false,
"copies": "13",
"ref": "refs/heads/main",
"path": "python/GafferSceneUI/CoordinateSystemUI.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5790"
},
{
"name": "C",
"bytes": "61993"
},
{
"name": "C++",
"bytes": "9572701"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "10279312"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14580"
}
],
"symlink_target": ""
} |
def pytest_addoption(parser):
selenium_class_names = ("Android", "Chrome", "Firefox", "Ie", "Opera", "PhantomJS", "Remote", "Safari")
parser.addoption("--webdriver", action="store", choices=selenium_class_names,
default="PhantomJS",
help="Selenium WebDriver interface to use for running the test. Default: PhantomJS")
parser.addoption("--webdriver-options", action="store", default="{}",
help="Python dictionary of options to pass to the Selenium WebDriver class. Default: {}")
| {
"content_hash": "421bf4f5d9ba7e513d15433431c92d4b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 110,
"avg_line_length": 79,
"alnum_prop": 0.6365280289330922,
"repo_name": "edisongustavo/asv",
"id": "db13c2ab44208e864cf96381587e30b2db40a44c",
"size": "554",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "CSS",
"bytes": "2738"
},
{
"name": "HTML",
"bytes": "8018"
},
{
"name": "JavaScript",
"bytes": "107683"
},
{
"name": "PowerShell",
"bytes": "2352"
},
{
"name": "Python",
"bytes": "378142"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('argus', '0002_auto_20170609_2316'),
]
operations = [
migrations.AddField(
model_name='argusadsl',
name='fio',
field=models.CharField(default='N/A', max_length=256),
),
migrations.AlterField(
model_name='argusadsl',
name='address',
field=models.CharField(default='N/A', max_length=256),
),
migrations.AlterField(
model_name='argusadsl',
name='room',
field=models.CharField(default='N/A', max_length=256),
),
migrations.AlterField(
model_name='argusadsl',
name='xdsl_slot',
field=models.CharField(default='N/A', max_length=256),
),
]
| {
"content_hash": "fc8629bb22a755e49600069f34be40eb",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 66,
"avg_line_length": 27.515151515151516,
"alnum_prop": 0.5473568281938326,
"repo_name": "dehu4ka/lna",
"id": "7c29b59ea1f161ca705c2e96ed524a448517f3b4",
"size": "981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "argus/migrations/0003_auto_20170609_2342.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7385"
},
{
"name": "HTML",
"bytes": "75367"
},
{
"name": "JavaScript",
"bytes": "106914"
},
{
"name": "Python",
"bytes": "391076"
},
{
"name": "Shell",
"bytes": "4196"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
import webob.dec
import webob.exc
from nova.api.openstack import wsgi
from nova import context
from nova import wsgi as base_wsgi
CONF = cfg.CONF
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
class NoAuthMiddlewareBase(base_wsgi.Middleware):
"""Return a fake token if one isn't specified."""
def base_call(self, req, project_id_in_path, always_admin=True):
if 'X-Auth-Token' not in req.headers:
user_id = req.headers.get('X-Auth-User', 'admin')
project_id = req.headers.get('X-Auth-Project-Id', 'admin')
if project_id_in_path:
os_url = '/'.join([req.url.rstrip('/'), project_id])
else:
os_url = req.url.rstrip('/')
res = webob.Response()
# NOTE(vish): This is expecting and returning Auth(1.1), whereas
# keystone uses 2.0 auth. We should probably allow
# 2.0 auth here as well.
res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
res.headers['X-Server-Management-Url'] = os_url
res.content_type = 'text/plain'
res.status = '204'
return res
token = req.headers['X-Auth-Token']
user_id, _sep, project_id = token.partition(':')
project_id = project_id or user_id
remote_address = getattr(req, 'remote_address', '127.0.0.1')
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
is_admin = always_admin or (user_id == 'admin')
ctx = context.RequestContext(user_id,
project_id,
is_admin=is_admin,
remote_address=remote_address)
req.environ['nova.context'] = ctx
return self.application
class NoAuthMiddleware(NoAuthMiddlewareBase):
"""Return a fake token if one isn't specified.
noauth2 provides admin privs if 'admin' is provided as the user id.
"""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
return self.base_call(req, True, always_admin=False)
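# Illustrative sketch added by the editor (not part of the original module): how the
# middleware mints a fake token. The header values below are arbitrary examples.
#
#   1) A request without X-Auth-Token but with
#          X-Auth-User: admin
#          X-Auth-Project-Id: demo
#      receives a 204 response whose X-Auth-Token header is "admin:demo" and whose
#      X-Server-Management-Url points back at the service endpoint.
#   2) A follow-up request sending "X-Auth-Token: admin:demo" is given a nova
#      RequestContext for user 'admin' / project 'demo'; with NoAuthMiddleware
#      (always_admin=False) admin rights are granted only because the user id is
#      literally 'admin'.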
class NoAuthMiddlewareV2_18(NoAuthMiddlewareBase):
"""Return a fake token if one isn't specified.
This provides a version of the middleware which does not add
project_id into server management urls.
"""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
return self.base_call(req, False, always_admin=False)
| {
"content_hash": "182872a49351e73f1ca074915566b709",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 36.394366197183096,
"alnum_prop": 0.5948142414860681,
"repo_name": "cernops/nova",
"id": "bbaf74d20de001ed7b21c9c105ce005b73b25eab",
"size": "3247",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "nova/api/openstack/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "983"
},
{
"name": "JavaScript",
"bytes": "2639"
},
{
"name": "Python",
"bytes": "17413087"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "295563"
}
],
"symlink_target": ""
} |
from nova import db
from nova.objects import base
from nova.objects import fields
@base.NovaObjectRegistry.register
class TaskLog(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.IntegerField(read_only=True),
'task_name': fields.StringField(),
'state': fields.StringField(read_only=True),
'host': fields.StringField(),
'period_beginning': fields.DateTimeField(),
'period_ending': fields.DateTimeField(),
'message': fields.StringField(),
'task_items': fields.IntegerField(),
'errors': fields.IntegerField(),
}
@staticmethod
def _from_db_object(context, task_log, db_task_log):
for field in task_log.fields:
setattr(task_log, field, db_task_log[field])
task_log._context = context
task_log.obj_reset_changes()
return task_log
@base.serialize_args
@base.remotable_classmethod
def get(cls, context, task_name, period_beginning, period_ending, host,
state=None):
db_task_log = db.task_log_get(context, task_name, period_beginning,
period_ending, host, state=state)
if db_task_log:
return cls._from_db_object(context, cls(context), db_task_log)
@base.remotable
def begin_task(self):
db.task_log_begin_task(
self._context, self.task_name, self.period_beginning,
self.period_ending, self.host, task_items=self.task_items,
message=self.message)
@base.remotable
def end_task(self):
db.task_log_end_task(
self._context, self.task_name, self.period_beginning,
self.period_ending, self.host, errors=self.errors,
message=self.message)
@base.NovaObjectRegistry.register
class TaskLogList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('TaskLog'),
}
obj_relationships = {
'objects': [('1.0', '1.0')],
}
@base.serialize_args
@base.remotable_classmethod
def get_all(cls, context, task_name, period_beginning, period_ending,
host=None, state=None):
db_task_logs = db.task_log_get_all(context, task_name,
period_beginning, period_ending,
host=host, state=state)
return base.obj_make_list(context, cls(context), TaskLog, db_task_logs)
| {
"content_hash": "b8c684521950dd4bd657bed5cdb245f7",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 35.42465753424658,
"alnum_prop": 0.6067285382830626,
"repo_name": "whitepages/nova",
"id": "e3c066f9d80a4bbb41c68c69b3b76c5c5eeb3aef",
"size": "3159",
"binary": false,
"copies": "29",
"ref": "refs/heads/master",
"path": "nova/objects/task_log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16549579"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "259485"
}
],
"symlink_target": ""
} |
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
except AttributeError:
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
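# Illustrative sketch added by the editor (not part of the original module).
# url_to_filename() is deterministic, so the same URL (and etag) always maps to
# the same cache filename; the URL below is a made-up example.
def _demo_url_to_filename():
    url = "https://example.com/bert-base-uncased-vocab.txt"  # hypothetical URL
    plain = url_to_filename(url)
    with_etag = url_to_filename(url, etag='"abc123"')
    assert plain == url_to_filename(url)        # repeatable
    assert with_etag.startswith(plain + ".")    # etag hash appended after a period
    return plain, with_etag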
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None, from_tf=False):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
# if not os.path.exists(url_or_filename):
# raise ValueError("Local cached file does not exist: {}".format(parsed))
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif from_tf and os.path.exists(url_or_filename + ".meta"):
# TF checkpoint exists
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise ValueError("local cached file {} doesn't exist".format(cache_path))
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w', encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| {
"content_hash": "8c2b1d2ca2b4b673e0e3cadfb468e3f9",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 112,
"avg_line_length": 33.71875,
"alnum_prop": 0.6314874884151993,
"repo_name": "mlperf/training_results_v0.7",
"id": "3f5d95072044931e758d4b60c8fffe6debb097fb",
"size": "9260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NVIDIA/benchmarks/bert/implementations/pytorch/file_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
} |
"""The virtual interfaces extension."""
import webob
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova import compute
from nova.i18n import _
from nova import network
from nova.policies import virtual_interfaces as vif_policies
def _translate_vif_summary_view(req, vif):
"""Maps keys for VIF summary view."""
d = {}
d['id'] = vif.uuid
d['mac_address'] = vif.address
if api_version_request.is_supported(req, min_version='2.12'):
d['net_id'] = vif.net_uuid
# NOTE(gmann): This is for v2.1 compatible mode where response should be
# same as v2 one.
if req.is_legacy_v2():
d['OS-EXT-VIF-NET:net_id'] = vif.net_uuid
return d
class ServerVirtualInterfaceController(wsgi.Controller):
"""The instance VIF API controller for the OpenStack API.
This API is deprecated from the Microversion '2.44'.
"""
def __init__(self):
self.compute_api = compute.API()
self.network_api = network.API()
super(ServerVirtualInterfaceController, self).__init__()
def _items(self, req, server_id, entity_maker):
"""Returns a list of VIFs, transformed through entity_maker."""
context = req.environ['nova.context']
context.can(vif_policies.BASE_POLICY_NAME)
instance = common.get_instance(self.compute_api, context, server_id)
try:
vifs = self.network_api.get_vifs_by_instance(context, instance)
except NotImplementedError:
msg = _('Listing virtual interfaces is not supported by this '
'cloud.')
raise webob.exc.HTTPBadRequest(explanation=msg)
limited_list = common.limited(vifs, req)
res = [entity_maker(req, vif) for vif in limited_list]
return {'virtual_interfaces': res}
@wsgi.Controller.api_version("2.1", "2.43")
@wsgi.expected_errors((400, 404))
def index(self, req, server_id):
"""Returns the list of VIFs for a given instance."""
return self._items(req, server_id,
entity_maker=_translate_vif_summary_view)
| {
"content_hash": "ace126e93d053231f2d05d7c450a343c",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 76,
"avg_line_length": 36.083333333333336,
"alnum_prop": 0.6480369515011547,
"repo_name": "phenoxim/nova",
"id": "46724c9c4aaf6ece58b459e8c29f7cc4a0a26e18",
"size": "2796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/virtual_interfaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16289098"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "282020"
}
],
"symlink_target": ""
} |
"""
Twisted runner: run and monitor processes
Maintainer: Andrew Bennetts
classic inetd(8) support:
Future Plans: The basic design should be final. There are some bugs that need
fixing regarding UDP and Sun-RPC support. Perhaps some day xinetd
compatibility will be added.
procmon: monitor and restart processes
"""
from twisted.runner._version import version
__version__ = version.short()
| {
"content_hash": "411bd5615624ac59364effc4cbc2513e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.75,
"alnum_prop": 0.7475728155339806,
"repo_name": "timkrentz/SunTracker",
"id": "f7f80142682ee2eaa2d12695e92cc59eb4d8a700",
"size": "486",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/runner/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
} |
import unittest
import scram_modules.alignedreads as ar
import scram_modules.srnaseq as srna
import scram_modules.refseq as refseq
import scram_modules.dna as dna
import os
_BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class TestARMethods(unittest.TestCase):
def test_srna_profile_1(self):
"""
Test a single read aligning a reference once in the sense orientation
"""
test_seq = self.load_test_read_file()
single_ref = self.load_test_ref_file("test_ref_1.fa")
aligned = self.align_reads(single_ref, test_seq)
test_aligned=ar.AlignedReads()
test_aligned[dna.DNA("ATGCGTATGGCGATGAGAGTA")]=[[0, 500000.0]]
        self.assertEqual(aligned, test_aligned)
| {
"content_hash": "1df5a5d1a3688f9e024629f1f2c93a6a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 29.36,
"alnum_prop": 0.6852861035422343,
"repo_name": "Carroll-Lab/scram",
"id": "cd27cbe22313f9491453476a25b3cd46f822e637",
"size": "749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cdp_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89516"
}
],
"symlink_target": ""
} |
from django.urls import path
from . import views
app_name = "owntracks"
urlpatterns = [
path('owntracks/logtracks', views.manage_owntrack_log, name='logtracks'),
path('owntracks/show_maps', views.show_maps, name='show_maps'),
path('owntracks/get_datas', views.get_datas, name='get_datas'),
path('owntracks/show_dates', views.show_log_dates, name='show_dates')
]
| {
"content_hash": "5203927be74f9c9deff8160d2c0ba6a9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 77,
"avg_line_length": 31.75,
"alnum_prop": 0.7007874015748031,
"repo_name": "liangliangyy/DjangoBlog",
"id": "c19ada8782e81ad195fd904d4a639504aaf05482",
"size": "381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "owntracks/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "487"
},
{
"name": "HTML",
"bytes": "54461"
},
{
"name": "Python",
"bytes": "224212"
},
{
"name": "Shell",
"bytes": "1247"
}
],
"symlink_target": ""
} |
from pythonbacktest.datafeed import PriceBar
from . import *
class BasicBackTestEngine(AbstractBackTestEngine):
def __init__(self):
AbstractBackTestEngine.__init__(self)
# run backtest on single day only
def start_single_date(self, date, strategy, indicators_history, broker):
indicators_history_per_date = indicators_history.get_indicator_history_for_day(date)
if indicators_history_per_date is None:
raise ValueError("Can't get history for given date: " + str(date))
price_bar_index = 0
number_of_pricebars = len(indicators_history_per_date)
for timestamp, indicators_snapshot in indicators_history_per_date:
snapshot_data = indicators_snapshot.snapshot_data
latest_snapshot_data = self.__get_latest_values_for_indicators_snapshot(snapshot_data)
price_bar = self.__latest_snapshot_to_price_bar(latest_snapshot_data)
broker.set_current_price_bar(price_bar, price_bar_index)
broker.set_current_indicators_values(latest_snapshot_data)
if price_bar_index == number_of_pricebars - 1:
strategy.day_end_price_bar(price_bar, price_bar_index, snapshot_data, latest_snapshot_data, broker)
else:
strategy.new_price_bar(price_bar, price_bar_index, snapshot_data, latest_snapshot_data, broker)
price_bar_index += 1
def __get_latest_values_for_indicators_snapshot(self, snapshot_data):
result = {}
for indicator_name, indicator_values in snapshot_data.items():
result[indicator_name] = indicator_values[-1]
return result
def __latest_snapshot_to_price_bar(self, latest_snapshot_data):
price_bar = PriceBar()
price_bar.timestamp = latest_snapshot_data["timestamp"]
price_bar.open = latest_snapshot_data["open"]
price_bar.close = latest_snapshot_data["close"]
price_bar.high = latest_snapshot_data["high"]
price_bar.low = latest_snapshot_data["low"]
price_bar.volume = latest_snapshot_data["volume"]
return price_bar
| {
"content_hash": "3e68582da72b27aff38c7ae9d04f8450",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 115,
"avg_line_length": 38.70909090909091,
"alnum_prop": 0.660403945514326,
"repo_name": "quantwizard-com/pythonbacktest",
"id": "da54a8e7387530d0ef74dbc0d8bb0e1b06982338",
"size": "2129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonbacktest/backtestengine/basicbacktestengine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "340439"
},
{
"name": "Python",
"bytes": "144332"
},
{
"name": "Shell",
"bytes": "77"
}
],
"symlink_target": ""
} |
"""
This file contains the implementation of the database model for the contrail config daemons.
"""
from exceptions import NoIdError
from vnc_api.gen.resource_client import *
from utils import obj_type_to_vnc_class
class DBBase(object):
# This is the base class for all DB objects. All derived objects must
# have a class member called _dict of dictionary type.
    # The init method of this class must be called before using any functions
_logger = None
_cassandra = None
_manager = None
# objects in the database could be indexed by uuid or fq-name
# set _indexed_by_name to True in the derived class to use fq-name as index
_indexed_by_name = False
@classmethod
def init(cls, manager, logger, cassandra):
cls._logger = logger
cls._cassandra = cassandra
cls._manager = manager
# end init
class __metaclass__(type):
def __iter__(cls):
for i in cls._dict:
yield i
# end __iter__
def values(cls):
for i in cls._dict.values():
yield i
# end values
def items(cls):
for i in cls._dict.items():
yield i
# end items
def __contains__(cls, item):
# check for 'item in cls'
return item in cls._dict
# end __contains__
# end __metaclass__
@classmethod
def get(cls, key):
return cls._dict.get(key)
# end get
@classmethod
def locate(cls, key, *args):
if key not in cls._dict:
try:
obj = cls(key, *args)
cls._dict[key] = obj
return obj
except NoIdError as e:
cls._logger.debug(
"Exception %s while creating %s for %s" %
(e, cls.__name__, key))
return None
return cls._dict[key]
# end locate
def delete_obj(self):
# Override in derived class to provide additional functionality
pass
@classmethod
def delete(cls, key):
obj = cls.get(key)
if obj is None:
return
obj.delete_obj()
del cls._dict[key]
# end delete
def get_ref_uuid_from_dict(self, obj_dict, ref_name):
if ref_name in obj_dict:
return obj_dict[ref_name][0]['uuid']
else:
return None
def get_key(self):
if self._indexed_by_name:
return self.name
return self.uuid
# end get_key
def add_ref(self, ref_type, ref, attr=None):
if hasattr(self, ref_type):
setattr(self, ref_type, ref)
elif hasattr(self, ref_type+'s'):
ref_set = getattr(self, ref_type+'s')
if isinstance(ref_set, set):
ref_set.add(ref)
elif isinstance(ref_set, dict):
ref_set[ref] = attr
# end add_ref
def delete_ref(self, ref_type, ref):
if hasattr(self, ref_type) and getattr(self, ref_type) == ref:
setattr(self, ref_type, None)
elif hasattr(self, ref_type+'s'):
ref_set = getattr(self, ref_type+'s')
if isinstance(ref_set, set):
ref_set.discard(ref)
elif isinstance(ref_set, dict) and ref in ref_set:
del ref_set[ref]
# end delete_ref
def add_to_parent(self, obj):
if isinstance(obj, dict):
parent_type = obj.get('parent_type')
else:
parent_type = obj.parent_type
self.parent_type = parent_type.replace('-', '_')
if self._indexed_by_name:
if isinstance(obj, dict):
                fq_name = obj.get('fq_name', [])
if fq_name:
self.parent_key = ':'.join(fq_name[:-1])
else:
return
else:
self.parent_key = obj.get_parent_fq_name_str()
else:
if isinstance(obj, dict):
self.parent_key = obj.get('parent_uuid')
else:
self.parent_key = obj.get_parent_uuid
if not self.parent_type or not self.parent_key:
return
self.add_ref(self.parent_type, self.parent_key)
p_obj = self.get_obj_type_map()[self.parent_type].get(self.parent_key)
if p_obj is not None:
p_obj.add_ref(self.obj_type, self.get_key())
# end
def remove_from_parent(self):
if not self.parent_type or not self.parent_key:
return
p_obj = self.get_obj_type_map()[self.parent_type].get(self.parent_key)
if p_obj is not None:
p_obj.delete_ref(self.obj_type, self.get_key())
def _get_ref_key(self, ref, ref_type=None):
if self._indexed_by_name:
key = ':'.join(ref['to'])
else:
try:
key = ref['uuid']
except KeyError:
fq_name = ref['to']
key = self._cassandra.fq_name_to_uuid(ref_type, fq_name)
return key
# end _get_ref_key
def get_single_ref_attr(self, ref_type, obj):
if isinstance(obj, dict):
refs = obj.get(ref_type+'_refs') or obj.get(ref_type+'_back_refs')
else:
refs = (getattr(obj, ref_type+'_refs', None) or
getattr(obj, ref_type+'_back_refs', None))
if refs:
ref_attr = refs[0].get('attr', None)
return ref_attr
return None
# end get_single_ref_attr
def update_single_ref(self, ref_type, obj):
if isinstance(obj, dict):
refs = obj.get(ref_type+'_refs') or obj.get(ref_type+'_back_refs')
else:
refs = (getattr(obj, ref_type+'_refs', None) or
getattr(obj, ref_type+'_back_refs', None))
if refs:
new_key = self._get_ref_key(refs[0], ref_type)
else:
new_key = None
old_key = getattr(self, ref_type, None)
if old_key == new_key:
return
ref_obj = self.get_obj_type_map()[ref_type].get(old_key)
if ref_obj is not None:
ref_obj.delete_ref(self.obj_type, self.get_key())
ref_obj = self.get_obj_type_map()[ref_type].get(new_key)
if ref_obj is not None:
ref_obj.add_ref(self.obj_type, self.get_key())
setattr(self, ref_type, new_key)
# end update_single_ref
def set_children(self, ref_type, obj):
if isinstance(obj, dict):
refs = obj.get(ref_type+'s')
else:
refs = getattr(obj, ref_type+'s', None)
new_refs = set()
for ref in refs or []:
new_key = self._get_ref_key(ref, ref_type)
new_refs.add(new_key)
setattr(self, ref_type+'s', new_refs)
# end set_children
def update_multiple_refs(self, ref_type, obj):
if isinstance(obj, dict):
refs = obj.get(ref_type+'_refs') or obj.get(ref_type+'_back_refs')
else:
refs = (getattr(obj, ref_type+'_refs', None) or
getattr(obj, ref_type+'_back_refs', None))
new_refs = set()
for ref in refs or []:
new_key = self._get_ref_key(ref, ref_type)
new_refs.add(new_key)
old_refs = getattr(self, ref_type+'s')
for ref_key in old_refs - new_refs:
ref_obj = self.get_obj_type_map()[ref_type].get(ref_key)
if ref_obj is not None:
ref_obj.delete_ref(self.obj_type, self.get_key())
for ref_key in new_refs - old_refs:
ref_obj = self.get_obj_type_map()[ref_type].get(ref_key)
if ref_obj is not None:
ref_obj.add_ref(self.obj_type, self.get_key())
setattr(self, ref_type+'s', new_refs)
# end update_multiple_refs
def update_multiple_refs_with_attr(self, ref_type, obj):
if isinstance(obj, dict):
refs = obj.get(ref_type+'_refs') or obj.get(ref_type+'_back_refs')
else:
refs = (getattr(obj, ref_type+'_refs', None) or
getattr(obj, ref_type+'_back_refs', None))
new_refs = {}
for ref in refs or []:
new_key = self._get_ref_key(ref, ref_type)
new_refs[new_key] = ref.get('attr')
old_refs = getattr(self, ref_type+'s')
for ref_key in set(old_refs.keys()) - set(new_refs.keys()):
ref_obj = self.get_obj_type_map()[ref_type].get(ref_key)
if ref_obj is not None:
ref_obj.delete_ref(self.obj_type, self.get_key())
for ref_key in new_refs:
if ref_key in old_refs and new_refs[ref_key] == old_refs[ref_key]:
continue
ref_obj = self.get_obj_type_map()[ref_type].get(ref_key)
if ref_obj is not None:
ref_obj.add_ref(self.obj_type, self.get_key(), new_refs[ref_key])
setattr(self, ref_type+'s', new_refs)
# end update_multiple_refs
@classmethod
def read_obj(cls, uuid, obj_type=None):
ok, objs = cls._cassandra.object_read(obj_type or cls.obj_type, [uuid])
if not ok:
cls._logger.error(
'Cannot read %s %s, error %s' % (obj_type, uuid, objs))
raise NoIdError(uuid)
return objs[0]
# end read_obj
@classmethod
def vnc_obj_from_dict(cls, obj_type, obj_dict):
cls = obj_type_to_vnc_class(obj_type, __name__)
return cls.from_dict(**obj_dict)
@classmethod
def read_vnc_obj(cls, uuid=None, fq_name=None, obj_type=None):
if uuid is None and fq_name is None:
raise NoIdError('')
obj_type = obj_type or cls.obj_type
if uuid is None:
if isinstance(fq_name, basestring):
fq_name = fq_name.split(':')
uuid = cls._cassandra.fq_name_to_uuid(obj_type, fq_name)
obj_dict = cls.read_obj(uuid, obj_type)
obj = cls.vnc_obj_from_dict(obj_type, obj_dict)
obj.clear_pending_updates()
return obj
# end read_vnc_obj
@classmethod
def list_obj(cls, obj_type=None):
obj_type = obj_type or cls.obj_type
ok, result = cls._cassandra.object_list(obj_type)
if not ok:
return []
uuids = [uuid for _, uuid in result]
ok, objs = cls._cassandra.object_read(obj_type, uuids)
if not ok:
return []
return objs
@classmethod
def list_vnc_obj(cls, obj_type=None):
obj_type = obj_type or cls.obj_type
vnc_cls = obj_type_to_vnc_class(obj_type, __name__)
obj_dicts = cls.list_obj(obj_type)
for obj_dict in obj_dicts:
obj = vnc_cls.from_dict(**obj_dict)
obj.clear_pending_updates()
yield obj
def get_parent_uuid(self, obj):
if 'parent_uuid' in obj:
return obj['parent_uuid']
else:
parent_type = obj['parent_type'].replace('-', '_')
parent_fq_name = obj['fq_name'][:-1]
return self._cassandra.fq_name_to_uuid(parent_type, parent_fq_name)
# end get_parent_uuid
@classmethod
def find_by_name_or_uuid(cls, name_or_uuid):
obj = cls.get(name_or_uuid)
if obj:
return obj
for obj in cls.values():
if obj.name == name_or_uuid:
return obj
return None
# end find_by_name_or_uuid
@classmethod
def reset(cls):
cls._dict = {}
@classmethod
def get_obj_type_map(cls):
module_base = [x for x in DBBase.__subclasses__()
if cls.__module__ == x.obj_type]
return dict((x.obj_type, x) for x in module_base[0].__subclasses__())
# end class DBBase
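# Illustrative sketch added by the editor (not part of the original module): a
# minimal DBBase subclass showing the per-class _dict store and the obj_type name
# that the base class expects. The object type and fields below are examples only.
class ExampleVirtualNetworkDB(DBBase):
    _dict = {}
    obj_type = 'virtual_network'
    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.update(obj_dict)
    # end __init__
    def update(self, obj=None):
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.fq_name = obj['fq_name']
        self.name = obj['fq_name'][-1]
    # end update
# end class ExampleVirtualNetworkDB
# Typical driver code would then call, for example:
#     ExampleVirtualNetworkDB.init(manager, logger, cassandra)
#     vn = ExampleVirtualNetworkDB.locate(<uuid>)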
| {
"content_hash": "6d907704e2c07898e56f60fd030e2001",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 81,
"avg_line_length": 33.97971014492754,
"alnum_prop": 0.5342489123944383,
"repo_name": "codilime/contrail-controller",
"id": "35faf7df2c6cb573c4d01be3ed1cec4adeeb84f5",
"size": "11793",
"binary": false,
"copies": "2",
"ref": "refs/heads/windows3.1",
"path": "src/config/common/vnc_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "96717"
},
{
"name": "C++",
"bytes": "20662554"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "GDB",
"bytes": "44610"
},
{
"name": "HTML",
"bytes": "519766"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "19459"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "PowerShell",
"bytes": "1784"
},
{
"name": "Python",
"bytes": "5590763"
},
{
"name": "Roff",
"bytes": "40925"
},
{
"name": "Shell",
"bytes": "52721"
},
{
"name": "Thrift",
"bytes": "8382"
},
{
"name": "Yacc",
"bytes": "35530"
}
],
"symlink_target": ""
} |
'''
run the following test with:
python driver_mathTest.py
'''
import unittest
import math
import rospy
from geometry_msgs.msg import Quaternion
from nav_msgs.msg import Odometry
from driver_pseudolinear import PseudoLinearDriver as Driver
from utils import heading_to_quaternion, quaternion_to_heading, easy_Odom, is_close
class TestDriverCalculations(unittest.TestCase):
# sign conventions (revised):
    # axis: the x-axis is parallel to the goal, the y-axis is to the left when
    # facing the goal direction, and the z-axis is oriented up
    # positive heading error - rotated counterclockwise from the goal
    # positive offset error - positive y-axis
def setUp(self):
self.driver_obj = Driver()
def test_zero(self):
heading = 0
offset = 0
adjusted_heading = self.driver_obj.calc_adjusted_heading(heading, offset)
self.assertTrue(is_close(adjusted_heading, 0.0))
def test_pos_heading(self):
heading = 1.0
offset = 0
adjusted_heading = self.driver_obj.calc_adjusted_heading(heading, offset)
self.assertTrue(is_close(adjusted_heading, heading))
def test_neg_heading(self):
heading = -1.0
offset = 0
adjusted_heading = self.driver_obj.calc_adjusted_heading(heading, offset)
self.assertTrue(is_close(adjusted_heading, heading))
def test_pure_offset1(self):
heading = 0
offset = .5
adjusted_heading = self.driver_obj.calc_adjusted_heading(heading, offset)
self.assertTrue(adjusted_heading > 0.0)
self.assertTrue(is_close(adjusted_heading, .75*math.pi/2, 4))
def test_pure_offset2(self):
heading = 0
offset = -.5
adjusted_heading = self.driver_obj.calc_adjusted_heading(heading, offset)
self.assertTrue(adjusted_heading < 0.0)
self.assertTrue(is_close(adjusted_heading, -.75*math.pi/2, 4))
def test_angular_vel1(self):
adjusted_heading = 0
ang_vel = self.driver_obj.calc_angular_velocity(adjusted_heading)
self.assertTrue(is_close(ang_vel, 0.0))
def test_angular_vel2(self):
adjusted_heading = 0.5
ang_vel = self.driver_obj.calc_angular_velocity(adjusted_heading)
self.assertTrue(ang_vel < 0.0)
self.assertTrue(is_close(ang_vel, -0.5))
def test_angular_vel3(self):
adjusted_heading = -0.5
ang_vel = self.driver_obj.calc_angular_velocity(adjusted_heading)
self.assertTrue(ang_vel > 0.0)
self.assertTrue(is_close(ang_vel, 0.5))
def test_angular_vel4(self):
adjusted_heading = 0.25
ang_vel = self.driver_obj.calc_angular_velocity(adjusted_heading)
self.assertTrue(ang_vel < 0.0)
self.assertTrue(is_close(ang_vel, -0.4333), 3)
def test_angular_vel5(self):
adjusted_heading = -0.25
ang_vel = self.driver_obj.calc_angular_velocity(adjusted_heading)
self.assertTrue(ang_vel > 0.0)
self.assertTrue(is_close(ang_vel, 0.4333), 3)
def test_angular_vel6(self):
adjusted_heading = 100.0
ang_vel = self.driver_obj.calc_angular_velocity(adjusted_heading)
self.assertTrue(ang_vel < 0.0)
self.assertTrue(is_close(ang_vel, -self.driver_obj.max_omega))
def test_angular_vel7(self):
adjusted_heading = -100.0
ang_vel = self.driver_obj.calc_angular_velocity(adjusted_heading)
self.assertTrue(ang_vel > 0.0)
self.assertTrue(is_close(ang_vel, self.driver_obj.max_omega))
    def test_linear_velocity1(self):
along = 0.0
off = 0.0
ang_vel = 0.0
goal_vel = 0.0
lin_vel = self.driver_obj.calc_linear_velocity(along, off, ang_vel, goal_vel)
self.assertTrue(is_close(lin_vel, 0.0))
    def test_linear_velocity2(self):
along = 0.0
off = 0.0
ang_vel = 0.0
goal_vel = self.driver_obj.max_v
lin_vel = self.driver_obj.calc_linear_velocity(along, off, ang_vel, goal_vel)
self.assertTrue(is_close(lin_vel, self.driver_obj.max_v))
    def test_linear_velocity3(self):
along = 0.0
off = 0.0
ang_vel = 0.0
goal_vel = -self.driver_obj.max_v
lin_vel = self.driver_obj.calc_linear_velocity(along, off, ang_vel, goal_vel)
self.assertTrue(is_close(lin_vel, -self.driver_obj.max_v))
    def test_linear_velocity4(self):
along = 0.5
off = 0.0
ang_vel = 0.0
goal_vel = 0.0
lin_vel = self.driver_obj.calc_linear_velocity(along, off, ang_vel, goal_vel)
self.assertTrue(is_close(lin_vel, -0.5))
    def test_linear_velocity5(self):
along = -0.5
off = 0.0
ang_vel = 0.0
goal_vel = 0.0
lin_vel = self.driver_obj.calc_linear_velocity(along, off, ang_vel, goal_vel)
self.assertTrue(is_close(lin_vel, 0.5))
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "5b481fac0100d7533610154935a6231d",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 85,
"avg_line_length": 34.53521126760563,
"alnum_prop": 0.6331566068515497,
"repo_name": "buckbaskin/drive_stack",
"id": "0a9ea9ddc9e4b0bed7695cf16afaa29af48ecd65",
"size": "4927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/driver_psl_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "16498"
},
{
"name": "Python",
"bytes": "121562"
},
{
"name": "Shell",
"bytes": "420"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.test.client import Client
from users.models import User
class TestViewShow(TestCase):
def setUp(self):
self.user = User.objects.create_user(
email = "[email protected]",
password = "test_password",
)
self.client = Client()
def test_get_user_show_with_valid_user_id_request_should_return_200(self):
response = self.client.get("/users/" + str(self.user.id))
self.assertEqual(response.status_code, 200)
def test_get_user_show_with_valid_user_id_should_contain_user_info(self):
response = self.client.get("/users/" + str(self.user.id))
self.assertContains(response, self.user.email)
self.assertContains(response, self.user.password)
| {
"content_hash": "2d603e1aa4367d12726a672bea9dcea7",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 78,
"avg_line_length": 37.23809523809524,
"alnum_prop": 0.6675191815856778,
"repo_name": "buildbuild/buildbuild",
"id": "1748898a47d3424e640d980da7cd6a9e4846403b",
"size": "782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "buildbuild/users/tests/test_view_show.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "259939"
},
{
"name": "JavaScript",
"bytes": "70053"
},
{
"name": "Python",
"bytes": "207512"
},
{
"name": "Shell",
"bytes": "1546"
}
],
"symlink_target": ""
} |
"""Shared testing utilities."""
from gcloud import _helpers
from gcloud._helpers import _DefaultsContainer
class _Monkey(object):
    # Context manager for temporarily replacing attributes of a module within the scope of a test.
def __init__(self, module, **kw):
self.module = module
self.to_restore = dict([(key, getattr(module, key)) for key in kw])
for key, value in kw.items():
setattr(module, key, value)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for key, value in self.to_restore.items():
setattr(self.module, key, value)
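# Illustrative sketch added by the editor (not part of the original module):
# _Monkey temporarily swaps module attributes and restores them on exit.
def _demo_monkey():
    sentinel = object()
    with _Monkey(_helpers, _DEFAULTS=sentinel):
        assert _helpers._DEFAULTS is sentinel  # patched inside the block
    # the original _helpers._DEFAULTS is restored here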
def _monkey_defaults(*args, **kwargs):
mock_defaults = _DefaultsContainer(*args, **kwargs)
return _Monkey(_helpers, _DEFAULTS=mock_defaults)
def _setup_defaults(test_case, *args, **kwargs):
test_case._replaced_defaults = _helpers._DEFAULTS
_helpers._DEFAULTS = _DefaultsContainer(*args, **kwargs)
def _tear_down_defaults(test_case):
_helpers._DEFAULTS = test_case._replaced_defaults
| {
"content_hash": "dceb05813f5699c4f8ad0032855e028a",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 29.457142857142856,
"alnum_prop": 0.6508244422890398,
"repo_name": "GrimDerp/gcloud-python",
"id": "18d21b19b3589fce6b850872a9ad486e74061461",
"size": "1628",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gcloud/_testing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "20396"
},
{
"name": "Python",
"bytes": "835658"
},
{
"name": "Shell",
"bytes": "9043"
}
],
"symlink_target": ""
} |
import statsd
from thumbor.metrics import BaseMetrics
class Metrics(BaseMetrics):
@classmethod
def client(cls, config):
"""
Cache statsd client so it doesn't do a DNS lookup
over and over
"""
if not hasattr(cls, "_client"):
cls._client = statsd.StatsClient(
config.STATSD_HOST, config.STATSD_PORT, config.STATSD_PREFIX
)
return cls._client
def incr(self, metricname, value=1):
Metrics.client(self.config).incr(metricname, value)
def timing(self, metricname, value):
Metrics.client(self.config).timing(metricname, value)
| {
"content_hash": "b14bf8576bfa5c6f4a54ea143fca191f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 29.272727272727273,
"alnum_prop": 0.6242236024844721,
"repo_name": "scorphus/thumbor",
"id": "e52fa64296985365bf86c89ffb06362baef5e748",
"size": "896",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "thumbor/metrics/statsd_metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "58654"
},
{
"name": "JavaScript",
"bytes": "2514"
},
{
"name": "Makefile",
"bytes": "11518"
},
{
"name": "Python",
"bytes": "604965"
},
{
"name": "Shell",
"bytes": "331"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="visible", parent_name="layout.geo", **kwargs):
super(VisibleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs,
)
| {
"content_hash": "575792c3445631f3b62042d8876ac1f7",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 82,
"avg_line_length": 36.45454545454545,
"alnum_prop": 0.6234413965087282,
"repo_name": "plotly/plotly.py",
"id": "8acac3fc8656a6876de9352d2bd7c6761c8582ef",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/geo/_visible.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
class Typed:
_expected_type = type(None)
def __init__(self, name=None):
self._name = name
def __set__(self, instance, value):
if not isinstance(value, self._expected_type):
raise TypeError('Expected ' + str(self._expected_type))
instance.__dict__[self._name] = value
class Integer(Typed):
_expected_type = int
class Float(Typed):
_expected_type = float
class String(Typed):
_expected_type = str
class OrderedMeta(type):
def __new__(cls, clsname, bases, clsdict):
d = dict(clsdict)
order = []
for name, value in clsdict.items():
if isinstance(value, Typed):
value._name = name
order.append(name)
d['_order'] = order
return type.__new__(cls, clsname, bases, d)
@classmethod
def __prepare__(metacls, name, bases):
return OrderedDict()
class Structure(metaclass=OrderedMeta):
def as_csv(self):
return ','.join(str(getattr(self, name)) for name in self._order)
class Stock(Structure):
name = String()
shares = Integer()
price = Float()
def __init__(self, name, shares, price):
self.name = name
self.shares = shares
self.price = price
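# Illustrative usage sketch added by the editor (not part of the original recipe):
# OrderedMeta records the order in which the Typed descriptors were defined, so
# as_csv() emits the fields in declaration order, and each descriptor enforces its type.
if __name__ == '__main__':
    s = Stock('ACME', 50, 91.1)
    print(s.as_csv())          # -> ACME,50,91.1
    try:
        s.shares = 'a lot'     # rejected by the Integer descriptor
    except TypeError as exc:
        print(exc)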
| {
"content_hash": "2e35f80d40e46930d695e12c182f89cf",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 73,
"avg_line_length": 22.789473684210527,
"alnum_prop": 0.5850654349499615,
"repo_name": "xu6148152/Binea_Python_Project",
"id": "c7b22dd0f59a5d30b0cefe7d7faa875ffbe5f327",
"size": "1325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PythonCookbook/meta/typed.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4034"
},
{
"name": "CSS",
"bytes": "611975"
},
{
"name": "HTML",
"bytes": "639396"
},
{
"name": "Java",
"bytes": "11754"
},
{
"name": "JavaScript",
"bytes": "494846"
},
{
"name": "Python",
"bytes": "964827"
}
],
"symlink_target": ""
} |
import os
APP_DIR = os.path.dirname( globals()['__file__'] )
DBNAME = 'metadb'
COLLECTIONNAME = 'metadata'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
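# Example added by the editor (not part of the original settings): a minimal
# sqlite3 configuration for local development could look like this (the file
# name is an arbitrary choice).
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(APP_DIR, 'metadb.sqlite3'),
#     }
# }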
#HAYSTACK_CONNECTIONS = {
# 'default': {
# 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
# 'URL': 'http://127.0.0.1:8983/solr'
# },
#}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = APP_DIR + '/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = 'media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = APP_DIR + '/static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'db3n0unxh(l@+*lse*wb)e%0fe3k#(r^#4yd9u%@9b&vickozt'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'pkan.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join( APP_DIR, 'templates' )
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'haystack',
'pkan.pkanapp',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| {
"content_hash": "6d876781a66309c649a25c1af67d848e",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 115,
"avg_line_length": 33.453416149068325,
"alnum_prop": 0.6761975492016339,
"repo_name": "alexbyrnes/pico-ckan",
"id": "0ee47c09ec6ac72d6a702c2c919f702caa7efd05",
"size": "5386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pkan/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "462828"
},
{
"name": "Python",
"bytes": "9918"
}
],
"symlink_target": ""
} |