repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
aabbox/kbengine | kbe/src/lib/python/Lib/test/test_zlib.py | 84 | 26766 | import unittest
from test import support
import binascii
import random
import sys
from test.support import bigmemtest, _1G, _4G
zlib = support.import_module('zlib')
requires_Compress_copy = unittest.skipUnless(
hasattr(zlib.compressobj(), "copy"),
'requires Compress.copy()')
requires_Decompress_copy = unittest.skipUnless(
hasattr(zlib.decompressobj(), "copy"),
'requires Decompress.copy()')
class VersionTestCase(unittest.TestCase):
def test_library_version(self):
# Test that the major version of the actual library in use matches the
# major version that we were compiled against. We can't guarantee that
# the minor versions will match (even on the machine on which the module
# was compiled), and the API is stable between minor versions, so
# testing only the major versions avoids spurious failures.
self.assertEqual(zlib.ZLIB_RUNTIME_VERSION[0], zlib.ZLIB_VERSION[0])
class ChecksumTestCase(unittest.TestCase):
# checksum test cases
def test_crc32start(self):
self.assertEqual(zlib.crc32(b""), zlib.crc32(b"", 0))
self.assertTrue(zlib.crc32(b"abc", 0xffffffff))
def test_crc32empty(self):
self.assertEqual(zlib.crc32(b"", 0), 0)
self.assertEqual(zlib.crc32(b"", 1), 1)
self.assertEqual(zlib.crc32(b"", 432), 432)
def test_adler32start(self):
self.assertEqual(zlib.adler32(b""), zlib.adler32(b"", 1))
self.assertTrue(zlib.adler32(b"abc", 0xffffffff))
def test_adler32empty(self):
self.assertEqual(zlib.adler32(b"", 0), 0)
self.assertEqual(zlib.adler32(b"", 1), 1)
self.assertEqual(zlib.adler32(b"", 432), 432)
def assertEqual32(self, seen, expected):
# 32-bit values masked -- checksums on 32- vs 64-bit machines
# This is important if bit 31 (0x80000000) is set.
self.assertEqual(seen & 0x0FFFFFFFF, expected & 0x0FFFFFFFF)
def test_penguins(self):
self.assertEqual32(zlib.crc32(b"penguin", 0), 0x0e5c1a120)
self.assertEqual32(zlib.crc32(b"penguin", 1), 0x43b6aa94)
self.assertEqual32(zlib.adler32(b"penguin", 0), 0x0bcf02f6)
self.assertEqual32(zlib.adler32(b"penguin", 1), 0x0bd602f7)
self.assertEqual(zlib.crc32(b"penguin"), zlib.crc32(b"penguin", 0))
self.assertEqual(zlib.adler32(b"penguin"),zlib.adler32(b"penguin",1))
def test_crc32_adler32_unsigned(self):
foo = b'abcdefghijklmnop'
# explicitly test signed behavior
self.assertEqual(zlib.crc32(foo), 2486878355)
self.assertEqual(zlib.crc32(b'spam'), 1138425661)
self.assertEqual(zlib.adler32(foo+foo), 3573550353)
self.assertEqual(zlib.adler32(b'spam'), 72286642)
def test_same_as_binascii_crc32(self):
foo = b'abcdefghijklmnop'
crc = 2486878355
self.assertEqual(binascii.crc32(foo), crc)
self.assertEqual(zlib.crc32(foo), crc)
self.assertEqual(binascii.crc32(b'spam'), zlib.crc32(b'spam'))
# Issue #10276 - check that inputs >=4GB are handled correctly.
class ChecksumBigBufferTestCase(unittest.TestCase):
@bigmemtest(size=_4G + 4, memuse=1, dry_run=False)
def test_big_buffer(self, size):
data = b"nyan" * (_1G + 1)
self.assertEqual(zlib.crc32(data), 1044521549)
self.assertEqual(zlib.adler32(data), 2256789997)
class ExceptionTestCase(unittest.TestCase):
# make sure we generate some expected errors
def test_badlevel(self):
# specifying compression level out of range causes an error
# (but -1 is Z_DEFAULT_COMPRESSION, and zlib apparently
# accepts 0 too)
self.assertRaises(zlib.error, zlib.compress, b'ERROR', 10)
def test_badargs(self):
self.assertRaises(TypeError, zlib.adler32)
self.assertRaises(TypeError, zlib.crc32)
self.assertRaises(TypeError, zlib.compress)
self.assertRaises(TypeError, zlib.decompress)
for arg in (42, None, '', 'abc', (), []):
self.assertRaises(TypeError, zlib.adler32, arg)
self.assertRaises(TypeError, zlib.crc32, arg)
self.assertRaises(TypeError, zlib.compress, arg)
self.assertRaises(TypeError, zlib.decompress, arg)
def test_badcompressobj(self):
# verify failure on building compress object with bad params
self.assertRaises(ValueError, zlib.compressobj, 1, zlib.DEFLATED, 0)
# specifying total bits too large causes an error
self.assertRaises(ValueError,
zlib.compressobj, 1, zlib.DEFLATED, zlib.MAX_WBITS + 1)
def test_baddecompressobj(self):
# verify failure on building decompress object with bad params
self.assertRaises(ValueError, zlib.decompressobj, -1)
def test_decompressobj_badflush(self):
# verify failure on calling decompressobj.flush with bad params
self.assertRaises(ValueError, zlib.decompressobj().flush, 0)
self.assertRaises(ValueError, zlib.decompressobj().flush, -1)
class BaseCompressTestCase(object):
def check_big_compress_buffer(self, size, compress_func):
_1M = 1024 * 1024
fmt = "%%0%dx" % (2 * _1M)
# Generate 10MB worth of random data, and expand it by repeating it.
# The assumption is that zlib's memory is not big enough to exploit
# such spread out redundancy.
data = b''.join([random.getrandbits(8 * _1M).to_bytes(_1M, 'little')
for i in range(10)])
data = data * (size // len(data) + 1)
try:
compress_func(data)
finally:
# Release memory
data = None
def check_big_decompress_buffer(self, size, decompress_func):
data = b'x' * size
try:
compressed = zlib.compress(data, 1)
finally:
# Release memory
data = None
data = decompress_func(compressed)
# Sanity check
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b'x')), 0)
finally:
data = None
class CompressTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression in one go (whole message compression)
def test_speech(self):
x = zlib.compress(HAMLET_SCENE)
self.assertEqual(zlib.decompress(x), HAMLET_SCENE)
def test_speech128(self):
# compress more data
data = HAMLET_SCENE * 128
x = zlib.compress(data)
self.assertEqual(zlib.compress(bytearray(data)), x)
for ob in x, bytearray(x):
self.assertEqual(zlib.decompress(ob), data)
def test_incomplete_stream(self):
# A useful error message is given
x = zlib.compress(HAMLET_SCENE)
self.assertRaisesRegex(zlib.error,
"Error -5 while decompressing data: incomplete or truncated stream",
zlib.decompress, x[:-1])
# Memory use of the following functions takes into account overallocation
@bigmemtest(size=_1G + 1024 * 1024, memuse=3)
def test_big_compress_buffer(self, size):
compress = lambda s: zlib.compress(s, 1)
self.check_big_compress_buffer(size, compress)
@bigmemtest(size=_1G + 1024 * 1024, memuse=2)
def test_big_decompress_buffer(self, size):
self.check_big_decompress_buffer(size, zlib.decompress)
@bigmemtest(size=_4G + 100, memuse=1, dry_run=False)
def test_length_overflow(self, size):
data = b'x' * size
try:
self.assertRaises(OverflowError, zlib.compress, data, 1)
self.assertRaises(OverflowError, zlib.decompress, data)
finally:
data = None
class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression object
def test_pair(self):
# straightforward compress/decompress objects
datasrc = HAMLET_SCENE * 128
datazip = zlib.compress(datasrc)
# should compress both bytes and bytearray data
for data in (datasrc, bytearray(datasrc)):
co = zlib.compressobj()
x1 = co.compress(data)
x2 = co.flush()
self.assertRaises(zlib.error, co.flush) # second flush should not work
self.assertEqual(x1 + x2, datazip)
for v1, v2 in ((x1, x2), (bytearray(x1), bytearray(x2))):
dco = zlib.decompressobj()
y1 = dco.decompress(v1 + v2)
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
self.assertIsInstance(dco.unconsumed_tail, bytes)
self.assertIsInstance(dco.unused_data, bytes)
def test_compressoptions(self):
# specify lots of options to compressobj()
level = 2
method = zlib.DEFLATED
wbits = -12
memlevel = 9
strategy = zlib.Z_FILTERED
co = zlib.compressobj(level, method, wbits, memlevel, strategy)
x1 = co.compress(HAMLET_SCENE)
x2 = co.flush()
dco = zlib.decompressobj(wbits)
y1 = dco.decompress(x1 + x2)
y2 = dco.flush()
self.assertEqual(HAMLET_SCENE, y1 + y2)
def test_compressincremental(self):
# compress object in steps, decompress object as one-shot
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = b''.join(bufs)
dco = zlib.decompressobj()
y1 = dco.decompress(b''.join(bufs))
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
def test_decompinc(self, flush=False, source=None, cx=256, dcx=64):
# compress object in steps, decompress object in steps
source = source or HAMLET_SCENE
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = b''.join(bufs)
decombuf = zlib.decompress(combuf)
# Test type of return value
self.assertIsInstance(decombuf, bytes)
self.assertEqual(data, decombuf)
dco = zlib.decompressobj()
bufs = []
for i in range(0, len(combuf), dcx):
bufs.append(dco.decompress(combuf[i:i+dcx]))
self.assertEqual(b'', dco.unconsumed_tail, ########
"(A) uct should be b'': not %d long" %
len(dco.unconsumed_tail))
self.assertEqual(b'', dco.unused_data)
if flush:
bufs.append(dco.flush())
else:
while True:
chunk = dco.decompress(b'')
if chunk:
bufs.append(chunk)
else:
break
self.assertEqual(b'', dco.unconsumed_tail, ########
"(B) uct should be b'': not %d long" %
len(dco.unconsumed_tail))
self.assertEqual(b'', dco.unused_data)
self.assertEqual(data, b''.join(bufs))
# Failure means: "decompressobj with init options failed"
def test_decompincflush(self):
self.test_decompinc(flush=True)
def test_decompimax(self, source=None, cx=256, dcx=64):
# compress in steps, decompress in length-restricted steps
source = source or HAMLET_SCENE
# Check a decompression object with max_length specified
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = b''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
#max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, dcx)
self.assertFalse(len(chunk) > dcx,
'chunk too big (%d>%d)' % (len(chunk), dcx))
bufs.append(chunk)
cb = dco.unconsumed_tail
bufs.append(dco.flush())
self.assertEqual(data, b''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlen(self, flush=False):
# Check a decompression object with max_length specified
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = b''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, max_length)
self.assertFalse(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
cb = dco.unconsumed_tail
if flush:
bufs.append(dco.flush())
else:
while chunk:
chunk = dco.decompress(b'', max_length)
self.assertFalse(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
self.assertEqual(data, b''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlenflush(self):
self.test_decompressmaxlen(flush=True)
def test_maxlenmisc(self):
# Misc tests of max_length
dco = zlib.decompressobj()
self.assertRaises(ValueError, dco.decompress, b"", -1)
self.assertEqual(b'', dco.unconsumed_tail)
def test_clear_unconsumed_tail(self):
# Issue #12050: calling decompress() without providing max_length
# should clear the unconsumed_tail attribute.
cdata = b"x\x9cKLJ\x06\x00\x02M\x01" # "abc"
dco = zlib.decompressobj()
ddata = dco.decompress(cdata, 1)
ddata += dco.decompress(dco.unconsumed_tail)
self.assertEqual(dco.unconsumed_tail, b"")
def test_flushes(self):
# Test flush() with the various options, using all the
# different levels in order to provide more variations.
sync_opt = ['Z_NO_FLUSH', 'Z_SYNC_FLUSH', 'Z_FULL_FLUSH']
sync_opt = [getattr(zlib, opt) for opt in sync_opt
if hasattr(zlib, opt)]
data = HAMLET_SCENE * 8
for sync in sync_opt:
for level in range(10):
obj = zlib.compressobj( level )
a = obj.compress( data[:3000] )
b = obj.flush( sync )
c = obj.compress( data[3000:] )
d = obj.flush()
self.assertEqual(zlib.decompress(b''.join([a,b,c,d])),
data, ("Decompress failed: flush "
"mode=%i, level=%i") % (sync, level))
del obj
@unittest.skipUnless(hasattr(zlib, 'Z_SYNC_FLUSH'),
'requires zlib.Z_SYNC_FLUSH')
def test_odd_flush(self):
# Test for odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
import random
# Testing on 17K of "random" data
# Create compressor and decompressor objects
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
dco = zlib.decompressobj()
# Try 17K of data
# generate random data stream
try:
# In 2.3 and later, WichmannHill is the RNG of the bug report
gen = random.WichmannHill()
except AttributeError:
try:
# 2.2 called it Random
gen = random.Random()
except AttributeError:
# others might simply have a single RNG
gen = random
gen.seed(1)
data = genblock(1, 17 * 1024, generator=gen)
# compress, sync-flush, and decompress
first = co.compress(data)
second = co.flush(zlib.Z_SYNC_FLUSH)
expanded = dco.decompress(first + second)
# if decompressed data is different from the input data, choke.
self.assertEqual(expanded, data, "17K random source doesn't match")
def test_empty_flush(self):
# Test that calling .flush() on unused objects works.
# (Bug #1083110 -- calling .flush() on decompress objects
# caused a core dump.)
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
self.assertTrue(co.flush()) # Returns a zlib header
dco = zlib.decompressobj()
self.assertEqual(dco.flush(), b"") # Returns nothing
def test_dictionary(self):
h = HAMLET_SCENE
# Build a simulated dictionary out of the words in HAMLET.
words = h.split()
random.shuffle(words)
zdict = b''.join(words)
# Use it to compress HAMLET.
co = zlib.compressobj(zdict=zdict)
cd = co.compress(h) + co.flush()
# Verify that it will decompress with the dictionary.
dco = zlib.decompressobj(zdict=zdict)
self.assertEqual(dco.decompress(cd) + dco.flush(), h)
# Verify that it fails when not given the dictionary.
dco = zlib.decompressobj()
self.assertRaises(zlib.error, dco.decompress, cd)
def test_dictionary_streaming(self):
# This simulates the reuse of a compressor object for compressing
# several separate data streams.
co = zlib.compressobj(zdict=HAMLET_SCENE)
do = zlib.decompressobj(zdict=HAMLET_SCENE)
piece = HAMLET_SCENE[1000:1500]
d0 = co.compress(piece) + co.flush(zlib.Z_SYNC_FLUSH)
d1 = co.compress(piece[100:]) + co.flush(zlib.Z_SYNC_FLUSH)
d2 = co.compress(piece[:-100]) + co.flush(zlib.Z_SYNC_FLUSH)
self.assertEqual(do.decompress(d0), piece)
self.assertEqual(do.decompress(d1), piece[100:])
self.assertEqual(do.decompress(d2), piece[:-100])
def test_decompress_incomplete_stream(self):
# This is 'foo', deflated
x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E'
# For the record
self.assertEqual(zlib.decompress(x), b'foo')
self.assertRaises(zlib.error, zlib.decompress, x[:-5])
# Omitting the stream end works with decompressor objects
# (see issue #8672).
dco = zlib.decompressobj()
y = dco.decompress(x[:-5])
y += dco.flush()
self.assertEqual(y, b'foo')
def test_decompress_eof(self):
x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # 'foo'
dco = zlib.decompressobj()
self.assertFalse(dco.eof)
dco.decompress(x[:-5])
self.assertFalse(dco.eof)
dco.decompress(x[-5:])
self.assertTrue(dco.eof)
dco.flush()
self.assertTrue(dco.eof)
def test_decompress_eof_incomplete_stream(self):
x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # 'foo'
dco = zlib.decompressobj()
self.assertFalse(dco.eof)
dco.decompress(x[:-5])
self.assertFalse(dco.eof)
dco.flush()
self.assertFalse(dco.eof)
def test_decompress_unused_data(self):
# Repeated calls to decompress() after EOF should accumulate data in
# dco.unused_data, instead of just storing the arg to the last call.
source = b'abcdefghijklmnopqrstuvwxyz'
remainder = b'0123456789'
y = zlib.compress(source)
x = y + remainder
for maxlen in 0, 1000:
for step in 1, 2, len(y), len(x):
dco = zlib.decompressobj()
data = b''
for i in range(0, len(x), step):
if i < len(y):
self.assertEqual(dco.unused_data, b'')
if maxlen == 0:
data += dco.decompress(x[i : i + step])
self.assertEqual(dco.unconsumed_tail, b'')
else:
data += dco.decompress(
dco.unconsumed_tail + x[i : i + step], maxlen)
data += dco.flush()
self.assertTrue(dco.eof)
self.assertEqual(data, source)
self.assertEqual(dco.unconsumed_tail, b'')
self.assertEqual(dco.unused_data, remainder)
def test_flush_with_freed_input(self):
# Issue #16411: decompressor accesses input to last decompress() call
# in flush(), even if this object has been freed in the meanwhile.
input1 = b'abcdefghijklmnopqrstuvwxyz'
input2 = b'QWERTYUIOPASDFGHJKLZXCVBNM'
data = zlib.compress(input1)
dco = zlib.decompressobj()
dco.decompress(data, 1)
del data
data = zlib.compress(input2)
self.assertEqual(dco.flush(), input1[1:])
@requires_Compress_copy
def test_compresscopy(self):
# Test copying a compression object
data0 = HAMLET_SCENE
data1 = bytes(str(HAMLET_SCENE, "ascii").swapcase(), "ascii")
c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
bufs0 = []
bufs0.append(c0.compress(data0))
c1 = c0.copy()
bufs1 = bufs0[:]
bufs0.append(c0.compress(data0))
bufs0.append(c0.flush())
s0 = b''.join(bufs0)
bufs1.append(c1.compress(data1))
bufs1.append(c1.flush())
s1 = b''.join(bufs1)
self.assertEqual(zlib.decompress(s0),data0+data0)
self.assertEqual(zlib.decompress(s1),data0+data1)
@requires_Compress_copy
def test_badcompresscopy(self):
# Test copying a compression object in an inconsistent state
c = zlib.compressobj()
c.compress(HAMLET_SCENE)
c.flush()
self.assertRaises(ValueError, c.copy)
@requires_Decompress_copy
def test_decompresscopy(self):
# Test copying a decompression object
data = HAMLET_SCENE
comp = zlib.compress(data)
# Test type of return value
self.assertIsInstance(comp, bytes)
d0 = zlib.decompressobj()
bufs0 = []
bufs0.append(d0.decompress(comp[:32]))
d1 = d0.copy()
bufs1 = bufs0[:]
bufs0.append(d0.decompress(comp[32:]))
s0 = b''.join(bufs0)
bufs1.append(d1.decompress(comp[32:]))
s1 = b''.join(bufs1)
self.assertEqual(s0,s1)
self.assertEqual(s0,data)
@requires_Decompress_copy
def test_baddecompresscopy(self):
# Test copying a decompression object in an inconsistent state
data = zlib.compress(HAMLET_SCENE)
d = zlib.decompressobj()
d.decompress(data)
d.flush()
self.assertRaises(ValueError, d.copy)
# Memory use of the following functions takes into account overallocation
@bigmemtest(size=_1G + 1024 * 1024, memuse=3)
def test_big_compress_buffer(self, size):
c = zlib.compressobj(1)
compress = lambda s: c.compress(s) + c.flush()
self.check_big_compress_buffer(size, compress)
@bigmemtest(size=_1G + 1024 * 1024, memuse=2)
def test_big_decompress_buffer(self, size):
d = zlib.decompressobj()
decompress = lambda s: d.decompress(s) + d.flush()
self.check_big_decompress_buffer(size, decompress)
@bigmemtest(size=_4G + 100, memuse=1, dry_run=False)
def test_length_overflow(self, size):
data = b'x' * size
c = zlib.compressobj(1)
d = zlib.decompressobj()
try:
self.assertRaises(OverflowError, c.compress, data)
self.assertRaises(OverflowError, d.decompress, data)
finally:
data = None
def genblock(seed, length, step=1024, generator=random):
"""length-byte stream of random data from a seed (in step-byte blocks)."""
if seed is not None:
generator.seed(seed)
randint = generator.randint
if length < step or step < 2:
step = length
blocks = bytes()
for i in range(0, length, step):
blocks += bytes(randint(0, 255) for x in range(step))
return blocks
def choose_lines(source, number, seed=None, generator=random):
"""Return a list of number lines randomly chosen from the source"""
if seed is not None:
generator.seed(seed)
sources = source.split('\n')
return [generator.choice(sources) for n in range(number)]
HAMLET_SCENE = b"""
LAERTES
O, fear me not.
I stay too long: but here my father comes.
Enter POLONIUS
A double blessing is a double grace,
Occasion smiles upon a second leave.
LORD POLONIUS
Yet here, Laertes! aboard, aboard, for shame!
The wind sits in the shoulder of your sail,
And you are stay'd for. There; my blessing with thee!
And these few precepts in thy memory
See thou character. Give thy thoughts no tongue,
Nor any unproportioned thought his act.
Be thou familiar, but by no means vulgar.
Those friends thou hast, and their adoption tried,
Grapple them to thy soul with hoops of steel;
But do not dull thy palm with entertainment
Of each new-hatch'd, unfledged comrade. Beware
Of entrance to a quarrel, but being in,
Bear't that the opposed may beware of thee.
Give every man thy ear, but few thy voice;
Take each man's censure, but reserve thy judgment.
Costly thy habit as thy purse can buy,
But not express'd in fancy; rich, not gaudy;
For the apparel oft proclaims the man,
And they in France of the best rank and station
Are of a most select and generous chief in that.
Neither a borrower nor a lender be;
For loan oft loses both itself and friend,
And borrowing dulls the edge of husbandry.
This above all: to thine ownself be true,
And it must follow, as the night the day,
Thou canst not then be false to any man.
Farewell: my blessing season this in thee!
LAERTES
Most humbly do I take my leave, my lord.
LORD POLONIUS
The time invites you; go; your servants tend.
LAERTES
Farewell, Ophelia; and remember well
What I have said to you.
OPHELIA
'Tis in my memory lock'd,
And you yourself shall keep the key of it.
LAERTES
Farewell.
"""
def test_main():
support.run_unittest(
VersionTestCase,
ChecksumTestCase,
ChecksumBigBufferTestCase,
ExceptionTestCase,
CompressTestCase,
CompressObjectTestCase
)
if __name__ == "__main__":
unittest.main() # XXX
###test_main()
| lgpl-3.0 |
onitake/ansible | lib/ansible/modules/identity/cyberark/cyberark_user.py | 18 | 16119 | #!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: cyberark_user
short_description: Module for CyberArk User Management using PAS Web Services SDK
author:
- Edward Nunez (@enunez-cyberark) CyberArk BizDev
- Cyberark Bizdev (@cyberark-bizdev)
- erasmix (@erasmix)
version_added: 2.4
description:
- CyberArk User Management using PAS Web Services SDK. It currently supports the following
actions: Get User Details, Add User, Update User, Delete User.
options:
username:
required: True
description:
- The name of the user who will be queried (for details), added, updated or deleted.
state:
default: present
choices: [present, absent]
description:
- Specifies the state needed for the user:
present to create the user, absent to delete the user.
cyberark_session:
required: True
description:
- Dictionary set by a CyberArk authentication containing the different values to perform actions on a logged-on CyberArk session,
please see M(cyberark_authentication) module for an example of cyberark_session.
initial_password:
description:
- The password that the new user will use to log on the first time. This password must meet the password policy requirements.
This parameter is required when state is present -- Add User.
new_password:
description:
- The user's updated password. Make sure that this password meets the password policy requirements.
email:
description:
- The user email address.
first_name:
description:
- The user first name.
last_name:
description:
- The user last name.
change_password_on_the_next_logon:
type: bool
default: 'no'
description:
- Whether or not the user must change their password in their next logon.
Valid values = true/false.
expiry_date:
description:
- The date and time when the user account will expire and become disabled.
user_type_name:
default: EPVUser
description:
- The type of user.
disabled:
type: bool
default: 'no'
description:
- Whether or not the user will be disabled. Valid values = true/false.
location:
description:
- The Vault Location for the user.
group_name:
description:
- The name of the group the user will be added to.
'''
EXAMPLES = '''
- name: Logon to CyberArk Vault using PAS Web Services SDK
cyberark_authentication:
api_base_url: "https://components.cyberark.local"
use_shared_logon_authentication: true
- name: Create user & immediately add it to a group
cyberark_user:
username: "username"
initial_password: "password"
user_type_name: "EPVUser"
change_password_on_the_next_logon: false
group_name: "GroupOfUsers"
state: present
cyberark_session: "{{ cyberark_session }}"
- name: Make sure user is present and reset user credential if present
cyberark_user:
username: "Username"
new_password: "password"
disabled: false
state: present
cyberark_session: "{{ cyberark_session }}"
- name: Logoff from CyberArk Vault
cyberark_authentication:
state: absent
cyberark_session: "{{ cyberark_session }}"
'''
RETURN = '''
changed:
description: Whether there was a change done.
type: bool
returned: always
cyberark_user:
description: Dictionary containing result properties.
returned: always
type: dict
sample:
result:
description: user properties when state is present
type: dict
returned: success
status_code:
description: Result HTTP Status code
returned: success
type: int
sample: 200
'''
import json
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves import http_client as httplib
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.urls import open_url
def user_details(module):
# Get username from module parameters, and api base url
# along with validate_certs from the cyberark_session established
username = module.params["username"]
cyberark_session = module.params["cyberark_session"]
api_base_url = cyberark_session["api_base_url"]
validate_certs = cyberark_session["validate_certs"]
# Prepare result, end_point, and headers
result = {}
end_point = "/PasswordVault/WebServices/PIMServices.svc/Users/{0}".format(
username)
headers = {'Content-Type': 'application/json'}
headers["Authorization"] = cyberark_session["token"]
try:
response = open_url(
api_base_url + end_point,
method="GET",
headers=headers,
validate_certs=validate_certs)
result = {"result": json.loads(response.read())}
return (False, result, response.getcode())
except (HTTPError, httplib.HTTPException) as http_exception:
if http_exception.code == 404:
return (False, None, http_exception.code)
else:
module.fail_json(
msg=("Error while performing user_details."
"Please validate parameters provided."
"\n*** end_point=%s%s\n ==> %s" % (api_base_url, end_point, to_text(http_exception))),
headers=headers,
status_code=http_exception.code)
except Exception as unknown_exception:
module.fail_json(
msg=("Unknown error while performing user_details."
"\n*** end_point=%s%s\n%s" % (api_base_url, end_point, to_text(unknown_exception))),
headers=headers,
exception=traceback.format_exc(),
status_code=-1)
def user_add_or_update(module, HTTPMethod):
# Get username from module parameters, and api base url
# along with validate_certs from the cyberark_session established
username = module.params["username"]
cyberark_session = module.params["cyberark_session"]
api_base_url = cyberark_session["api_base_url"]
validate_certs = cyberark_session["validate_certs"]
# Prepare result, payload, and headers
result = {}
payload = {}
headers = {'Content-Type': 'application/json',
"Authorization": cyberark_session["token"]}
# end_point and payload are set differently depending on POST/PUT
# for POST -- create -- payload contains username
# for PUT -- update -- username is part of the endpoint
if HTTPMethod == "POST":
end_point = "/PasswordVault/WebServices/PIMServices.svc/Users"
payload["UserName"] = username
elif HTTPMethod == "PUT":
end_point = "/PasswordVault/WebServices/PIMServices.svc/Users/{0}"
end_point = end_point.format(username)
# --- Optionally populate payload based on parameters passed ---
if "initial_password" in module.params:
payload["InitialPassword"] = module.params["initial_password"]
if "new_password" in module.params:
payload["NewPassword"] = module.params["new_password"]
if "email" in module.params:
payload["Email"] = module.params["email"]
if "first_name" in module.params:
payload["FirstName"] = module.params["first_name"]
if "last_name" in module.params:
payload["LastName"] = module.params["last_name"]
if "change_password_on_the_next_logon" in module.params:
if module.params["change_password_on_the_next_logon"]:
payload["ChangePasswordOnTheNextLogon"] = "true"
else:
payload["ChangePasswordOnTheNextLogon"] = "false"
if "expiry_date" in module.params:
payload["ExpiryDate"] = module.params["expiry_date"]
if "user_type_name" in module.params:
payload["UserTypeName"] = module.params["user_type_name"]
if "disabled" in module.params:
if module.params["disabled"]:
payload["Disabled"] = "true"
else:
payload["Disabled"] = "false"
if "location" in module.params:
payload["Location"] = module.params["location"]
# --------------------------------------------------------------
try:
# execute REST action
response = open_url(
api_base_url + end_point,
method=HTTPMethod,
headers=headers,
data=json.dumps(payload),
validate_certs=validate_certs)
result = {"result": json.loads(response.read())}
return (True, result, response.getcode())
except (HTTPError, httplib.HTTPException) as http_exception:
module.fail_json(
msg=("Error while performing user_add_or_update."
"Please validate parameters provided."
"\n*** end_point=%s%s\n ==> %s" % (api_base_url, end_point, to_text(http_exception))),
payload=payload,
headers=headers,
status_code=http_exception.code)
except Exception as unknown_exception:
module.fail_json(
msg=("Unknown error while performing user_add_or_update."
"\n*** end_point=%s%s\n%s" % (api_base_url, end_point, to_text(unknown_exception))),
payload=payload,
headers=headers,
exception=traceback.format_exc(),
status_code=-1)
def user_delete(module):
# Get username from module parameters, and api base url
# along with validate_certs from the cyberark_session established
username = module.params["username"]
cyberark_session = module.params["cyberark_session"]
api_base_url = cyberark_session["api_base_url"]
validate_certs = cyberark_session["validate_certs"]
# Prepare result, end_point, and headers
result = {}
end_point = "/PasswordVault/WebServices/PIMServices.svc/Users/{0}".format(
username)
headers = {'Content-Type': 'application/json'}
headers["Authorization"] = cyberark_session["token"]
try:
# execute REST action
response = open_url(
api_base_url + end_point,
method="DELETE",
headers=headers,
validate_certs=validate_certs)
result = {"result": {}}
return (True, result, response.getcode())
except (HTTPError, httplib.HTTPException) as http_exception:
exception_text = to_text(http_exception)
if http_exception.code == 404 and "ITATS003E" in exception_text:
# User does not exist
result = {"result": {}}
return (False, result, http_exception.code)
else:
module.fail_json(
msg=("Error while performing user_delete."
"Please validate parameters provided."
"\n*** end_point=%s%s\n ==> %s" % (api_base_url, end_point, exception_text)),
headers=headers,
status_code=http_exception.code)
except Exception as unknown_exception:
module.fail_json(
msg=("Unknown error while performing user_delete."
"\n*** end_point=%s%s\n%s" % (api_base_url, end_point, to_text(unknown_exception))),
headers=headers,
exception=traceback.format_exc(),
status_code=-1)
def user_add_to_group(module):
# Get username, and groupname from module parameters, and api base url
# along with validate_certs from the cyberark_session established
username = module.params["username"]
group_name = module.params["group_name"]
cyberark_session = module.params["cyberark_session"]
api_base_url = cyberark_session["api_base_url"]
validate_certs = cyberark_session["validate_certs"]
# Prepare result, end_point, headers and payload
result = {}
end_point = "/PasswordVault/WebServices/PIMServices.svc//Groups/{0}/Users".format(
group_name)
headers = {'Content-Type': 'application/json'}
headers["Authorization"] = cyberark_session["token"]
payload = {"UserName": username}
try:
# execute REST action
response = open_url(
api_base_url + end_point,
method="POST",
headers=headers,
data=json.dumps(payload),
validate_certs=validate_certs)
result = {"result": {}}
return (True, result, response.getcode())
except (HTTPError, httplib.HTTPException) as http_exception:
exception_text = to_text(http_exception)
if http_exception.code == 409 and "ITATS262E" in exception_text:
# User is already member of Group
return (False, None, http_exception.code)
else:
module.fail_json(
msg=("Error while performing user_add_to_group."
"Please validate parameters provided."
"\n*** end_point=%s%s\n ==> %s" % (api_base_url, end_point, exception_text)),
payload=payload,
headers=headers,
exception=traceback.format_exc(),
status_code=http_exception.code)
except Exception as unknown_exception:
module.fail_json(
msg=("Unknown error while performing user_add_to_group."
"\n*** end_point=%s%s\n%s" % (api_base_url, end_point, to_text(unknown_exception))),
payload=payload,
headers=headers,
status_code=-1)
def main():
fields = {
"username": {"required": True, "type": "str"},
"state": {"type": "str",
"choices": ["present", "absent"],
"default": "present"},
"cyberark_session": {"required": True, "type": "dict"},
"initial_password": {"type": "str", "no_log": True},
"new_password": {"type": "str", "no_log": True},
"email": {"type": "str"},
"first_name": {"type": "str"},
"last_name": {"type": "str"},
"change_password_on_the_next_logon": {"type": "bool"},
"expiry_date": {"type": "str"},
"user_type_name": {"type": "str"},
"disabled": {"type": "bool"},
"location": {"type": "str"},
"group_name": {"type": "str"},
}
module = AnsibleModule(argument_spec=fields)
state = module.params["state"]
changed = False
result = {}
if (state == "present"):
(changed, result, status_code) = user_details(module)
if (status_code == 200): # user already exists
if ("new_password" in module.params):
# if new_password specified, proceed to update user credential
(changed, result, status_code) = user_add_or_update(module, "PUT")
if ("group_name" in module.params and module.params["group_name"] is not None):
# if user exists, add to group if needed
(changed, ignored_result, ignored_status_code) = user_add_to_group(module)
elif (status_code == 404):
# user does not exist, proceed to create it
(changed, result, status_code) = user_add_or_update(module, "POST")
if (status_code == 201 and "group_name" in module.params and module.params["group_name"] is not None):
# if user was created, add to group if needed
(changed, ignored_result, ignored_status_code) = user_add_to_group(module)
elif (state == "absent"):
(changed, result, status_code) = user_delete(module)
module.exit_json(
changed=changed,
cyberark_user=result,
status_code=status_code)
if __name__ == '__main__':
main()
| gpl-3.0 |
sdrogers/lda | code/formula.py | 2 | 1874 | import re
class Formula(object):
def __init__(self,formula):
self.atom_names = ['C','H','N','O','P','S','Cl','I','Br','Si','F','D']
self.formula = formula
self.atoms = {}
for atom in self.atom_names:
self.atoms[atom] = self.get_atoms(atom)
def equals(self,formula):
is_equal = True
for atom in self.atoms:
if not self.atoms[atom] == formula.atoms[atom]:
is_equal = False
return is_equal
return is_equal
def correct_gcms_derivatives(self):
n_silicons = self.atoms['Si']
self.atoms['Si'] = 0
self.atoms['C'] -= n_silicons
self.atoms['H'] -= 3*n_silicons
self.atoms['H'] += n_silicons
self.make_string()
def make_string(self):
self.formula = ""
for atom in self.atom_names:
atom_no = self.atoms[atom]
if atom_no == 1:
self.formula += atom
elif atom_no > 1:
self.formula += atom + str(atom_no)
def get_atoms(self,atom_name):
# Do some regex matching to find the numbers of the important atoms
ex = atom_name + '(?![a-z])' + '\d*'
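# e.g. for atom_name 'C' this pattern matches 'C12' in 'C12H22O11' but not the
# 'C' in 'Cl', because the negative lookahead rejects a following lowercase letter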
m = re.search(ex,self.formula)
if m == None:
return 0
else:
ex = atom_name + '(?![a-z])' + '(\d*)'
m2 = re.findall(ex,self.formula)
total = 0
for a in m2:
if len(a) == 0:
total += 1
else:
total += int(a)
return total
def compute_exact_mass(self):
masses = {'C':12.00000000000,'H':1.00782503214,'O':15.99491462210,'N':14.00307400524,'P':30.97376151200,'S':31.97207069000,'Cl':34.96885271000,'I':126.904468,'Br':78.9183376,'Si':27.9769265327,'F':18.99840320500,'D':2.01410177800}
exact_mass = 0.0
for a in self.atoms:
exact_mass += masses[a]*self.atoms[a]
return exact_mass
def __str__(self):
fstring = ""
for atom in self.atom_names:
if self.atoms[atom] > 0:
fstring += atom
if self.atoms[atom] > 1:
fstring += str(self.atoms[atom])
return fstring
def __repr__(self):
return self.__str__()
| gpl-3.0 |
amghost/myblog | node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexer.py | 95 | 26928 | # -*- coding: utf-8 -*-
"""
pygments.lexer
~~~~~~~~~~~~~~
Base lexer classes.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re, itertools
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
make_analysator
import collections
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this']
_encoding_map = [('\xef\xbb\xbf', 'utf-8'),
('\xff\xfe\0\0', 'utf-32'),
('\0\0\xfe\xff', 'utf-32be'),
('\xff\xfe', 'utf-16'),
('\xfe\xff', 'utf-16be')]
_default_analyse = staticmethod(lambda x: 0.0)
class LexerMeta(type):
"""
This metaclass automagically converts ``analyse_text`` methods into
static methods which always return float values.
"""
def __new__(cls, name, bases, d):
if 'analyse_text' in d:
d['analyse_text'] = make_analysator(d['analyse_text'])
return type.__new__(cls, name, bases, d)
class Lexer(object, metaclass=LexerMeta):
"""
Lexer for a specific language.
Basic options recognized:
``stripnl``
Strip leading and trailing newlines from the input (default: True).
``stripall``
Strip all leading and trailing whitespace from the input
(default: False).
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
*New in Pygments 1.3.*
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
If given, must be an encoding name. This encoding will be used to
convert the input string to Unicode, if it is not already a Unicode
string (default: ``'latin1'``).
Can also be ``'guess'`` to use a simple UTF-8 / Latin1 detection, or
``'chardet'`` to use the chardet library, if it is installed.
"""
#: Name of the lexer
name = None
#: Shortcuts for the lexer
aliases = []
#: File name globs
filenames = []
#: Secondary file name globs
alias_filenames = []
#: MIME types
mimetypes = []
#: Priority, should multiple lexers match and no content is provided
priority = 0
def __init__(self, **options):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
self.stripall = get_bool_opt(options, 'stripall', False)
self.ensurenl = get_bool_opt(options, 'ensurenl', True)
self.tabsize = get_int_opt(options, 'tabsize', 0)
self.encoding = options.get('encoding', 'latin1')
# self.encoding = options.get('inencoding', None) or self.encoding
self.filters = []
for filter_ in get_list_opt(options, 'filters', ()):
self.add_filter(filter_)
def __repr__(self):
if self.options:
return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
self.options)
else:
return '<pygments.lexers.%s>' % self.__class__.__name__
def add_filter(self, filter_, **options):
"""
Add a new stream filter to this lexer.
"""
if not isinstance(filter_, Filter):
filter_ = get_filter_by_name(filter_, **options)
self.filters.append(filter_)
def analyse_text(text):
"""
Has to return a float between ``0`` and ``1`` that indicates
if a lexer wants to highlight this text. Used by ``guess_lexer``.
If this method returns ``0`` it won't highlight it in any case; if
it returns ``1``, highlighting with this lexer is guaranteed.
The `LexerMeta` metaclass automatically wraps this function so
that it works like a static method (no ``self`` or ``cls``
parameter) and the return value is automatically converted to
`float`. If the return value is an object that is boolean `False`
it's the same as if the return value was ``0.0``.
"""
def get_tokens(self, text, unfiltered=False):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
"""
if not isinstance(text, str):
if self.encoding == 'guess':
try:
text = text.decode('utf-8')
if text.startswith('\ufeff'):
text = text[len('\ufeff'):]
except UnicodeDecodeError:
text = text.decode('latin1')
elif self.encoding == 'chardet':
try:
import chardet
except ImportError:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
'from http://chardet.feedparser.org/')
# check for BOM first
decoded = None
for bom, encoding in _encoding_map:
if text.startswith(bom):
decoded = str(text[len(bom):], encoding,
errors='replace')
break
# no BOM found, so use chardet
if decoded is None:
enc = chardet.detect(text[:1024]) # Guess using first 1KB
decoded = str(text, enc.get('encoding') or 'utf-8',
errors='replace')
text = decoded
else:
text = text.decode(self.encoding)
else:
if text.startswith('\ufeff'):
text = text[len('\ufeff'):]
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
if self.ensurenl and not text.endswith('\n'):
text += '\n'
def streamer():
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, text):
"""
Return an iterable of (tokentype, value) pairs.
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
raise NotImplementedError
class DelegatingLexer(Lexer):
"""
This lexer takes two lexer as arguments. A root lexer and
a language lexer. First everything is scanned using the language
lexer, afterwards all ``Other`` tokens are lexed using the root
lexer.
The lexers from the ``template`` lexer package use this base lexer.
"""
def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
self.root_lexer = _root_lexer(**options)
self.language_lexer = _language_lexer(**options)
self.needle = _needle
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buffered = ''
insertions = []
lng_buffer = []
for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
if t is self.needle:
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
lng_buffer = []
buffered += v
else:
lng_buffer.append((i, t, v))
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
return do_insertions(insertions,
self.root_lexer.get_tokens_unprocessed(buffered))
#-------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
class include(str):
"""
Indicates that a state should include rules from another state.
"""
pass
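# Typical usage inside a lexer's ``tokens`` definition (illustrative):
#     'root': [include('whitespace'), (r'[a-z]+', Name), ...]
# splices the rules of the 'whitespace' state into 'root' at that position.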
class _inherit(object):
"""
Indicates that a state should inherit from its superclass.
"""
def __repr__(self):
return 'inherit'
inherit = _inherit()
class combined(tuple):
"""
Indicates a state combined from multiple states.
"""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __init__(self, *args):
# tuple.__init__ doesn't do anything
pass
class _PseudoMatch(object):
"""
A pseudo match object constructed from a string.
"""
def __init__(self, start, text):
self._text = text
self._start = start
def start(self, arg=None):
return self._start
def end(self, arg=None):
return self._start + len(self._text)
def group(self, arg=None):
if arg:
raise IndexError('No such group')
return self._text
def groups(self):
return (self._text,)
def groupdict(self):
return {}
def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
data = match.group(i + 1)
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer, _PseudoMatch(match.start(i + 1),
data), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback
class _This(object):
"""
Special singleton used for indicating the caller class.
Used by ``using``.
"""
this = _This()
def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback
class RegexLexerMeta(LexerMeta):
"""
Metaclass for RegexLexer, creates the self._tokens attribute from
self.tokens on the first instantiation.
"""
def _process_regex(cls, regex, rflags):
"""Preprocess the regular expression component of a token definition."""
return re.compile(regex, rflags).match
def _process_token(cls, token):
"""Preprocess the token component of a token definition."""
assert type(token) is _TokenType or isinstance(token, collections.Callable), \
'token type must be simple type or callable, not %r' % (token,)
return token
def _process_new_state(cls, new_state, unprocessed, processed):
"""Preprocess the state transition action of a token definition."""
if isinstance(new_state, str):
# an existing state
if new_state == '#pop':
return -1
elif new_state in unprocessed:
return (new_state,)
elif new_state == '#push':
return new_state
elif new_state[:5] == '#pop:':
return -int(new_state[5:])
else:
assert False, 'unknown new state %r' % new_state
elif isinstance(new_state, combined):
# combine a new state from existing ones
tmp_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in new_state:
assert istate != new_state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[tmp_state] = itokens
return (tmp_state,)
elif isinstance(new_state, tuple):
# push more than one state
for istate in new_state:
assert (istate in unprocessed or
istate in ('#pop', '#push')), \
'unknown new state ' + istate
return new_state
else:
assert False, 'unknown new state def %r' % new_state
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# processed already
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens
def process_tokendef(cls, name, tokendefs=None):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in list(tokendefs.keys()):
cls._process_state(tokendefs, processed, state)
return processed
def get_tokendefs(cls):
"""
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
"""
tokens = {}
inheritable = {}
for c in itertools.chain((cls,), cls.__mro__):
toks = c.__dict__.get('tokens', {})
for state, items in toks.items():
curitems = tokens.get(state)
if curitems is None:
tokens[state] = items
try:
inherit_ndx = items.index(inherit)
except ValueError:
continue
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
new_inh_ndx = items.index(inherit)
except ValueError:
pass
else:
inheritable[state] = inherit_ndx + new_inh_ndx
return tokens
def __call__(cls, *args, **kwds):
"""Instantiate cls after preprocessing its token definitions."""
if '_tokens' not in cls.__dict__:
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
return type.__call__(cls, *args, **kwds)
class RegexLexer(Lexer, metaclass=RegexLexerMeta):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
flags = re.MULTILINE
#: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
#:
#: The initial state is 'root'.
#: ``new_state`` can be omitted to signify no state transition.
#: If it is a string, the state is pushed on the stack and changed.
#: If it is a tuple of strings, all states are pushed on the stack and
#: the current state will be the topmost.
#: It can also be ``combined('state1', 'state2', ...)``
#: to signify a new, anonymous state combined from the rules of two
#: or more existing ones.
#: Furthermore, it can be '#pop' to signify going back one step in
#: the state stack, or '#push' to push the current state on the stack
#: again.
#:
#: The tuple can also be replaced with ``include('state')``, in which
#: case the rules from the state named by the string are included in the
#: current one.
tokens = {}
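# A minimal sketch of a concrete subclass (illustrative; roughly what the
# pygments IniLexer looks like):
#     class IniLexer(RegexLexer):
#         name = 'INI'
#         tokens = {
#             'root': [
#                 (r'\s+', Text),
#                 (r'[;#].*?$', Comment),
#                 (r'\[.*?\]$', Keyword),
#                 (r'(.*?)(\s*)(=)(\s*)(.*?)$',
#                  bygroups(Name.Attribute, Text, Operator, Text, String)),
#             ],
#         }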
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, '\n'
pos += 1
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
class LexerContext(object):
"""
A helper object that holds lexer position data.
"""
def __init__(self, text, pos, stack=None, end=None):
self.text = text
self.pos = pos
self.end = end or len(text) # end=0 not supported ;-)
self.stack = stack or ['root']
def __repr__(self):
return 'LexerContext(%r, %r, %r)' % (
self.text, self.pos, self.stack)
class ExtendedRegexLexer(RegexLexer):
"""
A RegexLexer that uses a context object to store its state.
"""
def get_tokens_unprocessed(self, text=None, context=None):
"""
Split ``text`` into (tokentype, text) pairs.
If ``context`` is given, use this lexer context instead.
"""
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = ctx.text
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, ctx.pos, ctx.end)
if m:
if type(action) is _TokenType:
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
for item in action(self, m, ctx):
yield item
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
# CAUTION: callback must set ctx.pos!
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
ctx.stack.pop()
elif state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
ctx.stack.append(state)
elif isinstance(new_state, int):
# pop
del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[ctx.stack[-1]]
break
else:
try:
if ctx.pos >= ctx.end:
break
if text[ctx.pos] == '\n':
# at EOL, reset state to "root"
ctx.stack = ['root']
statetokens = tokendefs['root']
yield ctx.pos, Text, '\n'
ctx.pos += 1
continue
yield ctx.pos, Error, text[ctx.pos]
ctx.pos += 1
except IndexError:
break
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = next(insertions)
except StopIteration:
# no insertions
for item in tokens:
yield item
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
# first iteration. store the position of the first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
| mit |
wwj718/ANALYSE | common/djangoapps/student/management/commands/6002exportusers.py | 68 | 1840 | ##
## One-off script to export 6.002x users into the edX framework
##
## Could be modified to be general by:
## * Changing user_keys and up_keys to handle dates more cleanly
## * Providing a generic set of tables, rather than just users and user profiles
## * Handling certificates and grades
## * Handling merge/forks of UserProfile.meta
import datetime
import json
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from student.models import UserProfile
class Command(BaseCommand):
help = \
'''Exports all users and user profiles.
Caveat: Should be looked over before any run
for schema changes.
Current version grabs user_keys from
django.contrib.auth.models.User and up_keys
from student.userprofile. '''
def handle(self, *args, **options):
users = list(User.objects.all())
user_profiles = list(UserProfile.objects.all())
user_profile_dict = dict([(up.user_id, up) for up in user_profiles])
user_tuples = [(user_profile_dict[u.id], u) for u in users if u.id in user_profile_dict]
        user_keys = ['id', 'username', 'email', 'password', 'is_staff',
                     'is_active', 'is_superuser', 'last_login', 'date_joined']
up_keys = ['language', 'location', 'meta', 'name', 'id', 'user_id']
def extract_dict(keys, object):
d = {}
for key in keys:
item = object.__getattribute__(key)
if type(item) == datetime.datetime:
item = item.isoformat()
d[key] = item
return d
extracted = [{'up': extract_dict(up_keys, t[0]), 'u':extract_dict(user_keys, t[1])} for t in user_tuples]
fp = open('transfer_users.txt', 'w')
json.dump(extracted, fp)
fp.close()
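# --- Illustrative sketch (not part of the original command) -----------------
# The export above writes a single JSON list of {'up': {...}, 'u': {...}}
# records to transfer_users.txt. A later import step could read it back like
# this; the helper name is made up.
def _example_load_exported_users(path='transfer_users.txt'):
    fp = open(path)
    records = json.load(fp)
    fp.close()
    # Each record pairs a django User dict ('u') with its UserProfile ('up').
    return [(r['u'], r['up']) for r in records]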
| agpl-3.0 |
code-sauce/tensorflow | tensorflow/python/summary/writer/writer.py | 1 | 12539 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an API for generating Event protocol buffers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
class SummaryToEventTransformer(object):
"""Abstractly implements the SummaryWriter API.
This API basically implements a number of endpoints (add_summary,
add_session_log, etc). The endpoints all generate an event protobuf, which is
passed to the contained event_writer.
"""
def __init__(self, event_writer, graph=None, graph_def=None):
"""Creates a `SummaryWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
```
Args:
event_writer: An EventWriter. Implements add_event method.
graph: A `Graph` object, such as `sess.graph`.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
self.event_writer = event_writer
# For storing used tags for session.run() outputs.
self._session_run_tags = {}
if graph is not None or graph_def is not None:
# Calling it with both graph and graph_def for backward compatibility.
self.add_graph(graph=graph, graph_def=graph_def)
# Also export the meta_graph_def in this case.
# graph may itself be a graph_def due to positional arguments
maybe_graph_as_def = (
graph.as_graph_def(add_shapes=True) if isinstance(graph, ops.Graph)
else graph)
self.add_meta_graph(
meta_graph.create_meta_graph_def(
graph_def=graph_def or maybe_graph_as_def))
def add_summary(self, summary, global_step=None):
"""Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.
You can pass the result of evaluating any summary op, using
[`Session.run()`](client.md#Session.run) or
[`Tensor.eval()`](framework.md#Tensor.eval), to this
function. Alternatively, you can pass a `tf.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.
Args:
summary: A `Summary` protocol buffer, optionally serialized as a string.
global_step: Number. Optional global step value to record with the
summary.
"""
if isinstance(summary, bytes):
summ = summary_pb2.Summary()
summ.ParseFromString(summary)
summary = summ
event = event_pb2.Event(summary=summary)
self._add_event(event, global_step)
def add_session_log(self, session_log, global_step=None):
"""Adds a `SessionLog` protocol buffer to the event file.
    This method wraps the provided session log in an `Event` protocol buffer
and adds it to the event file.
Args:
session_log: A `SessionLog` protocol buffer.
global_step: Number. Optional global step value to record with the
summary.
"""
event = event_pb2.Event(session_log=session_log)
self._add_event(event, global_step)
def _add_graph_def(self, graph_def, global_step=None):
graph_bytes = graph_def.SerializeToString()
event = event_pb2.Event(graph_def=graph_bytes)
self._add_event(event, global_step)
def add_graph(self, graph, global_step=None, graph_def=None):
"""Adds a `Graph` to the event file.
The graph described by the protocol buffer will be displayed by
TensorBoard. Most users pass a graph in the constructor instead.
Args:
graph: A `Graph` object, such as `sess.graph`.
global_step: Number. Optional global step counter to record with the
graph.
graph_def: DEPRECATED. Use the `graph` parameter instead.
Raises:
ValueError: If both graph and graph_def are passed to the method.
"""
if graph is not None and graph_def is not None:
raise ValueError("Please pass only graph, or graph_def (deprecated), "
"but not both.")
if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):
# The user passed a `Graph`.
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if not isinstance(graph, ops.Graph):
logging.warning("When passing a `Graph` object, please use the `graph`"
" named argument instead of `graph_def`.")
graph = graph_def
# Serialize the graph with additional info.
true_graph_def = graph.as_graph_def(add_shapes=True)
elif (isinstance(graph, graph_pb2.GraphDef) or
isinstance(graph_def, graph_pb2.GraphDef)):
# The user passed a `GraphDef`.
logging.warning("Passing a `GraphDef` to the SummaryWriter is deprecated."
" Pass a `Graph` object instead, such as `sess.graph`.")
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if isinstance(graph, graph_pb2.GraphDef):
true_graph_def = graph
else:
true_graph_def = graph_def
else:
# The user passed neither `Graph`, nor `GraphDef`.
raise TypeError("The passed graph must be an instance of `Graph` "
"or the deprecated `GraphDef`")
# Finally, add the graph_def to the summary writer.
self._add_graph_def(true_graph_def, global_step)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Adds a `MetaGraphDef` to the event file.
The `MetaGraphDef` allows running the given graph via
`saver.import_meta_graph()`.
Args:
      meta_graph_def: A `MetaGraphDef` object, often as returned by
`saver.export_meta_graph()`.
global_step: Number. Optional global step counter to record with the
graph.
Raises:
      TypeError: If `meta_graph_def` is not an instance of `MetaGraphDef`.
"""
if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
raise TypeError("meta_graph_def must be type MetaGraphDef, saw type: %s"
% type(meta_graph_def))
meta_graph_bytes = meta_graph_def.SerializeToString()
event = event_pb2.Event(meta_graph_def=meta_graph_bytes)
self._add_event(event, global_step)
  def add_run_metadata(self, run_metadata, tag, global_step=None):
    """Adds metadata information for a single session.run() call.
Args:
run_metadata: A `RunMetadata` protobuf object.
tag: The tag name for this metadata.
global_step: Number. Optional global step counter to record with the
StepStats.
Raises:
ValueError: If the provided tag was already used for this type of event.
"""
if tag in self._session_run_tags:
raise ValueError("The provided tag was already used for this event type")
self._session_run_tags[tag] = True
tagged_metadata = event_pb2.TaggedRunMetadata()
tagged_metadata.tag = tag
# Store the `RunMetadata` object as bytes in order to have postponed
# (lazy) deserialization when used later.
tagged_metadata.run_metadata = run_metadata.SerializeToString()
event = event_pb2.Event(tagged_run_metadata=tagged_metadata)
self._add_event(event, global_step)
def _add_event(self, event, step):
event.wall_time = time.time()
if step is not None:
event.step = int(step)
self.event_writer.add_event(event)
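# --- Illustrative sketch (not part of the original module) -------------------
# SummaryToEventTransformer only needs an object exposing add_event(), which
# makes the wall_time/step handling in _add_event easy to observe in
# isolation. The collecting writer and the example step value are made up.
class _ExampleCollectingWriter(object):
  def __init__(self):
    self.events = []
  def add_event(self, event):
    self.events.append(event)
def _example_record_session_log():
  writer = _ExampleCollectingWriter()
  transformer = SummaryToEventTransformer(writer)
  transformer.add_session_log(
      event_pb2.SessionLog(status=event_pb2.SessionLog.STOP), global_step=7)
  # Each collected event now carries wall_time and step=7.
  return writer.events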
class FileWriter(SummaryToEventTransformer):
"""Writes `Summary` protocol buffers to event files.
The `FileWriter` class provides a mechanism to create an event file in a
given directory and add summaries and events to it. The class updates the
file contents asynchronously. This allows a training program to call methods
to add data to the file directly from the training loop, without slowing down
training.
"""
def __init__(self,
logdir,
graph=None,
max_queue=10,
flush_secs=120,
graph_def=None):
"""Creates a `FileWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
```
The other arguments to the constructor control the asynchronous writes to
the event file:
* `flush_secs`: How often, in seconds, to flush the added summaries
and events to disk.
* `max_queue`: Maximum number of summaries or events pending to be
      written to disk before one of the 'add' calls blocks.
Args:
logdir: A string. Directory where event file will be written.
graph: A `Graph` object, such as `sess.graph`.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
event_writer = EventFileWriter(logdir, max_queue, flush_secs)
super(FileWriter, self).__init__(event_writer, graph, graph_def)
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self.event_writer.get_logdir()
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
self.event_writer.add_event(event)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
self.event_writer.flush()
  def close(self):
    """Flushes the event file to disk and closes the file.
Call this method when you do not need the summary writer anymore.
"""
self.event_writer.close()
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
self.event_writer.reopen()
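# --- Illustrative sketch (not part of the original module) -------------------
# A typical FileWriter lifecycle, mirroring the constructor docstring above.
# The log directory, summary value and step are hypothetical.
def _example_filewriter_lifecycle(logdir, summary, step):
  writer = FileWriter(logdir, max_queue=10, flush_secs=120)
  writer.add_summary(summary, global_step=step)
  writer.flush()   # make sure pending events have hit disk
  writer.close()   # flush and release the current events file
  writer.reopen()  # later events go into a new events file
  writer.close()
  return writer.get_logdir()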
| apache-2.0 |
virgree/odoo | addons/note/tests/__init__.py | 260 | 1076 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_note
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CLOUGH/info3180-project-2 | lib/werkzeug/testsuite/exceptions.py | 100 | 3325 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The tests for the exception classes.
TODO:
- This is undertested. HTML is never checked
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import exceptions
from werkzeug.wrappers import Response
from werkzeug._compat import text_type
class ExceptionsTestCase(WerkzeugTestCase):
def test_proxy_exception(self):
orig_resp = Response('Hello World')
try:
exceptions.abort(orig_resp)
except exceptions.HTTPException as e:
resp = e.get_response({})
else:
self.fail('exception not raised')
self.assert_true(resp is orig_resp)
self.assert_equal(resp.get_data(), b'Hello World')
def test_aborter(self):
abort = exceptions.abort
self.assert_raises(exceptions.BadRequest, abort, 400)
self.assert_raises(exceptions.Unauthorized, abort, 401)
self.assert_raises(exceptions.Forbidden, abort, 403)
self.assert_raises(exceptions.NotFound, abort, 404)
self.assert_raises(exceptions.MethodNotAllowed, abort, 405, ['GET', 'HEAD'])
self.assert_raises(exceptions.NotAcceptable, abort, 406)
self.assert_raises(exceptions.RequestTimeout, abort, 408)
self.assert_raises(exceptions.Gone, abort, 410)
self.assert_raises(exceptions.LengthRequired, abort, 411)
self.assert_raises(exceptions.PreconditionFailed, abort, 412)
self.assert_raises(exceptions.RequestEntityTooLarge, abort, 413)
self.assert_raises(exceptions.RequestURITooLarge, abort, 414)
self.assert_raises(exceptions.UnsupportedMediaType, abort, 415)
self.assert_raises(exceptions.UnprocessableEntity, abort, 422)
self.assert_raises(exceptions.InternalServerError, abort, 500)
self.assert_raises(exceptions.NotImplemented, abort, 501)
self.assert_raises(exceptions.BadGateway, abort, 502)
self.assert_raises(exceptions.ServiceUnavailable, abort, 503)
myabort = exceptions.Aborter({1: exceptions.NotFound})
self.assert_raises(LookupError, myabort, 404)
self.assert_raises(exceptions.NotFound, myabort, 1)
myabort = exceptions.Aborter(extra={1: exceptions.NotFound})
self.assert_raises(exceptions.NotFound, myabort, 404)
self.assert_raises(exceptions.NotFound, myabort, 1)
def test_exception_repr(self):
exc = exceptions.NotFound()
self.assert_equal(text_type(exc), '404: Not Found')
self.assert_equal(repr(exc), "<NotFound '404: Not Found'>")
exc = exceptions.NotFound('Not There')
self.assert_equal(text_type(exc), '404: Not Found')
self.assert_equal(repr(exc), "<NotFound '404: Not Found'>")
def test_special_exceptions(self):
exc = exceptions.MethodNotAllowed(['GET', 'HEAD', 'POST'])
h = dict(exc.get_headers({}))
self.assert_equal(h['Allow'], 'GET, HEAD, POST')
self.assert_true('The method is not allowed' in exc.get_description())
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ExceptionsTestCase))
return suite
| apache-2.0 |
Garee/pytodoist | pytodoist/test/test_api.py | 1 | 5203 | #!/usr/bin/env python
"""This module contains unit tests for the pytodoist.api module."""
import json
import time
import unittest
from pytodoist.api import TodoistAPI
from pytodoist.test.util import TestUser
# No magic numbers
_HTTP_OK = 200
class TodoistAPITest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = TodoistAPI()
def setUp(self):
self.user = TestUser()
time.sleep(10) # Rate limit ourselves to avoid a server rate limit.
response = self.api.register(self.user.email, self.user.full_name,
self.user.password)
if response.status_code != _HTTP_OK: # Assume already registered.
response = self.api.login(self.user.email, self.user.password)
user_json = response.json()
self.user.token = user_json['token']
def tearDown(self):
self.api.delete_user(self.user.token, self.user.password)
def test_class_variables(self):
self.assertEqual(self.api.VERSION, '8')
self.assertEqual(self.api.URL, 'https://api.todoist.com/API/v8/')
def test_login_success(self):
response = self.api.login(self.user.email, self.user.password)
self.assertEqual(response.status_code, _HTTP_OK)
self.assertIn('token', response.json())
def test_login_failure(self):
response = self.api.login(self.user.email, '')
self.assertNotEqual(response.status_code, _HTTP_OK)
self.assertIn('error', response.json())
def test_login_with_google_failure(self):
response = self.api.login_with_google(self.user.email, '')
self.assertNotEqual(response.status_code, _HTTP_OK)
def test_register_success(self):
self.api.delete_user(self.user.token, self.user.password)
response = self.api.register(self.user.email, self.user.full_name,
self.user.password)
self.assertEqual(response.status_code, _HTTP_OK)
self.assertIn('token', response.json())
def test_register_failure(self):
response = self.api.register(self.user.email, self.user.full_name,
self.user.password)
self.assertNotEqual(response.status_code, _HTTP_OK)
self.assertIn('error', response.json())
def test_delete_user_success(self):
response = self.api.delete_user(self.user.token,
self.user.password)
self.assertEqual(response.status_code, _HTTP_OK)
response = self.api.login(self.user.email, self.user.password)
self.assertNotEqual(response.status_code, _HTTP_OK)
self.assertIn('error', response.json())
def test_delete_user_failure(self):
self.api.delete_user(self.user.token, '')
response = self.api.login(self.user.email, self.user.password)
self.assertEqual(response.status_code, _HTTP_OK)
self.assertNotIn('error', response.json())
def test_sync_all(self):
response = self.api.sync(self.user.token, self.user.sync_token)
self.assertEqual(response.status_code, _HTTP_OK)
self.assertIn('sync_token', response.json())
def test_query(self):
queries = ['tomorrow', 'p1']
response = self.api.query(self.user.token, json.dumps(queries))
self.assertEqual(response.status_code, _HTTP_OK)
self.assertEqual(len(response.json()), len(queries))
def test_add_item_success(self):
response = self.api.add_item(self.user.token, 'Task 1')
self.assertEqual(response.status_code, _HTTP_OK)
task_info = response.json()
self.assertEqual(task_info['content'], 'Task 1')
def test_quick_add(self):
text = 'Buy milk #Inbox'
response = self.api.quick_add(self.user.token, text)
self.assertEqual(response.status_code, _HTTP_OK)
task_info = response.json()
self.assertEqual(task_info['content'], 'Buy milk')
def test_get_all_completed_tasks_empty(self):
response = self.api.get_all_completed_tasks(self.user.token)
self.assertEqual(response.status_code, _HTTP_OK)
self.assertIn('items', response.json())
def test_get_redirect_link(self):
response = self.api.get_redirect_link(self.user.token)
self.assertEqual(response.status_code, _HTTP_OK)
self.assertIn('link', response.json())
def test_get_productivity_stats(self):
response = self.api.get_productivity_stats(self.user.token)
self.assertEqual(response.status_code, _HTTP_OK)
self.assertIn('karma', response.json())
def test_update_notification_settings_success(self):
response = self.api.update_notification_settings(self.user.token,
'user_left_project',
'email',
1) # False
self.assertEqual(response.status_code, _HTTP_OK)
self.assertIn('user_left_project', response.json())
self.assertFalse(response.json()['user_left_project']['notify_email'])
if __name__ == '__main__':
unittest.main()
| mit |
Pistachitos/Sick-Beard | lib/hachoir_parser/container/riff.py | 90 | 16966 | # -*- coding: UTF-8 -*-
"""
RIFF parser, able to parse:
* AVI video container
* WAV audio container
* CDA file
Documents:
- libavformat source code from ffmpeg library
http://ffmpeg.mplayerhq.hu/
- Video for Windows Programmer's Guide
http://www.opennet.ru/docs/formats/avi.txt
- What is an animated cursor?
http://www.gdgsoft.com/anituner/help/aniformat.htm
Authors:
* Aurélien Jacobs
* Mickaël KENIKSSI
* Victor Stinner
Changelog:
* 2007-03-30: support ACON (animated icons)
* 2006-08-08: merge AVI, WAV and CDA parsers into RIFF parser
* 2006-08-03: creation of CDA parser by Mickaël KENIKSSI
* 2005-06-21: creation of WAV parser by Victor Stinner
* 2005-06-08: creation of AVI parser by Victor Stinner and Aurélien Jacobs
Thanks to:
* Wojtek Kaniewski (wojtekka AT logonet.com.pl) for its CDA file
format information
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, Enum,
Bit, NullBits, NullBytes,
RawBytes, String, PaddingBytes,
SubFile)
from lib.hachoir_core.tools import alignValue, humanDuration
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import filesizeHandler, textHandler
from lib.hachoir_parser.video.fourcc import audio_codec_name, video_fourcc_name
from lib.hachoir_parser.image.ico import IcoFile
from datetime import timedelta
def parseText(self):
yield String(self, "text", self["size"].value,
strip=" \0", truncate="\0",
charset="ISO-8859-1")
def parseRawFormat(self, size):
yield RawBytes(self, "raw_format", size)
def parseVideoFormat(self, size):
yield UInt32(self, "video_size", "Video format: Size")
yield UInt32(self, "width", "Video format: Width")
yield UInt32(self, "height", "Video format: Height")
yield UInt16(self, "panes", "Video format: Panes")
yield UInt16(self, "depth", "Video format: Depth")
yield UInt32(self, "tag1", "Video format: Tag1")
yield UInt32(self, "img_size", "Video format: Image size")
yield UInt32(self, "xpels_meter", "Video format: XPelsPerMeter")
yield UInt32(self, "ypels_meter", "Video format: YPelsPerMeter")
yield UInt32(self, "clr_used", "Video format: ClrUsed")
yield UInt32(self, "clr_important", "Video format: ClrImportant")
def parseAudioFormat(self, size):
yield Enum(UInt16(self, "codec", "Audio format: Codec id"), audio_codec_name)
yield UInt16(self, "channel", "Audio format: Channels")
yield UInt32(self, "sample_rate", "Audio format: Sample rate")
yield UInt32(self, "bit_rate", "Audio format: Bit rate")
yield UInt16(self, "block_align", "Audio format: Block align")
if size >= 16:
yield UInt16(self, "bits_per_sample", "Audio format: Bits per sample")
if size >= 18:
yield UInt16(self, "ext_size", "Audio format: Size of extra information")
if size >= 28: # and self["a_channel"].value > 2
yield UInt16(self, "reserved", "Audio format: ")
yield UInt32(self, "channel_mask", "Audio format: channels placement bitmask")
yield UInt32(self, "subformat", "Audio format: Subformat id")
def parseAVIStreamFormat(self):
size = self["size"].value
strtype = self["../stream_hdr/stream_type"].value
TYPE_HANDLER = {
"vids": (parseVideoFormat, 40),
"auds": (parseAudioFormat, 16)
}
handler = parseRawFormat
if strtype in TYPE_HANDLER:
info = TYPE_HANDLER[strtype]
if info[1] <= size:
handler = info[0]
for field in handler(self, size):
yield field
def parseAVIStreamHeader(self):
if self["size"].value != 56:
raise ParserError("Invalid stream header size")
yield String(self, "stream_type", 4, "Stream type four character code", charset="ASCII")
field = String(self, "fourcc", 4, "Stream four character code", strip=" \0", charset="ASCII")
if self["stream_type"].value == "vids":
yield Enum(field, video_fourcc_name, lambda text: text.upper())
else:
yield field
yield UInt32(self, "flags", "Stream flags")
yield UInt16(self, "priority", "Stream priority")
yield String(self, "language", 2, "Stream language", charset="ASCII", strip="\0")
yield UInt32(self, "init_frames", "InitialFrames")
yield UInt32(self, "scale", "Time scale")
yield UInt32(self, "rate", "Divide by scale to give frame rate")
yield UInt32(self, "start", "Stream start time (unit: rate/scale)")
yield UInt32(self, "length", "Stream length (unit: rate/scale)")
yield UInt32(self, "buf_size", "Suggested buffer size")
yield UInt32(self, "quality", "Stream quality")
yield UInt32(self, "sample_size", "Size of samples")
yield UInt16(self, "left", "Destination rectangle (left)")
yield UInt16(self, "top", "Destination rectangle (top)")
yield UInt16(self, "right", "Destination rectangle (right)")
yield UInt16(self, "bottom", "Destination rectangle (bottom)")
class RedBook(FieldSet):
"""
RedBook offset parser, used in CD audio (.cda) file
"""
def createFields(self):
yield UInt8(self, "frame")
yield UInt8(self, "second")
yield UInt8(self, "minute")
yield PaddingBytes(self, "notused", 1)
def formatSerialNumber(field):
"""
    Format a disc serial number.
Eg. 0x00085C48 => "0008-5C48"
"""
sn = field.value
return "%04X-%04X" % (sn >> 16, sn & 0xFFFF)
def parseCDDA(self):
"""
HSG address format: number of 1/75 second
HSG offset = (minute*60 + second)*75 + frame + 150 (from RB offset)
HSG length = (minute*60 + second)*75 + frame (from RB length)
"""
yield UInt16(self, "cda_version", "CD file version (currently 1)")
yield UInt16(self, "track_no", "Number of track")
yield textHandler(UInt32(self, "disc_serial", "Disc serial number"),
formatSerialNumber)
yield UInt32(self, "hsg_offset", "Track offset (HSG format)")
yield UInt32(self, "hsg_length", "Track length (HSG format)")
yield RedBook(self, "rb_offset", "Track offset (Red-book format)")
yield RedBook(self, "rb_length", "Track length (Red-book format)")
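# --- Illustrative sketch (not part of the original parser) ------------------
# The HSG <-> Red-book relation documented in parseCDDA(), written out as two
# small helpers. Only the formulas from the docstring above are used; the
# helper names are made up.
def _example_redbook_to_hsg_offset(minute, second, frame):
    # HSG offset = (minute*60 + second)*75 + frame + 150
    return (minute * 60 + second) * 75 + frame + 150
def _example_hsg_length_in_seconds(hsg_length):
    # 75 HSG frames per second, so a track length in seconds is length/75.
    return hsg_length / 75.0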
def parseWAVFormat(self):
size = self["size"].value
if size not in (16, 18):
self.warning("Format with size of %s bytes is not supported!" % size)
yield Enum(UInt16(self, "codec", "Audio codec"), audio_codec_name)
yield UInt16(self, "nb_channel", "Number of audio channel")
yield UInt32(self, "sample_per_sec", "Sample per second")
yield UInt32(self, "byte_per_sec", "Average byte per second")
yield UInt16(self, "block_align", "Block align")
yield UInt16(self, "bit_per_sample", "Bits per sample")
def parseWAVFact(self):
yield UInt32(self, "nb_sample", "Number of samples in audio stream")
def parseAviHeader(self):
yield UInt32(self, "microsec_per_frame", "Microsecond per frame")
yield UInt32(self, "max_byte_per_sec", "Maximum byte per second")
yield NullBytes(self, "reserved", 4)
# Flags
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "has_index")
yield Bit(self, "must_use_index")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "is_interleaved")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "trust_cktype")
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "was_capture_file")
yield Bit(self, "is_copyrighted")
yield NullBits(self, "reserved[]", 14)
yield UInt32(self, "total_frame", "Total number of frames in the video")
yield UInt32(self, "init_frame", "Initial frame (used in interleaved video)")
yield UInt32(self, "nb_stream", "Number of streams")
yield UInt32(self, "sug_buf_size", "Suggested buffer size")
yield UInt32(self, "width", "Width in pixel")
yield UInt32(self, "height", "Height in pixel")
yield UInt32(self, "scale")
yield UInt32(self, "rate")
yield UInt32(self, "start")
yield UInt32(self, "length")
def parseODML(self):
yield UInt32(self, "total_frame", "Real number of frame of OpenDML video")
padding = self["size"].value - 4
if 0 < padding:
yield NullBytes(self, "padding[]", padding)
class AVIIndexEntry(FieldSet):
size = 16*8
def createFields(self):
yield String(self, "tag", 4, "Tag", charset="ASCII")
yield UInt32(self, "flags")
yield UInt32(self, "start", "Offset from start of movie data")
yield UInt32(self, "length")
def parseIndex(self):
while not self.eof:
yield AVIIndexEntry(self, "index[]")
class Chunk(FieldSet):
TAG_INFO = {
        # This dictionary is edited by RiffFile.validate()
"LIST": ("list[]", None, "Sub-field list"),
"JUNK": ("junk[]", None, "Junk (padding)"),
# Metadata
"INAM": ("title", parseText, "Document title"),
"IART": ("artist", parseText, "Artist"),
"ICMT": ("comment", parseText, "Comment"),
"ICOP": ("copyright", parseText, "Copyright"),
"IENG": ("author", parseText, "Author"),
"ICRD": ("creation_date", parseText, "Creation date"),
"ISFT": ("producer", parseText, "Producer"),
"IDIT": ("datetime", parseText, "Date time"),
        # TODO: see below
# "strn": Stream description
# TWOCC code, movie/field[]/tag.value[2:4]:
# "db": "Uncompressed video frame",
# "dc": "Compressed video frame",
# "wb": "Audio data",
# "pc": "Palette change"
}
subtag_info = {
"INFO": ("info", "File informations"),
"hdrl": ("headers", "Headers"),
"strl": ("stream[]", "Stream header list"),
"movi": ("movie", "Movie stream"),
"odml": ("odml", "ODML"),
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (8 + alignValue(self["size"].value, 2)) * 8
tag = self["tag"].value
if tag in self.TAG_INFO:
self.tag_info = self.TAG_INFO[tag]
if tag == "LIST":
subtag = self["subtag"].value
if subtag in self.subtag_info:
info = self.subtag_info[subtag]
self.tag_info = (info[0], None, info[1])
self._name = self.tag_info[0]
self._description = self.tag_info[2]
else:
self.tag_info = ("field[]", None, None)
def createFields(self):
yield String(self, "tag", 4, "Tag", charset="ASCII")
yield filesizeHandler(UInt32(self, "size", "Size"))
if not self["size"].value:
return
if self["tag"].value == "LIST":
yield String(self, "subtag", 4, "Sub-tag", charset="ASCII")
handler = self.tag_info[1]
while 8 < (self.size - self.current_size)/8:
field = self.__class__(self, "field[]")
yield field
if (field.size/8) % 2 != 0:
yield UInt8(self, "padding[]", "Padding")
else:
handler = self.tag_info[1]
if handler:
for field in handler(self):
yield field
else:
yield RawBytes(self, "raw_content", self["size"].value)
padding = self.seekBit(self._size)
if padding:
yield padding
def createDescription(self):
tag = self["tag"].display
return u"Chunk (tag %s)" % tag
class ChunkAVI(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
"strh": ("stream_hdr", parseAVIStreamHeader, "Stream header"),
"strf": ("stream_fmt", parseAVIStreamFormat, "Stream format"),
"avih": ("avi_hdr", parseAviHeader, "AVI header"),
"idx1": ("index", parseIndex, "Stream index"),
"dmlh": ("odml_hdr", parseODML, "ODML header"),
})
class ChunkCDDA(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'fmt ': ("cdda", parseCDDA, "CD audio informations"),
})
class ChunkWAVE(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'fmt ': ("format", parseWAVFormat, "Audio format"),
'fact': ("nb_sample", parseWAVFact, "Number of samples"),
'data': ("audio_data", None, "Audio stream data"),
})
def parseAnimationHeader(self):
yield UInt32(self, "hdr_size", "Size of header (36 bytes)")
if self["hdr_size"].value != 36:
        self.warning("Animation header with unknown size (%s)" % self["hdr_size"].value)
yield UInt32(self, "nb_frame", "Number of unique Icons in this cursor")
yield UInt32(self, "nb_step", "Number of Blits before the animation cycles")
yield UInt32(self, "cx")
yield UInt32(self, "cy")
yield UInt32(self, "bit_count")
yield UInt32(self, "planes")
yield UInt32(self, "jiffie_rate", "Default Jiffies (1/60th of a second) if rate chunk not present")
yield Bit(self, "is_icon")
yield NullBits(self, "padding", 31)
def parseAnimationSequence(self):
while not self.eof:
yield UInt32(self, "icon[]")
def formatJiffie(field):
sec = float(field.value) / 60
return humanDuration(timedelta(seconds=sec))
def parseAnimationRate(self):
while not self.eof:
yield textHandler(UInt32(self, "rate[]"), formatJiffie)
def parseIcon(self):
yield SubFile(self, "icon_file", self["size"].value, parser_class=IcoFile)
class ChunkACON(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'anih': ("anim_hdr", parseAnimationHeader, "Animation header"),
'seq ': ("anim_seq", parseAnimationSequence, "Animation sequence"),
        'rate': ("anim_rate", parseAnimationRate, "Animation rate"),
'icon': ("icon[]", parseIcon, "Icon"),
})
class RiffFile(Parser):
PARSER_TAGS = {
"id": "riff",
"category": "container",
"file_ext": ("avi", "cda", "wav", "ani"),
"min_size": 16*8,
"mime": (u"video/x-msvideo", u"audio/x-wav", u"audio/x-cda"),
# FIXME: Use regex "RIFF.{4}(WAVE|CDDA|AVI )"
"magic": (
("AVI LIST", 8*8),
("WAVEfmt ", 8*8),
("CDDAfmt ", 8*8),
("ACONanih", 8*8),
),
"description": "Microsoft RIFF container"
}
VALID_TYPES = {
"WAVE": (ChunkWAVE, u"audio/x-wav", u"Microsoft WAVE audio", ".wav"),
"CDDA": (ChunkCDDA, u"audio/x-cda", u"Microsoft Windows audio CD file (cda)", ".cda"),
"AVI ": (ChunkAVI, u"video/x-msvideo", u"Microsoft AVI video", ".avi"),
"ACON": (ChunkACON, u"image/x-ani", u"Microsoft Windows animated cursor", ".ani"),
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "RIFF":
return "Wrong signature"
if self["type"].value not in self.VALID_TYPES:
return "Unknown RIFF content type"
return True
def createFields(self):
yield String(self, "signature", 4, "AVI header (RIFF)", charset="ASCII")
yield filesizeHandler(UInt32(self, "filesize", "File size"))
yield String(self, "type", 4, "Content type (\"AVI \", \"WAVE\", ...)", charset="ASCII")
# Choose chunk type depending on file type
try:
chunk_cls = self.VALID_TYPES[self["type"].value][0]
except KeyError:
chunk_cls = Chunk
# Parse all chunks up to filesize
while self.current_size < self["filesize"].value*8+8:
yield chunk_cls(self, "chunk[]")
if not self.eof:
yield RawBytes(self, "padding[]", (self.size-self.current_size)/8)
def createMimeType(self):
try:
return self.VALID_TYPES[self["type"].value][1]
except KeyError:
return None
def createDescription(self):
tag = self["type"].value
if tag == "AVI ":
desc = u"Microsoft AVI video"
if "headers/avi_hdr" in self:
header = self["headers/avi_hdr"]
desc += ": %ux%u pixels" % (header["width"].value, header["height"].value)
microsec = header["microsec_per_frame"].value
if microsec:
desc += ", %.1f fps" % (1000000.0 / microsec)
if "total_frame" in header and header["total_frame"].value:
                        delta = timedelta(microseconds=float(header["total_frame"].value) * microsec)
desc += ", " + humanDuration(delta)
return desc
else:
try:
return self.VALID_TYPES[tag][2]
except KeyError:
return u"Microsoft RIFF container"
def createContentSize(self):
size = (self["filesize"].value + 8) * 8
return min(size, self.stream.size)
def createFilenameSuffix(self):
try:
return self.VALID_TYPES[self["type"].value][3]
except KeyError:
return ".riff"
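# --- Illustrative sketch (not part of the original parser) ------------------
# The same timing arithmetic used in RiffFile.createDescription(), pulled out
# as a helper that works on an already-parsed "headers/avi_hdr" field set.
# The helper name and its argument are made up.
def _example_avi_timing(avi_hdr):
    microsec = avi_hdr["microsec_per_frame"].value
    fps = 1000000.0 / microsec if microsec else None
    total = avi_hdr["total_frame"].value
    duration = timedelta(microseconds=float(total) * microsec) if total else None
    return fps, duration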
| gpl-3.0 |
tornadozou/tensorflow | tensorflow/python/saved_model/loader_impl.py | 49 | 9488 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loader implementation for SavedModel with hermetic, language-neutral exports.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import constants
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.util import compat
def _parse_saved_model(export_dir):
"""Reads the savedmodel.pb or savedmodel.pbtxt file containing `SavedModel`.
Args:
export_dir: Directory containing the SavedModel file.
Returns:
A `SavedModel` protocol buffer.
Raises:
IOError: If the file does not exist, or cannot be successfully parsed.
"""
# Build the path to the SavedModel in pbtxt format.
path_to_pbtxt = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
# Build the path to the SavedModel in pb format.
path_to_pb = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
# Parse the SavedModel protocol buffer.
saved_model = saved_model_pb2.SavedModel()
if file_io.file_exists(path_to_pb):
try:
file_content = file_io.FileIO(path_to_pb, "rb").read()
saved_model.ParseFromString(file_content)
return saved_model
except message.DecodeError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pb, str(e)))
elif file_io.file_exists(path_to_pbtxt):
try:
file_content = file_io.FileIO(path_to_pbtxt, "rb").read()
text_format.Merge(file_content.decode("utf-8"), saved_model)
return saved_model
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pbtxt, str(e)))
else:
raise IOError("SavedModel file does not exist at: %s/{%s|%s}" %
(export_dir,
constants.SAVED_MODEL_FILENAME_PBTXT,
constants.SAVED_MODEL_FILENAME_PB))
def _get_asset_tensors(export_dir, meta_graph_def_to_load):
"""Gets the asset tensors, if defined in the meta graph def to load.
Args:
export_dir: Directory where the SavedModel is located.
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
Returns:
A dictionary of asset tensors, keyed by the name of the asset tensor. The
value in the map corresponds to the absolute path of the asset file.
"""
# Collection-def that may contain the assets key.
collection_def = meta_graph_def_to_load.collection_def
asset_tensor_dict = {}
if constants.ASSETS_KEY in collection_def:
# Location of the assets for SavedModel.
assets_directory = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY))
assets_any_proto = collection_def[constants.ASSETS_KEY].any_list.value
# Process each asset and add it to the asset tensor dictionary.
for asset_any_proto in assets_any_proto:
asset_proto = meta_graph_pb2.AssetFileDef()
asset_any_proto.Unpack(asset_proto)
asset_tensor_dict[asset_proto.tensor_info.name] = os.path.join(
compat.as_bytes(assets_directory),
compat.as_bytes(asset_proto.filename))
return asset_tensor_dict
def _get_main_op_tensor(meta_graph_def_to_load):
"""Gets the main op tensor, if one exists.
Args:
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
Returns:
The main op tensor, if it exists and `None` otherwise.
Raises:
RuntimeError: If the collection def corresponding to the main op key has
other than exactly one tensor.
"""
collection_def = meta_graph_def_to_load.collection_def
main_op_tensor = None
if constants.MAIN_OP_KEY in collection_def:
main_ops = collection_def[constants.MAIN_OP_KEY].node_list.value
if len(main_ops) != 1:
raise RuntimeError("Expected exactly one SavedModel main op.")
main_op_tensor = ops.get_collection(constants.MAIN_OP_KEY)[0]
return main_op_tensor
def _get_legacy_init_op_tensor(meta_graph_def_to_load):
"""Gets the legacy init op tensor, if one exists.
Args:
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
Returns:
The legacy init op tensor, if it exists and `None` otherwise.
Raises:
RuntimeError: If the collection def corresponding to the legacy init op key
has other than exactly one tensor.
"""
collection_def = meta_graph_def_to_load.collection_def
legacy_init_op_tensor = None
if constants.LEGACY_INIT_OP_KEY in collection_def:
legacy_init_ops = collection_def[
constants.LEGACY_INIT_OP_KEY].node_list.value
if len(legacy_init_ops) != 1:
raise RuntimeError("Expected exactly one legacy serving init op.")
legacy_init_op_tensor = ops.get_collection(constants.LEGACY_INIT_OP_KEY)[0]
return legacy_init_op_tensor
def maybe_saved_model_directory(export_dir):
"""Checks whether the provided export directory could contain a SavedModel.
Note that the method does not load any data by itself. If the method returns
`false`, the export directory definitely does not contain a SavedModel. If the
method returns `true`, the export directory may contain a SavedModel but
provides no guarantee that it can be loaded.
Args:
export_dir: Absolute string path to possible export location. For example,
'/my/foo/model'.
Returns:
True if the export directory contains SavedModel files, False otherwise.
"""
txt_path = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)
pb_path = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
return file_io.file_exists(txt_path) or file_io.file_exists(pb_path)
def load(sess, tags, export_dir, **saver_kwargs):
"""Loads the model from a SavedModel as specified by tags.
Args:
sess: The TensorFlow session to restore the variables.
tags: Set of string tags to identify the required MetaGraphDef. These should
correspond to the tags used when saving the variables using the
SavedModel `save()` API.
export_dir: Directory in which the SavedModel protocol buffer and variables
to be loaded are located.
**saver_kwargs: Optional keyword arguments passed through to Saver.
Returns:
The `MetaGraphDef` protocol buffer loaded in the provided session. This
can be used to further extract signature-defs, collection-defs, etc.
Raises:
RuntimeError: MetaGraphDef associated with the tags cannot be found.
"""
with sess.graph.as_default():
# Build the SavedModel protocol buffer and find requested meta graph def.
saved_model = _parse_saved_model(export_dir)
found_match = False
for meta_graph_def in saved_model.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set(tags):
meta_graph_def_to_load = meta_graph_def
found_match = True
break
if not found_match:
raise RuntimeError(
"MetaGraphDef associated with tags " + str(tags).strip("[]") +
" could not be found in SavedModel. To inspect available tag-sets in"
" the SavedModel, please use the SavedModel CLI: `saved_model_cli`"
)
# Build a saver by importing the meta graph def to load.
saver = tf_saver.import_meta_graph(meta_graph_def_to_load, **saver_kwargs)
if saver:
# Build the checkpoint path where the variables are located.
variables_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.VARIABLES_DIRECTORY),
compat.as_bytes(constants.VARIABLES_FILENAME))
# Restore the variables using the built saver in the provided session.
saver.restore(sess, variables_path)
else:
tf_logging.info("The specified SavedModel has no variables; no "
"checkpoints were restored.")
# Get asset tensors, if any.
asset_tensors_dictionary = _get_asset_tensors(export_dir,
meta_graph_def_to_load)
main_op_tensor = _get_main_op_tensor(meta_graph_def_to_load)
if main_op_tensor is not None:
sess.run(fetches=[main_op_tensor], feed_dict=asset_tensors_dictionary)
else:
legacy_init_op_tensor = _get_legacy_init_op_tensor(meta_graph_def_to_load)
if legacy_init_op_tensor is not None:
sess.run(
fetches=[legacy_init_op_tensor], feed_dict=asset_tensors_dictionary)
return meta_graph_def_to_load
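# --- Illustrative sketch (not part of the original module) -------------------
# A typical call pattern for load(). The "serve" tag and the signature lookup
# are assumptions about how the SavedModel was exported; callers normally pass
# an open session whose graph should receive the imported MetaGraphDef.
def _example_load_saved_model(sess, export_dir):
  if not maybe_saved_model_directory(export_dir):
    raise IOError("No SavedModel found under: %s" % export_dir)
  meta_graph_def = load(sess, ["serve"], export_dir)
  # Signature defs describe the exported inputs/outputs, keyed by name.
  return meta_graph_def.signature_def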
| apache-2.0 |
sodafree/backend | build/ipython/IPython/zmq/completer.py | 3 | 2950 | """Tab-completion over zmq"""
# Trying to get print statements to work during completion, not very
# successfully...
from __future__ import print_function
import itertools
try:
import readline
except ImportError:
readline = None
import rlcompleter
import time
import session
class KernelCompleter(object):
"""Kernel-side completion machinery."""
def __init__(self, namespace):
self.namespace = namespace
self.completer = rlcompleter.Completer(namespace)
def complete(self, line, text):
        # We'll likely use `line` later even though it's not used for anything now
matches = []
complete = self.completer.complete
for state in itertools.count():
comp = complete(text, state)
if comp is None:
break
matches.append(comp)
return matches
class ClientCompleter(object):
"""Client-side completion machinery.
How it works: self.complete will be called multiple times, with
state=0,1,2,... When state=0 it should compute ALL the completion matches,
and then return them for each value of state."""
def __init__(self, client, session, socket):
# ugly, but we get called asynchronously and need access to some
# client state, like backgrounded code
assert readline is not None, "ClientCompleter depends on readline"
self.client = client
self.session = session
self.socket = socket
self.matches = []
def request_completion(self, text):
# Get full line to give to the kernel in case it wants more info.
line = readline.get_line_buffer()
# send completion request to kernel
msg = self.session.send(self.socket,
'complete_request',
dict(text=text, line=line))
# Give the kernel up to 0.5s to respond
for i in range(5):
ident,rep = self.session.recv(self.socket)
rep = session.Message(rep)
if rep is not None and rep.msg_type == 'complete_reply':
matches = rep.content.matches
break
time.sleep(0.1)
else:
# timeout
print ('TIMEOUT') # Can't see this message...
matches = None
return matches
def complete(self, text, state):
if self.client.backgrounded > 0:
print("\n[Not completing, background tasks active]")
print(readline.get_line_buffer(), end='')
return None
if state==0:
matches = self.request_completion(text)
if matches is None:
self.matches = []
print('WARNING: Kernel timeout on tab completion.')
else:
self.matches = matches
try:
return self.matches[state]
except IndexError:
return None
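# --- Illustrative sketch (not part of the original module) ------------------
# How a ClientCompleter would typically be wired into readline on the client
# side; the client/session/socket objects are assumed to exist already.
def _example_install_completer(client, session, socket):
    if readline is None:
        return None
    completer = ClientCompleter(client, session, socket)
    readline.set_completer(completer.complete)
    readline.parse_and_bind('tab: complete')
    return completer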
| bsd-3-clause |
bdoner/SickRage | lib/hachoir_parser/file_system/iso9660.py | 85 | 4954 | """
ISO 9660 (cdrom) file system parser.
Documents:
- Standard ECMA-119 (december 1987)
http://www.nondot.org/sabre/os/files/FileSystems/iso9660.pdf
Author: Victor Stinner
Creation: 11 july 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt32, UInt64, Enum,
NullBytes, RawBytes, String)
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
class PrimaryVolumeDescriptor(FieldSet):
static_size = 2041*8
def createFields(self):
yield NullBytes(self, "unused[]", 1)
yield String(self, "system_id", 32, "System identifier", strip=" ")
yield String(self, "volume_id", 32, "Volume identifier", strip=" ")
yield NullBytes(self, "unused[]", 8)
yield UInt64(self, "space_size", "Volume space size")
yield NullBytes(self, "unused[]", 32)
yield UInt32(self, "set_size", "Volume set size")
yield UInt32(self, "seq_num", "Sequence number")
yield UInt32(self, "block_size", "Block size")
yield UInt64(self, "path_table_size", "Path table size")
yield UInt32(self, "occu_lpath", "Location of Occurrence of Type L Path Table")
yield UInt32(self, "opt_lpath", "Location of Optional of Type L Path Table")
yield UInt32(self, "occu_mpath", "Location of Occurrence of Type M Path Table")
yield UInt32(self, "opt_mpath", "Location of Optional of Type M Path Table")
yield RawBytes(self, "root", 34, "Directory Record for Root Directory")
yield String(self, "vol_set_id", 128, "Volume set identifier", strip=" ")
yield String(self, "publisher", 128, "Publisher identifier", strip=" ")
yield String(self, "data_preparer", 128, "Data preparer identifier", strip=" ")
yield String(self, "application", 128, "Application identifier", strip=" ")
yield String(self, "copyright", 37, "Copyright file identifier", strip=" ")
yield String(self, "abstract", 37, "Abstract file identifier", strip=" ")
yield String(self, "biographic", 37, "Biographic file identifier", strip=" ")
yield String(self, "creation_ts", 17, "Creation date and time", strip=" ")
yield String(self, "modification_ts", 17, "Modification date and time", strip=" ")
yield String(self, "expiration_ts", 17, "Expiration date and time", strip=" ")
yield String(self, "effective_ts", 17, "Effective date and time", strip=" ")
yield UInt8(self, "struct_ver", "Structure version")
yield NullBytes(self, "unused[]", 1)
yield String(self, "app_use", 512, "Application use", strip=" \0")
yield NullBytes(self, "unused[]", 653)
class BootRecord(FieldSet):
static_size = 2041*8
def createFields(self):
yield String(self, "sys_id", 31, "Boot system identifier", strip="\0")
yield String(self, "boot_id", 31, "Boot identifier", strip="\0")
yield RawBytes(self, "system_use", 1979, "Boot system use")
class Terminator(FieldSet):
static_size = 2041*8
def createFields(self):
yield NullBytes(self, "null", 2041)
class Volume(FieldSet):
endian = BIG_ENDIAN
TERMINATOR = 255
type_name = {
0: "Boot Record",
1: "Primary Volume Descriptor",
2: "Supplementary Volume Descriptor",
3: "Volume Partition Descriptor",
TERMINATOR: "Volume Descriptor Set Terminator",
}
static_size = 2048 * 8
content_handler = {
0: BootRecord,
1: PrimaryVolumeDescriptor,
TERMINATOR: Terminator,
}
def createFields(self):
yield Enum(UInt8(self, "type", "Volume descriptor type"), self.type_name)
yield RawBytes(self, "signature", 5, "ISO 9960 signature (CD001)")
if self["signature"].value != "CD001":
raise ParserError("Invalid ISO 9960 volume signature")
yield UInt8(self, "version", "Volume descriptor version")
cls = self.content_handler.get(self["type"].value, None)
if cls:
yield cls(self, "content")
else:
yield RawBytes(self, "raw_content", 2048-7)
class ISO9660(Parser):
endian = LITTLE_ENDIAN
MAGIC = "\x01CD001"
NULL_BYTES = 0x8000
PARSER_TAGS = {
"id": "iso9660",
"category": "file_system",
"description": "ISO 9660 file system",
"min_size": (NULL_BYTES + 6)*8,
"magic": ((MAGIC, NULL_BYTES*8),),
}
def validate(self):
if self.stream.readBytes(self.NULL_BYTES*8, len(self.MAGIC)) != self.MAGIC:
return "Invalid signature"
return True
def createFields(self):
yield self.seekByte(self.NULL_BYTES, null=True)
while True:
volume = Volume(self, "volume[]")
yield volume
if volume["type"].value == Volume.TERMINATOR:
break
if self.current_size < self._size:
yield self.seekBit(self._size, "end")
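# --- Illustrative sketch (not part of the original parser) ------------------
# The same check ISO9660.validate() performs, done on a plain file: the first
# volume descriptor starts at offset 0x8000 and must begin with "\x01CD001".
# The helper name is made up.
def _example_looks_like_iso9660(path):
    stream = open(path, 'rb')
    try:
        stream.seek(ISO9660.NULL_BYTES)
        return stream.read(len(ISO9660.MAGIC)) == ISO9660.MAGIC
    finally:
        stream.close()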
| gpl-3.0 |
ThiagoGarciaAlves/intellij-community | python/lib/Lib/site-packages/django/dispatch/dispatcher.py | 315 | 9292 | import weakref
import threading
from django.dispatch import saferef
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)
def _make_id(target):
if hasattr(target, 'im_func'):
return (id(target.im_self), id(target.im_func))
return id(target)
class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
        { receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak-referencable (more
precisely saferef.safeRef() must be able to create a reference
to the receiver).
Receivers must be able to accept keyword arguments.
If receivers have a dispatch_uid attribute, the receiver will
not be added if another receiver already exists with that
dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
                a Python object, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.DEBUG:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
# Not all callables are inspectable with getargspec, so we'll
# try a couple different ways but in the end fall back on assuming
# it is -- we don't want to prevent registration of valid but weird
# callables.
try:
argspec = inspect.getargspec(receiver)
except TypeError:
try:
argspec = inspect.getargspec(receiver.__call__)
except (TypeError, AttributeError):
argspec = None
if argspec:
assert argspec[2] is not None, \
"Signal receivers must accept keyword arguments (**kwargs)."
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
receiver = saferef.safeRef(receiver, onDelete=self._remove_receiver)
self.lock.acquire()
try:
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
finally:
self.lock.release()
def disconnect(self, receiver=None, sender=None, weak=True, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.
Arguments:
receiver
                The registered receiver to disconnect. May be None if
dispatch_uid is specified.
sender
The registered sender to disconnect
weak
The weakref state to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
self.lock.acquire()
try:
for index in xrange(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
del self.receivers[index]
break
finally:
self.lock.release()
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop, so it is quite possible to not have all
receivers called if a raises an error.
Arguments:
sender
                The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers:
return responses
for receiver in self._live_receivers(_make_id(sender)):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver.
"""
responses = []
if not self.receivers:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(_make_id(sender)):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception, err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _live_receivers(self, senderkey):
"""
Filter sequence of receivers to get resolved, live receivers.
        This checks for weak references and resolves them, then returns only
live receivers.
"""
none_senderkey = _make_id(None)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == none_senderkey or r_senderkey == senderkey:
if isinstance(receiver, WEAKREF_TYPES):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
receivers.append(receiver)
else:
receivers.append(receiver)
return receivers
def _remove_receiver(self, receiver):
"""
Remove dead receivers from connections.
"""
self.lock.acquire()
try:
to_remove = []
for key, connected_receiver in self.receivers:
if connected_receiver == receiver:
to_remove.append(key)
for key in to_remove:
last_idx = len(self.receivers) - 1
# enumerate in reverse order so that indexes are valid even
# after we delete some items
for idx, (r_key, _) in enumerate(reversed(self.receivers)):
if r_key == key:
del self.receivers[last_idx-idx]
finally:
self.lock.release()
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
"""
def _decorator(func):
signal.connect(func, **kwargs)
return func
return _decorator
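# Editorial note: connect() keys receivers on (dispatch_uid, sender) when a
# dispatch_uid is given, so passing one through this decorator prevents the
# same function from being registered twice if the module is imported more
# than once. Hypothetical example:
#
#   @receiver(post_save, sender=MyModel, dispatch_uid="my_unique_handler")
#   def my_unique_handler(sender, **kwargs):
#       ...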
| apache-2.0 |
mdavid/horizon | openstack_dashboard/dashboards/admin/volumes/volumes/tests.py | 9 | 4211 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard.api import cinder
from openstack_dashboard.test import helpers as test
class VolumeViewTests(test.BaseAdminViewTests):
@test.create_stubs({cinder: ('volume_reset_state',
'volume_get')})
def test_update_volume_status(self):
volume = self.volumes.first()
formData = {'status': 'error'}
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_reset_state(IsA(http.HttpRequest),
volume.id,
formData['status'])
self.mox.ReplayAll()
res = self.client.post(
reverse('horizon:admin:volumes:volumes:update_status',
args=(volume.id,)),
formData)
self.assertNoFormErrors(res)
@test.create_stubs({cinder: ('volume_manage',
'volume_type_list',
'availability_zone_list',
'extension_supported')})
def test_manage_volume(self):
metadata = {'key': u'k1',
'value': u'v1'}
formData = {'host': 'host-1',
'identifier': 'vol-1',
'id_type': u'source-name',
'name': 'name-1',
'description': 'manage a volume',
'volume_type': 'vol_type_1',
'availability_zone': 'nova',
'metadata': metadata['key'] + '=' + metadata['value'],
'bootable': False}
cinder.volume_type_list(
IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.availability_zone_list(
IsA(http.HttpRequest)).\
AndReturn(self.availability_zones.list())
cinder.extension_supported(
IsA(http.HttpRequest),
'AvailabilityZones').\
AndReturn(True)
cinder.volume_manage(
IsA(http.HttpRequest),
host=formData['host'],
identifier=formData['identifier'],
id_type=formData['id_type'],
name=formData['name'],
description=formData['description'],
volume_type=formData['volume_type'],
availability_zone=formData['availability_zone'],
metadata={metadata['key']: metadata['value']},
bootable=formData['bootable'])
self.mox.ReplayAll()
res = self.client.post(
reverse('horizon:admin:volumes:volumes:manage'),
formData)
self.assertNoFormErrors(res)
@test.create_stubs({cinder: ('volume_unmanage',
'volume_get')})
def test_unmanage_volume(self):
# important - need to get the v2 cinder volume which has host data
volume_list = \
filter(lambda x: x.name == 'v2_volume', self.cinder_volumes.list())
volume = volume_list[0]
formData = {'volume_name': volume.name,
'host_name': 'host@backend-name#pool',
'volume_id': volume.id}
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_unmanage(IsA(http.HttpRequest), volume.id).\
AndReturn(volume)
self.mox.ReplayAll()
res = self.client.post(
reverse('horizon:admin:volumes:volumes:unmanage',
args=(volume.id,)),
formData)
self.assertNoFormErrors(res)
| apache-2.0 |
KoertJanssens/MasterBall.be | pogom/captcha.py | 1 | 12626 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
- Captcha Overseer:
- Tracks incoming new captcha tokens
- Monitors the captcha'd accounts queue
- Launches captcha_solver threads
- Captcha Solver Threads each:
- Have a unique captcha token
- Attempts to verifyChallenge
- Puts account back in active queue
- Pushes webhook messages with captcha status
'''
import logging
import time
import requests
from datetime import datetime
from threading import Thread
from pgoapi import PGoApi
from .fakePogoApi import FakePogoApi
from .models import Token
from .transform import jitter_location
from .account import check_login
from .proxy import get_new_proxy
from .utils import now
log = logging.getLogger(__name__)
def captcha_overseer_thread(args, account_queue, account_captchas,
key_scheduler, wh_queue):
solverId = 0
while True:
# Run once every 15 seconds.
sleep_timer = 15
tokens_needed = len(account_captchas)
if tokens_needed > 0:
regio = args.captcha_regio
tokens = Token.get_valid(tokens_needed,regio)
tokens_available = len(tokens)
solvers = min(tokens_needed, tokens_available)
log.debug('Captcha overseer running. Captchas: %d - Tokens: %d',
tokens_needed, tokens_available)
for i in range(0, solvers):
hash_key = None
if args.hash_key:
hash_key = key_scheduler.next()
t = Thread(target=captcha_solver_thread,
name='captcha-solver-{}'.format(solverId),
args=(args, account_queue, account_captchas,
hash_key, wh_queue, tokens[i]))
t.daemon = True
t.start()
solverId += 1
if solverId > 999:
solverId = 0
# Wait a bit before launching next thread
time.sleep(1)
# Adjust captcha-overseer sleep timer
sleep_timer -= 1 * solvers
# Hybrid mode
if args.captcha_key and args.manual_captcha_timeout > 0:
tokens_remaining = tokens_needed - tokens_available
# Safety guard
tokens_remaining = min(tokens_remaining, 5)
for i in range(0, tokens_remaining):
account = account_captchas[0][1]
last_active = account['last_active']
hold_time = (datetime.utcnow() -
last_active).total_seconds()
if hold_time > args.manual_captcha_timeout:
log.debug('Account %s waited %ds for captcha token ' +
'and reached the %ds timeout.',
account['username'], hold_time,
args.manual_captcha_timeout)
if args.hash_key:
hash_key = key_scheduler.next()
t = Thread(target=captcha_solver_thread,
name='captcha-solver-{}'.format(solverId),
args=(args, account_queue, account_captchas,
hash_key, wh_queue))
t.daemon = True
t.start()
solverId += 1
if solverId > 999:
solverId = 0
# Wait a bit before launching next thread
time.sleep(1)
else:
break
time.sleep(sleep_timer)
def captcha_solver_thread(args, account_queue, account_captchas, hash_key,
wh_queue, token=None):
status, account, captcha_url = account_captchas.popleft()
status['message'] = 'Waking up account {} to verify captcha token.'.format(
account['username'])
log.info(status['message'])
if args.mock != '':
api = FakePogoApi(args.mock)
else:
api = PGoApi()
if hash_key:
log.debug('Using key {} for solving this captcha.'.format(hash_key))
api.activate_hash_server(hash_key)
proxy_url = False
if args.proxy:
# Try to fetch a new proxy
proxy_num, proxy_url = get_new_proxy(args)
if proxy_url:
log.debug('Using proxy %s', proxy_url)
api.set_proxy({'http': proxy_url, 'https': proxy_url})
location = account['last_location']
if not args.no_jitter:
# Jitter location before uncaptcha attempt
location = jitter_location(location)
api.set_position(*location)
check_login(args, account, api, location, proxy_url)
wh_message = {'status_name': args.status_name,
'status': 'error',
'mode': 'manual',
'account': account['username'],
'captcha': status['captcha'],
'time': 0}
if not token:
token = token_request(args, status, captcha_url)
wh_message['mode'] = '2captcha'
response = api.verify_challenge(token=token)
last_active = account['last_active']
hold_time = (datetime.utcnow() - last_active).total_seconds()
wh_message['time'] = int(hold_time)
if 'success' in response['responses']['VERIFY_CHALLENGE']:
status['message'] = (
"Account {} successfully uncaptcha'd, returning to " +
'active duty.').format(account['username'])
log.info(status['message'])
account_queue.put(account)
wh_message['status'] = 'success'
else:
status['message'] = (
'Account {} failed verifyChallenge, putting back ' +
'in captcha queue.').format(account['username'])
log.warning(status['message'])
account_captchas.append((status, account, captcha_url))
wh_message['status'] = 'failure'
if args.webhooks:
wh_queue.put(('captcha', wh_message))
# Make sure status is updated
time.sleep(1)
def handle_captcha(args, status, api, account, account_failures,
account_captchas, whq, response_dict, step_location):
try:
captcha_url = response_dict['responses'][
'CHECK_CHALLENGE']['challenge_url']
if len(captcha_url) > 1:
status['captcha'] += 1
if not args.captcha_solving:
status['message'] = ('Account {} has encountered a captcha. ' +
'Putting account away.').format(
account['username'])
log.warning(status['message'])
account_failures.append({
'account': account,
'last_fail_time': now(),
'reason': 'captcha found'})
if args.webhooks:
wh_message = {'status_name': args.status_name,
'status': 'encounter',
'mode': 'disabled',
'account': account['username'],
'captcha': status['captcha'],
'time': 0}
whq.put(('captcha', wh_message))
return False
if args.captcha_key and args.manual_captcha_timeout == 0:
if automatic_captcha_solve(args, status, api, captcha_url,
account, whq):
return True
else:
account_failures.append({
'account': account,
'last_fail_time': now(),
'reason': 'captcha failed to verify'})
return False
else:
status['message'] = ('Account {} has encountered a captcha. ' +
'Waiting for token.').format(
account['username'])
log.warning(status['message'])
account['last_active'] = datetime.utcnow()
account['last_location'] = step_location
account_captchas.append((status, account, captcha_url))
if args.webhooks:
wh_message = {'status_name': args.status_name,
'status': 'encounter',
'mode': 'manual',
'account': account['username'],
'captcha': status['captcha'],
'time': args.manual_captcha_timeout}
whq.put(('captcha', wh_message))
return False
except KeyError, e:
log.error('Unable to check captcha: {}'.format(e))
return None
# Return True if captcha was successfully solved
def automatic_captcha_solve(args, status, api, captcha_url, account, wh_queue):
status['message'] = (
'Account {} is encountering a captcha, starting 2captcha ' +
'sequence.').format(account['username'])
log.warning(status['message'])
if args.webhooks:
wh_message = {'status_name': args.status_name,
'status': 'encounter',
'mode': '2captcha',
'account': account['username'],
'captcha': status['captcha'],
'time': 0}
wh_queue.put(('captcha', wh_message))
time_start = now()
captcha_token = token_request(args, status, captcha_url)
time_elapsed = now() - time_start
if 'ERROR' in captcha_token:
log.warning('Unable to resolve captcha, please check your ' +
'2captcha API key and/or wallet balance.')
if args.webhooks:
wh_message['status'] = 'error'
wh_message['time'] = time_elapsed
wh_queue.put(('captcha', wh_message))
return False
else:
status['message'] = (
'Retrieved captcha token, attempting to verify challenge ' +
'for {}.').format(account['username'])
log.info(status['message'])
response = api.verify_challenge(token=captcha_token)
time_elapsed = now() - time_start
if 'success' in response['responses']['VERIFY_CHALLENGE']:
status['message'] = "Account {} successfully uncaptcha'd.".format(
account['username'])
log.info(status['message'])
if args.webhooks:
wh_message['status'] = 'success'
wh_message['time'] = time_elapsed
wh_queue.put(('captcha', wh_message))
return True
else:
status['message'] = (
'Account {} failed verifyChallenge, putting away ' +
'account for now.').format(account['username'])
log.info(status['message'])
if args.webhooks:
wh_message['status'] = 'failure'
wh_message['time'] = time_elapsed
wh_queue.put(('captcha', wh_message))
return False
def token_request(args, status, url):
s = requests.Session()
# Fetch the CAPTCHA_ID from 2captcha.
try:
request_url = (
'http://2captcha.com/in.php?key={}&method=userrecaptcha' +
'&googlekey={}&pageurl={}').format(args.captcha_key,
args.captcha_dsk, url)
captcha_id = s.post(request_url, timeout=5).text.split('|')[1]
captcha_id = str(captcha_id)
    # IndexError implies that the returned response was a 2captcha error.
except IndexError:
return 'ERROR'
status['message'] = (
'Retrieved captcha ID: {}; now retrieving token.').format(captcha_id)
log.info(status['message'])
# Get the response, retry every 5 seconds if it's not ready.
recaptcha_response = s.get(
'http://2captcha.com/res.php?key={}&action=get&id={}'.format(
args.captcha_key, captcha_id), timeout=5).text
while 'CAPCHA_NOT_READY' in recaptcha_response:
log.info('Captcha token is not ready, retrying in 5 seconds...')
time.sleep(5)
recaptcha_response = s.get(
'http://2captcha.com/res.php?key={}&action=get&id={}'.format(
args.captcha_key, captcha_id), timeout=5).text
token = str(recaptcha_response.split('|')[1])
return token
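# Editorial note: token_request() assumes the usual 2captcha reply format of
# "OK|<captcha id>" from in.php and "OK|<token>" from res.php, which is why
# both replies are split on '|' and index 1 is taken; an error reply from
# in.php carries no '|' separator and surfaces as the IndexError handled above.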
| agpl-3.0 |
skyoo/jumpserver | apps/perms/api/asset/user_permission/user_permission_nodes_with_assets.py | 1 | 8244 | # -*- coding: utf-8 -*-
#
from itertools import chain
from rest_framework.generics import ListAPIView
from rest_framework.request import Request
from rest_framework.response import Response
from django.db.models import F, Value, CharField, Q
from django.conf import settings
from orgs.utils import tmp_to_root_org
from common.permissions import IsValidUser
from common.utils import get_logger, get_object_or_none
from .mixin import UserNodeGrantStatusDispatchMixin, ForUserMixin, ForAdminMixin
from perms.utils.asset.user_permission import (
get_indirect_granted_node_children, UNGROUPED_NODE_KEY, FAVORITE_NODE_KEY,
get_user_direct_granted_assets, get_top_level_granted_nodes,
get_user_granted_nodes_list_via_mapping_node,
get_user_granted_all_assets, rebuild_user_tree_if_need,
get_user_all_assetpermissions_id, get_favorite_node,
get_ungrouped_node, compute_tmp_mapping_node_from_perm,
TMP_GRANTED_FIELD, count_direct_granted_node_assets,
count_node_all_granted_assets
)
from perms.models import AssetPermission
from assets.models import Asset, FavoriteAsset
from assets.api import SerializeToTreeNodeMixin
from perms.hands import Node
logger = get_logger(__name__)
class MyGrantedNodesWithAssetsAsTreeApi(SerializeToTreeNodeMixin, ListAPIView):
permission_classes = (IsValidUser,)
def add_ungrouped_resource(self, data: list, user, asset_perms_id):
if not settings.PERM_SINGLE_ASSET_TO_UNGROUP_NODE:
return
ungrouped_node = get_ungrouped_node(user, asset_perms_id=asset_perms_id)
direct_granted_assets = get_user_direct_granted_assets(
user, asset_perms_id=asset_perms_id
).annotate(
parent_key=Value(ungrouped_node.key, output_field=CharField())
).prefetch_related('platform')
data.extend(self.serialize_nodes([ungrouped_node], with_asset_amount=True))
data.extend(self.serialize_assets(direct_granted_assets))
def add_favorite_resource(self, data: list, user, asset_perms_id):
favorite_node = get_favorite_node(user, asset_perms_id)
favorite_assets = FavoriteAsset.get_user_favorite_assets(
user, asset_perms_id=asset_perms_id
).annotate(
parent_key=Value(favorite_node.key, output_field=CharField())
).prefetch_related('platform')
data.extend(self.serialize_nodes([favorite_node], with_asset_amount=True))
data.extend(self.serialize_assets(favorite_assets))
def add_node_filtered_by_system_user(self, data: list, user, asset_perms_id):
tmp_nodes = compute_tmp_mapping_node_from_perm(user, asset_perms_id=asset_perms_id)
granted_nodes_key = []
for _node in tmp_nodes:
_granted = getattr(_node, TMP_GRANTED_FIELD, False)
if not _granted:
if settings.PERM_SINGLE_ASSET_TO_UNGROUP_NODE:
assets_amount = count_direct_granted_node_assets(user, _node.key, asset_perms_id)
else:
assets_amount = count_node_all_granted_assets(user, _node.key, asset_perms_id)
_node.assets_amount = assets_amount
else:
granted_nodes_key.append(_node.key)
        # Query the children of the granted nodes
q = Q()
for _key in granted_nodes_key:
q |= Q(key__startswith=f'{_key}:')
if q:
descendant_nodes = Node.objects.filter(q).distinct()
else:
descendant_nodes = Node.objects.none()
data.extend(self.serialize_nodes(chain(tmp_nodes, descendant_nodes), with_asset_amount=True))
def add_assets(self, data: list, user, asset_perms_id):
if settings.PERM_SINGLE_ASSET_TO_UNGROUP_NODE:
all_assets = get_user_granted_all_assets(
user,
via_mapping_node=False,
include_direct_granted_assets=False,
asset_perms_id=asset_perms_id
)
else:
all_assets = get_user_granted_all_assets(
user,
via_mapping_node=False,
include_direct_granted_assets=True,
asset_perms_id=asset_perms_id
)
all_assets = all_assets.annotate(
parent_key=F('nodes__key')
).prefetch_related('platform')
data.extend(self.serialize_assets(all_assets))
@tmp_to_root_org()
def list(self, request: Request, *args, **kwargs):
"""
        This algorithm relies on UserGrantedMappingNode.
        It returns all granted nodes and assets:
        Node  = UserGrantedMappingNode + children of granted nodes
        Asset = assets of granted nodes + directly granted assets
"""
user = request.user
data = []
asset_perms_id = get_user_all_assetpermissions_id(user)
system_user_id = request.query_params.get('system_user')
if system_user_id:
asset_perms_id = list(AssetPermission.objects.valid().filter(
id__in=asset_perms_id, system_users__id=system_user_id, actions__gt=0
).values_list('id', flat=True).distinct())
self.add_ungrouped_resource(data, user, asset_perms_id)
self.add_favorite_resource(data, user, asset_perms_id)
if system_user_id:
self.add_node_filtered_by_system_user(data, user, asset_perms_id)
else:
rebuild_user_tree_if_need(request, user)
all_nodes = get_user_granted_nodes_list_via_mapping_node(user)
data.extend(self.serialize_nodes(all_nodes, with_asset_amount=True))
self.add_assets(data, user, asset_perms_id)
return Response(data=data)
class GrantedNodeChildrenWithAssetsAsTreeApiMixin(UserNodeGrantStatusDispatchMixin,
SerializeToTreeNodeMixin,
ListAPIView):
"""
    Granted node tree with assets.
"""
user: None
def get_data_on_node_direct_granted(self, key):
nodes = Node.objects.filter(parent_key=key)
assets = Asset.org_objects.filter(nodes__key=key).distinct()
assets = assets.prefetch_related('platform')
return nodes, assets
def get_data_on_node_indirect_granted(self, key):
user = self.user
asset_perms_id = get_user_all_assetpermissions_id(user)
nodes = get_indirect_granted_node_children(user, key)
assets = Asset.org_objects.filter(
nodes__key=key,
).filter(
granted_by_permissions__id__in=asset_perms_id
).distinct()
assets = assets.prefetch_related('platform')
return nodes, assets
def get_data_on_node_not_granted(self, key):
return Node.objects.none(), Asset.objects.none()
def get_data(self, key, user):
assets, nodes = [], []
if not key:
root_nodes = get_top_level_granted_nodes(user)
nodes.extend(root_nodes)
elif key == UNGROUPED_NODE_KEY:
assets = get_user_direct_granted_assets(user)
assets = assets.prefetch_related('platform')
elif key == FAVORITE_NODE_KEY:
assets = FavoriteAsset.get_user_favorite_assets(user)
else:
nodes, assets = self.dispatch_get_data(key, user)
return nodes, assets
def id2key_if_have(self):
id = self.request.query_params.get('id')
if id is not None:
node = get_object_or_none(Node, id=id)
if node:
return node.key
def list(self, request: Request, *args, **kwargs):
key = self.request.query_params.get('key')
if key is None:
key = self.id2key_if_have()
user = self.user
rebuild_user_tree_if_need(request, user)
nodes, assets = self.get_data(key, user)
tree_nodes = self.serialize_nodes(nodes, with_asset_amount=True)
tree_assets = self.serialize_assets(assets, key)
return Response(data=[*tree_nodes, *tree_assets])
class UserGrantedNodeChildrenWithAssetsAsTreeApi(ForAdminMixin, GrantedNodeChildrenWithAssetsAsTreeApiMixin):
pass
class MyGrantedNodeChildrenWithAssetsAsTreeApi(ForUserMixin, GrantedNodeChildrenWithAssetsAsTreeApiMixin):
pass
| gpl-2.0 |
glovebx/odoo | addons/document/odt2txt.py | 435 | 2110 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys, zipfile, xml.dom.minidom
import StringIO
class OpenDocumentTextFile :
def __init__ (self, filepath):
zip = zipfile.ZipFile(filepath)
self.content = xml.dom.minidom.parseString(zip.read("content.xml"))
def toString (self):
""" Converts the document to a string. """
buffer = u""
for val in ["text:p", "text:h", "text:list"]:
for paragraph in self.content.getElementsByTagName(val) :
buffer += self.textToString(paragraph) + "\n"
return buffer
def textToString(self, element):
buffer = u""
for node in element.childNodes :
if node.nodeType == xml.dom.Node.TEXT_NODE :
buffer += node.nodeValue
elif node.nodeType == xml.dom.Node.ELEMENT_NODE :
buffer += self.textToString(node)
return buffer
if __name__ == "__main__" :
s =StringIO.StringIO(file(sys.argv[1]).read())
odt = OpenDocumentTextFile(s)
print odt.toString().encode('ascii','replace')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mottosso/be | be/vendor/requests/packages/urllib3/__init__.py | 155 | 1864 | """
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov ([email protected])'
__license__ = 'MIT'
__version__ = '1.10.2'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added a stderr logging handler to logger: %s' % __name__)
return handler
# ... Clean up.
del NullHandler
# Set security warning to always go off by default.
import warnings
warnings.simplefilter('always', exceptions.SecurityWarning)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter('ignore', category)
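# Illustrative usage (editorial, not part of the original file):
#
#   import urllib3
#   urllib3.add_stderr_logger()   # verbose DEBUG logging while troubleshooting
#   urllib3.disable_warnings()    # silence urllib3 HTTPWarning categories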
| lgpl-2.1 |
JioCloud/nova_test_latest | nova/db/sqlalchemy/migrate_repo/versions/233_add_stats_in_compute_nodes.py | 81 | 1460 | # Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
def upgrade(engine):
meta = MetaData()
meta.bind = engine
# Drop the compute_node_stats table and add a 'stats' column to
# compute_nodes directly. The data itself is transient and doesn't
# need to be copied over.
table_names = ('compute_node_stats', 'shadow_compute_node_stats')
for table_name in table_names:
table = Table(table_name, meta, autoload=True)
table.drop()
# Add a new stats column to compute nodes
table_names = ('compute_nodes', 'shadow_compute_nodes')
for table_name in table_names:
table = Table(table_name, meta, autoload=True)
stats = Column('stats', Text, default='{}')
table.create_column(stats)
| apache-2.0 |
home-assistant/home-assistant | homeassistant/components/melcloud/__init__.py | 1 | 5055 | """The MELCloud Climate integration."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
from typing import Any
from aiohttp import ClientConnectionError
from async_timeout import timeout
from pymelcloud import Device, get_devices
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_TOKEN, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.util import Throttle
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
PLATFORMS = ["climate", "sensor", "water_heater"]
CONF_LANGUAGE = "language"
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_TOKEN): cv.string,
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigEntry):
"""Establish connection with MELCloud."""
if DOMAIN not in config:
return True
username = config[DOMAIN][CONF_USERNAME]
token = config[DOMAIN][CONF_TOKEN]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: username, CONF_TOKEN: token},
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Establish connection with MELClooud."""
conf = entry.data
mel_devices = await mel_devices_setup(hass, conf[CONF_TOKEN])
hass.data.setdefault(DOMAIN, {}).update({entry.entry_id: mel_devices})
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
hass.data[DOMAIN].pop(config_entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
class MelCloudDevice:
"""MELCloud Device instance."""
def __init__(self, device: Device) -> None:
"""Construct a device wrapper."""
self.device = device
self.name = device.name
self._available = True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self, **kwargs):
"""Pull the latest data from MELCloud."""
try:
await self.device.update()
self._available = True
except ClientConnectionError:
_LOGGER.warning("Connection failed for %s", self.name)
self._available = False
async def async_set(self, properties: dict[str, Any]):
"""Write state changes to the MELCloud API."""
try:
await self.device.set(properties)
self._available = True
except ClientConnectionError:
_LOGGER.warning("Connection failed for %s", self.name)
self._available = False
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def device_id(self):
"""Return device ID."""
return self.device.device_id
@property
def building_id(self):
"""Return building ID of the device."""
return self.device.building_id
@property
def device_info(self):
"""Return a device description for device registry."""
_device_info = {
"connections": {(CONNECTION_NETWORK_MAC, self.device.mac)},
"identifiers": {(DOMAIN, f"{self.device.mac}-{self.device.serial}")},
"manufacturer": "Mitsubishi Electric",
"name": self.name,
}
unit_infos = self.device.units
if unit_infos is not None:
_device_info["model"] = ", ".join(
[x["model"] for x in unit_infos if x["model"]]
)
return _device_info
async def mel_devices_setup(hass, token) -> dict[str, list[MelCloudDevice]]:
"""Query connected devices from MELCloud."""
session = hass.helpers.aiohttp_client.async_get_clientsession()
try:
with timeout(10):
all_devices = await get_devices(
token,
session,
conf_update_interval=timedelta(minutes=5),
device_set_debounce=timedelta(seconds=1),
)
except (asyncio.TimeoutError, ClientConnectionError) as ex:
raise ConfigEntryNotReady() from ex
wrapped_devices = {}
for device_type, devices in all_devices.items():
wrapped_devices[device_type] = [MelCloudDevice(device) for device in devices]
return wrapped_devices
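# Editorial note: the mapping returned above is keyed by the device type
# reported by pymelcloud's get_devices(), each value being a list of
# MelCloudDevice wrappers for that type.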
| apache-2.0 |
vlukes/sfepy | tests/test_msm_symbolic.py | 5 | 7423 | from __future__ import absolute_import
from sfepy import data_dir
import six
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
dim = 2
field_1 = {
'name' : 'a_harmonic_field',
'dtype' : 'real',
'shape' : 'scalar',
'region' : 'Omega',
'approx_order' : 1,
}
variables = {
't': ('unknown field', 'a_harmonic_field', 0),
's': ('test field', 'a_harmonic_field', 't'),
}
regions = {
'Omega' : 'all',
'Gamma' : ('vertices of surface', 'facet'),
}
ebcs = {
't_left' : ('Gamma', {'t.0' : 'ebc'}),
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
material_1 = {
'name' : 'coef',
'values' : {
'val' : 12.0,
'K' : [[1.0, 0.3], [0.3, 2.0]],
}
}
material_2 = {
'name' : 'rhs',
'function' : 'rhs',
}
equations = {
'Laplace' :
"""2 * dw_laplace.i.Omega(coef.val, s, t)
""",
'Diffusion' :
"""3 * dw_diffusion.i.Omega(coef.K, s, t)
""",
}
equations_rhs = {
'Laplace' :
"""= - dw_volume_lvf.i.Omega(rhs.val, s)""",
'Diffusion' :
"""= - dw_volume_lvf.i.Omega(rhs.val, s)""",
}
solutions = {
'sincos' : ('t', 'sin(3.0 * x) * cos(4.0 * y)'),
'poly' : ('t', '(x**2) + (y**2)'),
'polysin' : ('t', '((x - 0.5)**3) * sin(5.0 * y)'),
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
import numpy as nm
try:
import sfepy.linalg.sympy_operators as sops
except ImportError as exc:
sops = None
from sfepy.base.testing import TestCommon
output_name = 'test_msm_symbolic_%s.vtk'
solution = ['']
def ebc(ts, coor, solution=None):
expression = solution[0]
val = TestCommon.eval_coor_expression(expression, coor)
return nm.atleast_1d(val)
def rhs(ts, coor, mode=None, expression=None, **kwargs):
if mode == 'qp':
if expression is None:
expression = '0.0 * x'
val = TestCommon.eval_coor_expression(expression, coor)
val.shape = (val.shape[0], 1, 1)
return {'val' : val}
functions = {
'ebc' : (lambda ts, coor, **kwargs:
ebc(ts, coor, solution=solution),),
'rhs' : (rhs,),
}
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
from sfepy.discrete import Problem
problem = Problem.from_conf(conf, init_equations=False)
test = Test(problem=problem, conf=conf, options=options)
return test
def _build_rhs(self, equation, sols):
rhss = {}
self.report('%s:' % equation.name)
self.report('evaluating terms, "<=" is solution, "=>" is the rhs:')
for term in equation.terms:
if not hasattr(term, 'symbolic'):
self.report('term %s has no symbolic description!' % term.name)
raise ValueError
expr = term.symbolic['expression']
arg_map = term.symbolic['map']
self.report('%s(%s)' %\
(term.name, ', '.join(term.ats)))
self.report('multiplicator: %f' % term.sign)
self.report(' symbolic:', expr)
self.report(' using argument map:', arg_map)
for sol_name, sol in six.iteritems(sols):
rhs = self._eval_term(sol[1], term, sops)
srhs = "(%s * (%s))" % (term.sign, rhs)
rhss.setdefault(sol_name, []).append(srhs)
for key, val in six.iteritems(rhss):
rhss[key] = '+'.join(val)
return rhss
def _eval_term(self, sol, term, sops):
"""Works for scalar, single unknown terms only!"""
expr = term.symbolic['expression']
arg_map = term.symbolic['map']
env = {'x' : sops.Symbol('x'),
'y' : sops.Symbol('y'),
'z' : sops.Symbol('z'),
'dim' : dim}
for key, val in six.iteritems(arg_map):
if val == 'state':
env[key] = sol
else:
env[key] = term.get_args([val])[0]
if 'material' in val:
# Take the first value - constant in all QPs.
aux = env[key][0,0]
if nm.prod(aux.shape) == 1:
env[key] = aux.squeeze()
else:
import sympy
env[key] = sympy.Matrix(aux)
self.report(' <= ', sol)
sops.set_dim(dim)
val = str(eval(expr, sops.__dict__, env))
self.report(' =>', val)
return val
def _test_msm_symbolic(self, equations):
import os.path as op
if sops is None:
self.report('cannot import sympy, skipping')
return True
problem = self.problem
ok = True
for eq_name, equation in six.iteritems(equations):
problem.set_equations({eq_name : equation})
problem.update_materials()
rhss = self._build_rhs(problem.equations[eq_name],
self.conf.solutions)
erhs = problem.conf.equations_rhs[eq_name]
problem.set_equations({eq_name : equation + erhs})
variables = problem.get_variables()
materials = problem.get_materials()
rhs_mat = materials['rhs']
for sol_name, sol in six.iteritems(problem.conf.solutions):
self.report('testing', sol_name)
var_name, sol_expr = sol
rhs_expr = rhss[sol_name]
self.report('sol:', sol_expr)
self.report('rhs:', rhs_expr)
globals()['solution'][0] = sol_expr
rhs_mat.function.set_extra_args(expression=rhs_expr)
problem.time_update()
state = problem.solve()
coor = variables[var_name].field.get_coor()
ana_sol = self.eval_coor_expression(sol_expr, coor)
num_sol = state(var_name)
ana_norm = nm.linalg.norm(ana_sol, nm.inf)
ret = self.compare_vectors(ana_sol, num_sol,
allowed_error=ana_norm * 1e-2,
label1='analytical %s' % var_name,
label2='numerical %s' % var_name,
norm=nm.inf)
if not ret:
self.report('variable %s: failed' % var_name)
fname = op.join(self.options.out_dir, self.conf.output_name)
out = {}
astate = state.copy()
astate.set_full(ana_sol)
aux = astate.create_output_dict()
out['ana_t'] = aux['t']
aux = state.create_output_dict()
out['num_t'] = aux['t']
problem.domain.mesh.write(fname % '_'.join((sol_name, eq_name)),
io='auto', out=out)
ok = ok and ret
return ok
def _get_equations(self, name):
"""Choose a sub-problem from all equations."""
return {name : self.problem.conf.equations[name]}
def test_msm_symbolic_laplace(self):
return self._test_msm_symbolic(self._get_equations('Laplace'))
def test_msm_symbolic_diffusion(self):
return self._test_msm_symbolic(self._get_equations('Diffusion'))
| bsd-3-clause |
keyurpatel076/MissionPlannerGit | Lib/distutils/command/config.py | 75 | 13130 | """distutils.command.config
Implements the Distutils 'config' command, a (mostly) empty command class
that exists mainly to be sub-classed by specific module distributions and
applications. The idea is that while every "config" command is different,
at least they're all named the same, and users always see "config" in the
list of standard commands. Also, this is a good place to put common
configure-like tasks: "try to compile this C code", or "figure out where
this header file lives".
"""
__revision__ = "$Id$"
import os
import re
from distutils.core import Command
from distutils.errors import DistutilsExecError
from distutils.ccompiler import customize_compiler
from distutils import log
LANG_EXT = {'c': '.c', 'c++': '.cxx'}
class config(Command):
description = "prepare to build"
user_options = [
('compiler=', None,
"specify the compiler type"),
('cc=', None,
"specify the compiler executable"),
('include-dirs=', 'I',
"list of directories to search for header files"),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries"),
('noisy', None,
"show every action (compile, link, run, ...) taken"),
('dump-source', None,
"dump generated source files before attempting to compile them"),
]
# The three standard command methods: since the "config" command
# does nothing by default, these are empty.
def initialize_options(self):
self.compiler = None
self.cc = None
self.include_dirs = None
self.libraries = None
self.library_dirs = None
# maximal output for now
self.noisy = 1
self.dump_source = 1
# list of temporary files generated along-the-way that we have
# to clean at some point
self.temp_files = []
def finalize_options(self):
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
elif isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
if self.libraries is None:
self.libraries = []
elif isinstance(self.libraries, str):
self.libraries = [self.libraries]
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
def run(self):
pass
# Utility methods for actual "config" commands. The interfaces are
# loosely based on Autoconf macros of similar names. Sub-classes
# may use these freely.
def _check_compiler(self):
"""Check that 'self.compiler' really is a CCompiler object;
if not, make it one.
"""
# We do this late, and only on-demand, because this is an expensive
# import.
from distutils.ccompiler import CCompiler, new_compiler
if not isinstance(self.compiler, CCompiler):
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run, force=1)
customize_compiler(self.compiler)
if self.include_dirs:
self.compiler.set_include_dirs(self.include_dirs)
if self.libraries:
self.compiler.set_libraries(self.libraries)
if self.library_dirs:
self.compiler.set_library_dirs(self.library_dirs)
def _gen_temp_sourcefile(self, body, headers, lang):
filename = "_configtest" + LANG_EXT[lang]
file = open(filename, "w")
if headers:
for header in headers:
file.write("#include <%s>\n" % header)
file.write("\n")
file.write(body)
if body[-1] != "\n":
file.write("\n")
file.close()
return filename
def _preprocess(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
out = "_configtest.i"
self.temp_files.extend([src, out])
self.compiler.preprocess(src, out, include_dirs=include_dirs)
return (src, out)
def _compile(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
if self.dump_source:
dump_file(src, "compiling '%s':" % src)
(obj,) = self.compiler.object_filenames([src])
self.temp_files.extend([src, obj])
self.compiler.compile([src], include_dirs=include_dirs)
return (src, obj)
def _link(self, body, headers, include_dirs, libraries, library_dirs,
lang):
(src, obj) = self._compile(body, headers, include_dirs, lang)
prog = os.path.splitext(os.path.basename(src))[0]
self.compiler.link_executable([obj], prog,
libraries=libraries,
library_dirs=library_dirs,
target_lang=lang)
if self.compiler.exe_extension is not None:
prog = prog + self.compiler.exe_extension
self.temp_files.append(prog)
return (src, obj, prog)
def _clean(self, *filenames):
if not filenames:
filenames = self.temp_files
self.temp_files = []
log.info("removing: %s", ' '.join(filenames))
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
# XXX these ignore the dry-run flag: what to do, what to do? even if
# you want a dry-run build, you still need some sort of configuration
# info. My inclination is to make it up to the real config command to
# consult 'dry_run', and assume a default (minimal) configuration if
# true. The problem with trying to do it here is that you'd have to
# return either true or false from all the 'try' methods, neither of
# which is correct.
# XXX need access to the header search path and maybe default macros.
def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
"""Construct a source file from 'body' (a string containing lines
of C/C++ code) and 'headers' (a list of header files to include)
and run it through the preprocessor. Return true if the
preprocessor succeeded, false if there were any errors.
('body' probably isn't of much use, but what the heck.)
"""
from distutils.ccompiler import CompileError
self._check_compiler()
ok = 1
try:
self._preprocess(body, headers, include_dirs, lang)
except CompileError:
ok = 0
self._clean()
return ok
def search_cpp(self, pattern, body=None, headers=None, include_dirs=None,
lang="c"):
"""Construct a source file (just like 'try_cpp()'), run it through
the preprocessor, and return true if any line of the output matches
'pattern'. 'pattern' should either be a compiled regex object or a
string containing a regex. If both 'body' and 'headers' are None,
preprocesses an empty file -- which can be useful to determine the
symbols the preprocessor and compiler set by default.
"""
self._check_compiler()
src, out = self._preprocess(body, headers, include_dirs, lang)
if isinstance(pattern, str):
pattern = re.compile(pattern)
file = open(out)
match = 0
while 1:
line = file.readline()
if line == '':
break
if pattern.search(line):
match = 1
break
file.close()
self._clean()
return match
def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
"""Try to compile a source file built from 'body' and 'headers'.
Return true on success, false otherwise.
"""
from distutils.ccompiler import CompileError
self._check_compiler()
try:
self._compile(body, headers, include_dirs, lang)
ok = 1
except CompileError:
ok = 0
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_link(self, body, headers=None, include_dirs=None, libraries=None,
library_dirs=None, lang="c"):
"""Try to compile and link a source file, built from 'body' and
'headers', to executable form. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
ok = 1
except (CompileError, LinkError):
ok = 0
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_run(self, body, headers=None, include_dirs=None, libraries=None,
library_dirs=None, lang="c"):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
src, obj, exe = self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
self.spawn([exe])
ok = 1
except (CompileError, LinkError, DistutilsExecError):
ok = 0
log.info(ok and "success!" or "failure.")
self._clean()
return ok
# -- High-level methods --------------------------------------------
# (these are the ones that are actually likely to be useful
# when implementing a real-world config command!)
def check_func(self, func, headers=None, include_dirs=None,
libraries=None, library_dirs=None, decl=0, call=0):
"""Determine if function 'func' is available by constructing a
source file that refers to 'func', and compiles and links it.
If everything succeeds, returns true; otherwise returns false.
The constructed source file starts out by including the header
files listed in 'headers'. If 'decl' is true, it then declares
'func' (as "int func()"); you probably shouldn't supply 'headers'
and set 'decl' true in the same call, or you might get errors about
        conflicting declarations for 'func'. Finally, the constructed
'main()' function either references 'func' or (if 'call' is true)
calls it. 'libraries' and 'library_dirs' are used when
linking.
"""
self._check_compiler()
body = []
if decl:
body.append("int %s ();" % func)
body.append("int main () {")
if call:
body.append(" %s();" % func)
else:
body.append(" %s;" % func)
body.append("}")
body = "\n".join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
# check_func ()
def check_lib(self, library, library_dirs=None, headers=None,
include_dirs=None, other_libraries=[]):
"""Determine if 'library' is available to be linked against,
without actually checking that any particular symbols are provided
by it. 'headers' will be used in constructing the source file to
be compiled, but the only effect of this is to check if all the
header files listed are available. Any libraries listed in
'other_libraries' will be included in the link, in case 'library'
has symbols that depend on other libraries.
"""
self._check_compiler()
return self.try_link("int main (void) { }",
headers, include_dirs,
[library]+other_libraries, library_dirs)
def check_header(self, header, include_dirs=None, library_dirs=None,
lang="c"):
"""Determine if the system header file named by 'header_file'
exists and can be found by the preprocessor; return true if so,
false otherwise.
"""
return self.try_cpp(body="/* No body */", headers=[header],
include_dirs=include_dirs)
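# Editorial sketch (not part of the original module): a distribution would
# normally subclass `config` and call the try_*/check_* helpers from run(),
# for example:
#
#   class my_config(config):
#       def run(self):
#           if not self.check_header("zlib.h"):
#               log.warn("zlib.h not found; building without compression")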
def dump_file(filename, head=None):
"""Dumps a file content into log.info.
If head is not None, will be dumped before the file content.
"""
if head is None:
log.info('%s' % filename)
else:
log.info(head)
file = open(filename)
try:
log.info(file.read())
finally:
file.close()
| gpl-3.0 |
soodoku/get-weather-data | setup.py | 1 | 5471 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
from setuptools.command.test import test as TestCommand
# To use a consistent encoding
from codecs import open
from os import path, system
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
class PostDevelopCommand(develop):
"""Post-installation for development mode."""
def run(self):
print("TODO: PostDevelopCommand")
develop.run(self)
class PostInstallCommand(install):
"""Post-installation for installation mode."""
def run(self):
print("TODO: PostInstallCommand")
install.run(self)
class Tox(TestCommand):
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # Import here, because outside the eggs aren't loaded
import tox
import shlex
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
tox.cmdline(args=args)
setup(
name='get-weather-data',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.31',
description='Scripts for finding out the weather in a particular zip code',
long_description=long_description,
# The project's main homepage.
url='https://github.com/soodoku/get-weather-data',
# Author details
author='Suriyan Laohaprapanon, Gaurav Sood',
author_email='[email protected], [email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries :: Python Modules',
],
# What does your project relate to?
keywords='weather zip cdo noaa',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['pygeocoder'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'noaaweb': ['README.md', '*.csv'],
'zip2ws': ['readme.md', 'data/dummy.txt'],
'zip2wd': ['*.txt', '*.csv', 'zip2wd.cfg', 'README.md', 'data/*.sh'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'get-weather-data' will be installed into '<sys.prefix>/get-weather-data'
#data_files=[('get-weather-data', ['zip2wd/data/import-db.sh'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'noaaweb=noaaweb.noaaweb:main',
'zip2ws=zip2ws.zip2ws:main',
'zip2wd-manager=zip2wd.manager:main',
'zip2wd-worker=zip2wd.worker:main',
],
},
cmdclass={
'develop': PostDevelopCommand,
'install': PostInstallCommand,
'test': Tox,
},
tests_require=['tox'],
)
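# Editorial note: after installation (e.g. `pip install .`), the
# console_scripts entry points above become commands on the PATH
# (noaaweb, zip2ws, zip2wd-manager, zip2wd-worker), each dispatching to
# the listed module:function.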
| mit |
ptmr3/GalaxyNote3_Kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
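# Editorial note: add_stats() keeps one (min, max, avg, count) tuple per key;
# e.g. add_stats(d, "latency", 42) on an empty dict leaves
# d["latency"] == (42, 42, 42, 1).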
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
roelvv/pyrax | samples/autoscale/delete_webhook.py | 13 | 3216 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import six
import pyrax
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
au = pyrax.autoscale
def safe_int(val, allow_zero=True):
"""
This function converts the six.moves.input values to integers. It handles
invalid entries, and optionally forbids values of zero.
"""
try:
ret = int(val)
except ValueError:
print("Sorry, '%s' is not a valid integer." % val)
return False
if not allow_zero and ret == 0:
print("Please enter a non-zero integer.")
return False
return ret
# Get the current scaling groups
sgs = au.list()
if not sgs:
print("There are no scaling groups defined.")
exit()
print()
print("Available Scaling Groups:")
for pos, sg in enumerate(sgs):
print("%s - %s" % (pos, sg.name))
intanswer = -1
while intanswer < 0:
answer = six.moves.input("Enter the number of the scaling group: ")
if not answer:
print("Nothing entered; exiting.")
exit()
intanswer = safe_int(answer)
if intanswer is False:
intanswer = -1
continue
if not 0 <= intanswer < len(sgs):
print("The number '%s' does not correspond to any scaling group." % answer)
intanswer = -1
policies = sg.list_policies()
if not policies:
print("There are no policies defined for this scaling group. You can only "
"add webhooks to existing policies.")
exit()
for pos, policy in enumerate(policies):
print("%s - %s" % (pos, policy.name))
answer = six.moves.input("Enter the number of the policy: ")
if not answer:
print("Nothing entered; exiting.")
exit()
intanswer = safe_int(answer)
if not 0 <= intanswer < len(policies):
print("The number '%s' does not correspond to any policy." % answer)
exit()
policy = policies[intanswer]
webhooks = policy.list_webhooks()
if not webhooks:
print("There are no webhooks defined for this policy.")
exit()
for pos, webhook in enumerate(webhooks):
print("%s - %s" % (pos, webhook.name))
answer = six.moves.input("Enter the number of the webhook: ")
if not answer:
print("Nothing entered; exiting.")
exit()
intanswer = safe_int(answer)
if not 0 <= intanswer < len(webhooks):
print("The number '%s' does not correspond to any webhook." % answer)
exit()
webhook = webhooks[intanswer]
webhook.delete()
print()
print("Webhook '%s' has been deleted." % webhook.name)
| apache-2.0 |
repotvsupertuga/tvsupertuga.repository | plugin.video.TVsupertuga/resources/lib/sources/moviexk_mv_tv.py | 6 | 6002 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
def __init__(self):
self.language = ['en']
self.domains = ['moviexk.com']
self.base_link = 'http://moviexk.com'
self.search_link = 'aHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vY3VzdG9tc2VhcmNoL3YxZWxlbWVudD9rZXk9QUl6YVN5Q1ZBWGlVelJZc01MMVB2NlJ3U0cxZ3VubU1pa1R6UXFZJnJzej1maWx0ZXJlZF9jc2UmbnVtPTEwJmhsPWVuJmN4PTAxMzQ0NjM1MTYzMDQ5MzU5NTE5Nzprc2NlY2tjdXZxcyZnb29nbGVob3N0PXd3dy5nb29nbGUuY29tJnE9JXM='
def movie(self, imdb, title, year):
try:
url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(title), year)
url = client.request(url, output='geturl')
if url == None: raise Exception()
url = re.findall('(?://.+?|)(/.+)', url)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
pass
try:
t = cleantitle.get(title)
q = '%s %s' % (title, year)
q = self.search_link.decode('base64') % urllib.quote_plus(q)
r = client.request(q, error=True)
r = json.loads(r)['results']
r = [(i['url'], i['titleNoFormatting']) for i in r]
r = [(i[0], re.findall('(?:^Watch Movie |^Watch |)(.+?)\((\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
r = [(urllib.unquote_plus(i[0]), i[1], i[2]) for i in r]
r = [(urlparse.urlparse(i[0]).path, i[1], i[2]) for i in r]
r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
r = re.sub('/watch-movie-|-\d+$', '/', r[0][0].strip())
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
pass
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
t = cleantitle.get(tvshowtitle)
q = '%s %s' % (tvshowtitle, year)
q = self.search_link.decode('base64') % urllib.quote_plus(q)
r = client.request(q)
r = json.loads(r)['results']
r = [(i['url'], i['titleNoFormatting']) for i in r]
r = [(i[0], re.findall('(?:^Watch Movie |^Watch |)(.+?)$', i[1])) for i in r]
r = [(i[0], i[1][0].rsplit('TV Series')[0].strip('(')) for i in r if i[1]]
r = [(urllib.unquote_plus(i[0]), i[1]) for i in r]
r = [(urlparse.urlparse(i[0]).path, i[1]) for i in r]
r = [i for i in r if t == cleantitle.get(i[1])]
r = urlparse.urljoin(self.base_link, r[0][0].strip())
if '/watch-movie-' in r: r = re.sub('/watch-movie-|-\d+$', '/', r)
y = re.findall('(\d{4})', r)
if y:
y = y[0]
else:
y = client.request(r)
y = re.findall('(?:D|d)ate\s*:\s*(\d{4})', y)[0]
if not year == y: raise Exception()
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = '%s?season=%01d&episode=%01d' % (url, int(season), int(episode))
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
f = urlparse.urljoin(self.base_link, url)
url = f.rsplit('?', 1)[0]
r = client.request(url, mobile=True)
r = client.parseDOM(r, 'div', attrs = {'id': 'servers'})
r = client.parseDOM(r, 'li')
r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
try:
s = urlparse.parse_qs(urlparse.urlparse(f).query)['season'][0]
e = urlparse.parse_qs(urlparse.urlparse(f).query)['episode'][0]
r = [(i[0], re.findall('(\d+)', i[1])) for i in r]
r = [(i[0], '%01d' % int(i[1][0]), '%01d' % int(i[1][1])) for i in r if len(i[1]) > 1]
r = [i[0] for i in r if s == i[1] and e == i[2]]
except:
r = [i[0] for i in r]
for u in r:
try:
url = client.request(u, mobile=True)
url = client.parseDOM(url, 'source', ret='src')
url = [i.strip().split()[0] for i in url]
for i in url:
try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Moviexk', 'url': i, 'direct': True, 'debridonly': False})
except: pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
return directstream.googlepass(url)
| gpl-2.0 |
ivandevp/django | tests/postgres_tests/test_hstore.py | 193 | 9011 | import json
from django.core import exceptions, serializers
from django.forms import Form
from . import PostgreSQLTestCase
from .models import HStoreModel
try:
from django.contrib.postgres import forms
from django.contrib.postgres.fields import HStoreField
from django.contrib.postgres.validators import KeysValidator
except ImportError:
pass
class SimpleTests(PostgreSQLTestCase):
apps = ['django.contrib.postgres']
def test_save_load_success(self):
value = {'a': 'b'}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
def test_null(self):
instance = HStoreModel(field=None)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, None)
def test_value_null(self):
value = {'a': None}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
class TestQuerying(PostgreSQLTestCase):
def setUp(self):
self.objs = [
HStoreModel.objects.create(field={'a': 'b'}),
HStoreModel.objects.create(field={'a': 'b', 'c': 'd'}),
HStoreModel.objects.create(field={'c': 'd'}),
HStoreModel.objects.create(field={}),
HStoreModel.objects.create(field=None),
]
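        # For reference in the assertions below: objs[0] = {'a': 'b'},
        # objs[1] = {'a': 'b', 'c': 'd'}, objs[2] = {'c': 'd'}, objs[3] = {},
        # objs[4] = None.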
def test_exact(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__exact={'a': 'b'}),
self.objs[:1]
)
def test_contained_by(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contained_by={'a': 'b', 'c': 'd'}),
self.objs[:4]
)
def test_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contains={'a': 'b'}),
self.objs[:2]
)
def test_has_key(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_key='c'),
self.objs[1:3]
)
def test_has_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_keys=['a', 'c']),
self.objs[1:2]
)
def test_has_any_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_any_keys=['a', 'c']),
self.objs[:3]
)
def test_key_transform(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a='b'),
self.objs[:2]
)
def test_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys=['a']),
self.objs[:1]
)
def test_values(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values=['b']),
self.objs[:1]
)
def test_field_chaining(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__contains='b'),
self.objs[:2]
)
def test_keys_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys__contains=['a']),
self.objs[:2]
)
def test_values_overlap(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values__overlap=['b', 'd']),
self.objs[:3]
)
def test_key_isnull(self):
obj = HStoreModel.objects.create(field={'a': None})
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__isnull=True),
self.objs[2:5] + [obj]
)
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__isnull=False),
self.objs[:2]
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(id__in=HStoreModel.objects.filter(field__a='b')),
self.objs[:2]
)
class TestSerialization(PostgreSQLTestCase):
test_data = '[{"fields": {"field": "{\\"a\\": \\"b\\"}"}, "model": "postgres_tests.hstoremodel", "pk": null}]'
def test_dumping(self):
instance = HStoreModel(field={'a': 'b'})
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, {'a': 'b'})
class TestValidation(PostgreSQLTestCase):
def test_not_a_string(self):
field = HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean({'a': 1}, None)
self.assertEqual(cm.exception.code, 'not_a_string')
self.assertEqual(cm.exception.message % cm.exception.params, 'The value of "a" is not a string.')
class TestFormField(PostgreSQLTestCase):
def test_valid(self):
field = forms.HStoreField()
value = field.clean('{"a": "b"}')
self.assertEqual(value, {'a': 'b'})
def test_invalid_json(self):
field = forms.HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('{"a": "b"')
self.assertEqual(cm.exception.messages[0], 'Could not load JSON data.')
self.assertEqual(cm.exception.code, 'invalid_json')
def test_not_string_values(self):
field = forms.HStoreField()
value = field.clean('{"a": 1}')
self.assertEqual(value, {'a': '1'})
def test_empty(self):
field = forms.HStoreField(required=False)
value = field.clean('')
self.assertEqual(value, {})
def test_model_field_formfield(self):
model_field = HStoreField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.HStoreField)
def test_field_has_changed(self):
class HStoreFormTest(Form):
f1 = forms.HStoreField()
form_w_hstore = HStoreFormTest()
self.assertFalse(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'})
self.assertTrue(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'}, initial={'f1': '{"a": 1}'})
self.assertFalse(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 2}'}, initial={'f1': '{"a": 1}'})
self.assertTrue(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'}, initial={'f1': {"a": 1}})
self.assertFalse(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 2}'}, initial={'f1': {"a": 1}})
self.assertTrue(form_w_hstore.has_changed())
class TestValidator(PostgreSQLTestCase):
def test_simple_valid(self):
validator = KeysValidator(keys=['a', 'b'])
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
def test_missing_keys(self):
validator = KeysValidator(keys=['a', 'b'])
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some keys were missing: b')
self.assertEqual(cm.exception.code, 'missing_keys')
def test_strict_valid(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
validator({'a': 'foo', 'b': 'bar'})
def test_extra_keys(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_custom_messages(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Foobar')
self.assertEqual(cm.exception.code, 'missing_keys')
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_deconstruct(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
path, args, kwargs = validator.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.validators.KeysValidator')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'keys': ['a', 'b'], 'strict': True, 'messages': messages})
| bsd-3-clause |
lidaohang/beansdb | python/sync.py | 3 | 3600 | #!/usr/bin/python
import sys, os, os.path
from dbclient import Beansdb, db
def get_dir(s, dir):
def parse(line):
p,h,c = line.split(' ')
return p, (int(h), int(c))
return dict(parse(line) for line in
filter(None, (s.get(dir) or '').split('\n')))
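# Each line of a directory listing is assumed to look like
# "<key-or-subdir> <hash> <count>", e.g. "0/ 1935506 42" (values illustrative);
# get_dir() turns such a listing into {key: (hash, count)}.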
def is_dir(d):
return len(d) == 16 and len([k for k in d if k.endswith('/')]) == 16
def mirror(src, dst, path):
s = get_dir(src, path)
d = get_dir(dst, path)
if s == d:
print path, src, dst, 'skipped'
return
if is_dir(s):
for k in sorted(s):
if s[k] != d.get(k):
#print path+k[0], 'mirror ', s[k], d.get(k)
mirror(src, dst, path+k[0])
elif is_dir(d):
for k in sorted(d):
mirror(dst, src, path+k[0])
elif not is_dir(s) and not is_dir(d):
sync_files(src, dst, path, s, d)
sync_files(dst, src, path, d, s)
else:
print path, src, '=>', dst, 'skipped'
def sync_files(src, dst, path, s, d):
for k in sorted(s.keys()):
if k not in d:
data = src.get(k)
if data is not None:
print path, k, s[k], d.get(k,(0,0)), src, "=>", dst, dst.set(k, data, s[k][1])
else:
print path, src, k, 'is None', src.delete(k)
elif s[k][0] != d[k][0]:
if s[k][1] > d[k][1]:
data = src.get(k)
if data is not None:
print path, k, s[k], d.get(k,(0,0)), src, "=>", dst, dst.set(k, data, s[k][1])
else:
print path, src, k, 'is None', src.delete(k)
elif s[k][1] == d[k][1]:
m1 = int((src.get('?'+k) or '0').split(' ')[-1])
m2 = int((dst.get('?'+k) or '0').split(' ')[-1])
print path, src, k, 'is broken', s[k], m1, d[k], m2
if m1 > m2:
dst.set(k, src.get(k))
elif m2 >= m1:
src.set(k, dst.get(k))
def stat(s):
st = {}
for d,h,c in [line.split(' ') for line in (s.get('@') or '').split('\n') if line]:
if len(d) != 2 and not d.endswith('/'):
return {}
try:
st[int(d[0],16)] = (h,int(c))
except:
pass
return st
def almost(a,b):
return abs(a-b) < 0.2*(abs(a)+abs(b))
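# sync() walks every bucket and, for each replica, mirrors it onto the next
# replica in the ring ((s+1) % N), but only when their per-bucket stats differ.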
def sync(db, start=0):
stats = {}
for n,s in db.servers.items():
stats[str(s)] = stat(s)
for b in range(start, db.buckets_count):
N = len(db.buckets[b])
for s in range(N)[::-1]:
src = db.buckets[b][s]
dst = db.buckets[b][(s+1)%N]
if not stats[str(src)] or not stats[str(dst)]:
continue
ss = stats[str(src)].get(b, (0,0))
ds = stats[str(dst)].get(b, (0,0))
if ss != ds:
print '%02x'%b,src,ss, dst, ds
mirror(src, dst, "@%0x"%b)
def lock(fd):
import fcntl, errno
try:
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
except IOError, e:
if e.errno in (errno.EACCES, errno.EAGAIN):
print "There is an instance of", sys.argv[0], "running. Quit"
sys.exit(0)
else:
raise
def main():
import os
lock_file_path = '/tmp/lsync.lock'
fd = os.open(lock_file_path, os.O_CREAT|os.O_RDWR, 0660)
try:
lock(fd)
if len(sys.argv)>1:
sync(db, int(sys.argv[1]))
else:
sync(db)
finally:
os.close(fd)
if __name__ == "__main__":
main()
| bsd-3-clause |
Bulochkin/tensorflow_pack | tensorflow/contrib/losses/python/losses/loss_ops_test.py | 82 | 55012 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.losses.python.losses.loss_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class AbsoluteDifferenceLossTest(test.TestCase):
def setUp(self):
self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.absolute_difference(
self._predictions, self._predictions, weights=None)
def testAllCorrectNoLossWeight(self):
loss = loss_ops.absolute_difference(self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = loss_ops.absolute_difference(self._predictions, self._labels)
with self.test_session():
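      # |4-1| + |8-9| + |12-2| + |8-(-5)| + |1-(-2)| + |3-6| = 33, and 33/6 = 5.5.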
self.assertAlmostEqual(5.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.test_session():
self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = loss_ops.absolute_difference(self._predictions, self._labels,
constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 0.0], shape=[2,])
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.test_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 0.0], shape=[2, 1])
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.test_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.test_session():
self.assertAlmostEqual(16.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.test_session():
self.assertAlmostEqual(6.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weights = array_ops.zeros((2, 3))
loss = loss_ops.absolute_difference(self._predictions, self._labels,
weights)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class SoftmaxCrossEntropyLossTest(test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.softmax_cross_entropy(logits, labels, weights=None)
def testAllCorrect(self):
with self.test_session():
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
loss = loss_ops.softmax_cross_entropy(logits, labels)
self.assertEquals('softmax_cross_entropy_loss/value', loss.op.name)
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrong(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
with self.test_session():
loss = loss_ops.softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = 2.3
with self.test_session():
loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = 2.3
with self.test_session():
loss = loss_ops.softmax_cross_entropy(logits, labels,
constant_op.constant(weights))
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
with self.test_session():
loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testAllWrongAllWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = constant_op.constant([0, 0, 0], shape=[3])
with self.test_session():
loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testSomeWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = constant_op.constant([1.2, 0, 0], shape=[3])
with self.test_session():
loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(12.0, loss.eval(), 3)
def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
weights = constant_op.constant([[3, 4, 5],
[2, 6, 0],
[8, 0, 1]])
with self.assertRaises(ValueError):
loss_ops.softmax_cross_entropy(logits, labels, weights=weights).eval()
def testSoftmaxLabelSmoothing(self):
with self.test_session():
# Softmax Cross Entropy Loss is:
# -\sum_i p_i \log q_i
# where for a softmax activation
# \log q_i = x_i - \log \sum_j \exp x_j
# = x_i - x_max - \log \sum_j \exp (x_j - x_max)
      # For our activations, [100, -100, -100], the log partition function becomes
# \log ( exp(0) + exp(-200) + exp(-200) ) = 0
# so our log softmaxes become: [0, -200, -200]
# so our cross entropy loss is:
# -(1 - L + L/n) * 0 + 400 * L/n = 400 L/n
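      # With label_smoothing L = 0.1 and n = 3 classes this is 400 * 0.1 / 3,
      # i.e. roughly 13.33 -- the expected_value computed below.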
logits = constant_op.constant([[100.0, -100.0, -100.0]])
labels = constant_op.constant([[1, 0, 0]])
label_smoothing = 0.1
loss = loss_ops.softmax_cross_entropy(
logits, labels, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
def testLossWithDynamicallyShapedWeights1D(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = [2.3, 2.4, 2.5]
weights_placeholder = array_ops.placeholder(dtypes.float32, shape=[None])
loss = loss_ops.softmax_cross_entropy(logits, labels, weights_placeholder)
with self.test_session() as sess:
loss = sess.run(loss, {weights_placeholder: weights})
self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
def testLossWithDynamicallyShapedWeights2D(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = [[2.3], [2.4], [2.5]]
weights_placeholder = array_ops.placeholder(
dtypes.float32, shape=[None, None])
loss = loss_ops.softmax_cross_entropy(logits, labels, weights_placeholder)
with self.test_session() as sess:
loss = sess.run(loss, {weights_placeholder: weights})
self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]])
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(logits, labels, weights=None)
def testAllCorrectInt32Labels(self):
with self.test_session():
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int32)
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllCorrectInt64Labels(self):
with self.test_session():
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllCorrectNonColumnLabels(self):
with self.test_session():
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([0, 1, 2])
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrongInt32Labels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int32)
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testAllWrongInt64Labels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int64)
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testAllWrongNonColumnLabels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([2, 0, 1])
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(
logits, labels, constant_op.constant(weights))
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testNonZeroLossWithColumnWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([[1.2], [3.4], [5.6]])
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testAllWrongAllWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([0, 0, 0], shape=[3])
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testSomeWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([1.2, 0, 0], shape=[3])
with self.test_session():
loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
self.assertAlmostEqual(12.0, loss.eval(), 3)
def testMeasurementSpecificWeightsRaisesException(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2]])
weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testInconsistentWeightSizeRaisesException(self):
"""The weight tensor has incorrect number of elements."""
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2]])
weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testInconsistentLabelSizeRaisesException(self):
"""The label tensor has incorrect number of elements."""
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2], [3]])
weights = constant_op.constant([1.2, 3.4, 5.6])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testInconsistentWeightShapeRaisesException(self):
"""The weight tensor has incorrect shape."""
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0, -100.0],
[-100.0, -100.0, 100.0, -100.0],
[-100.0, -100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2], [3]])
weights = constant_op.constant([[1.2, 3.4], [5.6, 7.8]])
with self.assertRaises(ValueError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testInconsistentLabelShapeRaisesException(self):
"""The label tensor has incorrect shape."""
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0, -100.0],
[-100.0, -100.0, 100.0, -100.0],
[-100.0, -100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 1], [2, 3]])
weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
with self.assertRaises(errors_impl.InvalidArgumentError):
loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights=weights).eval()
def testLossWithDynamicallyShapedWeights1D(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([2, 0, 1])
weights = [2.3, 2.4, 2.5]
weights_placeholder = array_ops.placeholder(
dtypes.float32, shape=[None])
loss = loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights_placeholder)
with self.test_session() as sess:
loss = sess.run(loss, {weights_placeholder: weights})
self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
def testLossWithDynamicallyShapedWeights2D(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([2, 0, 1])
weights = [[2.3], [2.4], [2.5]]
weights_placeholder = array_ops.placeholder(
dtypes.float32, shape=[None, None])
loss = loss_ops.sparse_softmax_cross_entropy(
logits, labels, weights_placeholder)
with self.test_session() as sess:
loss = sess.run(loss, {weights_placeholder: weights})
self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
class SigmoidCrossEntropyLossTest(test.TestCase):
def testAllCorrectSigmoid(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
loss = loss_ops.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testLossWithSingleDimPlaceholderForLogitsAndWeights1(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 1))
labels = array_ops.placeholder(dtypes.float32, shape=(None, 1))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: np.ones((32, 1)),
labels: np.ones((32, 1)),
})
self.assertAlmostEqual(0.313, loss, 3)
def testLossWithSingleDimPlaceholderForLogitsAndWeights2(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 2))
labels = array_ops.placeholder(dtypes.float32, shape=(None, 2))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: np.ones((32, 2)),
labels: np.ones((32, 2)),
})
self.assertAlmostEqual(0.313, loss, 3)
def testAllWrongSigmoid(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
loss = loss_ops.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)
def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
weights = constant_op.constant([[3, 4, 5],
[2, 6, 0],
[8, 0, 1]])
loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(1700.0 / 7.0, loss.eval(), 3)
def testMultiCorrectSigmoid(self):
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0],
[-100.0, 100.0, 100.0]])
labels = constant_op.constant([[1, 0, 1],
[1, 1, 0],
[0, 1, 1]])
loss = loss_ops.sigmoid_cross_entropy(logits, labels)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
with self.test_session():
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testSigmoidLabelSmoothingCorrect(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0]])
labels = constant_op.constant([[1, 0, 1]])
# Sigmoid cross entropy loss is:
# max(x,0) - x*z + log(1 + exp(-abs(x)))
# The new labels are:
# z' = z * (1 - L) + 0.5 L
# 1 -> 1 - 0.5 L
# 0 -> 0.5 L
# here we expect:
# 1/3 * (100 - 100 * (1 - 0.5 L) + 0
# + 0 + 100 * (0.5 L) + 0
# + 0 + 100 * (1 - 0.5 L) + 0)
# = 1/3 * (100 + 50 L)
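      # With label_smoothing L = 0.1 this evaluates to (100 + 5) / 3 = 35.0,
      # matching expected_value below.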
label_smoothing = 0.1
loss = loss_ops.sigmoid_cross_entropy(
logits, labels, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self):
with self.test_session():
label_smoothing = 0.1
sigmoid_logits = constant_op.constant([[100.0, -100.0, -100.0]])
sigmoid_labels = constant_op.constant([[1, 0, 1]])
sigmoid_loss = loss_ops.sigmoid_cross_entropy(
sigmoid_logits, sigmoid_labels, label_smoothing=label_smoothing)
softmax_logits = constant_op.constant(
[[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
softmax_labels = constant_op.constant([[0, 1], [1, 0], [0, 1]])
softmax_loss = loss_ops.softmax_cross_entropy(
softmax_logits, softmax_labels, label_smoothing=label_smoothing)
self.assertAlmostEqual(sigmoid_loss.eval(), softmax_loss.eval(), 3)
class LogLossTest(test.TestCase):
def setUp(self):
predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
labels = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3))
self._np_predictions = predictions
self._np_labels = labels
epsilon = 1e-7
self._expected_losses = np.multiply(
labels, np.log(predictions + epsilon)) + np.multiply(
1 - labels, np.log(1 - predictions + epsilon))
self._predictions = constant_op.constant(predictions)
self._labels = constant_op.constant(labels)
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.log_loss(self._labels, self._labels, weights=None)
def testAllCorrectNoLossWeight(self):
loss = loss_ops.log_loss(self._labels, self._labels)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testAllCorrectNoLossWeightWithPlaceholder(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._np_labels.shape)
loss = loss_ops.log_loss(tf_predictions, self._labels)
with self.test_session():
self.assertAlmostEqual(
0.0, loss.eval(feed_dict={tf_predictions: self._np_labels}), 3)
def testNonZeroLoss(self):
loss = loss_ops.log_loss(self._predictions, self._labels)
with self.test_session():
self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = loss_ops.log_loss(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = loss_ops.log_loss(self._predictions, self._labels,
constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._np_predictions.shape)
weights = 2.3
loss = loss_ops.log_loss(tf_predictions, self._labels,
constant_op.constant(weights))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[None, None])
weights = 2.3
loss = loss_ops.log_loss(tf_predictions, self._labels,
constant_op.constant(weights))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=[2])
expected_losses = np.multiply(
self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
loss = loss_ops.log_loss(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 6.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self):
weights = constant_op.constant([1.2, 0], shape=[2])
expected_losses = np.multiply(self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
(2, 3)))
loss = loss_ops.log_loss(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self):
weights = constant_op.constant([1.2, 0], shape=[2, 1])
expected_losses = np.multiply(self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
(2, 3)))
loss = loss_ops.log_loss(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)
def testWeightsWithSameNumDimsButWrongShapeThrowsException(self):
weights = constant_op.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.log_loss(self._predictions, self._labels, weights)
def testNonZeroLossWithMeasurementSpecificWeights(self):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
loss = loss_ops.log_loss(
self._predictions,
self._labels,
constant_op.constant(
weights, shape=(2, 3)))
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss.eval(), 3)
def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
loss = loss_ops.log_loss(
tf_predictions,
self._labels,
constant_op.constant(
weights, shape=(2, 3)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss, 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
loss = loss_ops.log_loss(
self._predictions,
self._labels,
constant_op.constant(
weights, shape=(2, 3)))
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses), loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
tf_weights = constant_op.constant(weights, shape=(2, 3))
loss = loss_ops.log_loss(tf_predictions, self._labels, tf_weights)
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses), loss, 3)
def testLossWithSampleSpecificWeightsAllZero(self):
tf_weights = array_ops.zeros(shape=(2, 3))
loss = loss_ops.log_loss(self._predictions, self._labels, tf_weights)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class HingeLossTest(test.TestCase):
def testIncompatibleShapes(self):
with self.test_session():
logits = constant_op.constant([[-1.0], [2.1]])
labels = constant_op.constant([0.0, 1.0])
with self.assertRaises(ValueError):
_ = loss_ops.hinge_loss(logits, labels).eval()
def testAllOutsideMargin(self):
with self.test_session():
logits = constant_op.constant([1.2, -1.4, -1.0, 2.1])
labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
loss = loss_ops.hinge_loss(logits, labels)
self.assertAllClose(loss.eval(), [0.0, 0.0, 0.0, 0.0], atol=1e-3)
def testSomeInsideMargin(self):
with self.test_session():
logits = constant_op.constant([[-0.7], [-1.4], [1.4], [0.6]])
labels = constant_op.constant([[0.0], [0.0], [1.0], [1.0]])
loss = loss_ops.hinge_loss(logits, labels)
# Examples 1 and 4 are on the correct side of the hyperplane but within
# the margin so they incur some (small) loss.
self.assertAllClose(loss.eval(), [[0.3], [0.0], [0.0], [0.4]], atol=1e-3)
def testSomeMisclassified(self):
with self.test_session():
logits = constant_op.constant([[[1.2], [0.4], [-1.0], [-1.1]]])
labels = constant_op.constant([[[1.0], [0.0], [0.0], [1.0]]])
loss = loss_ops.hinge_loss(logits, labels)
# Examples 2 and 4 are on the wrong side of the hyperplane so they incur
# some (fairly large) loss.
self.assertAllClose(
loss.eval(), [[[0.0], [1.4], [0.0], [2.1]]], atol=1e-3)
class MeanSquaredErrorTest(test.TestCase):
def setUp(self):
self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.mean_squared_error(
self._predictions, self._predictions, weights=None)
def testAllCorrectNoLossWeight(self):
loss = loss_ops.mean_squared_error(self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = loss_ops.mean_squared_error(self._predictions, self._labels)
with self.test_session():
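      # squared errors: 9 + 1 + 100 + 169 + 9 + 9 = 297, and 297/6 = 49.5.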
self.assertAlmostEqual(49.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(49.5 * weights, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = loss_ops.mean_squared_error(self._predictions, self._labels,
constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(49.5 * weights, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=[2,])
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=[2, 1])
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(587 / 5.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(18.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weights = array_ops.zeros((2, 3))
loss = loss_ops.mean_squared_error(self._predictions, self._labels, weights)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class MeanPairwiseSquaresErrorTest(test.TestCase):
def setUp(self):
self._predictions = np.array([[4, 8, 12], [8, 1, 3]])
self._labels = np.array([[1, 9, 2], [-5, -5, 7]])
batch_size, dims = self._labels.shape
# Compute the expected loss 'manually'.
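    # For one batch element this is (1/d^2) * sum_{i,j} ((p_i - p_j) - (l_i - l_j))^2
    # with d = 3, hence the division by 9.0 below.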
total = np.zeros((batch_size, 1))
for b in range(batch_size):
for i in range(dims):
for j in range(dims):
x = self._predictions[b, i].item() - self._predictions[b, j].item()
y = self._labels[b, i].item() - self._labels[b, j].item()
tmp = (x - y) * (x - y)
total[b] += tmp
self._expected_losses = np.divide(total, 9.0)
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
weights=None)
def testAllCorrectNoLossWeight(self):
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels))
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels))
with self.test_session():
self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3)
def testGradientWithZeroWeight(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
inputs = array_ops.ones((2, 3))
weights = variable_scope.get_variable(
'weights',
shape=[3, 4],
initializer=init_ops.truncated_normal_initializer())
predictions = math_ops.matmul(inputs, weights)
optimizer = momentum_lib.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
loss = loss_ops.mean_pairwise_squared_error(predictions, predictions, 0)
gradients_to_variables = optimizer.compute_gradients(loss)
init_op = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
for grad, _ in gradients_to_variables:
np_grad = sess.run(grad)
self.assertFalse(np.isnan(np_grad).any())
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=weights)
with self.test_session():
self.assertAlmostEqual(weights * np.sum(self._expected_losses),
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(weights * np.sum(self._expected_losses),
loss.eval(), 3)
def testNonZeroLossWithScalarZeroWeight(self):
weights = 0
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightWithPlaceholder(self):
weights = 2.3
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._predictions.shape)
tf_labels = array_ops.placeholder(dtypes.float32, shape=self._labels.shape)
loss = loss_ops.mean_pairwise_squared_error(
predictions=tf_predictions,
labels=tf_labels,
weights=constant_op.constant(weights))
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
tf_predictions: self._predictions,
tf_labels: self._labels,
})
self.assertAlmostEqual(weights * np.sum(self._expected_losses), loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = np.asarray([2.0, 1.0]).reshape((2, 1))
expected_losses = np.multiply(weights, self._expected_losses)
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(
weights, shape=[2]))
with self.test_session():
self.assertAlmostEqual(np.sum(expected_losses), loss.eval(), 3)
def testZeroLossWithOneDimBatchZeroWeights(self):
weights = np.asarray([0.0, 0.0]).reshape((2, 1))
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(
weights, shape=[2]))
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsAndPlaceholders(self):
weights = np.asarray([1.2, 3.4]).reshape((2, 1))
expected_losses = np.multiply(weights, self._expected_losses)
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._predictions.shape)
tf_labels = array_ops.placeholder(dtypes.int32, shape=self._labels.shape)
loss = loss_ops.mean_pairwise_squared_error(
predictions=tf_predictions,
labels=tf_labels,
weights=constant_op.constant(
weights, shape=[2]))
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
tf_predictions: self._predictions,
tf_labels: self._labels,
})
self.assertAlmostEqual(np.sum(expected_losses), loss, 3)
def testLossWithAllZeroBatchSpecificWeights(self):
weights = np.zeros((2, 1))
loss = loss_ops.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(
weights, shape=[2]))
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testLossIsAssociativeAcrossBatchElements(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
height = 3
width = 4
shape = (1, height, width, 1)
labels0 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
predictions0 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
labels1 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
predictions1 = random_ops.random_uniform(
shape, minval=0, maxval=1, dtype=dtypes.float32)
loss0 = loss_ops.mean_pairwise_squared_error(
predictions=predictions0,
labels=labels0)
loss1 = loss_ops.mean_pairwise_squared_error(
predictions=predictions1,
labels=labels1)
loss0_1 = loss_ops.mean_pairwise_squared_error(
predictions=array_ops.concat([predictions0, predictions1], 0),
labels=array_ops.concat([labels0, labels1], 0))
with self.test_session() as session:
loss0, loss1, loss0_1 = session.run([loss0, loss1, loss0_1])
self.assertTrue(loss0 > 0)
self.assertTrue(loss1 > 0)
self.assertAlmostEqual(loss0 + loss1, loss0_1, 5)
class CosineDistanceLossTest(test.TestCase):
def setUp(self):
self._predictions = np.asarray([
[1, 0, 0], # Batch 1
[0, 0, -1],
[1, 0, 0], # Batch 2
[1, 0, 0],
[0, 0, -1], # Batch 3
[1, 0, 0]
]).reshape((3, 2, 3))
self._labels = np.asarray([[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0]]).reshape((3, 2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.cosine_distance(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
dim=2,
weights=None)
def testAllCorrectNoWeights(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
dim=2)
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 5)
def testPartiallyCorrectWithIntegerValues(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2)
with self.test_session():
self.assertAlmostEqual(1, loss.eval(), 5)
def testPartiallyCorrectFloatingPointValues(self):
predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
labels = np.matrix(('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
tf_preds = constant_op.constant(
predictions, shape=(3, 1, 3), dtype=dtypes.float32)
tf_labels = constant_op.constant(
labels, shape=(3, 1, 3), dtype=dtypes.float32)
loss = loss_ops.cosine_distance(tf_preds, tf_labels, dim=2)
with self.test_session():
self.assertAlmostEqual(1.0, loss.eval(), 5)
def testSampleSpecificWeights(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant([1, 0, 0]))
with self.test_session():
self.assertEqual(1.0, loss.eval())
def testMeasurementSpecificWeights(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.test_session():
self.assertEqual(3.0 / 4.0, loss.eval())
def testValueErrorThrownWithShapelessPlaceholder(self):
tf_predictions = array_ops.placeholder(dtypes.float32)
with self.test_session():
with self.assertRaises(ValueError):
loss_ops.cosine_distance(
predictions=tf_predictions,
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2)))
def testMeasurementSpecificWeightsWithPlaceholderWithShape(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._labels.shape)
loss = loss_ops.cosine_distance(
predictions=tf_predictions,
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._predictions})
self.assertEqual(3.0 / 4.0, loss)
def testZeroLossWhenAllSampleSpecificWeightsAreZero(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=array_ops.zeros((3,)))
with self.test_session():
self.assertEqual(0, loss.eval())
def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self):
loss = loss_ops.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=array_ops.zeros((3, 2)))
with self.test_session():
self.assertEqual(0, loss.eval())
class ComputeWeightedLossTest(test.TestCase):
def testHingeLoss(self):
logits = constant_op.constant([1.2, 0.4, -1.0, -1.1])
labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
losses = loss_ops.hinge_loss(logits, labels)
self.assertFalse(loss_ops.get_losses())
loss = loss_ops.compute_weighted_loss(losses)
self.assertTrue(loss_ops.get_losses())
with self.test_session():
self.assertAllClose(losses.eval(), [0.0, 1.4, 0.0, 2.1], atol=1e-3)
self.assertAllClose(loss.eval(), 3.5 / 4.0, atol=1e-3)
class AddLossTest(test.TestCase):
def testAddExternalLoss(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
losses = loss_ops.hinge_loss(logits, labels)
self.assertFalse(loss_ops.get_losses())
loss_ops.add_loss(math_ops.reduce_mean(losses))
self.assertTrue(loss_ops.get_losses())
total_loss = loss_ops.get_total_loss()
with self.test_session():
self.assertAllClose(losses.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)
self.assertAllClose(total_loss.eval(), 3.5 / 4.0, atol=1e-3)
def testNoneLossCollection(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
losses = loss_ops.hinge_loss(logits, labels)
self.assertFalse(loss_ops.get_losses())
loss_ops.add_loss(math_ops.reduce_mean(losses), loss_collection=None)
self.assertFalse(loss_ops.get_losses())
with self.test_session():
self.assertAllClose(losses.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)
def testNoCollectLosses(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
self.assertFalse(loss_ops.get_losses())
with arg_scope([loss_ops.add_loss], loss_collection=None):
loss_ops.absolute_difference(logits, labels)
loss_ops.log_loss(logits, labels)
loss_ops.mean_squared_error(logits, labels)
loss_ops.sigmoid_cross_entropy(logits, labels)
loss_ops.softmax_cross_entropy(logits, labels)
self.assertFalse(loss_ops.get_losses())
def testNoCollectLossesBatch2(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]] * 2)
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]] * 2)
self.assertFalse(loss_ops.get_losses())
with arg_scope([loss_ops.add_loss], loss_collection=None):
loss_ops.absolute_difference(logits, labels)
loss_ops.log_loss(logits, labels)
loss_ops.mean_squared_error(logits, labels)
loss_ops.sigmoid_cross_entropy(logits, labels)
loss_ops.softmax_cross_entropy(logits, labels)
self.assertFalse(loss_ops.get_losses())
if __name__ == '__main__':
test.main()
| apache-2.0 |
linsomniac/luigi | luigi/contrib/simulate.py | 16 | 3280 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A module containing classes used to simulate certain behaviors
"""
from multiprocessing import Value
import tempfile
import hashlib
import logging
import os
import luigi
logger = logging.getLogger('luigi-interface')
class RunAnywayTarget(luigi.Target):
"""
    A target used to make a task run every time it is called.
Usage:
Pass `self` as the first argument in your task's `output`:
    .. code-block:: python
def output(self):
return RunAnywayTarget(self)
And then mark it as `done` in your task's `run`:
    .. code-block:: python
def run(self):
# Your task execution
# ...
self.output().done() # will then be considered as "existing"
"""
# Specify the location of the temporary folder storing the state files. Subclass to change this value
temp_dir = os.path.join(tempfile.gettempdir(), 'luigi-simulate')
temp_time = 24 * 3600 # seconds
# Unique value (PID of the first encountered target) to separate temporary files between executions and
# avoid deletion collision
unique = Value('i', 0)
def __init__(self, task_obj):
self.task_id = task_obj.task_id
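        # Double-checked locking: only the first target created in this process
        # takes the lock and stores the PID that namespaces the temporary files.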
if self.unique.value == 0:
with self.unique.get_lock():
if self.unique.value == 0:
self.unique.value = os.getpid() # The PID will be unique for every execution of the pipeline
# Deleting old files > temp_time
if os.path.isdir(self.temp_dir):
import shutil
import time
limit = time.time() - self.temp_time
for fn in os.listdir(self.temp_dir):
path = os.path.join(self.temp_dir, fn)
if os.path.isdir(path) and os.stat(path).st_mtime < limit:
shutil.rmtree(path)
logger.debug('Deleted temporary directory %s', path)
def get_path(self):
"""
        Returns a temporary file path based on an MD5 hash generated with the task's name and its arguments
"""
md5_hash = hashlib.md5(self.task_id.encode()).hexdigest()
logger.debug('Hash %s corresponds to task %s', md5_hash, self.task_id)
return os.path.join(self.temp_dir, str(self.unique.value), md5_hash)
def exists(self):
"""
Checks if the file exists
"""
return os.path.isfile(self.get_path())
def done(self):
"""
Creates temporary file to mark the task as `done`
"""
logger.info('Marking %s as done', self)
fn = self.get_path()
os.makedirs(os.path.dirname(fn), exist_ok=True)
open(fn, 'w').close()
| apache-2.0 |
hieukypc/ERP | openerp/addons/sale_stock/res_config.py | 28 | 2068 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import SUPERUSER_ID
from openerp import api, fields, models, _
from openerp.exceptions import AccessError
class SaleConfiguration(models.TransientModel):
_inherit = 'sale.config.settings'
module_delivery = fields.Selection([
(0, 'No shipping costs on sales orders'),
(1, 'Allow adding shipping costs')
], "Shipping")
default_picking_policy = fields.Selection([
(0, 'Ship products when some are available, and allow back orders'),
(1, 'Ship all products at once, without back orders')
], "Default Shipping Policy")
group_mrp_properties = fields.Selection([
(0, "Don't use manufacturing properties (recommended as its easier)"),
(1, 'Allow setting manufacturing order properties per order line (advanced)')
], "Properties on SO Lines",
implied_group='sale.group_mrp_properties',
help="Allows you to tag sales order lines with properties.")
group_route_so_lines = fields.Selection([
(0, 'No order specific routes like MTO or drop shipping'),
(1, 'Choose specific routes on sales order lines (advanced)')
], "Order Routing",
implied_group='sale_stock.group_route_so_lines')
@api.model
def get_default_sale_config(self, fields):
default_picking_policy = self.env['ir.values'].get_default('sale.order', 'picking_policy')
return {
'default_picking_policy': 1 if default_picking_policy == 'one' else 0,
}
@api.multi
def set_sale_defaults(self):
self.ensure_one()
if not self.env.user._is_admin():
raise AccessError(_("Only administrators can change the settings"))
default_picking_policy = 'one' if self.default_picking_policy else 'direct'
self.env['ir.values'].sudo().set_default('sale.order', 'picking_policy', default_picking_policy)
res = super(SaleConfiguration, self).set_sale_defaults()
return res
| gpl-3.0 |
karthikv1392/Machine_Learning | naive_bayes.py | 1 | 15135 | # Karthik V
# Program to implement Naive Bayes classification
import collections
import math
count_clusters=0
fo=open("trainingdata.txt","r+")
array_cluster=[]
i=0
dict_word_map={}
# we create 7 lists (one per class) to put all the word-count associations into
list1=[]
list2=[]
list3=[]
list4=[]
list5=[]
list6=[]
list7=[]
for row in fo.readlines():
data=row.split()
try:
val=0 # This is an indicator variable to show which is the current class
if(data[0].isdigit()):
# print 'The data values ',
count_clusters=count_clusters+1
array_cluster.insert(i,data[0]) # array cluster contains different cluster values
word_count = data[1].split(':')
if(data[0]=='1'):
val=1
elif(data[0]=='2'):
val=2
elif(data[0]=='3'):
val=3
elif(data[0]=='4'):
val=4
elif(data[0]=='5'):
val=5
elif(data[0]=='6'):
val=6
elif(data[0]=='7'):
val=7
#print 'Word is ', word_count[0],
#print ' count is ', word_count[1]
# word_list=[word,count]
# dict_word_map=
i=i+1
# block=data[1].split(':')
except IndexError :
print 'Empty row'
if(val==1):
list1.append(data[1])
elif(val==2):
list2.append(data[1])
elif(val==3):
list3.append(data[1])
elif(val==4):
list4.append(data[1])
elif(val==5):
list5.append(data[1])
elif(val==6):
list6.append(data[1])
elif(val==7):
list7.append(data[1])
# We will just print different classes and the correspoding entries in each classes
print ' Different class values '
print 'class 1: ', list1
print ' '
print ' '
print 'class 2: ', list2
print ' '
print ' '
print 'class 3: ', list3
print ' '
print ' '
print 'class 4: ', list4
print ' '
print ' '
print 'class 5: ', list5
print ' '
print ' '
print 'class 6: ', list6
print ' '
print ' '
print 'class 7: ', list7
print ' '
print ' '
print '*****************************************************************************************'
print 'count :',
print count_clusters
print 'Total Number of Clusters :',
total_count=len(set(array_cluster)) # set displays unique values and hence we get the perfect cluster count
print total_count
# The above step completes the formatting of data and we have the words and corresponding counts in each class
# To display the number of elements in each cluster
prob_1=0;
c=collections.Counter()
for word in array_cluster:
c[word]+=1
for letter, count in c.most_common(8):
char=letter
print '%s: %7d' % (letter, count)
count = count/1.0
if(letter=='0'):
print count
prob_0=float(count/count_clusters)
elif (letter=='1'):
prob_1=float(count/count_clusters)
elif (letter=='2'):
prob_2=float(count/count_clusters)
elif (letter=='3'):
prob_3=float(count/count_clusters)
elif (letter=='4'):
prob_4=float(count/count_clusters)
elif (letter=='5'):
prob_5=float(count/count_clusters)
elif (letter=='6'):
prob_6=float(count/count_clusters)
elif (letter=='7'):
prob_7=float(count/count_clusters)
else:
print 'nothing'
# printing probability of different clusters
print 'Prob_Cluster 1 :',
print float(prob_1)
print 'Prob_Cluster 2 :',
print float(prob_2)
print 'Prob_Cluster 3 :',
print float(prob_3)
print 'Prob_Cluster 4 :',
print float(prob_4)
print 'Prob_Cluster 5 :',
print float(prob_5)
print 'Prob_Cluster 6 :',
print float(prob_6)
print 'Prob_Cluster 7 :',
print float(prob_7)
# Now we go through the different lists and check the total number of words, the number of distinct words and the dictionary for the word map
dict_list1={}
dict_list2={}
dict_list3={}
dict_list4={}
dict_list5={}
dict_list6={}
dict_list7={}
dict_list8={}
# initialize the total count in each list to 0
total_count_l1=total_count_l2=total_count_l3=total_count_l4=total_count_l5=total_count_l6=total_count_l7=0
# We will start with list 1
# List 1
word1=[]
for item in list1:
word_count=item.split(':')
word1.append(word_count[0])
if(word_count[0] in dict_list1):
dict_list1[word_count[0]]+=int(word_count[1])
else:
dict_list1[word_count[0]]=int(word_count[1])
total_count_l1=total_count_l1+int(word_count[1])
# Let us print the dictionary values for list one; finding the probability of each word in a list is now an easy task
# Key values and probability of each word to be in list 1
dict_prob_list1={} # dictionary to store the probability associated with each word to be in list 1
print 'Printing the word and word count in class 1 '
for key,val in dict_list1.items():
unique=float(len(set(word1))) # This takes the count of unique words in class 1
prob_value=float(float(val+1)/(total_count_l1+unique)) # This finds the probability of each word note laplace smoothing is done
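    # Laplace (add-one) smoothing: P(word|class) = (count + 1) / (total words in class + number of unique words)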
dict_prob_list1[key]=prob_value
print "{} = {}".format(key, val)
print ' '
# List 2
word2=[]
for item in list2:
word_count=item.split(':')
word2.append(word_count[0])
if(word_count[0] in dict_list2):
dict_list2[word_count[0]]+=int(word_count[1])
else:
dict_list2[word_count[0]]=int(word_count[1])
total_count_l2=total_count_l2+int(word_count[1])
dict_prob_list2={} # dictionary to store the probability associated with each word to be in list 1
print 'Printing the word and word count in class 2 '
for key,val in dict_list2.items():
unique=float(len(set(word2))) # This takes the count of unique words in class 1
prob_value=float(float(val+1)/(total_count_l2+unique))
dict_prob_list2[key]=prob_value
print "{} = {}".format(key, val)
print ' '
# List 3
word3=[]
for item in list3:
word_count=item.split(':')
word3.append(word_count[0])
if(word_count[0] in dict_list3):
dict_list3[word_count[0]]+=int(word_count[1])
else:
dict_list3[word_count[0]]=int(word_count[1])
total_count_l3=total_count_l3+int(word_count[1])
dict_prob_list3={}
print 'Printing the word and word count in class 3 '
for key,val in dict_list3.items():
unique=float(len(set(word3))) # This takes the count of unique words in class 1
prob_value=float(float(val+1)/(total_count_l3+unique))
dict_prob_list3[key]=prob_value
print "{} = {}".format(key, val)
print ' '
# List 4
word4=[]
for item in list4:
word_count=item.split(':')
word4.append(word_count[0])
if(word_count[0] in dict_list4):
dict_list4[word_count[0]]+=int(word_count[1])
else:
dict_list4[word_count[0]]=int(word_count[1])
total_count_l4=total_count_l4+int(word_count[1])
dict_prob_list4={}
print 'Printing the word and word count in class 4 '
for key,val in dict_list4.items():
unique=float(len(set(word4))) # This takes the count of unique words in class 1
prob_value=float(float(val+1)/(total_count_l4+unique))
dict_prob_list4[key]=prob_value
print "{} = {}".format(key, val)
print ' '
# List 5
word5=[]
for item in list5:
word_count=item.split(':')
word5.append(word_count[0])
if(word_count[0] in dict_list5):
dict_list5[word_count[0]]+=int(word_count[1])
else:
dict_list5[word_count[0]]=int(word_count[1])
total_count_l5=total_count_l5+int(word_count[1])
dict_prob_list5={}
print 'Printing the word and word count in class 5 '
for key,val in dict_list5.items():
unique=float(len(set(word5))) # This takes the count of unique words in class 1
prob_value=float(float(val+1)/(total_count_l5+unique))
dict_prob_list5[key]=prob_value
print "{} = {}".format(key, val)
print ' '
# List 6
word6=[]
for item in list6:
word_count=item.split(':')
word6.append(word_count[0])
if(word_count[0] in dict_list6):
dict_list6[word_count[0]]+=int(word_count[1])
else:
dict_list6[word_count[0]]=int(word_count[1])
total_count_l6=total_count_l6+int(word_count[1])
dict_prob_list6={}
print 'Printing the word and word count in class 6 '
for key,val in dict_list6.items():
unique=float(len(set(word6))) # This takes the count of unique words in class 1
prob_value=float(float(val+1)/(total_count_l6+unique))
dict_prob_list6[key]=prob_value
print "{} = {}".format(key, val)
# List 7
word7=[]
for item in list7:
word_count=item.split(':')
word7.append(word_count[0])
if(word_count[0] in dict_list7):
dict_list7[word_count[0]]+=int(word_count[1])
else:
dict_list7[word_count[0]]=int(word_count[1])
total_count_l7=total_count_l7+int(word_count[1])
dict_prob_list7={}
print 'Printing the word and word count in class 7 '
for key,val in dict_list7.items():
unique=float(len(set(word7))) # This takes the count of unique words in class 1
prob_value=float(float(val+1)/(total_count_l7+unique))
dict_prob_list7[key]=prob_value
print "{} = {}".format(key, val)
print' '
print '********************************************************************************************************************'
print ' '
print 'Printing the total count of words in different classes '
print ' '
print 'Total number of words in class 1 : ', total_count_l1 , ' unique words : ',len(set(word1))
print 'Total number of words in class 2 : ', total_count_l2 , ' unique words : ',len(set(word2))
print 'Total number of words in class 3 : ', total_count_l3 , ' unique words : ',len(set(word3))
print 'Total number of words in class 4 : ', total_count_l4 , ' unique words : ',len(set(word4))
print 'Total number of words in class 5 : ', total_count_l5 , ' unique words : ',len(set(word5))
print 'Total number of words in class 6 : ', total_count_l6 , ' unique words : ',len(set(word6))
print 'Total number of words in class 7 : ', total_count_l7 , ' unique words : ',len(set(word7))
# print the probability values of different items in each list
# Class 1
print 'Printing the probability values in class 1'
for key,val in dict_prob_list1.items():
print "{} = {}".format(key, val)
print ' '
# Class 2
print 'Printing the probability values in class 2'
for key,val in dict_prob_list2.items():
print "{} = {}".format(key, val)
print ' '
# Class 3
print 'Printing the probability values in class 3'
for key,val in dict_prob_list3.items():
print "{} = {}".format(key, val)
print ' '
# Class 4
print 'Printing the probability values in class 4'
for key,val in dict_prob_list4.items():
print "{} = {}".format(key, val)
print ' '
# Class 5
print 'Printing the probability values in class 5'
for key,val in dict_prob_list5.items():
print "{} = {}".format(key, val)
print ' '
# Class 6
print 'Printing the probability values in class 6'
for key,val in dict_prob_list6.items():
print "{} = {}".format(key, val)
print ' '
# Class 7
print 'Printing the probability values in class 7'
for key,val in dict_prob_list7.items():
print "{} = {}".format(key, val)
print ' '
# Now we have all the probability values; all that remains is to test how well the program works, so let us take the test data now
# Read the testing data now
f=open("testingdata.txt","r+")
# We will follow the same pattern as we did for training. Put these elements into a list
# The objective is to find P(Class/Dataset)
# We will follow Bayes's rule and we will compare with all the probabilities to find the best one
print ' '
print ' '
print '*************************************************'
print ' Testing Begins Here '
print '*************************************************'
print ' '
test_list=[] # Initializing an empty list to take values from the testing set.
for row in f.readlines():
data=row.split()
flag=0
try:
if(data[0].isdigit()):
flag=1
if(data[0]=='1'):
val=1
elif(data[0]=='2'):
val=2
elif(data[0]=='3'):
val=3
elif(data[0]=='4'):
val=4
elif(data[0]=='5'):
val=5
elif(data[0]=='6'):
val=6
elif(data[0]=='7'):
val=7
except IndexError :
print 'Empty row'
if(val==2):
test_list.append(data[1])
print 'Now the Testing data is :'
print ' '
print test_list
print ' '
total_count_test=0 # To keep a count of the number of total words in the test list
dict_test={}
# Now put all the words in the given list into a dictionary to get the count of each word, so each probability can be multiplied that many times
for item in test_list:
word_count=item.split(':')
if(word_count[0] in dict_test):
dict_test[word_count[0]]+=int(word_count[1])
else:
dict_test[word_count[0]]=int(word_count[1])
total_count_test=total_count_test+int(word_count[1])
# Just to check whether we received all the values or not
print ' '
print 'Printing the words and their count from the given dataset '
print ' '
for key,val in dict_test.items():
print "{} = {}".format(key, val)
# We need to take each word and find the probability. We should first begin from class 1
list_ygivenx = [] # to store the p(X/Y) values for each word so that at the end it can be multiplied
prob1=prob2=prob3=prob4=prob5=prob6=prob7=1
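# For each class, multiply P(word|class) raised to the word's count; a word never seen
# in a class falls back to the smoothed value 1 / (total words in class + unique words).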
for key,val in dict_test.items():
# P(X/1)
if(key in dict_prob_list1):
prob1=prob1*math.pow(float(dict_prob_list1[key]),int(val))
else:
unique=len(set(word1))
prob1=prob1*math.pow(float(1.0/(total_count_l1+unique)),int(val))
# P(X/2)
if(key in dict_prob_list2):
prob2=prob2*math.pow(float(dict_prob_list2[key]),int(val))
else:
unique=len(set(word2))
prob2=prob2*math.pow(float(1.0/(total_count_l2+unique)),int(val))
# P(X/3)
if(key in dict_prob_list3):
prob3=prob3*math.pow(float(dict_prob_list3[key]),int(val))
else:
unique=len(set(word3))
prob3=prob3*math.pow(float(1.0/(total_count_l3+unique)),int(val))
# P(X/4)
if(key in dict_prob_list4):
#print 'Value is ', float(dict_prob_list1[key])
prob4=prob4*math.pow(float(dict_prob_list4[key]),int(val))
#print 'prob 1 ', prob1
else:
unique=len(set(word4))
prob4=prob4*math.pow(float(1.0/(total_count_l4+unique)),int(val))
# P(X/5)
if(key in dict_prob_list5):
#print 'Value is ', float(dict_prob_list1[key])
prob5=prob5*math.pow(float(dict_prob_list5[key]),int(val))
#print 'prob 1 ', prob1
else:
unique=len(set(word5))
prob5=prob5*math.pow(float(1.0/(total_count_l5+unique)),int(val))
# P(X/6)
if(key in dict_prob_list6):
#print 'Value is ', float(dict_prob_list1[key])
prob6=prob6*math.pow(float(dict_prob_list6[key]),int(val))
#print 'prob 1 ', prob1
else:
unique=len(set(word6))
prob6=prob6*math.pow(float(1.0/(total_count_l6+unique)),int(val))
# P(X/7)
if(key in dict_prob_list7):
#print 'Value is ', float(dict_prob_list1[key])
prob7=prob7*math.pow(float(dict_prob_list7[key]),int(val))
#print 'prob 1 ', prob1
else:
unique=len(set(word7))
prob7=prob7*math.pow(float(1.0/(total_count_l7+unique)),int(val))
# Having got all the P(X/Y) values multiply it with corresponding P(Y) values to get the result
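# Naive Bayes decision rule: choose the class c that maximises P(c) * product of P(word|c)^count.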
list_ygivenx.append(prob1*prob_1)
list_ygivenx.append(prob2*prob_2)
list_ygivenx.append(prob3*prob_3)
list_ygivenx.append(prob4*prob_4)
list_ygivenx.append(prob5*prob_5)
list_ygivenx.append(prob6*prob_6)
list_ygivenx.append(prob7*prob_7)
# Printing the list with the probability values
print '****************************************************************'
print 'Printing the final probability matrix '
print list_ygivenx
print ' '
# Now we just need to find the class with maximum probability
max=0.0
j=1
final=0
for i in list_ygivenx:
if(float(i)>max):
max=i
final=j
j=j+1
print 'Now it is time for prediction '
print ' '
print '***************************************************************'
print ' '
print 'The class is : ',final
print ' '
print '***************************************************************'
print ' ' | gpl-3.0 |
PhilippeMorere/RLTrader | v1/World.py | 1 | 3890 | import math
import random
from qAgent import QAgent
from perceptronAgent import PerceptronAgent
import matplotlib.pyplot as plt
__author__ = 'philippe'
class DataGenerator:
def __init__(self):
self.sinus_offset = 1.2
self.sinus_period = 0.1
self.time = 0
def generate_sinus_data(self):
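        # Price-like signal: a sum of four sinusoids with different amplitudes,
        # frequencies and phases, plus uniform noise in [0, 10).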
sin1 = 10 * math.sin(self.sinus_offset + self.sinus_period * self.time)
sin2 = -7 * math.sin(self.sinus_offset + 8 * self.sinus_period * self.time)
sin3 = 5 * math.sin(self.sinus_offset + 1 + 20 * self.sinus_period * self.time)
sin4 = -1 * math.sin(self.sinus_offset + 2.1 + 6 * self.sinus_period * self.time)
point = sin1 + sin2 + sin3 + sin4 + random.random() * 10
return point
def generate_increasing_data(self):
return self.time * 0.1
def increase_time(self):
self.time += 1
def is_first_pass(self):
return self.time == 0
class World:
actions = ['buy', 'hold', 'sell']
def __init__(self):
self.data_generator = DataGenerator()
self.agent = PerceptronAgent(self.actions)
self.agent_is_holding = False
self.gap = 2.0 # %
self.not_trading_fee = 0.1 # %
self.number_training = 10000
self.number_test = 400
self.all_actions = []
self.data_generated = []
def main(self):
reward = 0
old_data = 0
action = self.actions[1]
generation = 0
is_test = False
while generation < (self.number_training + self.number_test):
# Generate new data
new_data = self.data_generator.generate_sinus_data()
if is_test:
self.data_generated.append(new_data)
# For the old round
if not self.data_generator.is_first_pass():
# Compute reward
reward = self.compute_reward(action, old_data, new_data)
if is_test:
self.all_actions.append(self.agent_is_holding)
# Reward agent
self.agent.reward(action, reward, self.agent_is_holding, new_data)
# Get action
action = self.agent.get_action(self.agent_is_holding)
old_data = new_data
self.data_generator.increase_time()
#time.sleep(0.01)
generation += 1
if generation % 10000 == 0 and generation > 0:
if generation <= self.number_training:
print "Training generation", str(generation), "/", self.number_training
else:
print "Test generation", str(generation - self.number_training), "/", self.number_test
self.agent.display_info()
if generation == self.number_training:
is_test = True
self.agent.disable_training()
self.agent.display_info()
#self.agent.print_best_states(15)
self.plot_data()
def plot_data(self):
plt.plot(range(len(self.data_generated)), self.data_generated, 'b-')
plt.plot(range(len(self.all_actions)), self.all_actions, 'r-')
plt.show()
def compute_reward(self, action, old_data, new_data):
earnings = new_data - old_data
# Decrease the agent's earnings when it doesn't trade, force it to trade
if not self.agent_is_holding:
earnings = - self.not_trading_fee * math.fabs(new_data) / 100.0
# Update the agent state: holding or not
if action == self.actions[0]:
# Apply a 2% gap whenever the agent takes a position
if not self.agent_is_holding:
earnings -= new_data * self.gap / 100.0
self.agent_is_holding = True
elif action == self.actions[2]:
self.agent_is_holding = False
return earnings
world = World()
world.main() | gpl-2.0 |
blackPantherOS/packagemanagement | smartpm/smart/backends/rpm/__init__.py | 9 | 1061 | #
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart import *
import os
def checkPackageFile(filename):
return os.path.isfile(filename) and filename.endswith(".rpm")
hooks.register("check-package-file", checkPackageFile)
| apache-2.0 |
ManageIQ/integration_tests | cfme/utils/stats.py | 2 | 1964 | def tol_check(ref, compare, min_error=0.05, low_val_correction=3.0):
"""Tolerance check
The tolerance check is very simple. In essence it checks to ensure
that the ``compare`` value is within ``min_error`` percentage of the ``ref`` value.
However there are special conditions.
    If the ref value == the compare value we will always return True to avoid
calculation overhead.
If the ref value is zero we check if the compare value is below the low_val_correction
threshold.
    The low value correction is also used if ref is small. In this case, if one minus
    (ref - low_val_correction) / ref yields a greater allowed error than ``min_error``,
    then that value is used as the tolerance.
For example, if the reference was 1 and the compare was 2, with a min_error set to the
default, the tolerance check would return False. At low values this is probably undesirable
and so, the low_val_correction allows for a greater amount of error at low values.
As an example, with the lvc set to 3, the allowe error would be much higher, allowing the
tolerance check to pass.
The lvc will only take effect if the error it produces is greater than the ``min_error``.
Args:
ref: The reference value
compare: The comparison value
min_error: The minimum allowed error
low_val_correction: A correction value for lower values
"""
if ref == compare:
return True, min_error
elif ref == 0:
return compare <= low_val_correction, low_val_correction
else:
compared_value = float(compare)
reference_value = float(ref)
relational_error = 1.0 - ((reference_value - low_val_correction) / reference_value)
tolerance = max([relational_error, min_error])
difference = abs(reference_value - compared_value)
difference_error = difference / reference_value
return difference_error <= tolerance, tolerance
| gpl-2.0 |
vvv1559/intellij-community | python/lib/Lib/encodings/gb18030.py | 816 | 1031 | #
# gb18030.py: Python Unicode Codec for GB18030
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb18030')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='gb18030',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
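# Usage sketch: the encodings search path calls getregentry() to register this codec,
# after which e.g. u'\u4e2d'.encode('gb18030') yields '\xd6\xd0'.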
| apache-2.0 |
dmilith/SublimeText3-dmilith | Packages/Debugger/modules/views/variable.py | 1 | 6654 | from __future__ import annotations
from ..typecheck import *
from ..import core
from ..import ui
from ..import dap
from . import css
import sublime
class VariableComponentState:
def __init__(self):
self._expanded: dict[int, bool] = {}
self._number_expanded: dict[int, int] = {}
def is_expanded(self, variable: dap.Variable) -> bool:
return self._expanded.get(id(variable), False)
def set_expanded(self, variable: dap.Variable, value: bool):
self._expanded[id(variable)] = value
def number_expanded(self, variable: dap.Variable) -> int:
return self._number_expanded.get(id(variable), 20)
def set_number_expanded(self, variable: dap.Variable, value: int):
self._number_expanded[id(variable)] = value
class VariableComponent (ui.div):
def __init__(self, variable: dap.Variable, source: Optional[dap.SourceLocation] = None, on_clicked_source: Optional[Callable[[dap.SourceLocation], None]] = None, state: VariableComponentState = VariableComponentState()) -> None:
super().__init__()
self.variable = variable
self.state = state
self.item_right = ui.span()
self.variable_children: Optional[list[dap.Variable]] = None
self.error: Optional[core.Error] = None
self.edit_variable_menu = None
self.on_clicked_source = on_clicked_source
self.source = source
if self.state.is_expanded(self.variable):
self.set_expanded()
@core.schedule
async def edit_variable(self) -> None:
if not isinstance(self.variable.reference, dap.types.Variable):
raise core.Error("Not able to set value of this item")
variable = self.variable.reference
session = self.variable.session
info = None
expression = variable.evaluateName or variable.name
value = variable.value or ""
if session.capabilities.supportsDataBreakpoints:
info = await session.data_breakpoint_info(variable)
async def on_edit_variable_async(value: str):
try:
self.variable.reference = await session.set_variable(variable, value)
self.variable.fetched = None
self.dirty()
except core.Error as e:
core.log_exception()
core.display(e)
def on_edit_variable(value: str):
core.run(on_edit_variable_async(value))
@core.schedule
async def copy_value():
session = self.variable.session
if variable.evaluateName:
try:
# Attempt to match vscode behavior
# If the adapter supports clipboard use it otherwise send the none standard 'variables' context
context = 'clipboard' if session.capabilities.supportsClipboardContext else 'variables'
v = await self.variable.session.evaluate_expression(variable.evaluateName, context)
sublime.set_clipboard(v.result)
return
except dap.Error as e:
core.log_exception()
sublime.set_clipboard(value)
def copy_expr():
sublime.set_clipboard(expression)
def add_watch():
session.watch.add(expression)
items = [
ui.InputListItem(
ui.InputText(
on_edit_variable,
"editing a variable",
),
"Edit Variable",
),
ui.InputListItem(
copy_expr,
"Copy Expression",
),
ui.InputListItem(
copy_value,
"Copy Value\t Click again to select",
),
ui.InputListItem(
add_watch,
"Add Variable To Watch",
),
]
if self.edit_variable_menu:
copy_value()
self.edit_variable_menu.cancel()
return
if info and info.id:
types = info.accessTypes or [""]
labels = {
dap.DataBreakpoint.write: "Break On Value Write",
dap.DataBreakpoint.readWrite: "Break On Value Read or Write",
dap.DataBreakpoint.read: "Break On Value Read",
}
def on_add_data_breakpoint(accessType: str):
assert info
session.breakpoints.data.add(info, accessType or None)
for acessType in types:
items.append(ui.InputListItem(
lambda: on_add_data_breakpoint(acessType),
labels.get(acessType) or "Break On Value Change"
))
self.edit_variable_menu = ui.InputList(items, '{} {}'.format(variable.name, variable.value)).run()
await self.edit_variable_menu
self.edit_variable_menu = None
@core.schedule
async def set_expanded(self) -> None:
self.state.set_expanded(self.variable, True)
self.error = None
self.dirty()
try:
self.variable_children = await self.variable.children()
except core.Error as error:
self.error = error
self.dirty()
@core.schedule
async def toggle_expand(self) -> None:
is_expanded = self.state.is_expanded(self.variable)
if is_expanded:
self.state.set_expanded(self.variable, False)
self.dirty()
else:
await self.set_expanded()
def show_more(self) -> None:
count = self.state.number_expanded(self.variable)
self.state.set_number_expanded(self.variable, count + 20)
self.dirty()
def render(self) -> ui.div.Children:
name = self.variable.name
value = self.variable.value
is_expanded = self.state.is_expanded(self.variable)
source = self.source.name if self.source else None
if name:
value_item = ui.click(self.edit_variable)[
ui.text(name, css=css.label_secondary),
ui.spacer(1),
ui.code(value),
]
else:
value_item = ui.click(self.edit_variable)[
ui.code(value),
]
if source:
self.item_right = ui.click(lambda: self.on_clicked_source(self.source))[
ui.spacer(min=1),
ui.text(source, css=css.label_secondary)
]
if not self.variable.has_children:
return [
ui.div(height=css.row_height)[
ui.align()[
ui.spacer(3),
value_item,
self.item_right,
],
],
]
variable_label = ui.div(height=css.row_height)[
ui.align()[
ui.click(self.toggle_expand)[
ui.icon(ui.Images.shared.open if is_expanded else ui.Images.shared.close)
],
value_item,
self.item_right,
]
]
if not is_expanded:
return [
variable_label
]
variable_children: list[ui.div] = []
if self.error:
variable_children.append(
ui.div(height=css.row_height)[
ui.text(str(self.error), css=css.label_redish_secondary)
]
)
elif self.variable_children is None:
variable_children.append(
ui.div(height=css.row_height)[
ui.text('◌', css=css.label_secondary)
]
)
else:
count = self.state.number_expanded(self.variable)
for variable in self.variable_children[:count]:
variable_children.append(VariableComponent(variable, state=self.state))
more_count = len(self.variable_children) - count
if more_count > 0:
variable_children.append(
ui.div(height=css.row_height)[
ui.click(self.show_more)[
ui.text(" {} more items...".format(more_count), css=css.label_secondary)
]
]
)
return [
variable_label,
ui.div(css=css.table_inset)[
variable_children
]
]
| mit |
chokribr/invenio | invenio/modules/multimedia/restful.py | 11 | 4723 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Multimedia IIIF Image API."""
from flask import send_file
from flask_restful import abort, Resource
from functools import wraps
from six import StringIO
from .api import (
IIIFImageAPIWrapper, MultimediaImageCache
)
from .config import (
MULTIMEDIA_IMAGE_API_SUPPORTED_FORMATS
)
from .errors import (
MultimediaError, MultmediaImageCropError, MultmediaImageResizeError,
MultimediaImageFormatError, MultimediaImageRotateError,
MultimediaImageQualityError, IIIFValidatorError, MultimediaImageNotFound,
MultimediaImageForbidden
)
def error_handler(f):
"""error handler."""
@wraps(f)
def inner(*args, **kwargs):
try:
return f(*args, **kwargs)
except (MultmediaImageCropError, MultmediaImageResizeError,
MultimediaImageFormatError, MultimediaImageRotateError,
MultimediaImageQualityError) as e:
abort(500, message=e.message, code=500)
except IIIFValidatorError as e:
abort(400, message=e.message, code=400)
except (MultimediaError, MultimediaImageNotFound,
MultimediaImageForbidden) as e:
abort(e.code, message=e.message, code=e.code)
return inner
class IiifImageAPI(Resource):
"""IIIF API Implementation.
.. note::
* IIF IMAGE API v1.0
* For more infos please visit <http://iiif.io/api/image/>.
* IIIF Image API v2.0
* For more infos please visit <http://iiif.io/api/image/2.0/>.
* The API works only for GET requests
* The image process must follow strictly the following workflow:
* Region
* Size
* Rotation
* Quality
* Format
"""
method_decorators = [
error_handler,
]
def get(self, version, uuid, region, size, rotation, quality,
image_format):
"""Run IIIF Image API workflow."""
# Validate IIIF parameters
IIIFImageAPIWrapper.validate_api(
version=version,
region=region,
size=size,
rotate=rotation,
quality=quality,
image_format=image_format
)
cache = MultimediaImageCache()
# build the image key
key = "iiif:{0}/{1}/{2}/{3}/{4}.{5}".format(
uuid, region, size, quality, rotation, image_format
)
        # Check if it's cached
cached = cache.get_value(key)
        # If the image is cached, load it from the cache
if cached:
to_serve = StringIO(cached)
to_serve.seek(0)
        # Otherwise create the image
else:
image = IIIFImageAPIWrapper.get_image(uuid)
image.apply_api(
version=version,
region=region,
size=size,
rotate=rotation,
quality=quality
)
            # prepare the image to be served
to_serve = image.serve(image_format=image_format)
# to_serve = image.serve(image_format=image_format)
cache.cache(key, to_serve.getvalue())
# decide the mime_type from the requested image_format
mimetype = MULTIMEDIA_IMAGE_API_SUPPORTED_FORMATS.get(
image_format, 'image/jpeg'
)
return send_file(to_serve, mimetype=mimetype)
def post(self):
"""post."""
abort(405)
def delete(self):
"""delete."""
abort(405)
def options(self):
"""options."""
abort(405)
def put(self):
"""put."""
abort(405)
def head(self):
"""head."""
abort(405)
def setup_app(app, api):
"""setup the urls."""
api.add_resource(
IiifImageAPI,
("/api/multimedia/image/<string:version>/<string:uuid>/"
"<string:region>/<string:size>/<string:rotation>/<string:quality>."
"<string:image_format>"),
)
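    # Example request (illustrative): GET /api/multimedia/image/v2/<uuid>/full/full/0/default.jpg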
| gpl-2.0 |
e-sailing/openplotter | tools/kplex/add_kplex.py | 1 | 18795 | #!/usr/bin/env python
# This file is part of Openplotter.
# Copyright (C) 2015 by sailoog <https://github.com/sailoog/openplotter>
#
# Openplotter is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
# Openplotter is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Openplotter. If not, see <http://www.gnu.org/licenses/>.
import re
import wx
class addkplex(wx.Dialog):
def __init__(self, edit, extkplex, parent):
conf = parent.conf
self.parent = parent
self.op_folder = parent.op_folder
if edit == 0: title = _('Add kplex interface')
else: title = _('Edit kplex interface')
wx.Dialog.__init__(self, None, title=title, size=(550, 450))
self.extkplex = extkplex
self.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
self.result = 0
self.index = -1
if edit != 0:
self.index = edit[11]
panel = wx.Panel(self)
self.icon = wx.Icon(self.op_folder + '/static/icons/kplex.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(self.icon)
wx.StaticText(panel, label=_('Type'), pos=(20, 30))
self.kplex_type_list = ['Serial', 'TCP', 'UDP']
self.kplex_type = wx.ComboBox(panel, choices=self.kplex_type_list, style=wx.CB_READONLY, size=(80, 32),
pos=(20, 55))
self.Bind(wx.EVT_COMBOBOX, self.on_kplex_type_change, self.kplex_type)
wx.StaticText(panel, label=_('Name'), pos=(115, 35))
self.kplex_name = wx.TextCtrl(panel, -1, size=(110, 32), pos=(110, 55))
wx.StaticBox(panel, label=_(' settings '), size=(530, 90), pos=(10, 10))
serialInst = conf.get('UDEV', 'Serialinst')
try: serialInst = eval(serialInst)
except: serialInst = {}
SerDevLs = []
for alias in serialInst:
if serialInst[alias]['data'] == 'NMEA 0183':
if serialInst[alias]['assignment'] == '0': SerDevLs.append(alias)
self.kplex_ser_T1 = wx.StaticText(panel, label=_('Alias'), pos=(230, 35))
self.kplex_device_select = wx.ComboBox(panel, choices=SerDevLs, style=wx.CB_DROPDOWN, size=(140, 32),pos=(225, 55))
self.bauds = ['4800', '9600', '19200', '38400', '57600', '115200', '230400', '460800']
self.kplex_ser_T2 = wx.StaticText(panel, label=_('Bauds'), pos=(375, 35))
self.kplex_baud_select = wx.ComboBox(panel, choices=self.bauds, style=wx.CB_READONLY, size=(90, 32),pos=(370, 55))
self.kplex_net_T1 = wx.StaticText(panel, label=_('Address'), pos=(235, 35))
self.kplex_address = wx.TextCtrl(panel, -1, size=(120, 32), pos=(230, 55))
self.kplex_net_T2 = wx.StaticText(panel, label=_('Port'), pos=(375, 35))
self.kplex_netport = wx.TextCtrl(panel, -1, size=(75, 32), pos=(370, 55))
self.ser_io_list = ['in', 'out', 'both']
self.net_io_list = ['in', 'out', 'both']
wx.StaticText(panel, label=_('in/out'), pos=(470, 35))
self.kplex_io_ser = wx.ComboBox(panel, choices=self.ser_io_list, style=wx.CB_READONLY, size=(70, 32),pos=(465, 55))
self.kplex_io_net = wx.ComboBox(panel, choices=self.net_io_list, style=wx.CB_READONLY, size=(70, 32),pos=(465, 55))
self.Bind(wx.EVT_COMBOBOX, self.on_kplex_io_change, self.kplex_io_ser)
self.Bind(wx.EVT_COMBOBOX, self.on_kplex_io_change, self.kplex_io_net)
self.name_ifilter_list = []
for i in extkplex:
if i[3] == 'in' or i[3] == 'both':
self.name_ifilter_list.append(i[1])
self.ifilter_T1 = wx.StaticBox(panel, label=_('in Filter '), size=(530, 100), pos=(10, 105))
self.mode_ifilter = [_('none'), _('Accept only sentences:'), _('Ignore sentences:')]
self.ifilter_select = wx.ComboBox(panel, choices=self.mode_ifilter, style=wx.CB_READONLY, size=(195, 32),pos=(20, 125))
self.ifilter_select.SetValue(self.mode_ifilter[0])
self.italker = wx.TextCtrl(panel, -1, size=(40, 32), pos=(230, 125))
self.isent = wx.TextCtrl(panel, -1, size=(50, 32), pos=(270, 125))
# self.name_ifilter_select = wx.ComboBox(panel, choices=self.name_ifilter_list, style=wx.CB_READONLY, size=(110, 32), pos=(305, 125))
self.ifilter_add_b = wx.Button(panel, label=_('Add'), pos=(425, 125))
self.Bind(wx.EVT_BUTTON, self.ifilter_add, self.ifilter_add_b)
self.ifilter_sentences = wx.TextCtrl(panel, -1, style=wx.CB_READONLY, size=(395, 32), pos=(20, 165))
self.ifilter_sentences.SetBackgroundColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_INACTIVECAPTION))
self.ifilter_del_b = wx.Button(panel, label=_('Delete'), pos=(425, 165))
self.Bind(wx.EVT_BUTTON, self.ifilter_del, self.ifilter_del_b)
a = 105
self.ofilter_T1 = wx.StaticBox(panel, label=_('out Filter '), size=(530, 100), pos=(10, 105 + a))
self.mode_ofilter = [_('none'), _('Accept only sentences:'), _('Ignore sentences:')]
self.ofilter_select = wx.ComboBox(panel, choices=self.mode_ofilter, style=wx.CB_READONLY, size=(170, 32), pos=(20, 125 + a))
self.ofilter_select.SetValue(self.mode_ofilter[0])
self.otalker = wx.TextCtrl(panel, -1, size=(40, 32), pos=(200, 125 + a))
self.osent = wx.TextCtrl(panel, -1, size=(50, 32), pos=(240, 125 + a))
self.name_ofilter_select = wx.ComboBox(panel, choices=self.name_ifilter_list, style=wx.CB_READONLY, size=(120, 32), pos=(300, 125 + a))
self.ofilter_add_b = wx.Button(panel, label=_('Add'), pos=(425, 125 + a))
self.Bind(wx.EVT_BUTTON, self.ofilter_add, self.ofilter_add_b)
self.ofilter_sentences = wx.TextCtrl(panel, -1, style=wx.CB_READONLY, size=(395, 32), pos=(20, 165 + a))
self.ofilter_sentences.SetBackgroundColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_INACTIVECAPTION))
self.ofilter_del_b = wx.Button(panel, label=_('Delete'), pos=(425, 165 + a))
self.Bind(wx.EVT_BUTTON, self.ofilter_del, self.ofilter_del_b)
self.ifilter_sentences.SetValue(_('nothing'))
self.italker.SetValue('**')
self.isent.SetValue('***')
self.ofilter_sentences.SetValue(_('nothing'))
self.otalker.SetValue('**')
self.osent.SetValue('***')
self.optional = wx.CheckBox(panel, label=_('set optional'), pos=(20, 365))
gpsd_examp_b = wx.Button(panel, label=_('Add GPSD input'), pos=(10, 320))
gpsd_examp_b.Bind(wx.EVT_BUTTON, self.gpsd_examp)
SKin_examp_b = wx.Button(panel, label=_('Add Signal K input'), pos=(170, 320))
SKin_examp_b.Bind(wx.EVT_BUTTON, self.SKin_examp)
SKout_examp_b = wx.Button(panel, label=_('Add Signal K output'), pos=(350, 320))
SKout_examp_b.Bind(wx.EVT_BUTTON, self.SKout_examp)
ok = wx.Button(panel, label=_('OK'), pos=(425, 360))
ok.Bind(wx.EVT_BUTTON, self.ok_conn)
cancelBtn = wx.Button(panel, wx.ID_CANCEL, pos=(330, 360))
if edit == 0:
edit = ['0', '0', '0', '0', '0', '0', '0', '0', '0', -1,0]
self.kplex_type.SetValue('Serial')
self.kplex_baud_select.SetValue('4800')
self.kplex_io_ser.SetValue('in')
self.kplex_io_net.SetValue('in')
self.switch_ser_net(True)
self.switch_io_out(False)
self.optional.SetValue(True)
else:
self.kplex_name.SetValue(edit[1])
self.kplex_type.SetValue(edit[2])
if edit[2] == 'Serial':
self.kplex_io_ser.SetValue(edit[3])
self.switch_ser_net(True)
self.kplex_device_select.SetValue(edit[4])
self.kplex_baud_select.SetValue(edit[5])
else:
self.kplex_io_net.SetValue(edit[3])
self.switch_ser_net(False)
self.kplex_address.SetValue(edit[4])
self.kplex_netport.SetValue(edit[5])
self.on_kplex_io_change(0)
if edit[6] != _('none').decode("utf-8"):
if edit[6] == _('accept').decode("utf-8"):
self.ifilter_select.SetValue(self.mode_ifilter[1])
if edit[6] == _('ignore').decode("utf-8"):
self.ifilter_select.SetValue(self.mode_ifilter[2])
self.ifilter_sentences.SetValue(edit[7])
else:
self.ifilter_select.SetValue(self.mode_ifilter[0])
if edit[8] != _('none').decode("utf-8"):
if edit[8] == _('accept').decode("utf-8"):
self.ofilter_select.SetValue(self.mode_ofilter[1])
if edit[8] == _('ignore').decode("utf-8"):
self.ofilter_select.SetValue(self.mode_ofilter[2])
self.ofilter_sentences.SetValue(edit[8])
else:
self.ofilter_select.SetValue(self.mode_ofilter[0])
if edit[10] == '1':
self.optional.SetValue(True)
else:
self.optional.SetValue(False)
def SKout_examp(self, e):
self.kplex_type.SetValue('UDP')
self.kplex_io_net.SetValue('out')
self.switch_ser_net(False)
self.switch_io_out(True)
self.switch_io_in(False)
self.kplex_address.SetValue('')
self.kplex_netport.SetValue('30330')
self.kplex_name.SetValue('signalk_out')
self.ifilter_select.SetValue(self.mode_ifilter[0])
self.ifilter_sentences.SetValue(_('nothing'))
self.ofilter_select.SetValue(self.mode_ifilter[0])
self.ofilter_sentences.SetValue(_('nothing'))
def SKin_examp(self, e):
self.kplex_type.SetValue('TCP')
self.kplex_io_net.SetValue('in')
self.switch_ser_net(False)
self.switch_io_out(False)
self.switch_io_in(True)
self.kplex_address.SetValue('localhost')
self.kplex_netport.SetValue('10110')
self.kplex_name.SetValue('signalk_in')
self.ifilter_select.SetValue(self.mode_ifilter[0])
self.ifilter_sentences.SetValue(_('nothing'))
self.ofilter_select.SetValue(self.mode_ifilter[0])
self.ofilter_sentences.SetValue(_('nothing'))
def gpsd_examp(self, e):
self.kplex_type.SetValue('TCP')
self.kplex_io_net.SetValue('in')
self.switch_ser_net(False)
self.switch_io_out(False)
self.switch_io_in(True)
self.switch_ser_net(False)
self.kplex_address.SetValue('localhost')
self.kplex_netport.SetValue('2947')
self.kplex_baud_select.SetValue('4800')
self.kplex_name.SetValue('gpsd')
self.ifilter_select.SetValue(self.mode_ifilter[0])
self.ifilter_sentences.SetValue(_('nothing'))
self.ofilter_select.SetValue(self.mode_ifilter[0])
self.ofilter_sentences.SetValue(_('nothing'))
self.optional.SetValue(True)
def ifilter_del(self, event):
self.ifilter_sentences.SetValue(_('nothing'))
def ofilter_del(self, event):
self.ofilter_sentences.SetValue(_('nothing'))
def ifilter_add(self, event):
talker = self.italker.GetValue()
sent = self.isent.GetValue()
if not re.match('^[*A-Z]{2}$', talker):
self.ShowMessage(_('Talker must have 2 uppercase characters. The symbol * matches any character.'))
return
if not re.match('^[*A-Z]{3}$', sent):
self.ShowMessage(_('Sentence must have 3 uppercase characters. The symbol * matches any character.'))
return
r_sentence = talker + sent
# if self.name_ifilter_select.GetValue()!='':
# r_sentence+='%'+self.name_ifilter_select.GetValue()
if r_sentence == '*****':
self.ShowMessage(_('You must enter 2 uppercase characters for talker or 3 uppercase characters for sentence. The symbol * matches any character.'))
return
if r_sentence in self.ifilter_sentences.GetValue():
self.ShowMessage(_('This sentence already exists.'))
return
if self.ifilter_sentences.GetValue() == _('nothing'):
self.ifilter_sentences.SetValue(r_sentence)
else:
self.ifilter_sentences.SetValue(self.ifilter_sentences.GetValue() + ',' + r_sentence)
def ofilter_add(self, event):
talker = self.otalker.GetValue()
sent = self.osent.GetValue()
if not re.match('^[*A-Z]{2}$', talker):
self.ShowMessage(_('Talker must have 2 uppercase characters. The symbol * matches any character.'))
return
if not re.match('^[*A-Z]{3}$', sent):
self.ShowMessage(_('Sentence must have 3 uppercase characters. The symbol * matches any character.'))
return
r_sentence = talker + sent
if self.name_ofilter_select.GetValue() != '':
r_sentence += '%' + self.name_ofilter_select.GetValue()
if r_sentence == '*****':
self.ShowMessage(_('You must enter 2 uppercase characters for talker or 3 uppercase characters for sentence. The symbol * matches any character.'))
return
if r_sentence in self.ofilter_sentences.GetValue():
self.ShowMessage(_('This sentence already exists.'))
return
if self.ofilter_sentences.GetValue() == _('nothing'):
self.ofilter_sentences.SetValue(r_sentence)
else:
self.ofilter_sentences.SetValue(self.ofilter_sentences.GetValue() + ',' + r_sentence)
def on_kplex_type_change(self, event):
if self.kplex_type.GetValue() == 'Serial':
self.switch_ser_net(True)
else:
self.switch_ser_net(False)
def switch_ser_net(self, b):
self.kplex_ser_T1.Show(b)
self.kplex_device_select.Show(b)
self.kplex_ser_T2.Show(b)
self.kplex_baud_select.Show(b)
self.kplex_io_ser.Show(b)
self.kplex_net_T1.Show(not b)
self.kplex_address.Show(not b)
self.kplex_net_T2.Show(not b)
self.kplex_netport.Show(not b)
self.kplex_io_net.Show(not b)
def on_kplex_io_change(self, event):
if self.kplex_type.GetValue() == 'Serial':
in_out = str(self.kplex_io_ser.GetValue())
else:
in_out = str(self.kplex_io_net.GetValue())
if in_out != 'out':
self.switch_io_in(True)
else:
self.switch_io_in(False)
if in_out != 'in':
self.switch_io_out(True)
else:
self.switch_io_out(False)
def switch_io_in(self, b):
if b:
self.ifilter_T1.Enable()
self.ifilter_select.Enable()
self.italker.Enable()
self.isent.Enable()
# self.name_ifilter_select.Enable()
self.ifilter_add_b.Enable()
self.ifilter_sentences.Enable()
self.ifilter_del_b.Enable()
else:
self.ifilter_T1.Disable()
self.ifilter_select.Disable()
self.italker.Disable()
self.isent.Disable()
# self.name_ifilter_select.Disable()
self.ifilter_add_b.Disable()
self.ifilter_sentences.Disable()
self.ifilter_del_b.Disable()
self.ifilter_sentences.SetValue(_('nothing'))
self.ifilter_select.SetValue(_('none'))
def switch_io_out(self, b):
if b:
self.ofilter_T1.Enable()
self.ofilter_select.Enable()
self.otalker.Enable()
self.osent.Enable()
self.name_ofilter_select.Enable()
self.ofilter_add_b.Enable()
self.ofilter_sentences.Enable()
self.ofilter_del_b.Enable()
else:
self.ofilter_T1.Disable()
self.ofilter_select.Disable()
self.otalker.Disable()
self.osent.Disable()
self.name_ofilter_select.Disable()
self.ofilter_add_b.Disable()
self.ofilter_sentences.Disable()
self.ofilter_del_b.Disable()
self.ofilter_sentences.SetValue(_('nothing'))
self.ofilter_select.SetValue(_('none'))
def create_gpsd(self, event):
self.name.SetValue('gpsd')
self.typeComboBox.SetValue('TCP')
self.address.SetValue('127.0.0.1')
self.port.SetValue('2947')
def ok_conn(self, event):
name = str(self.kplex_name.GetValue())
name = name.replace(' ', '_')
self.kplex_name.SetValue(name)
type_conn = self.kplex_type.GetValue()
port_address = ''
bauds_port = ''
if type_conn == 'Serial':
in_out = str(self.kplex_io_ser.GetValue())
else:
in_out = str(self.kplex_io_net.GetValue())
if not re.match('^[_0-9a-z]{1,13}$', name):
self.ShowMessage(_('"Name" must be a unique word between 1 and 13 lowercase letters and/or numbers.'))
return
for index, sublist in enumerate(self.extkplex):
if sublist[1] == name and index != self.index:
self.ShowMessage(_('This name is already in use.'))
return
if type_conn == 'Serial':
if str(self.kplex_device_select.GetValue()) != 'none':
port_address = str(self.kplex_device_select.GetValue())
else:
self.ShowMessage(_('You must select a Port.'))
return
bauds_port = str(self.kplex_baud_select.GetValue())
for index, sublist in enumerate(self.extkplex):
if sublist[4] == port_address and sublist[4] != '' and index != self.index:
self.ShowMessage(_('This output is already in use.'))
return
if type_conn == 'UDP' or type_conn == 'TCP':
#if self.kplex_address.GetValue() or type_conn == 'UDP':
port_address = self.kplex_address.GetValue()
#else:
# self.ShowMessage(_('You must enter an Address.'))
# return
if self.kplex_netport.GetValue():
bauds_port = self.kplex_netport.GetValue()
else:
self.ShowMessage(_('You must enter a Port.'))
return
if bauds_port >= '10111' and bauds_port <= '10113' and type_conn == 'TCP':
self.ShowMessage(_('Cancelled. Ports 10111 to 10113 are reserved.'))
return
new_address_port = str(type_conn) + str(port_address) + str(bauds_port)
for index, sublist in enumerate(self.extkplex):
old_address_port = str(sublist[2]) + str(sublist[4]) + str(sublist[5])
if old_address_port == new_address_port and index != self.index:
self.ShowMessage(_('This input is already in use.'))
return
if self.ifilter_select.GetValue() == _('none') and self.ifilter_sentences.GetValue() != _('nothing'):
self.ShowMessage(_('You must select a Filter type.'))
return
if self.ofilter_select.GetValue() == _('none') and self.ofilter_sentences.GetValue() != _('nothing'):
self.ShowMessage(_('You must select a Filter type.'))
return
filter_type = _('none')
filtering = _('nothing')
if self.ifilter_select.GetValue().encode('utf8') == _('Accept only sentences:') and self.ifilter_sentences.GetValue() != _(
'nothing'):
filter_type = 'accept'
filtering = ''
r = self.ifilter_sentences.GetValue()
l = r.split(',')
for index, item in enumerate(l):
if index != 0: filtering += ':'
filtering += '+' + item
filtering += ':-all'
if self.ifilter_select.GetValue() == _('Ignore sentences:') and self.ifilter_sentences.GetValue() != _(
'nothing'):
filter_type = 'ignore'
filtering = ''
r = self.ifilter_sentences.GetValue()
l = r.split(',')
for index, item in enumerate(l):
if index != 0: filtering += ':'
filtering += '-' + item
ofilter_type = _('none')
ofiltering = _('nothing')
if self.ofilter_select.GetValue().encode('utf8') == _('Accept only sentences:') and self.ofilter_sentences.GetValue() != _(
'nothing'):
ofilter_type = 'accept'
ofiltering = ''
r = self.ofilter_sentences.GetValue()
l = r.split(',')
for index, item in enumerate(l):
if index != 0: ofiltering += ':'
ofiltering += '+' + item
ofiltering += ':-all'
if self.ofilter_select.GetValue() == _('Ignore sentences:') and self.ofilter_sentences.GetValue() != _(
'nothing'):
ofilter_type = 'ignore'
ofiltering = ''
r = self.ofilter_sentences.GetValue()
l = r.split(',')
for index, item in enumerate(l):
if index != 0: ofiltering += ':'
ofiltering += '-' + item
optio = '0'
if self.optional.GetValue() == 1:
optio = '1'
self.add_kplex_out = ['None', name, type_conn, in_out, port_address, bauds_port, filter_type, filtering,
ofilter_type, ofiltering, optio, self.index]
self.result = self.add_kplex_out
self.Destroy()
def ShowMessage(self, w_msg):
wx.MessageBox(w_msg, 'Info', wx.OK | wx.ICON_INFORMATION)
| gpl-2.0 |
allenai/allennlp | tests/training/metrics/boolean_accuracy_test.py | 1 | 4788 | from typing import Any, Dict, List, Tuple, Union
import torch
import pytest
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp.training.metrics import BooleanAccuracy
class BooleanAccuracyTest(AllenNlpTestCase):
@multi_device
def test_accuracy_computation(self, device: str):
accuracy = BooleanAccuracy()
predictions = torch.tensor([[0, 1], [2, 3], [4, 5], [6, 7]], device=device)
targets = torch.tensor([[0, 1], [2, 2], [4, 5], [7, 7]], device=device)
accuracy(predictions, targets)
assert accuracy.get_metric() == 2 / 4
mask = torch.ones(4, 2, device=device).bool()
mask[1, 1] = 0
accuracy(predictions, targets, mask)
assert accuracy.get_metric() == 5 / 8
targets[1, 1] = 3
accuracy(predictions, targets)
assert accuracy.get_metric() == 8 / 12
accuracy.reset()
accuracy(predictions, targets)
assert accuracy.get_metric() == 3 / 4
@multi_device
def test_skips_completely_masked_instances(self, device: str):
accuracy = BooleanAccuracy()
predictions = torch.tensor([[0, 1], [2, 3], [4, 5], [6, 7]], device=device)
targets = torch.tensor([[0, 1], [2, 2], [4, 5], [7, 7]], device=device)
mask = torch.tensor(
[[False, False], [True, False], [True, True], [True, True]], device=device
)
accuracy(predictions, targets, mask)
# First example should be skipped, second is correct with mask, third is correct, fourth is wrong.
assert accuracy.get_metric() == 2 / 3
@multi_device
def test_incorrect_gold_labels_shape_catches_exceptions(self, device: str):
accuracy = BooleanAccuracy()
predictions = torch.rand([5, 7], device=device)
incorrect_shape_labels = torch.rand([5, 8], device=device)
with pytest.raises(ValueError):
accuracy(predictions, incorrect_shape_labels)
@multi_device
def test_incorrect_mask_shape_catches_exceptions(self, device: str):
accuracy = BooleanAccuracy()
predictions = torch.rand([5, 7], device=device)
labels = torch.rand([5, 7], device=device)
incorrect_shape_mask = torch.randint(0, 2, [5, 8], device=device).bool()
with pytest.raises(ValueError):
accuracy(predictions, labels, incorrect_shape_mask)
@multi_device
def test_does_not_divide_by_zero_with_no_count(self, device: str):
accuracy = BooleanAccuracy()
assert accuracy.get_metric() == pytest.approx(0.0)
def test_distributed_accuracy(self):
predictions = [torch.tensor([[0, 1], [2, 3]]), torch.tensor([[4, 5], [6, 7]])]
targets = [torch.tensor([[0, 1], [2, 2]]), torch.tensor([[4, 5], [7, 7]])]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_values = 0.5
run_distributed_test(
[-1, -1],
global_distributed_metric,
BooleanAccuracy(),
metric_kwargs,
desired_values,
exact=True,
)
def test_distributed_accuracy_unequal_batches(self):
predictions = [torch.tensor([[0, 1], [2, 3], [4, 5]]), torch.tensor([[6, 7]])]
targets = [torch.tensor([[0, 1], [2, 2], [4, 5]]), torch.tensor([[7, 7]])]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_values = 0.5
run_distributed_test(
[-1, -1],
global_distributed_metric,
BooleanAccuracy(),
metric_kwargs,
desired_values,
exact=True,
)
def test_multiple_distributed_runs(self):
predictions = [torch.tensor([[0, 1], [2, 3]]), torch.tensor([[4, 5], [6, 7]])]
targets = [torch.tensor([[0, 1], [2, 2]]), torch.tensor([[4, 5], [7, 7]])]
metric_kwargs = {"predictions": predictions, "gold_labels": targets}
desired_values = 0.5
run_distributed_test(
[-1, -1],
multiple_runs,
BooleanAccuracy(),
metric_kwargs,
desired_values,
exact=True,
)
def multiple_runs(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: BooleanAccuracy,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
for i in range(200):
metric(**kwargs)
assert desired_values == metric.get_metric()
| apache-2.0 |
viswimmer1/PythonGenerator | data/python_files/32676842/bzr.py | 1 | 7918 | import calendar
from datetime import datetime, timedelta
import re
import time
import urlparse
try:
from bzrlib import bzrdir, revisionspec
from bzrlib.errors import BzrError, NotBranchError
from bzrlib.transport import register_lazy_transport
from bzrlib.transport.remote import RemoteSSHTransport
from bzrlib.transport.ssh import SubprocessVendor, register_ssh_vendor, \
register_default_ssh_vendor
has_bzrlib = True
except ImportError:
has_bzrlib = False
from reviewboard.scmtools.core import SCMTool, HEAD, PRE_CREATION
from reviewboard.scmtools.errors import RepositoryNotFoundError, SCMError
from reviewboard.ssh import utils as sshutils
# Register these URI schemes so we can handle them properly.
urlparse.uses_netloc.append('bzr+ssh')
urlparse.uses_netloc.append('bzr')
sshutils.ssh_uri_schemes.append('bzr+ssh')
if has_bzrlib:
class RBSSHVendor(SubprocessVendor):
"""SSH vendor class that uses rbssh"""
executable_path = 'rbssh'
def __init__(self, local_site_name=None, *args, **kwargs):
super(RBSSHVendor, self).__init__(*args, **kwargs)
self.local_site_name = local_site_name
def _get_vendor_specific_argv(self, username, host, port,
subsystem=None, command=None):
args = [self.executable_path]
if port is not None:
args.extend(['-p', str(port)])
if username is not None:
args.extend(['-l', username])
if self.local_site_name:
args.extend(['--rb-local-site', self.local_site_name])
if subsystem is not None:
args.extend(['-s', host, subsystem])
else:
args.extend([host] + command)
return args
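    # Illustration (editor's note, not part of the original module): a
    # hypothetical call such as
    #   vendor._get_vendor_specific_argv('bob', 'example.com', 2222,
    #                                    subsystem='sftp')
    # with local_site_name 'site1' yields
    #   ['rbssh', '-p', '2222', '-l', 'bob', '--rb-local-site', 'site1',
    #    '-s', 'example.com', 'sftp']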
class RBRemoteSSHTransport(RemoteSSHTransport):
LOCAL_SITE_PARAM_RE = \
re.compile('\?rb-local-site-name=([A-Za-z0-9\-_.]+)')
def __init__(self, base, *args, **kwargs):
m = self.LOCAL_SITE_PARAM_RE.search(base)
if m:
self.local_site_name = m.group(1)
base = base.replace(m.group(0), '')
else:
self.local_site_name = None
super(RBRemoteSSHTransport, self).__init__(base, *args, **kwargs)
def _build_medium(self):
client_medium, auth = \
super(RBRemoteSSHTransport, self)._build_medium()
client_medium._vendor = RBSSHVendor(self.local_site_name)
return client_medium, auth
vendor = RBSSHVendor()
register_ssh_vendor("rbssh", vendor)
register_default_ssh_vendor(vendor)
sshutils.register_rbssh('BZR_SSH')
register_lazy_transport('bzr+ssh://', 'reviewboard.scmtools.bzr',
'RBRemoteSSHTransport')
# BZRTool: An interface to Bazaar SCM Tool (http://bazaar-vcs.org/)
class BZRTool(SCMTool):
name = "Bazaar"
dependencies = {
'modules': ['bzrlib'],
}
# Timestamp format in bzr diffs.
# This isn't totally accurate: there should be a %z at the end.
# Unfortunately, strptime() doesn't support %z.
DIFF_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'
# "bzr diff" indicates that a file is new by setting the old
# timestamp to the epoch time.
PRE_CREATION_TIMESTAMP = '1970-01-01 00:00:00 +0000'
def __init__(self, repository):
SCMTool.__init__(self, repository)
def get_file(self, path, revision):
if revision == BZRTool.PRE_CREATION_TIMESTAMP:
return ''
revspec = self._revspec_from_revision(revision)
filepath = self._get_full_path(path)
branch = None
try:
try:
branch, relpath = bzrdir.BzrDir.open_containing_tree_or_branch(filepath)[1:]
branch.lock_read()
revtree = revisionspec.RevisionSpec.from_string(revspec).as_tree(branch)
fileid = revtree.path2id(relpath)
if fileid:
contents = revtree.get_file_text(fileid)
else:
contents = ""
except BzrError, e:
raise SCMError(e)
finally:
if branch:
branch.unlock()
return contents
def parse_diff_revision(self, file_str, revision_str, *args, **kwargs):
if revision_str == BZRTool.PRE_CREATION_TIMESTAMP:
return (file_str, PRE_CREATION)
return file_str, revision_str
def get_fields(self):
return ['basedir', 'diff_path', 'parent_diff_path']
def get_diffs_use_absolute_paths(self):
return False
def _get_full_path(self, path, basedir=None):
"""Returns the full path to a file."""
parts = [self.repository.path.rstrip("/")]
if basedir:
parts.append(basedir.strip("/"))
parts.append(path.strip("/"))
final_path = "/".join(parts)
if final_path.startswith("/"):
final_path = "file://%s" % final_path
if self.repository.local_site and sshutils.is_ssh_uri(final_path):
final_path += '?rb-local-site-name=%s' % \
self.repository.local_site.name
return final_path
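    # Illustration (editor's note): for a hypothetical repository path
    # '/var/bzr/repo', _get_full_path('src/foo.py', basedir='trunk') returns
    # 'file:///var/bzr/repo/trunk/src/foo.py'; non-local (e.g. bzr+ssh://)
    # paths keep their URL form and only gain the rb-local-site-name query
    # parameter when a local site is configured.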
def _revspec_from_revision(self, revision):
"""Returns a revspec based on the revision found in the diff.
In addition to the standard date format from "bzr diff", this
function supports the revid: syntax provided by the bzr diff-revid plugin.
"""
if revision == HEAD:
revspec = 'last:1'
elif revision.startswith('revid:'):
revspec = revision
else:
revspec = 'date:' + str(self._revision_timestamp_to_local(revision))
return revspec
def _revision_timestamp_to_local(self, timestamp_str):
"""When using a date to ask bzr for a file revision, it expects
the date to be in local time. So, this function converts a
timestamp from a bzr diff file to local time.
"""
timestamp = datetime(*time.strptime(timestamp_str[0:19], BZRTool.DIFF_TIMESTAMP_FORMAT)[0:6])
# Now, parse the difference to GMT time (such as +0200)
# If only strptime() supported %z, we wouldn't have to do this manually.
delta = timedelta(hours=int(timestamp_str[21:23]), minutes=int(timestamp_str[23:25]))
if timestamp_str[20] == '+':
timestamp -= delta
else:
timestamp += delta
# convert to local time
return datetime.utcfromtimestamp(calendar.timegm(timestamp.timetuple()))
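    # Worked example (editor's note): for the bzr diff timestamp
    # '2011-04-21 13:45:12 +0200', the first 19 characters are parsed with
    # DIFF_TIMESTAMP_FORMAT and the '+0200' offset is then applied by hand,
    # giving 2011-04-21 11:45:12 UTC, since strptime() cannot parse %z here.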
@classmethod
def check_repository(cls, path, username=None, password=None,
local_site_name=None):
"""
Performs checks on a repository to test its validity.
This should check if a repository exists and can be connected to.
This will also check if the repository requires an HTTPS certificate.
The result is returned as an exception. The exception may contain
extra information, such as a human-readable description of the problem.
If the repository is valid and can be connected to, no exception
will be thrown.
"""
super(BZRTool, cls).check_repository(path, username, password,
local_site_name)
if local_site_name and sshutils.is_ssh_uri(path):
path += '?rb-local-site-name=%s' % local_site_name
try:
tree, branch, repository, relpath = \
bzrdir.BzrDir.open_containing_tree_branch_or_repository(path)
except AttributeError:
raise RepositoryNotFoundError()
except NotBranchError, e:
raise RepositoryNotFoundError()
except Exception, e:
raise SCMError(e)
| gpl-2.0 |
jkstrick/samba | third_party/dnspython/dns/namedict.py | 99 | 2106 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS name dictionary"""
import dns.name
class NameDict(dict):
"""A dictionary whose keys are dns.name.Name objects.
@ivar max_depth: the maximum depth of the keys that have ever been
added to the dictionary.
@type max_depth: int
"""
def __init__(self, *args, **kwargs):
super(NameDict, self).__init__(*args, **kwargs)
self.max_depth = 0
def __setitem__(self, key, value):
if not isinstance(key, dns.name.Name):
raise ValueError('NameDict key must be a name')
depth = len(key)
if depth > self.max_depth:
self.max_depth = depth
super(NameDict, self).__setitem__(key, value)
def get_deepest_match(self, name):
"""Find the deepest match to I{name} in the dictionary.
The deepest match is the longest name in the dictionary which is
a superdomain of I{name}.
@param name: the name
@type name: dns.name.Name object
@rtype: (key, value) tuple
"""
depth = len(name)
if depth > self.max_depth:
depth = self.max_depth
for i in xrange(-depth, 0):
n = dns.name.Name(name[i:])
if self.has_key(n):
return (n, self[n])
v = self[dns.name.empty]
return (dns.name.empty, v)
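# Minimal usage sketch (editor's addition, not part of the original module);
# the domain names and values below are purely illustrative.
if __name__ == '__main__':
    import dns.name
    d = NameDict()
    d[dns.name.from_text('example.com.')] = 'zone'
    d[dns.name.from_text('www.example.com.')] = 'host'
    # The longest stored superdomain of the query name wins.
    key, value = d.get_deepest_match(dns.name.from_text('a.www.example.com.'))
    print key, value   # -> www.example.com. host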
| gpl-3.0 |
Cadasta/cadasta-qgis-plugin | cadasta/test/test_qgis_environment.py | 1 | 1823 | # coding=utf-8
"""Tests for QGIS functionality.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__date__ = '20/01/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import os
import unittest
from qgis.core import (
QgsProviderRegistry,
QgsCoordinateReferenceSystem,
QgsRasterLayer)
class QGISTest(unittest.TestCase):
"""Test the QGIS Environment"""
def test_qgis_environment(self):
"""QGIS environment has the expected providers"""
r = QgsProviderRegistry.instance()
self.assertIn('gdal', r.providerList())
self.assertIn('ogr', r.providerList())
self.assertIn('postgres', r.providerList())
def test_projection(self):
"""Test that QGIS properly parses a wkt string.
"""
crs = QgsCoordinateReferenceSystem()
wkt = (
'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",'
'SPHEROID["WGS_1984",6378137.0,298.257223563]],'
'PRIMEM["Greenwich",0.0],UNIT["Degree",'
'0.0174532925199433]]')
crs.createFromWkt(wkt)
auth_id = crs.authid()
expected_auth_id = 'EPSG:4326'
self.assertEqual(auth_id, expected_auth_id)
# now test for a loaded layer
path = os.path.join(os.path.dirname(__file__), 'tenbytenraster.asc')
title = 'TestRaster'
layer = QgsRasterLayer(path, title)
auth_id = layer.crs().authid()
self.assertEqual(auth_id, expected_auth_id)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
dudepare/django | tests/template_tests/templatetags/custom.py | 161 | 5408 | import operator
import warnings
from django import template
from django.template.defaultfilters import stringfilter
from django.utils import six
from django.utils.html import escape, format_html
register = template.Library()
@register.filter
@stringfilter
def trim(value, num):
return value[:num]
@register.filter
def noop(value, param=None):
    """A noop filter that always returns its first argument and does nothing with
its second (optional) one.
Useful for testing out whitespace in filter arguments (see #19882)."""
return value
@register.simple_tag(takes_context=True)
def context_stack_length(context):
return len(context.dicts)
@register.simple_tag
def no_params():
"""Expected no_params __doc__"""
return "no_params - Expected result"
no_params.anything = "Expected no_params __dict__"
@register.simple_tag
def one_param(arg):
"""Expected one_param __doc__"""
return "one_param - Expected result: %s" % arg
one_param.anything = "Expected one_param __dict__"
@register.simple_tag(takes_context=False)
def explicit_no_context(arg):
"""Expected explicit_no_context __doc__"""
return "explicit_no_context - Expected result: %s" % arg
explicit_no_context.anything = "Expected explicit_no_context __dict__"
@register.simple_tag(takes_context=True)
def no_params_with_context(context):
"""Expected no_params_with_context __doc__"""
return "no_params_with_context - Expected result (context value: %s)" % context['value']
no_params_with_context.anything = "Expected no_params_with_context __dict__"
@register.simple_tag(takes_context=True)
def params_and_context(context, arg):
"""Expected params_and_context __doc__"""
return "params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)
params_and_context.anything = "Expected params_and_context __dict__"
@register.simple_tag
def simple_two_params(one, two):
"""Expected simple_two_params __doc__"""
return "simple_two_params - Expected result: %s, %s" % (one, two)
simple_two_params.anything = "Expected simple_two_params __dict__"
@register.simple_tag
def simple_one_default(one, two='hi'):
"""Expected simple_one_default __doc__"""
return "simple_one_default - Expected result: %s, %s" % (one, two)
simple_one_default.anything = "Expected simple_one_default __dict__"
@register.simple_tag
def simple_unlimited_args(one, two='hi', *args):
"""Expected simple_unlimited_args __doc__"""
return "simple_unlimited_args - Expected result: %s" % (
', '.join(six.text_type(arg) for arg in [one, two] + list(args))
)
simple_unlimited_args.anything = "Expected simple_unlimited_args __dict__"
@register.simple_tag
def simple_only_unlimited_args(*args):
"""Expected simple_only_unlimited_args __doc__"""
return "simple_only_unlimited_args - Expected result: %s" % ', '.join(six.text_type(arg) for arg in args)
simple_only_unlimited_args.anything = "Expected simple_only_unlimited_args __dict__"
@register.simple_tag
def simple_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected simple_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
return "simple_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join(six.text_type(arg) for arg in [one, two] + list(args)),
', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg)
)
simple_unlimited_args_kwargs.anything = "Expected simple_unlimited_args_kwargs __dict__"
@register.simple_tag(takes_context=True)
def simple_tag_without_context_parameter(arg):
"""Expected simple_tag_without_context_parameter __doc__"""
return "Expected result"
simple_tag_without_context_parameter.anything = "Expected simple_tag_without_context_parameter __dict__"
@register.simple_tag(takes_context=True)
def escape_naive(context):
"""A tag that doesn't even think about escaping issues"""
return "Hello {0}!".format(context['name'])
@register.simple_tag(takes_context=True)
def escape_explicit(context):
"""A tag that uses escape explicitly"""
return escape("Hello {0}!".format(context['name']))
@register.simple_tag(takes_context=True)
def escape_format_html(context):
"""A tag that uses format_html"""
return format_html("Hello {0}!", context['name'])
@register.simple_tag(takes_context=True)
def current_app(context):
return "%s" % context.current_app
@register.simple_tag(takes_context=True)
def use_l10n(context):
return "%s" % context.use_l10n
@register.simple_tag(name='minustwo')
def minustwo_overridden_name(value):
return value - 2
register.simple_tag(lambda x: x - 1, name='minusone')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
@register.assignment_tag
def assignment_no_params():
"""Expected assignment_no_params __doc__"""
return "assignment_no_params - Expected result"
assignment_no_params.anything = "Expected assignment_no_params __dict__"
@register.assignment_tag(takes_context=True)
def assignment_tag_without_context_parameter(arg):
"""Expected assignment_tag_without_context_parameter __doc__"""
return "Expected result"
assignment_tag_without_context_parameter.anything = "Expected assignment_tag_without_context_parameter __dict__"
| bsd-3-clause |
shsingh/ansible | lib/ansible/modules/storage/netapp/netapp_e_amg_role.py | 52 | 7909 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_amg_role
short_description: NetApp E-Series update the role of a storage array within an Asynchronous Mirror Group (AMG).
description:
- Update a storage array to become the primary or secondary instance in an asynchronous mirror group
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
ssid:
description:
- The ID of the primary storage array for the async mirror action
required: yes
role:
description:
- Whether the array should be the primary or secondary array for the AMG
required: yes
choices: ['primary', 'secondary']
noSync:
description:
- Whether to avoid synchronization prior to role reversal
required: no
default: no
type: bool
force:
description:
- Whether to force the role reversal regardless of the online-state of the primary
required: no
default: no
type: bool
"""
EXAMPLES = """
- name: Update the role of a storage array
netapp_e_amg_role:
name: updating amg role
role: primary
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
"""
RETURN = """
msg:
description: Failure message
returned: failure
type: str
sample: "No Async Mirror Group with the name."
"""
import json
import traceback
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
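# Helper (comment added for clarity): thin wrapper around open_url() that
# returns a (status_code, parsed_json) tuple; unless ignore_errors is set it
# raises on HTTP status codes >= 400 or on bodies that fail to parse as JSON.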
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
amg_exists = False
has_desired_role = False
amg_id = None
amg_data = None
get_amgs = 'storage-systems/%s/async-mirrors' % ssid
url = api_url + get_amgs
try:
amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
headers=HEADERS)
except Exception:
module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))
for amg in amgs:
if amg['label'] == name:
amg_exists = True
amg_id = amg['id']
amg_data = amg
if amg['localRole'] == body.get('role'):
has_desired_role = True
return amg_exists, has_desired_role, amg_id, amg_data
def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
url = api_url + endpoint
post_data = json.dumps(body)
try:
request(url, data=post_data, method='POST', url_username=api_usr,
url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
status_url = api_url + status_endpoint
try:
rc, status = request(status_url, method='GET', url_username=api_usr,
url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to check status of AMG after role reversal. "
"Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
# Here we wait for the role reversal to complete
if 'roleChangeProgress' in status:
while status['roleChangeProgress'] != "none":
try:
rc, status = request(status_url, method='GET',
url_username=api_usr, url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.fail_json(
msg="Failed to check status of AMG after role reversal. "
"Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
exception=traceback.format_exc())
return status
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
name=dict(required=True, type='str'),
role=dict(required=True, choices=['primary', 'secondary']),
noSync=dict(required=False, type='bool', default=False),
force=dict(required=False, type='bool', default=False),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
))
module = AnsibleModule(argument_spec=argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
name = p.pop('name')
if not api_url.endswith('/'):
api_url += '/'
agm_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name)
if not agm_exists:
module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
elif has_desired_role:
module.exit_json(changed=False, **amg_data)
else:
amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id)
if amg_data:
module.exit_json(changed=True, **amg_data)
else:
module.exit_json(changed=True, msg="AMG role changed.")
if __name__ == '__main__':
main()
| gpl-3.0 |
hhg2288/mkdocs | mkdocs/tests/cli_tests.py | 20 | 2079 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import unittest
import mock
from click.testing import CliRunner
from mkdocs import __main__ as cli
class CLITests(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
@mock.patch('mkdocs.commands.serve.serve', autospec=True)
def test_serve(self, mock_serve):
result = self.runner.invoke(
cli.cli, ["serve", ], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_serve.call_count, 1)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build(self, mock_build):
result = self.runner.invoke(
cli.cli, ["build", ], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_build_verbose(self, mock_build):
result = self.runner.invoke(
cli.cli, ["--verbose", "build"], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
@mock.patch('mkdocs.commands.build.build', autospec=True)
def test_json(self, mock_build):
result = self.runner.invoke(
cli.cli, ["json", ], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
@mock.patch('mkdocs.commands.new.new', autospec=True)
def test_new(self, mock_new):
result = self.runner.invoke(
cli.cli, ["new", "project"], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_new.call_count, 1)
@mock.patch('mkdocs.commands.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy(self, mock_gh_deploy):
result = self.runner.invoke(
cli.cli, ["gh-deploy"], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
| bsd-2-clause |
GoogleCloudPlatform/python-compat-runtime | appengine-compat/exported_appengine_sdk/google/appengine/_internal/django/core/cache/backends/memcached.py | 23 | 3612 | "Memcached cache backend"
import time
from google.appengine._internal.django.core.cache.backends.base import BaseCache, InvalidCacheBackendError
from google.appengine._internal.django.utils.encoding import smart_unicode, smart_str
try:
import cmemcache as memcache
import warnings
warnings.warn(
"Support for the 'cmemcache' library has been deprecated. Please use python-memcached instead.",
PendingDeprecationWarning
)
except ImportError:
try:
import memcache
except:
raise InvalidCacheBackendError("Memcached cache backend requires either the 'memcache' or 'cmemcache' library")
class CacheClass(BaseCache):
def __init__(self, server, params):
BaseCache.__init__(self, params)
self._cache = memcache.Client(server.split(';'))
def _get_memcache_timeout(self, timeout):
"""
Memcached deals with long (> 30 days) timeouts in a special
way. Call this function to obtain a safe value for your timeout.
"""
timeout = timeout or self.default_timeout
if timeout > 2592000: # 60*60*24*30, 30 days
# See http://code.google.com/p/memcached/wiki/FAQ
# "You can set expire times up to 30 days in the future. After that
# memcached interprets it as a date, and will expire the item after
# said date. This is a simple (but obscure) mechanic."
#
# This means that we have to switch to absolute timestamps.
timeout += int(time.time())
return timeout
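    # Worked example (editor's note): a timeout of 300 passes through
    # unchanged, while anything above 2592000 (30 days) is turned into an
    # absolute expiry, e.g. 60*60*24*31 becomes int(time.time()) + 2678400.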
def add(self, key, value, timeout=0):
if isinstance(value, unicode):
value = value.encode('utf-8')
return self._cache.add(smart_str(key), value, self._get_memcache_timeout(timeout))
def get(self, key, default=None):
val = self._cache.get(smart_str(key))
if val is None:
return default
return val
def set(self, key, value, timeout=0):
self._cache.set(smart_str(key), value, self._get_memcache_timeout(timeout))
def delete(self, key):
self._cache.delete(smart_str(key))
def get_many(self, keys):
return self._cache.get_multi(map(smart_str,keys))
def close(self, **kwargs):
self._cache.disconnect_all()
def incr(self, key, delta=1):
try:
val = self._cache.incr(key, delta)
# python-memcache responds to incr on non-existent keys by
# raising a ValueError. Cmemcache returns None. In both
# cases, we should raise a ValueError though.
except ValueError:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def decr(self, key, delta=1):
try:
val = self._cache.decr(key, delta)
# python-memcache responds to decr on non-existent keys by
# raising a ValueError. Cmemcache returns None. In both
# cases, we should raise a ValueError though.
except ValueError:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def set_many(self, data, timeout=0):
safe_data = {}
for key, value in data.items():
if isinstance(value, unicode):
value = value.encode('utf-8')
safe_data[smart_str(key)] = value
self._cache.set_multi(safe_data, self._get_memcache_timeout(timeout))
def delete_many(self, keys):
self._cache.delete_multi(map(smart_str, keys))
def clear(self):
self._cache.flush_all()
| apache-2.0 |
credativ/pulp | server/test/unit/server/db/migrations/test_migration_0011.py | 17 | 1861 | import unittest
import mock
from pulp.server.db.migrate.models import MigrationModule
MIGRATION = 'pulp.server.db.migrations.0011_permissions_schema_change'
class TestMigration(unittest.TestCase):
@mock.patch('pulp.server.db.migrations.0011_permissions_schema_change.Permission')
def test_migrate(self, mock_connection):
"""
Test the schema change happens like it should.
"""
permissions_schema = [{"resource": "/", "id": "5356d55b37382030f4a80b5e",
"users": {"admin": [0, 1, 2, 3, 4]}}]
new_schema = [{"resource": "/", "id": "5356d55b37382030f4a80b5e",
"users": [{"username": "admin", "permissions": [0, 1, 2, 3, 4]}]}]
migration = MigrationModule(MIGRATION)._module
collection = mock_connection.get_collection.return_value
collection.find.return_value = permissions_schema
migration.migrate()
self.assertEquals(permissions_schema, new_schema)
@mock.patch('pulp.server.db.migrations.0011_permissions_schema_change.Permission')
def test_idempotence(self, mock_connection):
"""
Test the idempotence of the migration
"""
permissions_schema = [{"resource": "/", "id": "5356d55b37382030f4a80b5e",
"users": {"admin": [0, 1, 2, 3, 4]}}]
new_schema = [{"resource": "/", "id": "5356d55b37382030f4a80b5e",
"users": [{"username": "admin", "permissions": [0, 1, 2, 3, 4]}]}]
migration = MigrationModule(MIGRATION)._module
collection = mock_connection.get_collection.return_value
collection.find.return_value = permissions_schema
migration.migrate()
self.assertEquals(permissions_schema, new_schema)
migration.migrate()
self.assertEquals(permissions_schema, new_schema)
| gpl-2.0 |
ImmortalBen/TeamTalk | win-client/3rdParty/src/json/test/runjsontests.py | 175 | 5469 | import sys
import os
import os.path
from glob import glob
import optparse
VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes '
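# compareOutputs() compares the expected and actual output line by line
# (whitespace-insensitive) and returns None on a match, or a human-readable
# message naming the first differing line otherwise.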
def compareOutputs( expected, actual, message ):
expected = expected.strip().replace('\r','').split('\n')
actual = actual.strip().replace('\r','').split('\n')
diff_line = 0
max_line_to_compare = min( len(expected), len(actual) )
for index in xrange(0,max_line_to_compare):
if expected[index].strip() != actual[index].strip():
diff_line = index + 1
break
if diff_line == 0 and len(expected) != len(actual):
diff_line = max_line_to_compare+1
if diff_line == 0:
return None
def safeGetLine( lines, index ):
index += -1
if index >= len(lines):
return ''
return lines[index].strip()
return """ Difference in %s at line %d:
Expected: '%s'
Actual: '%s'
""" % (message, diff_line,
safeGetLine(expected,diff_line),
safeGetLine(actual,diff_line) )
def safeReadFile( path ):
try:
return file( path, 'rt' ).read()
except IOError, e:
return '<File "%s" is missing: %s>' % (path,e)
def runAllTests( jsontest_executable_path, input_dir = None,
use_valgrind=False, with_json_checker=False ):
if not input_dir:
input_dir = os.path.join( os.getcwd(), 'data' )
tests = glob( os.path.join( input_dir, '*.json' ) )
if with_json_checker:
test_jsonchecker = glob( os.path.join( input_dir, '../jsonchecker', '*.json' ) )
else:
test_jsonchecker = []
failed_tests = []
valgrind_path = use_valgrind and VALGRIND_CMD or ''
for input_path in tests + test_jsonchecker:
expect_failure = os.path.basename( input_path ).startswith( 'fail' )
is_json_checker_test = (input_path in test_jsonchecker) or expect_failure
print 'TESTING:', input_path,
options = is_json_checker_test and '--json-checker' or ''
pipe = os.popen( "%s%s %s %s" % (
valgrind_path, jsontest_executable_path, options,
input_path) )
process_output = pipe.read()
status = pipe.close()
if is_json_checker_test:
if expect_failure:
if status is None:
print 'FAILED'
failed_tests.append( (input_path, 'Parsing should have failed:\n%s' %
safeReadFile(input_path)) )
else:
print 'OK'
else:
if status is not None:
print 'FAILED'
failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
else:
print 'OK'
else:
base_path = os.path.splitext(input_path)[0]
actual_output = safeReadFile( base_path + '.actual' )
actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' )
file(base_path + '.process-output','wt').write( process_output )
if status:
print 'parsing failed'
failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
else:
expected_output_path = os.path.splitext(input_path)[0] + '.expected'
expected_output = file( expected_output_path, 'rt' ).read()
detail = ( compareOutputs( expected_output, actual_output, 'input' )
or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) )
if detail:
print 'FAILED'
failed_tests.append( (input_path, detail) )
else:
print 'OK'
if failed_tests:
print
print 'Failure details:'
for failed_test in failed_tests:
print '* Test', failed_test[0]
print failed_test[1]
print
print 'Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
len(failed_tests) )
return 1
else:
print 'All %d tests passed.' % len(tests)
return 0
def main():
from optparse import OptionParser
parser = OptionParser( usage="%prog [options] <path to jsontestrunner.exe> [test case directory]" )
parser.add_option("--valgrind",
action="store_true", dest="valgrind", default=False,
help="run all the tests using valgrind to detect memory leaks")
parser.add_option("-c", "--with-json-checker",
action="store_true", dest="with_json_checker", default=False,
help="run all the tests from the official JSONChecker test suite of json.org")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) < 1 or len(args) > 2:
        parser.error( 'Must provide at least the path to the jsontestrunner executable.' )
sys.exit( 1 )
jsontest_executable_path = os.path.normpath( os.path.abspath( args[0] ) )
if len(args) > 1:
input_path = os.path.normpath( os.path.abspath( args[1] ) )
else:
input_path = None
status = runAllTests( jsontest_executable_path, input_path,
use_valgrind=options.valgrind, with_json_checker=options.with_json_checker )
sys.exit( status )
if __name__ == '__main__':
main()
| apache-2.0 |
songmonit/CTTMSONLINE | addons/hr_payroll_account/wizard/__init__.py | 433 | 1116 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_payslips_by_employees
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cryptobanana/ansible | lib/ansible/modules/network/cloudengine/ce_config.py | 12 | 11355 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_config
version_added: "2.4"
author: "QijunPan (@CloudEngine-Ansible)"
short_description: Manage Huawei CloudEngine configuration sections.
description:
- Huawei CloudEngine configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with CloudEngine configuration sections in
a deterministic way. This module works with CLI transports.
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device current-configuration. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
required: false
default: null
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the current-configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(current-configuration) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
type: bool
default: false
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current current-configuration to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current-configuration for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
defaults:
description:
- The I(defaults) argument will influence how the current-configuration
is collected from the device. When the value is set to true,
the command used to collect the current-configuration is append with
the all keyword. When the value is set to false, the command
is issued without the all keyword.
required: false
type: bool
default: false
save:
description:
- The C(save) argument instructs the module to save the
current-configuration to saved-configuration. This operation is performed
after any changes are made to the current running config. If
no changes are made, the configuration is still saved to the
startup config. This option will always cause the module to
return changed.
required: false
type: bool
default: false
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
- name: CloudEngine config test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Configure top level configuration and save it"
ce_config:
lines: sysname {{ inventory_hostname }}
save: yes
provider: "{{ cli }}"
- name: "Configure acl configuration and save it"
ce_config:
lines:
- rule 10 permit source 1.1.1.1 32
- rule 20 permit source 2.2.2.2 32
- rule 30 permit source 3.3.3.3 32
- rule 40 permit source 4.4.4.4 32
- rule 50 permit source 5.5.5.5 32
parents: acl 2000
before: undo acl 2000
match: exact
provider: "{{ cli }}"
- name: "Configure acl configuration and save it"
ce_config:
lines:
- rule 10 permit source 1.1.1.1 32
- rule 20 permit source 2.2.2.2 32
- rule 30 permit source 3.3.3.3 32
- rule 40 permit source 4.4.4.4 32
parents: acl 2000
before: undo acl 2000
replace: block
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/ce_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils.network.cloudengine.ce import get_config, load_config, run_commands
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
from ansible.module_utils.network.cloudengine.ce import check_args as ce_check_args
def check_args(module, warnings):
ce_check_args(module, warnings)
def get_running_config(module):
contents = module.params['config']
if not contents:
flags = []
if module.params['defaults']:
flags.append('include-default')
contents = get_config(module, flags=flags)
return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
replace = module.params['replace']
candidate = get_candidate(module)
if match != 'none':
config = get_running_config(module)
path = module.params['parents']
configobjs = candidate.difference(config, match=match, replace=replace, path=path)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['lines']:
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
result['updates'] = commands
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block']),
config=dict(),
defaults=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
save=dict(type='bool', default=False),
)
argument_spec.update(ce_argument_spec)
mutually_exclusive = [('lines', 'src'),
('parents', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = get_config(module)
if any((module.params['src'], module.params['lines'])):
run(module, result)
if module.params['save']:
if not module.check_mode:
run_commands(module, ['save'])
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
ramadhane/odoo | addons/resource/tests/__init__.py | 261 | 1085 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_resource
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
davidzchen/tensorflow | tensorflow/python/data/kernel_tests/placement_test.py | 1 | 4839 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.data placement within tf.functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@combinations.generate(test_base.v2_eager_only_combinations())
class PlacementTest(test_base.DatasetTestBase, parameterized.TestCase):
"""Tests for tf.data placement within tf.functions.
Specifically, tf.data dataset tensors cannot be copied between devices. These
tests verify the ops are placed in a way that avoids this.
"""
def setUp(self):
super(PlacementTest, self).setUp()
# Grappler optimizations can affect whether the placement issues occur,
# since they may inadvertently rewrite nodes and edges in a way that removes
# cross-device copies.
config.set_optimizer_experimental_options({"disable_meta_optimizer": True})
def testWhileWithCapturedDataset(self):
dataset = dataset_ops.Dataset.range(10)
@def_function.function
def f():
total = constant_op.constant(0, dtypes.int64)
for _ in math_ops.range(1):
for elem in dataset:
total += elem
return total
self.assertEqual(f().numpy(), 45)
def testWhile(self):
self.skipTest("b/166625126")
@def_function.function
def f():
dataset = dataset_ops.Dataset.range(10)
total = constant_op.constant(0, dtypes.int64)
for _ in math_ops.range(1):
for elem in dataset:
total += elem
return total
self.assertEqual(f().numpy(), 45)
def testCondWithPlacement(self):
self.skipTest("b/166625126")
# When the cond op is explicitly placed, there shouldn't be cross-device
# copies.
@def_function.function
def f():
dataset = dataset_ops.Dataset.range(10)
def fn():
return dataset.map(lambda x: x+1)
c = constant_op.constant(2)
with ops.device("/cpu:0"):
a = control_flow_ops.cond(math_ops.equal(c, 2), fn, fn)
iterator = iter(a)
nxt = next(iterator)
return nxt
self.assertEqual(f(), 1)
def testCondWithColocation(self):
self.skipTest("b/166625126")
# When the cond op is colocated with the dataset, there shouldn't be
# cross-device copies.
@def_function.function
def f():
dataset = dataset_ops.Dataset.range(8)
def fn():
return dataset.map(lambda x: x+1)
c = constant_op.constant(2)
with ops.colocate_with(dataset._variant_tensor): # pylint:disable=protected-access
a = control_flow_ops.cond(math_ops.equal(c, 2), fn, fn)
iterator = iter(a)
nxt = next(iterator)
return nxt
self.assertEqual(f().numpy(), 1)
def testCond(self):
self.skipTest("b/166625126")
# Ideally, placer should avoid cross-device copies even when the cond op
# has no placement constraints.
@def_function.function
def f():
dataset = dataset_ops.Dataset.range(8)
def fn():
return dataset.map(lambda x: x+1)
c = constant_op.constant(2)
a = control_flow_ops.cond(math_ops.equal(c, 2), fn, fn)
iterator = iter(a)
nxt = next(iterator)
return nxt
self.assertEqual(f().numpy(), 1)
def testId(self):
self.skipTest("b/166625126")
# Ideally, placer should know that Identity(dataset) should be on the same
# device as the dataset.
@def_function.function
def f():
dataset = dataset_ops.Dataset.range(10)
dataset = array_ops.identity(dataset)
return dataset
f()
if __name__ == "__main__":
test.main()
| apache-2.0 |
ravello/ansible | lib/ansible/runner/action_plugins/patch.py | 93 | 2501 | # (c) 2015, Brian Coca <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
import os
from ansible import utils
from ansible.runner.return_data import ReturnData
class ActionModule(object):
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
options = {}
if complex_args:
options.update(complex_args)
options.update(utils.parse_kv(module_args))
src = options.get('src', None)
dest = options.get('dest', None)
remote_src = utils.boolean(options.get('remote_src', 'no'))
if src is None:
result = dict(failed=True, msg="src is required")
return ReturnData(conn=conn, comm_ok=False, result=result)
if remote_src:
return self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args)
# Source is local
if '_original_file' in inject:
src = utils.path_dwim_relative(inject['_original_file'], 'files', src, self.runner.basedir)
else:
src = utils.path_dwim(self.runner.basedir, src)
if tmp is None or "-tmp-" not in tmp:
tmp = self.runner._make_tmp_path(conn)
tmp_src = conn.shell.join_path(tmp, os.path.basename(src))
conn.put_file(src, tmp_src)
if self.runner.become and self.runner.become_user != 'root':
if not self.runner.noop_on_check(inject):
self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
new_module_args = dict(
src=tmp_src,
)
if self.runner.noop_on_check(inject):
new_module_args['CHECKMODE'] = True
module_args = utils.merge_module_args(module_args, new_module_args)
return self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args)
| gpl-3.0 |
jbdubois/obus | src/obusgen/c/obus_c_enum.py | 1 | 6588 | #!/usr/bin/env python
#===============================================================================
# obusgen - obus source code generator.
#
# @file obus_c.py
#
# @brief obus c enum code generator
#
# @author [email protected]
#
# Copyright (c) 2013 Parrot S.A.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Parrot Company nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL PARROT COMPANY BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===============================================================================
from obus_c_utils import getObjectName
class ObusEnumWriter(object):
""" ObusEnum C class writer """
def __init__(self, enum):
self.enum = enum
def getName(self):
""" get full enum name """
return self.enum.obj.bus.name + "_" + \
self.enum.obj.name + "_" + \
self.enum.name
def getDriverSymbol(self):
""" get enum driver symbol name """
return self.enum.obj.bus.name + "_" + \
self.enum.obj.name + "_" + \
self.enum.name + "_driver"
def __writeEnumIsValid(self, out, header):
""" EnumIsValid method writer """
if header:
out.write("\n/**\n")
out.write(" * @brief check if value is one of "\
"enum %s values.\n", self.getName())
out.write(" *\n")
out.write(" * @param[in] value value to be checked.\n")
out.write(" *\n")
out.write(" * @retval 1 value is one of %s values.\n",
self.getName())
out.write(" * @retval 0 value is not one of %s values.\n",
self.getName())
out.write(" **/")
out.write("\nint %s_is_valid(int32_t value)%s\n", self.getName(),
(';' if header else ''))
if not header:
out.write("{\n")
out.write("\treturn (")
first = True
for v in self.enum.values.values():
if not first:
out.write(" ||\n\t\t")
else:
first = False
out.write("value == %s_%s", self.getName().upper(),
v.name.upper())
out.write(");\n}\n")
def __writeEnumStr(self, out, header):
""" EnumStr method writer """
if header:
out.write("\n/**\n")
out.write(" * @brief get %s string value.\n", self.getName())
out.write(" *\n")
out.write(" * @param[in] value %s value to be converted into string.\n", self.enum.name)
out.write(" *\n")
out.write(" * @retval non NULL constant string value.\n")
out.write(" **/")
out.write("\nconst char *%s_str(enum %s value)%s\n",
self.getName(), self.getName(),
(';' if header else ''))
if not header:
out.write("{\n")
out.write("\tconst char *str;\n")
out.write("\n")
out.write("\tswitch (value) {\n")
for v in self.enum.values.values():
out.write("\tcase %s_%s:\n", self.getName().upper(),
v.name.upper())
out.write("\t\tstr = \"%s\";\n", v.name.upper())
out.write("\t\tbreak;\n")
out.write("\tdefault:\n")
out.write("\t\tstr = \"???\";\n")
out.write("\t\tbreak;\n")
out.write("\t}\n")
out.write("\n")
out.write("\treturn str;\n")
out.write("}\n")
def writeDeclaration(self, out):
""" declare enumeration in a .h file """
# declare enum
out.write("\n/**\n")
out.write(" * @brief %s %s enumeration.\n", getObjectName(self.enum.obj), self.enum.name)
if self.enum.desc:
out.write(" *\n")
out.write(" * %s\n", self.enum.desc)
out.write(" **/\n")
out.write("enum %s {\n", self.getName())
for v in self.enum.values.values():
if v.desc:
out.write("\t/** %s */\n", v.desc)
out.write("\t%s_%s = %d,\n", self.getName().upper(),
v.name.upper(), v.value)
out.write("};\n")
# declare enum value is valid
self.__writeEnumIsValid(out, 1)
self.__writeEnumStr(out, 1)
def writeDriver(self, out):
""" write enum driver in a .c file """
# is_valid
self.__writeEnumIsValid(out, 0)
self.__writeEnumStr(out, 0)
# set_value
out.write("\nstatic void %s_set_value(void *addr, int32_t value)\n",
self.getName())
out.write("{\n")
out.write("\tenum %s *v = addr;\n", self.getName())
out.write("\t*v = (enum %s)value;\n", self.getName())
out.write("}\n")
# get_value
out.write("\nstatic int32_t %s_get_value(const void *addr)\n",
self.getName())
out.write("{\n")
out.write("\tconst enum %s *v = addr;\n", self.getName())
out.write("\treturn (int32_t)(*v);\n")
out.write("}\n")
# format
out.write("\nstatic void %s_format(const void *addr, "\
"char *buf, size_t size)\n", self.getName())
out.write("{\n")
out.write("\tconst enum %s *v = addr;\n", self.getName())
out.write("\n")
out.write("\tif (%s_is_valid((int32_t)(*v)))\n", self.getName())
out.write("\t\tsnprintf(buf, size, \"%%s\", %s_str(*v));\n", self.getName())
out.write("\telse\n")
out.write("\t\tsnprintf(buf, size, \"??? (%%d)\", (int32_t)(*v));\n")
out.write("}\n")
# declare enum driver
out.write("\nstatic const struct obus_enum_driver %s = {\n",
self.getDriverSymbol())
out.write("\t.name = \"%s\",\n", self.getName())
out.write("\t.size = sizeof(enum %s),\n", self.getName())
out.write("\t.default_value = %s_%s,\n", self.getName().upper(),
self.enum.default.upper())
out.write("\t.is_valid = %s_is_valid,\n", self.getName())
out.write("\t.set_value = %s_set_value,\n", self.getName())
out.write("\t.get_value = %s_get_value,\n", self.getName())
out.write("\t.format = %s_format\n", self.getName())
out.write("};\n")
| lgpl-2.1 |
2014c2g23/2015cda-w17 | static/Brython3.1.1-20150328-091302/Lib/configparser.py | 692 | 50025 | """Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
from collections.abc import MutableMapping
from collections import OrderedDict as _default_dict, ChainMap as _ChainMap
import functools
import io
import itertools
import re
import sys
import warnings
__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError",
"NoOptionError", "InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation in
BaseException.
"""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation in
BaseException.
"""
self.__message = value
# BaseException.message has been deprecated since Python 2.6. To prevent
# DeprecationWarning from popping up over this pre-existing attribute, use
# a new property that takes lookup precedence.
message = property(_get_message, _set_message)
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is repeated in an input source.
Possible repetitions that raise this exception are: multiple creation
using the API or in strict parsers when a section is found more than once
in a single input file, string or dictionary.
"""
def __init__(self, section, source=None, lineno=None):
msg = [repr(section), " already exists"]
if source is not None:
message = ["While reading from ", source]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": section ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Section ")
Error.__init__(self, "".join(msg))
self.section = section
self.source = source
self.lineno = lineno
self.args = (section, source, lineno)
class DuplicateOptionError(Error):
"""Raised by strict parsers when an option is repeated in an input source.
Current implementation raises this exception only when an option is found
more than once in a single file, string or dictionary.
"""
def __init__(self, section, option, source=None, lineno=None):
msg = [repr(option), " in section ", repr(section),
" already exists"]
if source is not None:
message = ["While reading from ", source]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": option ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Option ")
Error.__init__(self, "".join(msg))
self.section = section
self.option = option
self.source = source
self.lineno = lineno
self.args = (section, option, source, lineno)
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text contains invalid syntax.
Current implementation raises this exception when the source text into
which substitutions are made does not conform to the required syntax.
"""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, source=None, filename=None):
# Exactly one of `source'/`filename' arguments has to be given.
# `filename' kept for compatibility.
if filename and source:
raise ValueError("Cannot specify both `filename' and `source'. "
"Use `source'.")
elif not filename and not source:
raise ValueError("Required argument `source' not given.")
elif filename:
source = filename
Error.__init__(self, 'Source contains parsing errors: %s' % source)
self.source = source
self.errors = []
self.args = (source, )
@property
def filename(self):
"""Deprecated, use `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
return self.source
@filename.setter
def filename(self, value):
"""Deprecated, user `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
self.source = value
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.source = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
# Used in parser getters to indicate the default behaviour when a specific
# option is not found is to raise an exception. Created to enable `None' as
# a valid fallback value.
_UNSET = object()
class Interpolation:
"""Dummy interpolation that passes the value through with no changes."""
def before_get(self, parser, section, option, value, defaults):
return value
def before_set(self, parser, section, option, value):
return value
def before_read(self, parser, section, option, value):
return value
def before_write(self, parser, section, option, value):
return value
class BasicInterpolation(Interpolation):
"""Interpolation as implemented in the classic ConfigParser.
The option values can contain format strings which refer to other values in
the same section, or values in the special default section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand. If a user needs to use a bare % in
a configuration file, she can escape it by writing %%. Other % usage
is considered a user error and raises `InterpolationSyntaxError'."""
_KEYCRE = re.compile(r"%\(([^)]+)\)s")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('%%', '') # escaped percent signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = parser.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(parser, option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', "
"found: %r" % (rest,))
class ExtendedInterpolation(Interpolation):
"""Advanced variant of interpolation, supports the syntax used by
`zc.buildout'. Enables interpolation between sections."""
_KEYCRE = re.compile(r"\$\{([^}]+)\}")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('$$', '') # escaped dollar signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '$' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("$")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "$":
accum.append("$")
rest = rest[2:]
elif c == "{":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
path = m.group(1).split(':')
rest = rest[m.end():]
sect = section
opt = option
try:
if len(path) == 1:
opt = parser.optionxform(path[0])
v = map[opt]
elif len(path) == 2:
sect = path[0]
opt = parser.optionxform(path[1])
v = parser.get(sect, opt, raw=True)
else:
raise InterpolationSyntaxError(
option, section,
"More than one ':' found: %r" % (rest,))
except (KeyError, NoSectionError, NoOptionError):
raise InterpolationMissingOptionError(
option, section, rest, ":".join(path))
if "$" in v:
self._interpolate_some(parser, opt, accum, v, sect,
dict(parser.items(sect, raw=True)),
depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'$' must be followed by '$' or '{', "
"found: %r" % (rest,))
class LegacyInterpolation(Interpolation):
"""Deprecated interpolation used in old versions of ConfigParser.
Use BasicInterpolation or ExtendedInterpolation instead."""
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def before_get(self, parser, section, option, value, vars):
rawval = value
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
replace = functools.partial(self._interpolation_replace,
parser=parser)
value = self._KEYCRE.sub(replace, value)
try:
value = value % vars
except KeyError as e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
def before_set(self, parser, section, option, value):
return value
@staticmethod
def _interpolation_replace(match, parser):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % parser.optionxform(s)
class RawConfigParser(MutableMapping):
"""ConfigParser that does not do interpolation."""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Interpolation algorithm to be used if the user does not specify another
_DEFAULT_INTERPOLATION = Interpolation()
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
# Possible boolean values in the configuration.
BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
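# Illustrative effect of the mapping above: _convert_to_boolean() (used by
# getboolean) lowercases its input first, so "Yes", "on", "TRUE" and "1" all
# map to True, "No", "off", "false" and "0" map to False, and any other
# string raises ValueError.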
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, empty_lines_in_values=True,
default_section=DEFAULTSECT,
interpolation=_UNSET):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
self._proxies = self._dict()
self._proxies[default_section] = SectionProxy(self, default_section)
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
self._empty_lines_in_values = empty_lines_in_values
self.default_section=default_section
self._interpolation = interpolation
if self._interpolation is _UNSET:
self._interpolation = self._DEFAULT_INTERPOLATION
if self._interpolation is None:
self._interpolation = Interpolation()
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return list(self._sections.keys())
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
"""
if section == self.default_section:
raise ValueError('Invalid section name: %r' % section)
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
self._proxies[section] = SectionProxy(self, section)
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
return list(opts.keys())
def read(self, filenames, encoding=None):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
except IOError:
continue
read_ok.append(filename)
return read_ok
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The `f' argument must be iterable, returning one line at a time.
Optional second argument is the `source' specifying the name of the
file being read. If not given, it is taken from f.name. If `f' has no
`name' attribute, `<???>' is used.
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string."""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def read_dict(self, dictionary, source='<dict>'):
"""Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
"""
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value)
def readfp(self, fp, filename=None):
"""Deprecated, use read_file instead."""
warnings.warn(
"This method will be removed in future versions. "
"Use 'parser.read_file()' instead.",
DeprecationWarning, stacklevel=2
)
self.read_file(fp, source=filename)
def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `DEFAULTSECT' in that order.
If the key is not found and `fallback' is provided, it is used as
a fallback value. `None' can be provided as a `fallback' value.
If interpolation is enabled and the optional argument `raw' is False,
all interpolations are expanded in the return values.
Arguments `raw', `vars', and `fallback' are keyword only.
The section DEFAULT is special.
"""
try:
d = self._unify_values(section, vars)
except NoSectionError:
if fallback is _UNSET:
raise
else:
return fallback
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
if fallback is _UNSET:
raise NoOptionError(option, section)
else:
return fallback
if raw or value is None:
return value
else:
return self._interpolation.before_get(self, section, option, value,
d)
def _get(self, section, conv, option, **kwargs):
return conv(self.get(section, option, **kwargs))
def getint(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, int, option, raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def getfloat(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, float, option, raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def getboolean(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, self._convert_to_boolean, option,
raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def items(self, section=_UNSET, raw=False, vars=None):
"""Return a list of (name, value) tuples for each option in a section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents override
any pre-existing defaults.
The section DEFAULT is special.
"""
if section is _UNSET:
return super().items()
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
value_getter = lambda option: self._interpolation.before_get(self,
section, option, d[option], d)
if raw:
value_getter = lambda option: d[option]
return [(option, value_getter(option)) for option in d.keys()]
def popitem(self):
"""Remove a section from the parser and return it as
a (section_name, section_proxy) tuple. If no section is present, raise
KeyError.
The section DEFAULT is never returned because it cannot be removed.
"""
for key in self.sections():
value = self[key]
del self[key]
return key, value
raise KeyError
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if value:
value = self._interpolation.before_set(self, section, option,
value)
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp, space_around_delimiters=True):
"""Write an .ini-format representation of the configuration state.
If `space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
if space_around_delimiters:
d = " {} ".format(self._delimiters[0])
else:
d = self._delimiters[0]
if self._defaults:
self._write_section(fp, self.default_section,
self._defaults.items(), d)
for section in self._sections:
self._write_section(fp, section,
self._sections[section].items(), d)
def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{}{}\n".format(key, value))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
del self._proxies[section]
return existed
def __getitem__(self, key):
if key != self.default_section and not self.has_section(key):
raise KeyError(key)
return self._proxies[key]
def __setitem__(self, key, value):
# To conform with the mapping protocol, overwrites existing values in
# the section.
# XXX this is not atomic if read_dict fails at any point. Then again,
# no update method in configparser is atomic in this implementation.
if key == self.default_section:
self._defaults.clear()
elif key in self._sections:
self._sections[key].clear()
self.read_dict({key: value})
def __delitem__(self, key):
if key == self.default_section:
raise ValueError("Cannot remove the default section.")
if not self.has_section(key):
raise KeyError(key)
self.remove_section(key)
def __contains__(self, key):
return key == self.default_section or self.has_section(key)
def __len__(self):
return len(self._sections) + 1 # the default section
def __iter__(self):
# XXX does it break when underlying container state changed?
return itertools.chain((self.default_section,), self._sections.keys())
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]'), plus key/value options, indicated by
`name' and `value' delimited with a specific substring (`=' or `:' by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#' and `;' by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
"""
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
else:
# empty line marks end of value
indent_level = sys.maxsize
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
elif sectname == self.default_section:
cursect = self._defaults
else:
cursect = self._dict()
self._sections[sectname] = cursect
self._proxies[sectname] = SectionProxy(self, sectname)
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
self._join_multiline_values()
def _join_multiline_values(self):
defaults = self.default_section, self._defaults
all_sections = itertools.chain((defaults,),
self._sections.items())
for section, options in all_sections:
for name, val in options.items():
if isinstance(val, list):
val = '\n'.join(val).rstrip()
options[name] = self._interpolation.before_read(self,
section,
name, val)
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def _unify_values(self, section, vars):
"""Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
if value is not None:
value = str(value)
vardict[self.optionxform(key)] = value
return _ChainMap(vardict, sectiondict, self._defaults)
def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()]
def _validate_value_types(self, *, section="", option="", value=""):
"""Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
"""
if not isinstance(section, str):
raise TypeError("section names must be strings")
if not isinstance(option, str):
raise TypeError("option keys must be strings")
if not self._allow_no_value or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
class ConfigParser(RawConfigParser):
"""ConfigParser implementing interpolation."""
_DEFAULT_INTERPOLATION = BasicInterpolation()
def set(self, section, option, value=None):
"""Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value."""
self._validate_value_types(option=option, value=value)
super().set(section, option, value)
def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
self._validate_value_types(section=section)
super().add_section(section)
class SafeConfigParser(ConfigParser):
"""ConfigParser alias for backwards compatibility purposes."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(
"The SafeConfigParser class has been renamed to ConfigParser "
"in Python 3.2. This alias will be removed in future versions."
" Use ConfigParser directly instead.",
DeprecationWarning, stacklevel=2
)
class SectionProxy(MutableMapping):
"""A proxy for a single section from a parser."""
def __init__(self, parser, name):
"""Creates a view on a section of the specified `name` in `parser`."""
self._parser = parser
self._name = name
def __repr__(self):
return '<Section: {}>'.format(self._name)
def __getitem__(self, key):
if not self._parser.has_option(self._name, key):
raise KeyError(key)
return self._parser.get(self._name, key)
def __setitem__(self, key, value):
self._parser._validate_value_types(option=key, value=value)
return self._parser.set(self._name, key, value)
def __delitem__(self, key):
if not (self._parser.has_option(self._name, key) and
self._parser.remove_option(self._name, key)):
raise KeyError(key)
def __contains__(self, key):
return self._parser.has_option(self._name, key)
def __len__(self):
return len(self._options())
def __iter__(self):
return self._options().__iter__()
def _options(self):
if self._name != self._parser.default_section:
return self._parser.options(self._name)
else:
return self._parser.defaults()
def get(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.get(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getint(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getint(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getfloat(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getfloat(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getboolean(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getboolean(self._name, option, raw=raw, vars=vars,
fallback=fallback)
@property
def parser(self):
# The parser object of the proxy is read-only.
return self._parser
@property
def name(self):
# The name of the section on a proxy is read-only.
return self._name
| gpl-3.0 |
miguelparaiso/PracticaOdoo | addons/crm_helpdesk/crm_helpdesk.py | 182 | 7480 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.addons.crm import crm
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import html2plaintext
class crm_helpdesk(osv.osv):
""" Helpdesk Cases """
_name = "crm.helpdesk"
_description = "Helpdesk"
_order = "id desc"
_inherit = ['mail.thread']
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Name', required=True),
'active': fields.boolean('Active', required=False),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'description': fields.text('Description'),
'create_date': fields.datetime('Creation Date' , readonly=True),
'write_date': fields.datetime('Update Date' , readonly=True),
'date_deadline': fields.date('Deadline'),
'user_id': fields.many2one('res.users', 'Responsible'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Responsible sales team. Define Responsible user and Email account for mail gateway.'),
'company_id': fields.many2one('res.company', 'Company'),
'date_closed': fields.datetime('Closed', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner'),
'email_cc': fields.text('Watchers Emails', size=252 , help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'email_from': fields.char('Email', size=128, help="Destination email for email gateway"),
'date': fields.datetime('Date'),
'ref': fields.reference('Reference', selection=openerp.addons.base.res.res_request.referencable_models),
'ref2': fields.reference('Reference 2', selection=openerp.addons.base.res.res_request.referencable_models),
'channel_id': fields.many2one('crm.tracking.medium', 'Channel', help="Communication channel."),
'planned_revenue': fields.float('Planned Revenue'),
'planned_cost': fields.float('Planned Costs'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'probability': fields.float('Probability (%)'),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="['|',('section_id','=',False),('section_id','=',section_id),\
('object_id.model', '=', 'crm.helpdesk')]"),
'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
'state': fields.selection(
[('draft', 'New'),
('open', 'In Progress'),
('pending', 'Pending'),
('done', 'Closed'),
('cancel', 'Cancelled')], 'Status', readonly=True, track_visibility='onchange',
help='The status is set to \'Draft\', when a case is created.\
\nIf the case is in progress the status is set to \'Open\'.\
\nWhen the case is over, the status is set to \'Done\'.\
\nIf the case needs to be reviewed then the status is set to \'Pending\'.'),
}
_defaults = {
'active': lambda *a: 1,
'user_id': lambda s, cr, uid, c: uid,
'state': lambda *a: 'draft',
'date': fields.datetime.now,
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
'priority': '1',
}
def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
values = {}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
values = {
'email_from': partner.email,
}
return {'value': values}
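# Illustrative result (hypothetical partner): picking a partner whose email is
# "[email protected]" makes this onchange return
# {'value': {'email_from': '[email protected]'}}, which pre-fills the
# helpdesk case's Email field on the form.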
def write(self, cr, uid, ids, values, context=None):
""" Override to add case management: open/close dates """
if values.get('state'):
if values.get('state') in ['draft', 'open'] and not values.get('date_open'):
values['date_open'] = fields.datetime.now()
elif values.get('state') == 'done' and not values.get('date_closed'):
values['date_closed'] = fields.datetime.now()
return super(crm_helpdesk, self).write(cr, uid, ids, values, context=context)
def case_escalate(self, cr, uid, ids, context=None):
""" Escalates case to parent level """
data = {'active': True}
for case in self.browse(cr, uid, ids, context=context):
if case.section_id and case.section_id.parent_id:
parent_id = case.section_id.parent_id
data['section_id'] = parent_id.id
if parent_id.change_responsible and parent_id.user_id:
data['user_id'] = parent_id.user_id.id
else:
raise osv.except_osv(_('Error!'), _('You can not escalate, you are already at the top level regarding your sales-team category.'))
self.write(cr, uid, [case.id], data, context=context)
return True
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
desc = html2plaintext(msg.get('body')) if msg.get('body') else ''
defaults = {
'name': msg.get('subject') or _("No Subject"),
'description': desc,
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'user_id': False,
'partner_id': msg.get('author_id', False),
}
defaults.update(custom_values)
return super(crm_helpdesk, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ajose01/rethinkdb | external/gtest_1.6.0/test/gtest_xml_test_utils.py | 356 | 8722 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = '[email protected] (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
class GTestXMLTestCase(gtest_test_utils.TestCase):
"""
Base class for tests of Google Test's XML output functionality.
"""
def AssertEquivalentNodes(self, expected_node, actual_node):
"""
Asserts that actual_node (a DOM node object) is equivalent to
expected_node (another DOM node object), in that either both of
them are CDATA nodes and have the same value, or both are DOM
elements and actual_node meets all of the following conditions:
* It has the same tag name as expected_node.
* It has the same set of attributes as expected_node, each with
the same value as the corresponding attribute of expected_node.
Exceptions are any attribute named "time", which needs only be
convertible to a floating-point number and any attribute named
"type_param" which only has to be non-empty.
* It has an equivalent set of child nodes (including elements and
CDATA sections) as expected_node. Note that we ignore the
order of the children as they are not guaranteed to be in any
particular order.
"""
if expected_node.nodeType == Node.CDATA_SECTION_NODE:
self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
return
self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
self.assertEquals(expected_node.tagName, actual_node.tagName)
expected_attributes = expected_node.attributes
actual_attributes = actual_node .attributes
self.assertEquals(
expected_attributes.length, actual_attributes.length,
'attribute numbers differ in element ' + actual_node.tagName)
for i in range(expected_attributes.length):
expected_attr = expected_attributes.item(i)
actual_attr = actual_attributes.get(expected_attr.name)
self.assert_(
actual_attr is not None,
'expected attribute %s not found in element %s' %
(expected_attr.name, actual_node.tagName))
self.assertEquals(expected_attr.value, actual_attr.value,
' values of attribute %s in element %s differ' %
(expected_attr.name, actual_node.tagName))
expected_children = self._GetChildren(expected_node)
actual_children = self._GetChildren(actual_node)
self.assertEquals(
len(expected_children), len(actual_children),
'number of child elements differ in element ' + actual_node.tagName)
for child_id, child in expected_children.iteritems():
self.assert_(child_id in actual_children,
'<%s> is not in <%s> (in element %s)' %
(child_id, actual_children, actual_node.tagName))
self.AssertEquivalentNodes(child, actual_children[child_id])
identifying_attribute = {
'testsuites': 'name',
'testsuite': 'name',
'testcase': 'name',
'failure': 'message',
}
def _GetChildren(self, element):
"""
Fetches all of the child nodes of element, a DOM Element object.
Returns them as the values of a dictionary keyed by the IDs of the
children. For <testsuites>, <testsuite> and <testcase> elements, the ID
is the value of their "name" attribute; for <failure> elements, it is
the value of the "message" attribute; CDATA sections and non-whitespace
text nodes are concatenated into a single CDATA section with ID
"detail". An exception is raised if any element other than the above
four is encountered, if two child elements with the same identifying
attributes are encountered, or if any other type of node is encountered.
"""
children = {}
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.assert_(child.tagName in self.identifying_attribute,
'Encountered unknown element <%s>' % child.tagName)
childID = child.getAttribute(self.identifying_attribute[child.tagName])
self.assert_(childID not in children)
children[childID] = child
elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
if 'detail' not in children:
if (child.nodeType == Node.CDATA_SECTION_NODE or
not child.nodeValue.isspace()):
children['detail'] = child.ownerDocument.createCDATASection(
child.nodeValue)
else:
children['detail'].nodeValue += child.nodeValue
else:
self.fail('Encountered unexpected node type %d' % child.nodeType)
return children
def NormalizeXml(self, element):
"""
Normalizes Google Test's XML output to eliminate references to transient
information that may change from run to run.
* The "time" attribute of <testsuites>, <testsuite> and <testcase>
elements is replaced with a single asterisk, if it contains
only digit characters.
* The "timestamp" attribute of <testsuites> elements is replaced with a
single asterisk, if it contains a valid ISO8601 datetime value.
* The "type_param" attribute of <testcase> elements is replaced with a
single asterisk (if it is non-empty) as it is the type name returned
by the compiler and is platform dependent.
* The line info reported in the first line of the "message"
attribute and CDATA section of <failure> elements is replaced with the
file's basename and a single asterisk for the line number.
* The directory names in file paths are removed.
* The stack traces are removed.
"""
if element.tagName == 'testsuites':
timestamp = element.getAttributeNode('timestamp')
timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
'*', timestamp.value)
if element.tagName in ('testsuites', 'testsuite', 'testcase'):
time = element.getAttributeNode('time')
time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
type_param = element.getAttributeNode('type_param')
if type_param and type_param.value:
type_param.value = '*'
elif element.tagName == 'failure':
source_line_pat = r'^.*[/\\](.*:)\d+\n'
# Replaces the source line information with a normalized form.
message = element.getAttributeNode('message')
message.value = re.sub(source_line_pat, '\\1*\n', message.value)
for child in element.childNodes:
if child.nodeType == Node.CDATA_SECTION_NODE:
# Replaces the source line information with a normalized form.
cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
# Removes the actual stack trace.
child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*',
'', cdata)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.NormalizeXml(child)
| agpl-3.0 |
jaywunder/jacobwunder.com | public/lib/threejslib/utils/exporters/blender/addons/io_three/exporter/api/material.py | 10 | 5773 | from bpy import data, types
from .. import constants, logger
from .constants import MULTIPLY, WIRE, IMAGE
def _material(func):
def inner(name, *args, **kwargs):
if isinstance(name, types.Material):
material = name
else:
material = data.materials[name]
return func(material, *args, **kwargs)
return inner
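# Illustrative sketch of the decorator above (hypothetical material name): the
# wrapped getters accept either a bpy material datablock or its name, so both
# diffuse_color(bpy.data.materials['Skin']) and diffuse_color('Skin') resolve
# the same material before the real function body runs.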
@_material
def ambient_color(material):
logger.debug('material.ambient_color(%s)', material)
diffuse = diffuse_color(material)
return (material.ambient * diffuse[0],
material.ambient * diffuse[1],
material.ambient * diffuse[2])
@_material
def blending(material):
logger.debug('material.blending(%s)', material)
try:
blend = material.THREE_blending_type
except AttributeError:
logger.debug('No THREE_blending_type attribute found')
blend = constants.NORMAL_BLENDING
return blend
@_material
def bump_map(material):
logger.debug('material.bump_map(%s)', material)
for texture in _valid_textures(material):
if texture.use_map_normal and not \
texture.texture.use_normal_map:
return texture.texture
@_material
def bump_scale(material):
return normal_scale(material)
@_material
def depth_test(material):
logger.debug('material.depth_test(%s)', material)
try:
test = material.THREE_depth_test
except AttributeError:
logger.debug('No THREE_depth_test attribute found')
test = True
return test
@_material
def depth_write(material):
logger.debug('material.depth_write(%s)', material)
try:
write = material.THREE_depth_write
except AttributeError:
logger.debug('No THREE_depth_write attribute found')
write = True
return write
@_material
def diffuse_color(material):
logger.debug('material.diffuse_color(%s)', material)
return (material.diffuse_intensity * material.diffuse_color[0],
material.diffuse_intensity * material.diffuse_color[1],
material.diffuse_intensity * material.diffuse_color[2])
@_material
def diffuse_map(material):
logger.debug('material.diffuse_map(%s)', material)
for texture in _valid_textures(material):
if texture.use_map_color_diffuse and not \
texture.blend_type == MULTIPLY:
return texture.texture
@_material
def emissive_color(material):
logger.debug('material.emissive_color(%s)', material)
diffuse = diffuse_color(material)
return (material.emit * diffuse[0],
material.emit * diffuse[1],
material.emit * diffuse[2])
@_material
def light_map(material):
logger.debug('material.light_map(%s)', material)
for texture in _valid_textures(material):
if texture.use_map_color_diffuse and \
texture.blend_type == MULTIPLY:
return texture.texture
@_material
def normal_scale(material):
logger.debug('material.normal_scale(%s)', material)
for texture in _valid_textures(material):
if texture.use_map_normal:
return texture.normal_factor
@_material
def normal_map(material):
logger.debug('material.normal_map(%s)', material)
for texture in _valid_textures(material):
if texture.use_map_normal and \
texture.texture.use_normal_map:
return texture.texture
@_material
def opacity(material):
logger.debug('material.opacity(%s)', material)
    return round(material.alpha - 1.0, 2)
@_material
def shading(material):
logger.debug('material.shading(%s)', material)
dispatch = {
True: constants.PHONG,
False: constants.LAMBERT
}
return dispatch[material.specular_intensity > 0.0]
@_material
def specular_coef(material):
logger.debug('material.specular_coef(%s)', material)
return material.specular_hardness
@_material
def specular_color(material):
logger.debug('material.specular_color(%s)', material)
return (material.specular_intensity * material.specular_color[0],
material.specular_intensity * material.specular_color[1],
material.specular_intensity * material.specular_color[2])
@_material
def specular_map(material):
logger.debug('material.specular_map(%s)', material)
for texture in _valid_textures(material):
if texture.use_map_specular:
return texture.texture
@_material
def transparent(material):
logger.debug('material.transparent(%s)', material)
return material.use_transparency
@_material
def type(material):
logger.debug('material.type(%s)', material)
if material.diffuse_shader != 'LAMBERT':
material_type = constants.BASIC
elif material.specular_intensity > 0:
material_type = constants.PHONG
else:
material_type = constants.LAMBERT
return material_type
@_material
def use_vertex_colors(material):
logger.debug('material.use_vertex_colors(%s)', material)
return material.use_vertex_color_paint
def used_materials():
logger.debug('material.used_materials()')
for material in data.materials:
if material.users > 0:
yield material.name
@_material
def visible(material):
logger.debug('material.visible(%s)', material)
try:
vis = material.THREE_visible
except AttributeError:
logger.debug('No THREE_visible attribute found')
vis = True
return vis
@_material
def wireframe(material):
logger.debug('material.wireframe(%s)', material)
return material.type == WIRE
def _valid_textures(material):
for texture in material.texture_slots:
if not texture: continue
if texture.texture.type != IMAGE: continue
logger.debug('Valid texture found %s', texture)
yield texture
| mit |
PsychoTV/PsychoTeam.repository | plugin.video.p2psport/default.py | 6 | 11222 | # -*- coding: utf-8 -*-
import re
import urllib2
import HTMLParser
import urllib,urlparse
import xbmcgui
import xbmcplugin
import xbmcaddon
import requests
from BeautifulSoup import BeautifulSoup as bs
from utils.webutils import *
from scrapers import *
try:
from addon.common.addon import Addon
from addon.common.net import Net
except:
print 'Failed to import script.module.addon.common'
xbmcgui.Dialog().ok("Import Failure", "Failed to import addon.common", "A component needed by P2P Sport is missing on your system", "Please visit www.tvaddons.ag.com for support")
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
params=urlparse.parse_qs(sys.argv[2][1:])
addon = Addon('plugin.video.p2psport', sys.argv)
AddonPath = addon.get_path()
def build_url(query):
return base_url + '?' + urllib.urlencode(query)
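# e.g. build_url({'mode': 'av'}) returns something like
# 'plugin://plugin.video.p2psport/?mode=av' (base_url is whatever Kodi passed
# in sys.argv[0]; the exact value shown here is illustrative).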
mode = args.get('mode', None)
my_addon = xbmcaddon.Addon()
if mode is None:
url = build_url({'mode': 'av'})
li = xbmcgui.ListItem('Arenavision.in',iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'roja'})
li = xbmcgui.ListItem('Rojadirecta.me',iconImage='http://www.rojadirecta.me/static/roja.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'ws'})
li = xbmcgui.ListItem('Livefootball.ws',iconImage='http://www.userlogos.org/files/logos/clubber/football_ws___.PNG')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'livefootballvideo.com'})
li = xbmcgui.ListItem('Livefootballvideo.com',iconImage='https://pbs.twimg.com/profile_images/3162217818/2ee4b2f728ef9867d4e1d86e17bb2ef5.jpeg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'livefooty'})
li = xbmcgui.ListItem('Livefootballol.com',iconImage='http://www.livefootballol.com/images/logo.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
# url = build_url({'mode': 'livefootF1'})
# li = xbmcgui.ListItem('Livefootballol.com (F1)',iconImage='http://www.livefootballol.com/images/logo.png')
# xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
# listitem=li, isFolder=True)
url = build_url({'mode': 'phace'})
li = xbmcgui.ListItem('Sport Channels 1',iconImage='http://cdn.streamcentral.netdna-cdn.com/images/software/acestreamlogo.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'serbplus'})
li = xbmcgui.ListItem('Sport Channels 2',iconImage='http://cdn.streamcentral.netdna-cdn.com/images/software/acestreamlogo.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'ttv', 'url':'http://livehdstreams.com/trash/ttv-list/ttv.sport.player.m3u'})
li = xbmcgui.ListItem('Sport Channels 3',iconImage='http://cdn.streamcentral.netdna-cdn.com/images/software/acestreamlogo.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': '1ttv'})
li = xbmcgui.ListItem('1torrent.tv',iconImage='http://s3.hostingkartinok.com/uploads/images/2013/06/6e4452212490ac0a66e358c97707ef77.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'ttv_sport', 'url':'http://livehdstreams.com/trash/ttv-list/ttv.sport.player.m3u'})
li = xbmcgui.ListItem('Torrent-tv.ru (Sport)',iconImage='http://addons.tvaddons.ag/cache/images/bc591d6d5ec442d4ddb43a347a8be6_icon.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'ttv_all', 'url':'http://livehdstreams.com/trash/ttv-list/ttv.sport.player.m3u'})
li = xbmcgui.ListItem('Torrent-tv.ru',iconImage='http://addons.tvaddons.ag/cache/images/bc591d6d5ec442d4ddb43a347a8be6_icon.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
# url = build_url({'mode': 'soccer188'})
# li = xbmcgui.ListItem('Soccer188',iconImage='')
# xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
# listitem=li, isFolder=True)
# url = build_url({'mode': '247'})
# li = xbmcgui.ListItem('Livesports 24/7',iconImage='http://i.imgur.com/Mv5ySt4.jpg')
# xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
# listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0]=='streamhub':
streamhub_cats()
elif mode[0]=='open_streamhub_cat':
url=params['url'][0]
open_streamhub_cat(url)
elif mode[0]=='open_streamhub_event':
url=params['url'][0]
open_streamhub_event(url)
elif mode[0]=='soccer188':
soccer188()
elif mode[0]=='play_sopc':
url=params['url'][0]
name=params['name'][0]
play_sop(url,name)
elif mode[0]=='ttv_sport':
ttv_sport()
elif mode[0]=='serbplus':
serbplus()
elif mode[0]=='play_serb':
url=params['url'][0]
name=params['name'][0]
resolve_roja(url,name)
elif mode[0]=='phace':
phace()
elif mode[0]=='247':
url = build_url({'mode': 'schedule_247'})
li = xbmcgui.ListItem('Event schedule',iconImage='http://i.imgur.com/Mv5ySt4.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
url = build_url({'mode': 'all_247'})
li = xbmcgui.ListItem('All channels',iconImage='http://i.imgur.com/Mv5ySt4.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0]=='open_247_event':
url=params['url'][0]
open_247_event(url)
elif mode[0]=='all_247':
all_live247()
elif mode[0]=='schedule_247':
schedule247()
elif mode[0]=='open_247_stream':
url='http://pilkalive.weebly.com'+params['url'][0]
name=params['name'][0]
play247(url,name)
elif mode[0]=='livefootballvideo.com':
livefoot_com()
elif mode[0]=='ttv_all':
ttv_cats()
elif mode[0]=='open_ttv_cat':
cat=params['cat'][0]
tag=params['channels'][0]
get_ttv_cat(cat,tag)
elif mode[0]=='1ttv':
one_ttv_cats()
elif mode[0]=='open_1ttv_cat':
tag=params['tag'][0]
name=params['name'][0]
open_1ttv_cat(tag,name)
elif mode[0]=='open_1ttv_channel':
url=params['url'][0]
open_1ttv_channel(url)
elif mode[0]=='ws':
livefootballws_events()
elif mode[0]=='roja':
rojadirecta_events()
elif mode[0]=='ttv':
get_ttv()
elif mode[0]=='open_ttv_stream':
url=params['url'][0]
name=params['name'][0]
open_ttv_stream(url,name)
elif mode[0]=='av':
url = build_url({'mode': 'av_schedule'})
li = xbmcgui.ListItem('[COLOR orange]Schedule / Agenda[/COLOR]',iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
for i in range(10):
url = build_url({'mode': 'av_ace','url':'av%s'%(str(i+1)), 'name':'Arenavision %s'%(i+1)})
li = xbmcgui.ListItem('Arenavision %s'%(i+1),iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
for i in range(11,13):
url = build_url({'mode': 'av_rand','url':'av%s'%(str(i+1)), 'name':'Arenavision %s'%(i)})
li = xbmcgui.ListItem('Arenavision %s'%(i),iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
for i in range(13,23):
url = build_url({'mode': 'av_sop','url':'av%s'%(str(i+1)), 'name':'Arenavision %s'%(i)})
li = xbmcgui.ListItem('Arenavision %s'%(i),iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
for i in range(23,25):
url = build_url({'mode': 'av_rand','url':'av%s'%(str(i+1)), 'name':'Arenavision %s'%(i)})
li = xbmcgui.ListItem('Arenavision %s'%(i),iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0]=='av_ace':
url='http://arenavision.in/'+params['url'][0]
name=params['name'][0]
try:
play_arena(url,name)
except:
play_arena_sop(url,name)
elif mode[0]=='av_sop':
url='http://arenavision.in/'+params['url'][0]
name=params['name'][0]
try:
play_arena_sop(url,name)
except:
play_arena(url,name)
elif mode[0]=='av_rand':
url='http://arenavision.in/'+params['url'][0]
name=params['name'][0]
try:
play_arena(url,name)
except:
play_arena_sop(url,name)
elif mode[0]=='open_roja_stream':
url='http://www.rojadirecta.me/'+params['url'][0]
name=params['name'][0]
resolve_roja(url,name)
elif mode[0]=='av_schedule':
arenavision_schedule()
elif mode[0]=='av_open':
channels=((params['channels'][0]).replace('[','').replace(']','').replace("'",'').replace('u','').replace(' ','')).split(',')
name=params['name'][0]
sources=[]
for i in range(len(channels)):
title='AV%s'%channels[i]
sources+=[title]
dialog = xbmcgui.Dialog()
index = dialog.select('Select a channel:', sources)
if index>-1:
url=sources[index]
url='http://arenavision.in/'+url.lower()
try: play_arena(url,name)
except: play_arena_sop(url,name)
elif mode[0]=='livefooty':
livefootballol()
elif mode[0]=='open_livefoot':
url='http://www.livefootballol.com'+params['url'][0]
name=params['name'][0]
get_livefoot(url,name)
elif mode[0]=='open_livefoot_stream':
url=params['url'][0]
name=params['name'][0]
play_livefoot(url,name)
elif mode[0]=='livefootF1':
livefootF1()
elif mode[0]=='open_livefoot.com_stream':
url=params['url'][0]
name=params['name'][0]
play_livefoot(url,name)
open_com_event(name,url)
elif mode[0]=='open_ws_stream':
url=params['url'][0]
livefootballws_streams(url) | gpl-2.0 |
PatrickChrist/scikit-learn | sklearn/utils/mocking.py | 267 | 2064 | import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from .testing import assert_true
from .validation import _num_samples, check_array
class ArraySlicingWrapper(object):
def __init__(self, array):
self.array = array
def __getitem__(self, aslice):
return MockDataFrame(self.array[aslice])
class MockDataFrame(object):
    # have shape and length but don't support indexing.
def __init__(self, array):
self.array = array
self.shape = array.shape
self.ndim = array.ndim
# ugly hack to make iloc work.
self.iloc = ArraySlicingWrapper(array)
def __len__(self):
return len(self.array)
def __array__(self):
# Pandas data frames also are array-like: we want to make sure that
# input validation in cross-validation does not try to call that
# method.
return self.array
class CheckingClassifier(BaseEstimator, ClassifierMixin):
"""Dummy classifier to test pipelining and meta-estimators.
Checks some property of X and y in fit / predict.
This allows testing whether pipelines / cross-validation or metaestimators
changed the input.
"""
def __init__(self, check_y=None,
check_X=None, foo_param=0):
self.check_y = check_y
self.check_X = check_X
self.foo_param = foo_param
def fit(self, X, y):
assert_true(len(X) == len(y))
if self.check_X is not None:
assert_true(self.check_X(X))
if self.check_y is not None:
assert_true(self.check_y(y))
self.classes_ = np.unique(check_array(y, ensure_2d=False,
allow_nd=True))
return self
def predict(self, T):
if self.check_X is not None:
assert_true(self.check_X(T))
return self.classes_[np.zeros(_num_samples(T), dtype=np.int)]
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
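# A hedged usage sketch (assumes numpy is available; the data below is
# illustrative only):
#
#   import numpy as np
#   X, y = np.ones((10, 2)), np.arange(10) % 2
#   clf = CheckingClassifier(check_X=lambda a: a.ndim == 2)
#   clf.fit(X, y).predict(X)   # the check passes because X is still 2-D
#   clf.score(X, y)            # 0.0, since foo_param <= 1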
| bsd-3-clause |
youdonghai/intellij-community | python/lib/Lib/encodings/iso2022_jp_1.py | 816 | 1061 | #
# iso2022_jp_1.py: Python Unicode Codec for ISO2022_JP_1
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_1')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_jp_1',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
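# A hedged round-trip sketch (the codec is registered as 'iso2022_jp_1' by the
# encodings package search function):
#
#   u'\u65e5\u672c\u8a9e'.encode('iso2022_jp_1').decode('iso2022_jp_1')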
| apache-2.0 |
noroutine/ansible | contrib/inventory/nsot.py | 117 | 9825 | #!/usr/bin/env python
'''
nsot
====
Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox
Features
--------
* Define host groups in form of NSoT device attribute criteria
* All parameters defined by the spec as of 2015-09-05 are supported.
+ ``--list``: Returns JSON hash of host groups -> hosts and top-level
``_meta`` -> ``hostvars`` which correspond to all device attributes.
Group vars can be specified in the YAML configuration, noted below.
+ ``--host <hostname>``: Returns JSON hash where every item is a device
attribute.
* In addition to all attributes assigned to the resource being returned, the
  script will also append ``site_id`` and ``id`` as facts to utilize.
Configuration
-------------
Since it'd be annoying and failure prone to guess where your configuration
file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it.
This file should adhere to the YAML spec. Every top-level variable must be a
desired Ansible group name, hashed with a single 'query' item to define the NSoT
attribute query.
Queries follow the normal NSoT query syntax, `shown here`_
.. _shown here: https://github.com/dropbox/pynsot#set-queries
.. code:: yaml
routers:
query: 'deviceType=ROUTER'
vars:
a: b
c: d
juniper_fw:
query: 'deviceType=FIREWALL manufacturer=JUNIPER'
not_f10:
query: '-manufacturer=FORCE10'
The inventory will automatically use your ``.pynsotrc`` just as normal pynsot
from the CLI would, so make sure that's configured appropriately.
.. note::
Attributes I'm showing above are influenced from ones that the Trigger
project likes. As is the spirit of NSoT, use whichever attributes work best
for your workflow.
If config file is blank or absent, the following default groups will be
created:
* ``routers``: deviceType=ROUTER
* ``switches``: deviceType=SWITCH
* ``firewalls``: deviceType=FIREWALL
These are likely not useful for everyone so please use the configuration. :)
.. note::
    By default, resources will only be returned for the default site
    configured in your ``~/.pynsotrc``.
If you want to specify, add an extra key under the group for ``site: n``.
Output Examples
---------------
Here are some examples shown from just calling the command directly::
$ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.'
{
"routers": {
"hosts": [
"test1.example.com"
],
"vars": {
"cool_level": "very",
"group": "routers"
}
},
"firewalls": {
"hosts": [
"test2.example.com"
],
"vars": {
"cool_level": "enough",
"group": "firewalls"
}
},
"_meta": {
"hostvars": {
"test2.example.com": {
"make": "SRX",
"site_id": 1,
"id": 108
},
"test1.example.com": {
"make": "MX80",
"site_id": 1,
"id": 107
}
}
},
"rtr_and_fw": {
"hosts": [
"test1.example.com",
"test2.example.com"
],
"vars": {}
}
}
$ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.'
{
"make": "MX80",
"site_id": 1,
"id": 107
}
'''
from __future__ import print_function
import sys
import os
import pkg_resources
import argparse
import json
import yaml
from textwrap import dedent
from pynsot.client import get_api_client
from pynsot.app import HttpServerError
from click.exceptions import UsageError
from six import string_types
def warning(*objs):
print("WARNING: ", *objs, file=sys.stderr)
class NSoTInventory(object):
'''NSoT Client object for gather inventory'''
def __init__(self):
self.config = dict()
config_env = os.environ.get('NSOT_INVENTORY_CONFIG')
if config_env:
try:
config_file = os.path.abspath(config_env)
except IOError: # If file non-existent, use default config
self._config_default()
except Exception as e:
sys.exit('%s\n' % e)
with open(config_file) as f:
try:
self.config.update(yaml.safe_load(f))
except TypeError: # If empty file, use default config
warning('Empty config file')
self._config_default()
except Exception as e:
sys.exit('%s\n' % e)
else: # Use defaults if env var missing
self._config_default()
self.groups = self.config.keys()
self.client = get_api_client()
self._meta = {'hostvars': dict()}
def _config_default(self):
default_yaml = '''
---
routers:
query: deviceType=ROUTER
switches:
query: deviceType=SWITCH
firewalls:
query: deviceType=FIREWALL
'''
self.config = yaml.safe_load(dedent(default_yaml))
def do_list(self):
'''Direct callback for when ``--list`` is provided
Relies on the configuration generated from init to run
_inventory_group()
'''
inventory = dict()
for group, contents in self.config.items():
group_response = self._inventory_group(group, contents)
inventory.update(group_response)
inventory.update({'_meta': self._meta})
return json.dumps(inventory)
def do_host(self, host):
return json.dumps(self._hostvars(host))
def _hostvars(self, host):
'''Return dictionary of all device attributes
Depending on number of devices in NSoT, could be rather slow since this
has to request every device resource to filter through
'''
device = [i for i in self.client.devices.get()
if host in i['hostname']][0]
attributes = device['attributes']
attributes.update({'site_id': device['site_id'], 'id': device['id']})
return attributes
def _inventory_group(self, group, contents):
'''Takes a group and returns inventory for it as dict
:param group: Group name
:type group: str
:param contents: The contents of the group's YAML config
:type contents: dict
contents param should look like::
{
'query': 'xx',
'vars':
'a': 'b'
}
Will return something like::
{ group: {
hosts: [],
vars: {},
}
'''
query = contents.get('query')
hostvars = contents.get('vars', dict())
site = contents.get('site', dict())
obj = {group: dict()}
obj[group]['hosts'] = []
obj[group]['vars'] = hostvars
try:
assert isinstance(query, string_types)
except:
sys.exit('ERR: Group queries must be a single string\n'
' Group: %s\n'
' Query: %s\n' % (group, query)
)
try:
if site:
site = self.client.sites(site)
devices = site.devices.query.get(query=query)
else:
devices = self.client.devices.query.get(query=query)
except HttpServerError as e:
if '500' in str(e.response):
_site = 'Correct site id?'
_attr = 'Queried attributes actually exist?'
questions = _site + '\n' + _attr
sys.exit('ERR: 500 from server.\n%s' % questions)
else:
raise
except UsageError:
sys.exit('ERR: Could not connect to server. Running?')
# Would do a list comprehension here, but would like to save code/time
# and also acquire attributes in this step
for host in devices:
# Iterate through each device that matches query, assign hostname
# to the group's hosts array and then use this single iteration as
# a chance to update self._meta which will be used in the final
# return
hostname = host['hostname']
obj[group]['hosts'].append(hostname)
attributes = host['attributes']
attributes.update({'site_id': host['site_id'], 'id': host['id']})
self._meta['hostvars'].update({hostname: attributes})
return obj
def parse_args():
desc = __doc__.splitlines()[4] # Just to avoid being redundant
# Establish parser with options and error out if no action provided
parser = argparse.ArgumentParser(
description=desc,
conflict_handler='resolve',
)
# Arguments
#
# Currently accepting (--list | -l) and (--host | -h)
# These must not be allowed together
parser.add_argument(
'--list', '-l',
help='Print JSON object containing hosts to STDOUT',
action='store_true',
dest='list_', # Avoiding syntax highlighting for list
)
parser.add_argument(
'--host', '-h',
help='Print JSON object containing hostvars for <host>',
action='store',
)
args = parser.parse_args()
if not args.list_ and not args.host: # Require at least one option
parser.exit(status=1, message='No action requested')
if args.list_ and args.host: # Do not allow multiple options
parser.exit(status=1, message='Too many actions requested')
return args
def main():
'''Set up argument handling and callback routing'''
args = parse_args()
client = NSoTInventory()
# Callback condition
if args.list_:
print(client.do_list())
elif args.host:
print(client.do_host(args.host))
if __name__ == '__main__':
main()
| gpl-3.0 |
mSenyor/sl4a | python/src/Lib/lib-tk/tkMessageBox.py | 52 | 3635 | # tk common message boxes
#
# this module provides an interface to the native message boxes
# available in Tk 4.2 and newer.
#
# written by Fredrik Lundh, May 1997
#
#
# options (all have default values):
#
# - default: which button to make default (one of the reply codes)
#
# - icon: which icon to display (see below)
#
# - message: the message to display
#
# - parent: which window to place the dialog on top of
#
# - title: dialog title
#
# - type: dialog type; that is, which buttons to display (see below)
#
from tkCommonDialog import Dialog
#
# constants
# icons
ERROR = "error"
INFO = "info"
QUESTION = "question"
WARNING = "warning"
# types
ABORTRETRYIGNORE = "abortretryignore"
OK = "ok"
OKCANCEL = "okcancel"
RETRYCANCEL = "retrycancel"
YESNO = "yesno"
YESNOCANCEL = "yesnocancel"
# replies
ABORT = "abort"
RETRY = "retry"
IGNORE = "ignore"
OK = "ok"
CANCEL = "cancel"
YES = "yes"
NO = "no"
#
# message dialog class
class Message(Dialog):
"A message box"
command = "tk_messageBox"
#
# convenience stuff
# Rename _icon and _type options to allow overriding them in options
def _show(title=None, message=None, _icon=None, _type=None, **options):
if _icon and "icon" not in options: options["icon"] = _icon
if _type and "type" not in options: options["type"] = _type
if title: options["title"] = title
if message: options["message"] = message
res = Message(**options).show()
# In some Tcl installations, Tcl converts yes/no into a boolean
if isinstance(res, bool):
if res: return YES
return NO
return res
def showinfo(title=None, message=None, **options):
"Show an info message"
return _show(title, message, INFO, OK, **options)
def showwarning(title=None, message=None, **options):
"Show a warning message"
return _show(title, message, WARNING, OK, **options)
def showerror(title=None, message=None, **options):
"Show an error message"
return _show(title, message, ERROR, OK, **options)
def askquestion(title=None, message=None, **options):
"Ask a question"
return _show(title, message, QUESTION, YESNO, **options)
def askokcancel(title=None, message=None, **options):
"Ask if operation should proceed; return true if the answer is ok"
s = _show(title, message, QUESTION, OKCANCEL, **options)
return s == OK
def askyesno(title=None, message=None, **options):
"Ask a question; return true if the answer is yes"
s = _show(title, message, QUESTION, YESNO, **options)
return s == YES
def askyesnocancel(title=None, message=None, **options):
"Ask a question; return true if the answer is yes, None if cancelled."
s = _show(title, message, QUESTION, YESNOCANCEL, **options)
# s might be a Tcl index object, so convert it to a string
s = str(s)
if s == CANCEL:
return None
return s == YES
def askretrycancel(title=None, message=None, **options):
"Ask if operation should be retried; return true if the answer is yes"
s = _show(title, message, WARNING, RETRYCANCEL, **options)
return s == RETRY
# --------------------------------------------------------------------
# test stuff
if __name__ == "__main__":
print "info", showinfo("Spam", "Egg Information")
print "warning", showwarning("Spam", "Egg Warning")
print "error", showerror("Spam", "Egg Alert")
print "question", askquestion("Spam", "Question?")
print "proceed", askokcancel("Spam", "Proceed?")
print "yes/no", askyesno("Spam", "Got it?")
print "yes/no/cancel", askyesnocancel("Spam", "Want it?")
print "try again", askretrycancel("Spam", "Try again?")
| apache-2.0 |
sharhar/USB-Thing | UpdaterFiles/Lib/python-3.5.1.amd64/Lib/pkgutil.py | 9 | 21201 | """Utilities to support packages."""
from functools import singledispatch as simplegeneric
import importlib
import importlib.util
import importlib.machinery
import os
import os.path
import sys
from types import ModuleType
import warnings
__all__ = [
'get_importer', 'iter_importers', 'get_loader', 'find_loader',
'walk_packages', 'iter_modules', 'get_data',
'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
]
def _get_spec(finder, name):
"""Return the finder-specific module spec."""
# Works with legacy finders.
try:
find_spec = finder.find_spec
except AttributeError:
loader = finder.find_module(name)
if loader is None:
return None
return importlib.util.spec_from_loader(name, loader)
else:
return find_spec(name)
def read_code(stream):
# This helper is needed in order for the PEP 302 emulation to
# correctly handle compiled files
import marshal
magic = stream.read(4)
if magic != importlib.util.MAGIC_NUMBER:
return None
stream.read(8) # Skip timestamp and size
return marshal.load(stream)
def walk_packages(path=None, prefix='', onerror=None):
"""Yields (module_loader, name, ispkg) for all modules recursively
on path, or, if path is None, all accessible modules.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
Note that this function must import all *packages* (NOT all
modules!) on the given path, in order to access the __path__
attribute to find submodules.
'onerror' is a function which gets called with one argument (the
name of the package which was being imported) if any exception
occurs while trying to import a package. If no onerror function is
supplied, ImportErrors are caught and ignored, while all other
exceptions are propagated, terminating the search.
Examples:
# list all modules python can access
walk_packages()
# list all submodules of ctypes
walk_packages(ctypes.__path__, ctypes.__name__+'.')
"""
def seen(p, m={}):
if p in m:
return True
m[p] = True
for importer, name, ispkg in iter_modules(path, prefix):
yield importer, name, ispkg
if ispkg:
try:
__import__(name)
except ImportError:
if onerror is not None:
onerror(name)
except Exception:
if onerror is not None:
onerror(name)
else:
raise
else:
path = getattr(sys.modules[name], '__path__', None) or []
# don't traverse path items we've seen before
path = [p for p in path if not seen(p)]
yield from walk_packages(path, name+'.', onerror)
def iter_modules(path=None, prefix=''):
"""Yields (module_loader, name, ispkg) for all submodules on path,
or, if path is None, all top-level modules on sys.path.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
"""
if path is None:
importers = iter_importers()
else:
importers = map(get_importer, path)
yielded = {}
for i in importers:
for name, ispkg in iter_importer_modules(i, prefix):
if name not in yielded:
yielded[name] = 1
yield i, name, ispkg
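# A hedged usage sketch (standard library only): list the importable top-level
# modules and whether each one is a package.
#
#   for finder, name, ispkg in iter_modules():
#       print(name, ispkg)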
@simplegeneric
def iter_importer_modules(importer, prefix=''):
if not hasattr(importer, 'iter_modules'):
return []
return importer.iter_modules(prefix)
# Implement a file walker for the normal importlib path hook
def _iter_file_finder_modules(importer, prefix=''):
if importer.path is None or not os.path.isdir(importer.path):
return
yielded = {}
import inspect
try:
filenames = os.listdir(importer.path)
except OSError:
# ignore unreadable directories like import does
filenames = []
filenames.sort() # handle packages before same-named modules
for fn in filenames:
modname = inspect.getmodulename(fn)
if modname=='__init__' or modname in yielded:
continue
path = os.path.join(importer.path, fn)
ispkg = False
if not modname and os.path.isdir(path) and '.' not in fn:
modname = fn
try:
dircontents = os.listdir(path)
except OSError:
# ignore unreadable directories like import does
dircontents = []
for fn in dircontents:
subname = inspect.getmodulename(fn)
if subname=='__init__':
ispkg = True
break
else:
continue # not a package
if modname and '.' not in modname:
yielded[modname] = 1
yield prefix + modname, ispkg
iter_importer_modules.register(
importlib.machinery.FileFinder, _iter_file_finder_modules)
def _import_imp():
global imp
with warnings.catch_warnings():
warnings.simplefilter('ignore', PendingDeprecationWarning)
imp = importlib.import_module('imp')
class ImpImporter:
"""PEP 302 Importer that wraps Python's "classic" import algorithm
ImpImporter(dirname) produces a PEP 302 importer that searches that
directory. ImpImporter(None) produces a PEP 302 importer that searches
the current sys.path, plus any modules that are frozen or built-in.
Note that ImpImporter does not currently support being used by placement
on sys.meta_path.
"""
def __init__(self, path=None):
global imp
warnings.warn("This emulation is deprecated, use 'importlib' instead",
DeprecationWarning)
_import_imp()
self.path = path
def find_module(self, fullname, path=None):
# Note: we ignore 'path' argument since it is only used via meta_path
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [os.path.realpath(self.path)]
try:
file, filename, etc = imp.find_module(subname, path)
except ImportError:
return None
return ImpLoader(fullname, file, filename, etc)
def iter_modules(self, prefix=''):
if self.path is None or not os.path.isdir(self.path):
return
yielded = {}
import inspect
try:
filenames = os.listdir(self.path)
except OSError:
# ignore unreadable directories like import does
filenames = []
filenames.sort() # handle packages before same-named modules
for fn in filenames:
modname = inspect.getmodulename(fn)
if modname=='__init__' or modname in yielded:
continue
path = os.path.join(self.path, fn)
ispkg = False
if not modname and os.path.isdir(path) and '.' not in fn:
modname = fn
try:
dircontents = os.listdir(path)
except OSError:
# ignore unreadable directories like import does
dircontents = []
for fn in dircontents:
subname = inspect.getmodulename(fn)
if subname=='__init__':
ispkg = True
break
else:
continue # not a package
if modname and '.' not in modname:
yielded[modname] = 1
yield prefix + modname, ispkg
class ImpLoader:
"""PEP 302 Loader that wraps Python's "classic" import algorithm
"""
code = source = None
def __init__(self, fullname, file, filename, etc):
warnings.warn("This emulation is deprecated, use 'importlib' instead",
DeprecationWarning)
_import_imp()
self.file = file
self.filename = filename
self.fullname = fullname
self.etc = etc
def load_module(self, fullname):
self._reopen()
try:
mod = imp.load_module(fullname, self.file, self.filename, self.etc)
finally:
if self.file:
self.file.close()
# Note: we don't set __loader__ because we want the module to look
# normal; i.e. this is just a wrapper for standard import machinery
return mod
def get_data(self, pathname):
with open(pathname, "rb") as file:
return file.read()
def _reopen(self):
if self.file and self.file.closed:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self.file = open(self.filename, 'r')
elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
self.file = open(self.filename, 'rb')
def _fix_name(self, fullname):
if fullname is None:
fullname = self.fullname
elif fullname != self.fullname:
raise ImportError("Loader for module %s cannot handle "
"module %s" % (self.fullname, fullname))
return fullname
def is_package(self, fullname):
fullname = self._fix_name(fullname)
return self.etc[2]==imp.PKG_DIRECTORY
def get_code(self, fullname=None):
fullname = self._fix_name(fullname)
if self.code is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
source = self.get_source(fullname)
self.code = compile(source, self.filename, 'exec')
elif mod_type==imp.PY_COMPILED:
self._reopen()
try:
self.code = read_code(self.file)
finally:
self.file.close()
elif mod_type==imp.PKG_DIRECTORY:
self.code = self._get_delegate().get_code()
return self.code
def get_source(self, fullname=None):
fullname = self._fix_name(fullname)
if self.source is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self._reopen()
try:
self.source = self.file.read()
finally:
self.file.close()
elif mod_type==imp.PY_COMPILED:
if os.path.exists(self.filename[:-1]):
with open(self.filename[:-1], 'r') as f:
self.source = f.read()
elif mod_type==imp.PKG_DIRECTORY:
self.source = self._get_delegate().get_source()
return self.source
def _get_delegate(self):
finder = ImpImporter(self.filename)
spec = _get_spec(finder, '__init__')
return spec.loader
def get_filename(self, fullname=None):
fullname = self._fix_name(fullname)
mod_type = self.etc[2]
if mod_type==imp.PKG_DIRECTORY:
return self._get_delegate().get_filename()
elif mod_type in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
return self.filename
return None
try:
import zipimport
from zipimport import zipimporter
def iter_zipimport_modules(importer, prefix=''):
dirlist = sorted(zipimport._zip_directory_cache[importer.archive])
_prefix = importer.prefix
plen = len(_prefix)
yielded = {}
import inspect
for fn in dirlist:
if not fn.startswith(_prefix):
continue
fn = fn[plen:].split(os.sep)
if len(fn)==2 and fn[1].startswith('__init__.py'):
if fn[0] not in yielded:
yielded[fn[0]] = 1
yield fn[0], True
if len(fn)!=1:
continue
modname = inspect.getmodulename(fn[0])
if modname=='__init__':
continue
if modname and '.' not in modname and modname not in yielded:
yielded[modname] = 1
yield prefix + modname, False
iter_importer_modules.register(zipimporter, iter_zipimport_modules)
except ImportError:
pass
def get_importer(path_item):
"""Retrieve a PEP 302 importer for the given path item
The returned importer is cached in sys.path_importer_cache
if it was newly created by a path hook.
The cache (or part of it) can be cleared manually if a
rescan of sys.path_hooks is necessary.
"""
try:
importer = sys.path_importer_cache[path_item]
except KeyError:
for path_hook in sys.path_hooks:
try:
importer = path_hook(path_item)
sys.path_importer_cache.setdefault(path_item, importer)
break
except ImportError:
pass
else:
importer = None
return importer
def iter_importers(fullname=""):
"""Yield PEP 302 importers for the given module name
If fullname contains a '.', the importers will be for the package
containing fullname, otherwise they will be all registered top level
importers (i.e. those on both sys.meta_path and sys.path_hooks).
If the named module is in a package, that package is imported as a side
effect of invoking this function.
If no module name is specified, all top level importers are produced.
"""
if fullname.startswith('.'):
msg = "Relative module name {!r} not supported".format(fullname)
raise ImportError(msg)
if '.' in fullname:
# Get the containing package's __path__
pkg_name = fullname.rpartition(".")[0]
pkg = importlib.import_module(pkg_name)
path = getattr(pkg, '__path__', None)
if path is None:
return
else:
yield from sys.meta_path
path = sys.path
for item in path:
yield get_importer(item)
def get_loader(module_or_name):
"""Get a PEP 302 "loader" object for module_or_name
Returns None if the module cannot be found or imported.
If the named module is not already imported, its containing package
(if any) is imported, in order to establish the package __path__.
"""
if module_or_name in sys.modules:
module_or_name = sys.modules[module_or_name]
if module_or_name is None:
return None
if isinstance(module_or_name, ModuleType):
module = module_or_name
loader = getattr(module, '__loader__', None)
if loader is not None:
return loader
if getattr(module, '__spec__', None) is None:
return None
fullname = module.__name__
else:
fullname = module_or_name
return find_loader(fullname)
def find_loader(fullname):
"""Find a PEP 302 "loader" object for fullname
This is a backwards compatibility wrapper around
importlib.util.find_spec that converts most failures to ImportError
and only returns the loader rather than the full spec
"""
if fullname.startswith('.'):
msg = "Relative module name {!r} not supported".format(fullname)
raise ImportError(msg)
try:
spec = importlib.util.find_spec(fullname)
except (ImportError, AttributeError, TypeError, ValueError) as ex:
# This hack fixes an impedance mismatch between pkgutil and
# importlib, where the latter raises other errors for cases where
# pkgutil previously raised ImportError
msg = "Error while finding loader for {!r} ({}: {})"
raise ImportError(msg.format(fullname, type(ex), ex)) from ex
return spec.loader if spec is not None else None
def extend_path(path, name):
"""Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
    path, regardless of whether they exist on the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
"""
if not isinstance(path, list):
# This could happen e.g. when this is called from inside a
# frozen package. Return the path unchanged in that case.
return path
sname_pkg = name + ".pkg"
path = path[:] # Start with a copy of the existing path
parent_package, _, final_name = name.rpartition('.')
if parent_package:
try:
search_path = sys.modules[parent_package].__path__
except (KeyError, AttributeError):
# We can't do anything: find_loader() returns None when
# passed a dotted name.
return path
else:
search_path = sys.path
for dir in search_path:
if not isinstance(dir, str):
continue
finder = get_importer(dir)
if finder is not None:
portions = []
if hasattr(finder, 'find_spec'):
spec = finder.find_spec(final_name)
if spec is not None:
portions = spec.submodule_search_locations or []
# Is this finder PEP 420 compliant?
elif hasattr(finder, 'find_loader'):
_, portions = finder.find_loader(final_name)
for portion in portions:
# XXX This may still add duplicate entries to path on
# case-insensitive filesystems
if portion not in path:
path.append(portion)
# XXX Is this the right thing for subpackages like zope.app?
# It looks for a file named "zope.app.pkg"
pkgfile = os.path.join(dir, sname_pkg)
if os.path.isfile(pkgfile):
try:
f = open(pkgfile)
except OSError as msg:
sys.stderr.write("Can't open %s: %s\n" %
(pkgfile, msg))
else:
with f:
for line in f:
line = line.rstrip('\n')
if not line or line.startswith('#'):
continue
path.append(line) # Don't check for existence!
return path
def get_data(package, resource):
"""Get a resource from a package.
This is a wrapper round the PEP 302 loader get_data API. The package
argument should be the name of a package, in standard module format
(foo.bar). The resource argument should be in the form of a relative
filename, using '/' as the path separator. The parent directory name '..'
is not allowed, and nor is a rooted name (starting with a '/').
The function returns a binary string, which is the contents of the
specified resource.
For packages located in the filesystem, which have already been imported,
this is the rough equivalent of
d = os.path.dirname(sys.modules[package].__file__)
data = open(os.path.join(d, resource), 'rb').read()
If the package cannot be located or loaded, or it uses a PEP 302 loader
which does not support get_data(), then None is returned.
"""
spec = importlib.util.find_spec(package)
if spec is None:
return None
loader = spec.loader
if loader is None or not hasattr(loader, 'get_data'):
return None
# XXX needs test
mod = (sys.modules.get(package) or
importlib._bootstrap._load(spec))
if mod is None or not hasattr(mod, '__file__'):
return None
# Modify the resource name to be compatible with the loader.get_data
# signature - an os.path format "filename" starting with the dirname of
# the package's __file__
parts = resource.split('/')
parts.insert(0, os.path.dirname(mod.__file__))
resource_name = os.path.join(*parts)
return loader.get_data(resource_name)
| apache-2.0 |
cvium/Flexget | flexget/utils/sqlalchemy_utils.py | 18 | 4634 | """
Miscellaneous SQLAlchemy helpers.
"""
from __future__ import unicode_literals, division, absolute_import
import logging
import sqlalchemy
from sqlalchemy import ColumnDefault, Sequence, Index
from sqlalchemy.types import TypeEngine
from sqlalchemy.schema import Table, MetaData
from sqlalchemy.exc import NoSuchTableError, OperationalError
log = logging.getLogger('sql_utils')
def table_exists(name, session):
"""
Use SQLAlchemy reflect to check table existences.
:param string name: Table name to check
:param Session session: Session to use
:return: True if table exists, False otherwise
:rtype: bool
"""
try:
table_schema(name, session)
except NoSuchTableError:
return False
return True
def table_schema(name, session):
"""
:returns: Table schema using SQLAlchemy reflect as it currently exists in the db
:rtype: Table
"""
return Table(name, MetaData(bind=session.bind), autoload=True)
def table_columns(table, session):
"""
:param string table: Name of table or table schema
:param Session session: SQLAlchemy Session
:returns: List of column names in the table or empty list
"""
res = []
if isinstance(table, basestring):
table = table_schema(table, session)
for column in table.columns:
res.append(column.name)
return res
def table_add_column(table, name, col_type, session, default=None):
"""Adds a column to a table
.. warning:: Uses raw statements, probably needs to be changed in
order to work on other databases besides SQLite
:param string table: Table to add column to (can be name or schema)
:param string name: Name of new column to add
:param col_type: The sqlalchemy column type to add
:param Session session: SQLAlchemy Session to do the alteration
:param default: Default value for the created column (optional)
"""
if isinstance(table, basestring):
table = table_schema(table, session)
if name in table_columns(table, session):
# If the column already exists, we don't have to do anything.
return
# Add the column to the table
if not isinstance(col_type, TypeEngine):
# If we got a type class instead of an instance of one, instantiate it
col_type = col_type()
type_string = session.bind.engine.dialect.type_compiler.process(col_type)
statement = 'ALTER TABLE %s ADD %s %s' % (table.name, name, type_string)
session.execute(statement)
# Update the table with the default value if given
if default is not None:
# Get the new schema with added column
table = table_schema(table.name, session)
if not isinstance(default, (ColumnDefault, Sequence)):
default = ColumnDefault(default)
default._set_parent(getattr(table.c, name))
statement = table.update().values({name: default.execute(bind=session.bind)})
session.execute(statement)
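# A minimal, hedged usage sketch (assumes an in-memory SQLite engine; the table
# and column names are illustrative, not part of Flexget itself):
#
#   from sqlalchemy import create_engine, Table, Column, Integer, MetaData, Unicode
#   from sqlalchemy.orm import sessionmaker
#
#   engine = create_engine('sqlite://')
#   meta = MetaData()
#   Table('demo', meta, Column('id', Integer, primary_key=True))
#   meta.create_all(engine)
#   session = sessionmaker(bind=engine)()
#   table_add_column('demo', 'note', Unicode, session)
#   assert 'note' in table_columns('demo', session)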
def drop_tables(names, session):
"""Takes a list of table names and drops them from the database if they exist."""
metadata = MetaData()
metadata.reflect(bind=session.bind)
for table in metadata.sorted_tables:
if table.name in names:
table.drop()
def get_index_by_name(table, name):
"""
Find declaratively defined index from table by name
:param table: Table object
:param string name: Name of the index to get
:return: Index object
"""
for index in table.indexes:
if index.name == name:
return index
def create_index(table_name, session, *column_names):
"""
Creates an index on specified `columns` in `table_name`
:param table_name: Name of table to create the index on.
:param session: Session object which should be used
:param column_names: The names of the columns that should belong to this index.
"""
index_name = '_'.join(['ix', table_name] + list(column_names))
table = table_schema(table_name, session)
columns = [getattr(table.c, column) for column in column_names]
try:
Index(index_name, *columns).create(bind=session.bind)
except OperationalError:
log.debug('Error creating index.', exc_info=True)
class ContextSession(sqlalchemy.orm.Session):
""":class:`sqlalchemy.orm.Session` which can be used as context manager"""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type is None:
self.commit()
else:
self.rollback()
finally:
self.close()
| mit |
SolaWing/ycmd | cpp/ycm/tests/gmock/gtest/test/gtest_uninitialized_test.py | 2901 | 2480 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
if not condition:
raise AssertionError
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
# Verifies that 'command' exits with code 1.
p = gtest_test_utils.Subprocess(command)
Assert(p.exited)
AssertEq(1, p.exit_code)
Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-3.0 |
GeoscienceAustralia/PyRate | setup.py | 1 | 4217 | # This Python module is part of the PyRate software package.
#
# Copyright 2020 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
from setuptools.command.test import test as TestCommand
from subprocess import check_output
import platform
import setuptools
__version__ = "0.5.0"
# Get requirements (and dev requirements for testing) from requirements
# txt files. Also ensure we are using correct GDAL version.
with open('requirements.txt') as f:
requirements = f.read().splitlines()
with open('requirements-test.txt') as f:
test_requirements = f.read().splitlines()
with open('requirements-dev.txt') as f:
dev_requirements = f.read().splitlines()
if platform.system() in 'Windows':
GDAL_VERSION = check_output(["gdalinfo", "--version"]).decode(encoding="utf-8").strip().split(" ")[1][:-1]
else:
GDAL_VERSION = check_output(["gdal-config", "--version"]).decode(encoding="utf-8").split('\n')[0]
requirements = [r + '=={GDAL_VERSION}'.format(GDAL_VERSION=GDAL_VERSION)
if r == 'GDAL' else r for r in requirements]
setup_requirements = [r for r in requirements if "numpy==" in r]
class PyTest(TestCommand, object):
def initialize_options(self):
super(PyTest, self).initialize_options()
self.pytest_args = []
def finalize_options(self):
super(PyTest, self).finalize_options()
self.test_suite = True
self.test_args = []
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
exit(pytest.main(self.pytest_args))
readme = open('README.rst').read()
doclink = """
Documentation
-------------
The full documentation is at http://geoscienceaustralia.github.io/PyRate/"""
history = open('docs/history.rst').read().replace('.. :changelog:', '')
setup(
name='Py-Rate',
version=__version__,
license="Apache Software License 2.0",
description='A Python tool for estimating velocity and cumulative displacement '
'time-series from Interferometric Synthetic Aperture Radar (InSAR) data.',
long_description=readme + '\n\n' + doclink + '\n\n' + history,
author='Geoscience Australia InSAR team',
author_email='[email protected]',
url='https://github.com/GeoscienceAustralia/PyRate',
download_url='https://github.com/GeoscienceAustralia/PyRate/archive/'+__version__+'.tar.gz',
packages=setuptools.find_packages(),
package_dir={'PyRate': 'pyrate'},
package_data={
'utils': ['colourmap.txt']
},
scripts=['scripts/gdal_calc_local.py'],
entry_points={
'console_scripts': [
'pyrate = pyrate.main:main'
]
},
setup_requires=setup_requirements,
install_requires=requirements,
extras_require={
'dev': dev_requirements
},
tests_require=test_requirements,
zip_safe=False,
keywords='PyRate, Python, InSAR, Geodesy, Remote Sensing, Image Processing',
classifiers=[
'Development Status :: 4 - Beta',
"Operating System :: POSIX",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Information Analysis"
],
cmdclass={
'test': PyTest,
}
)
| apache-2.0 |
shsingh/ansible | lib/ansible/cli/arguments/option_helpers.py | 14 | 17027 | # Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import operator
import argparse
import os
import os.path
import sys
import time
import yaml
import ansible
from ansible import constants as C
from ansible.module_utils._text import to_native
from ansible.release import __version__
from ansible.utils.path import unfrackpath
#
# Special purpose OptionParsers
#
class SortingHelpFormatter(argparse.HelpFormatter):
def add_arguments(self, actions):
actions = sorted(actions, key=operator.attrgetter('option_strings'))
super(SortingHelpFormatter, self).add_arguments(actions)
class AnsibleVersion(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ansible_version = to_native(version(getattr(parser, 'prog')))
print(ansible_version)
parser.exit()
class UnrecognizedArgument(argparse.Action):
def __init__(self, option_strings, dest, const=True, default=None, required=False, help=None, metavar=None, nargs=0):
super(UnrecognizedArgument, self).__init__(option_strings=option_strings, dest=dest, nargs=nargs, const=const,
default=default, required=required, help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.error('unrecognized arguments: %s' % option_string)
class PrependListAction(argparse.Action):
"""A near clone of ``argparse._AppendAction``, but designed to prepend list values
instead of appending.
"""
def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None,
choices=None, required=False, help=None, metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != argparse.OPTIONAL:
raise ValueError('nargs must be %r to supply const' % argparse.OPTIONAL)
super(PrependListAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar
)
def __call__(self, parser, namespace, values, option_string=None):
items = copy.copy(ensure_value(namespace, self.dest, []))
items[0:0] = values
setattr(namespace, self.dest, items)
def ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
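# Illustrative sketch (not part of the original module): PrependListAction means
# that values given later on the command line end up *earlier* in the parsed
# list, so later -M/--module-path entries take precedence. The throwaway parser
# below exists only to demonstrate that ordering.
def _prepend_list_action_demo():
    demo_parser = argparse.ArgumentParser()
    demo_parser.add_argument('-M', dest='paths', nargs=1, action=PrependListAction)
    ns = demo_parser.parse_args(['-M', '/first', '-M', '/second'])
    # '/second' was given last but is prepended, so it sorts to the front.
    assert ns.paths == ['/second', '/first']
    return ns.paths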
#
# Callbacks to validate and normalize Options
#
def unfrack_path(pathsep=False):
"""Turn an Option's data into a single path in Ansible locations"""
def inner(value):
if pathsep:
return [unfrackpath(x) for x in value.split(os.pathsep) if x]
if value == '-':
return value
return unfrackpath(value)
return inner
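# Example (illustrative, not from the original source): unfrack_path(pathsep=True)
# turns a value such as '~/library:./other' into a list with one normalized,
# expanded absolute path per os.pathsep-separated entry, while unfrack_path()('-')
# passes the literal '-' straight through (conventionally meaning stdin/stdout).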
def _git_repo_info(repo_path):
""" returns a string containing git branch, commit id and commit date """
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
# There is a possibility the .git file to have an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
with open(os.path.join(repo_path, "HEAD")) as f:
line = f.readline().rstrip("\n")
if line.startswith("ref:"):
branch_path = os.path.join(repo_path, line[5:])
else:
branch_path = None
if branch_path and os.path.exists(branch_path):
branch = '/'.join(line.split('/')[2:])
with open(branch_path) as f:
commit = f.readline()[:10]
else:
# detached HEAD
commit = line[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = _git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
with open(submodules) as f:
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info = _git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
return result
def version(prog=None):
""" return ansible version """
if prog:
result = " ".join((prog, __version__))
else:
result = __version__
gitinfo = _gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result += "\n config file = %s" % C.CONFIG_FILE
if C.DEFAULT_MODULE_PATH is None:
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
result = result + "\n configured module search path = %s" % cpath
result = result + "\n ansible python module location = %s" % ':'.join(ansible.__path__)
result = result + "\n executable location = %s" % sys.argv[0]
result = result + "\n python version = %s" % ''.join(sys.version.splitlines())
return result
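# Illustrative output sketch for version() (all values made up):
#   ansible 2.9.0 (devel 0123456789) last updated 2019/01/01 10:00:00 (GMT +1200)
#     config file = /etc/ansible/ansible.cfg
#     configured module search path = Default w/o overrides
#     ansible python module location = /usr/lib/python3/dist-packages/ansible
#     executable location = /usr/bin/ansible
#     python version = 3.6.9 (default, Nov  7 2019, 10:44:02) [GCC 8.3.0]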
#
# Functions to add pre-canned options to an OptionParser
#
def create_base_parser(prog, usage="", desc=None, epilog=None):
"""
Create an options parser for all ansible scripts
"""
# base opts
parser = argparse.ArgumentParser(
prog=prog,
formatter_class=SortingHelpFormatter,
epilog=epilog,
description=desc,
conflict_handler='resolve',
)
version_help = "show program's version number, config file location, configured module search path," \
" module location, executable location and exit"
parser.add_argument('--version', action=AnsibleVersion, nargs=0, help=version_help)
add_verbosity_options(parser)
return parser
def add_verbosity_options(parser):
"""Add options for verbosity"""
parser.add_argument('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count",
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
def add_async_options(parser):
"""Add options for commands which can launch async tasks"""
parser.add_argument('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type=int, dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
parser.add_argument('-B', '--background', dest='seconds', type=int, default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
def add_basedir_options(parser):
"""Add options for commands which can set a playbook basedir"""
parser.add_argument('--playbook-dir', default=C.config.get_config_value('PLAYBOOK_DIR'), dest='basedir', action='store',
help="Since this tool does not use playbooks, use this as a substitute playbook directory."
"This sets the relative path for many features including roles/ group_vars/ etc.")
def add_check_options(parser):
"""Add options for commands which can run with diagnostic information of tasks"""
parser.add_argument("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
parser.add_argument('--syntax-check', dest='syntax', action='store_true',
help="perform a syntax check on the playbook, but do not execute it")
parser.add_argument("-D", "--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those"
" files; works great with --check")
def add_connect_options(parser):
"""Add options for commands which need to connection to other hosts"""
connect_group = parser.add_argument_group("Connection Options", "control as whom and how to connect to hosts")
connect_group.add_argument('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
help='ask for connection password')
connect_group.add_argument('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection', type=unfrack_path())
connect_group.add_argument('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
connect_group.add_argument('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
connect_group.add_argument('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type=int, dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
connect_group.add_argument('--ssh-common-args', default='', dest='ssh_common_args',
help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
connect_group.add_argument('--sftp-extra-args', default='', dest='sftp_extra_args',
help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
connect_group.add_argument('--scp-extra-args', default='', dest='scp_extra_args',
help="specify extra arguments to pass to scp only (e.g. -l)")
connect_group.add_argument('--ssh-extra-args', default='', dest='ssh_extra_args',
help="specify extra arguments to pass to ssh only (e.g. -R)")
parser.add_argument_group(connect_group)
def add_fork_options(parser):
"""Add options for commands that can fork worker processes"""
parser.add_argument('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type=int,
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
def add_inventory_options(parser):
"""Add options for commands that utilize inventory"""
parser.add_argument('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
help="specify inventory host path or comma separated host list. --inventory-file is deprecated")
parser.add_argument('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_argument('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
def add_meta_options(parser):
"""Add options for commands which can launch meta tasks from the command line"""
parser.add_argument('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
parser.add_argument('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache for every host in inventory")
def add_module_options(parser):
"""Add options for commands that load modules"""
module_path = C.config.get_configuration_definition('DEFAULT_MODULE_PATH').get('default', '')
parser.add_argument('-M', '--module-path', dest='module_path', default=None,
help="prepend colon-separated path(s) to module library (default=%s)" % module_path,
type=unfrack_path(pathsep=True), action=PrependListAction)
def add_output_options(parser):
"""Add options for commands which can change their output"""
parser.add_argument('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_argument('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
def add_runas_options(parser):
"""
Add options for commands which can run tasks as another user
Note that this includes the options from add_runas_prompt_options(). Only one of these
functions should be used.
"""
runas_group = parser.add_argument_group("Privilege Escalation Options", "control how and which user you become as on target hosts")
# consolidated privilege escalation (become)
runas_group.add_argument("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (does not imply password prompting)")
runas_group.add_argument('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD,
help="privilege escalation method to use (default=%(default)s), use "
"`ansible-doc -t become -l` to list valid choices.")
runas_group.add_argument('--become-user', default=None, dest='become_user', type=str,
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
add_runas_prompt_options(parser, runas_group=runas_group)
def add_runas_prompt_options(parser, runas_group=None):
"""
Add options for commands which need to prompt for privilege escalation credentials
Note that add_runas_options() includes these options already. Only one of the two functions
should be used.
"""
if runas_group is None:
runas_group = parser.add_argument_group("Privilege Escalation Options",
"control how and which user you become as on target hosts")
runas_group.add_argument('-K', '--ask-become-pass', dest='become_ask_pass', action='store_true',
default=C.DEFAULT_BECOME_ASK_PASS,
help='ask for privilege escalation password')
parser.add_argument_group(runas_group)
def add_runtask_options(parser):
"""Add options for commands that run a task"""
parser.add_argument('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
def add_subset_options(parser):
"""Add options for commands which can run a subset of tasks"""
parser.add_argument('-t', '--tags', dest='tags', default=C.TAGS_RUN, action='append',
help="only run plays and tasks tagged with these values")
parser.add_argument('--skip-tags', dest='skip_tags', default=C.TAGS_SKIP, action='append',
help="only run plays and tasks whose tags do not match these values")
def add_vault_options(parser):
"""Add options for loading vault files"""
parser.add_argument('--vault-id', default=[], dest='vault_ids', action='append', type=str,
help='the vault identity to use')
base_group = parser.add_mutually_exclusive_group()
base_group.add_argument('--ask-vault-password', '--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
base_group.add_argument('--vault-password-file', '--vault-pass-file', default=[], dest='vault_password_files',
help="vault password file", type=unfrack_path(), action='append')
| gpl-3.0 |
clarko1/Cramd | storage/cloud-client/quickstart_test.py | 8 | 1029 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import quickstart
@mock.patch('google.cloud.storage.client.Client.create_bucket')
def test_quickstart(create_bucket_mock, capsys):
# Unlike other quickstart tests, this one mocks out the creation
    # because buckets are expensive, globally-namespaced objects.
create_bucket_mock.return_value = mock.sentinel.bucket
quickstart.run_quickstart()
create_bucket_mock.assert_called_with('my-new-bucket')
| apache-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django-1.3/django/db/models/sql/subqueries.py | 230 | 8070 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.fields import DateField, FieldDoesNotExist
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import Date
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import Query
from django.db.models.sql.where import AND, Constraint
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
'AggregateQuery']
class DeleteQuery(Query):
"""
Delete queries are done through this class, since they are more constrained
than general queries.
"""
compiler = 'SQLDeleteCompiler'
def do_query(self, table, where, using):
self.tables = [table]
self.where = where
self.get_compiler(using).execute_sql(None)
def delete_batch(self, pk_list, using, field=None):
"""
Set up and execute delete queries for all the objects in pk_list.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
if not field:
field = self.model._meta.pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
where = self.where_class()
where.add((Constraint(None, field.column, field), 'in',
pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]), AND)
self.do_query(self.model._meta.db_table, where, using=using)
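    # Illustrative note (not part of the original class): with a chunk size of,
    # say, 100, delete_batch() over 250 primary keys issues three
    # "DELETE ... WHERE pk IN (...)" statements covering 100, 100 and 50 values;
    # the real chunk size is GET_ITERATOR_CHUNK_SIZE from
    # django.db.models.sql.constants.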
class UpdateQuery(Query):
"""
Represents an "update" SQL query.
"""
compiler = 'SQLUpdateCompiler'
def __init__(self, *args, **kwargs):
super(UpdateQuery, self).__init__(*args, **kwargs)
self._setup_query()
def _setup_query(self):
"""
Runs on initialization and after cloning. Any attributes that would
normally be set in __init__ should go in here, instead, so that they
are also set up after a clone() call.
"""
self.values = []
self.related_ids = None
if not hasattr(self, 'related_updates'):
self.related_updates = {}
def clone(self, klass=None, **kwargs):
return super(UpdateQuery, self).clone(klass,
related_updates=self.related_updates.copy(), **kwargs)
def update_batch(self, pk_list, values, using):
pk_field = self.model._meta.pk
self.add_update_values(values)
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.where.add((Constraint(None, pk_field.column, pk_field), 'in',
pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]),
AND)
self.get_compiler(using).execute_sql(None)
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in values.iteritems():
field, model, direct, m2m = self.model._meta.get_field_by_name(name)
if not direct or m2m:
raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)
if model:
self.add_related_update(model, field, val)
continue
values_seq.append((field, model, val))
return self.add_update_fields(values_seq)
def add_update_fields(self, values_seq):
"""
Turn a sequence of (field, model, value) triples into an update query.
Used by add_update_values() as well as the "fast" update path when
saving models.
"""
self.values.extend(values_seq)
def add_related_update(self, model, field, value):
"""
Adds (name, value) to an update query for an ancestor model.
Updates are coalesced so that we only run one update query per ancestor.
"""
try:
self.related_updates[model].append((field, None, value))
except KeyError:
self.related_updates[model] = [(field, None, value)]
def get_related_updates(self):
"""
Returns a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
if not self.related_updates:
return []
result = []
for model, values in self.related_updates.iteritems():
query = UpdateQuery(model)
query.values = values
if self.related_ids is not None:
query.add_filter(('pk__in', self.related_ids))
result.append(query)
return result
class InsertQuery(Query):
compiler = 'SQLInsertCompiler'
def __init__(self, *args, **kwargs):
super(InsertQuery, self).__init__(*args, **kwargs)
self.columns = []
self.values = []
self.params = ()
def clone(self, klass=None, **kwargs):
extras = {
'columns': self.columns[:],
'values': self.values[:],
'params': self.params
}
extras.update(kwargs)
return super(InsertQuery, self).clone(klass, **extras)
def insert_values(self, insert_values, raw_values=False):
"""
Set up the insert query from the 'insert_values' dictionary. The
dictionary gives the model field names and their target values.
If 'raw_values' is True, the values in the 'insert_values' dictionary
are inserted directly into the query, rather than passed as SQL
parameters. This provides a way to insert NULL and DEFAULT keywords
into the query, for example.
"""
placeholders, values = [], []
for field, val in insert_values:
placeholders.append((field, val))
self.columns.append(field.column)
values.append(val)
if raw_values:
self.values.extend([(None, v) for v in values])
else:
self.params += tuple(values)
self.values.extend(placeholders)
class DateQuery(Query):
"""
A DateQuery is a normal query, except that it specifically selects a single
date field. This requires some special handling when converting the results
back to Python objects, so we put it in a separate class.
"""
compiler = 'SQLDateCompiler'
def add_date_select(self, field_name, lookup_type, order='ASC'):
"""
Converts the query into a date extraction query.
"""
try:
result = self.setup_joins(
field_name.split(LOOKUP_SEP),
self.get_meta(),
self.get_initial_alias(),
False
)
except FieldError:
raise FieldDoesNotExist("%s has no field named '%s'" % (
self.model._meta.object_name, field_name
))
field = result[0]
assert isinstance(field, DateField), "%r isn't a DateField." \
% field.name
alias = result[3][-1]
select = Date((alias, field.column), lookup_type)
self.select = [select]
self.select_fields = [None]
self.select_related = False # See #7097.
self.set_extra_mask([])
self.distinct = True
self.order_by = order == 'ASC' and [1] or [-1]
if field.null:
self.add_filter(("%s__isnull" % field_name, False))
class AggregateQuery(Query):
"""
An AggregateQuery takes another query as a parameter to the FROM
clause and only selects the elements in the provided list.
"""
compiler = 'SQLAggregateCompiler'
def add_subquery(self, query, using):
self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True)
| bsd-3-clause |
nevil-brownlee/python-libtrace | test/v2-test-cases/plt_testing.py | 1 | 7672 | # Thu, 13 Mar 14 (PDT)
# plt-testing.py: Support routines for testing python-libtrace
# Copyright (C) 2017, Nevil Brownlee, U Auckland | WAND
import plt # Also imports ipp and datetime
import os # Contains getcwd
import sys # exit and stdout
import re # regular expressions
# import socket # gethostname
import inspect
def get_example_trace(fn, show_full_fn=False):
cwd = os.getcwd()
basename = os.path.basename(cwd)
if re.match(r'python-libtrace', basename):
full_fn = 'pcapfile:' + cwd + '/doc/examples/' + fn
else:
full_fn = 'pcapfile:' + cwd + '/' + fn
if show_full_fn:
print get_tag()+"fullfn = {0}\n" . format(full_fn)
else:
print get_tag()+"fn = {0}\n" . format(fn)
t = plt.trace(full_fn)
t.start()
return t
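# Illustrative usage (not part of the original helpers; the capture file name is
# made up): t = get_example_trace('anon-v4.pcap') opens doc/examples/anon-v4.pcap
# relative to a python-libtrace checkout (or the current directory otherwise) and
# returns a started plt.trace ready for the test scripts to iterate over.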
def print_data(msg, offset, data, mxlen, tag=''):
blanks = ' ' * (offset-1) # print outputs an extra blank
pad = ' ' * (offset - len(msg) + 1) # Don't change (caller's) msg!
print tag+get_tag(), " %s%s" % (msg, pad), # Trailing comma suppresses the linefeed
for j in range(len(data)):
if j == mxlen:
break
if j % 32 == 0 and j != 0:
print "\n%s%s" % (tag+get_tag(),blanks),
if j % 8 == 0 and j != 0:
print ' ',
print "%02x" % (data[j]),
print
def print_ip(ip, offset, tag=''):
margin = ' ' * offset
print tag+get_tag()+" %s -> %s, proto=%d, tclass=%x," % (
ip.src_prefix, ip.dst_prefix, ip.proto, ip.traffic_class)
print tag+get_tag()+" %sttl=%d, hlen=%d, plen=%d, " % (
margin, ip.ttl, ip.hdr_len, ip.pkt_len),
print "mf=%s, frag_offset=%d, ident=%04x" % (
ip.has_mf, ip.frag_offset, ip.ident)
def print_ip6(ip6, offset, tag=''):
margin = ' ' * offset
print tag+get_tag()+" %s -> %s, proto=%d, tclass=%x," % (
ip6.src_prefix, ip6.dst_prefix, ip6.proto, ip6.traffic_class)
print tag+get_tag()+" %sttl=%d, hlen=%s, plen=%s" % (
margin, ip6.hop_limit, ip6.hdr_len, ip6.pkt_len),
print "flow_label=%x, payload_len=%d, next_hdr=%d" % (
ip6.flow_label, ip6.payload_len, ip6.next_hdr)
def print_tcp(tcp, margin, tag=''):
fl = ''
if tcp.urg_flag:
fl += 'U'
if tcp.psh_flag:
fl += 'P'
if tcp.rst_flag:
fl += 'R'
if tcp.fin_flag:
fl += 'F'
if tcp.syn_flag:
fl += 'S'
if tcp.ack_flag:
fl += 'A'
print tag+get_tag()+" TCP, %s -> %s, %d -> %d, seq=%u, ack=%u" % (
tcp.src_prefix, tcp.dst_prefix, tcp.src_port, tcp.dst_port,
tcp.seq_nbr, tcp.ack_nbr)
print tag+get_tag()+" flags=%02x (%s), window=%u, checksum=%x, urg_ptr=%u" % (
tcp.flags, fl, tcp.window, tcp.checksum, tcp.urg_ptr)
payload = tcp.payload
if not payload:
print tag+get_tag()+" "+"no payload"
else:
pd = payload.data
print_data("\n"+tag+get_tag()+" payload:", margin, pd, 64, tag+get_tag())
def print_udp(udp, margin, tag=''):
print tag+get_tag()+" UDP, src_port=%u, dest_port=%u, len=%u, checksum=%04x" % (
udp.src_port, udp.dst_port, udp.len, udp.checksum)
t = (' ' * 8) + 'UDP'
# print_data(t, margin, udp.data, 64)
def print_icmp_ip(p, margin, tag=''):
print tag+get_tag()+" proto=%d, TTL=%d, pkt_len=%d" % (
p.proto, p.ttl, p.pkt_len)
def print_icmp(icmp, offset, tag=''): # IPv4 only (IPv6 uses ICMP6 protocol)
margin = ' ' * offset
print tag+get_tag()+"%sICMP, type=%u, code=%u, checksum=%04x, wlen=%d, clen=%d, %s" % (
margin, icmp.type, icmp.code, icmp.checksum,
icmp.wire_len, icmp.capture_len, icmp.time)
pd = p = icmp.payload
type = icmp.type; pt = 'IP '
if type == 0 or type == 8: # Echo Reply, Echo Request
if type == 8:
which = 'request,'
else:
which = 'reply, '
echo = icmp.echo
print tag+get_tag()+"%sEcho %s ident=%04x, sequence=%d" % (
margin, which, echo.ident, echo.sequence)
pt = 'Echo'
elif type == 3: # Destination Unreachable
print tag+get_tag()+"%sDestination unreachable," % (margin),
print_icmp_ip(p, margin); pd = p.data
elif type == 4: # Source Quench
print tag+"%sSource quench," % (margin),
print_icmp_ip(p, margin); pd = p.data
elif type == 5: # Redirect
redirect = icmp.redirect;
print tag+"%sRedirect, gateway=%s," % (margin, redirect.gateway),
print_icmp_ip(p, margin); pd = p.data
elif type == 11: # Time Exceeded
print tag+"%sTime exceeded," % (margin),
print_icmp_ip(p, margin); pd = p.data
else:
print tag+get_tag()+" Other,",
t = margin + pt
print_data(t, offset+len(pt), pd, 64, tag+get_tag())
def print_ip6_info(ip6, tag=''):
print tag+get_tag()+" %s -> %s, TTL=%d" % (
ip6.src_prefix, ip6.dst_prefix, ip6.ttl)
def print_icmp6(icmp6, offset, tag=''): # IPv6 only
margin = ' ' * (offset-3)
print tag+get_tag()+"%sICMP6: stype=%u, code=%u, checksum=%04x, wlen=%d, clen=%d, %s" % (
margin, icmp6.type, icmp6.code, icmp6.checksum,
icmp6.wire_len, icmp6.capture_len, icmp6.time)
margin = ' ' * offset
type = icmp6.type; pd = p = icmp6.payload; pt = 'Echo'
if type == 1: # Destination Unreachable
print tag+get_tag()+"%sDestination unreachable:" % (margin),
pt = 'IP6 '
print_ip6_info(p); pd = p.data
elif type == 128 or type == 129: # Echo Request, Echo Reply
if type == 128:
which = 'request:'
else:
which = 'reply: '
echo = icmp6.echo
print tag+"%sEcho %s ident=%04x, sequence=%d" % (
margin, which, echo.ident, echo.sequence)
pt = 'Data'
elif type == 2: # Packet Too Big
print tag+get_tag()+"%sPacket Too Big; MTU=%d:" % (margin, icmp6.toobig.mtu),
pt = 'IP '
print_ip6_info(p); pd = p.data
elif type == 3: # Time Exceeded
print tag+get_tag()+"%sTime Exceeded:" % (margin),
pt = 'IP6 '
print_ip6_info(p); pd = p.data
elif type == 4: # Parameter Problem
print tag+get_tag()+"%sParameter Problem; pointer=%d," % (margin, icmp6.param.pointer),
pt = 'IP6 '
print_ip6_info(p); pd = p.data
else:
if type == 133:
s = "Router Solicitation"
elif type == 134:
s = "Router Advertisment"
elif type == 135:
s = "Neighbour Solicitation"
elif type == 136:
s = "Neighbour Advertisment"
elif type == 137:
s = "Redirect"
        elif type == 138:
s = "Router Renumbering"
else:
s = "Other"
if type == 135 or type == 136:
print tag+get_tag()+"%s%s: target_prefix=%s, src_prefix=%s" % (
margin, s, icmp6.neighbour.target_prefix, icmp6.src_prefix)
else:
print tag+get_tag()+"%s%s: src_prefix=%s" % (margin, s, icmp6.src_prefix)
pt = 'Data'
t = margin + pt
print_data(t, offset+3, pd, 64, tag+get_tag())
def test_print(message, tag=''):
if tag == '':
print message,
else:
print tag+ ' '+message,
def test_println(message, tag=''):
print tag+' '+message
def get_tag(message=None):
(frame, filename, line_number,
function_name, lines, index) = inspect.getouterframes(inspect.currentframe())[1]
if message == None:
return '['+function_name+':'+str(line_number)+']'
else:
return '['+function_name+':'+str(line_number)+':'+'{'+message+'}'+']'
| gpl-3.0 |
rugk/letsencrypt | letsencrypt/plugins/manual_test.py | 8 | 4559 | """Tests for letsencrypt.plugins.manual."""
import signal
import unittest
import mock
from acme import challenges
from acme import jose
from letsencrypt import achallenges
from letsencrypt import errors
from letsencrypt.tests import acme_util
from letsencrypt.tests import test_util
KEY = jose.JWKRSA.load(test_util.load_vector("rsa512_key.pem"))
class ManualAuthenticatorTest(unittest.TestCase):
"""Tests for letsencrypt.plugins.manual.ManualAuthenticator."""
def setUp(self):
from letsencrypt.plugins.manual import ManualAuthenticator
self.config = mock.MagicMock(
no_simple_http_tls=True, simple_http_port=4430,
manual_test_mode=False)
self.auth = ManualAuthenticator(config=self.config, name="manual")
self.achalls = [achallenges.SimpleHTTP(
challb=acme_util.SIMPLE_HTTP_P, domain="foo.com", account_key=KEY)]
config_test_mode = mock.MagicMock(
no_simple_http_tls=True, simple_http_port=4430,
manual_test_mode=True)
self.auth_test_mode = ManualAuthenticator(
config=config_test_mode, name="manual")
def test_more_info(self):
self.assertTrue(isinstance(self.auth.more_info(), str))
def test_get_chall_pref(self):
self.assertTrue(all(issubclass(pref, challenges.Challenge)
for pref in self.auth.get_chall_pref("foo.com")))
def test_perform_empty(self):
self.assertEqual([], self.auth.perform([]))
@mock.patch("letsencrypt.plugins.manual.sys.stdout")
@mock.patch("letsencrypt.plugins.manual.os.urandom")
@mock.patch("acme.challenges.SimpleHTTPResponse.simple_verify")
@mock.patch("__builtin__.raw_input")
def test_perform(self, mock_raw_input, mock_verify, mock_urandom,
mock_stdout):
mock_urandom.side_effect = nonrandom_urandom
mock_verify.return_value = True
resp = challenges.SimpleHTTPResponse(tls=False)
self.assertEqual([resp], self.auth.perform(self.achalls))
self.assertEqual(1, mock_raw_input.call_count)
mock_verify.assert_called_with(
self.achalls[0].challb.chall, "foo.com", KEY.public_key(), 4430)
message = mock_stdout.write.mock_calls[0][1][0]
self.assertTrue(self.achalls[0].token in message)
mock_verify.return_value = False
self.assertEqual([None], self.auth.perform(self.achalls))
@mock.patch("letsencrypt.plugins.manual.subprocess.Popen", autospec=True)
def test_perform_test_command_oserror(self, mock_popen):
mock_popen.side_effect = OSError
self.assertEqual([False], self.auth_test_mode.perform(self.achalls))
@mock.patch("letsencrypt.plugins.manual.time.sleep", autospec=True)
@mock.patch("letsencrypt.plugins.manual.subprocess.Popen", autospec=True)
def test_perform_test_command_run_failure(
self, mock_popen, unused_mock_sleep):
mock_popen.poll.return_value = 10
mock_popen.return_value.pid = 1234
self.assertRaises(
errors.Error, self.auth_test_mode.perform, self.achalls)
@mock.patch("letsencrypt.plugins.manual.time.sleep", autospec=True)
@mock.patch("acme.challenges.SimpleHTTPResponse.simple_verify",
autospec=True)
@mock.patch("letsencrypt.plugins.manual.subprocess.Popen", autospec=True)
def test_perform_test_mode(self, mock_popen, mock_verify, mock_sleep):
mock_popen.return_value.poll.side_effect = [None, 10]
mock_popen.return_value.pid = 1234
mock_verify.return_value = False
self.assertEqual([False], self.auth_test_mode.perform(self.achalls))
self.assertEqual(1, mock_sleep.call_count)
def test_cleanup_test_mode_already_terminated(self):
# pylint: disable=protected-access
self.auth_test_mode._httpd = httpd = mock.Mock()
httpd.poll.return_value = 0
self.auth_test_mode.cleanup(self.achalls)
@mock.patch("letsencrypt.plugins.manual.os.killpg", autospec=True)
def test_cleanup_test_mode_kills_still_running(self, mock_killpg):
# pylint: disable=protected-access
self.auth_test_mode._httpd = httpd = mock.Mock(pid=1234)
httpd.poll.return_value = None
self.auth_test_mode.cleanup(self.achalls)
mock_killpg.assert_called_once_with(1234, signal.SIGTERM)
def nonrandom_urandom(num_bytes):
"""Returns a string of length num_bytes"""
return "x" * num_bytes
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 |
ebukoz/thrive | erpnext/selling/doctype/selling_settings/selling_settings.py | 28 | 1566 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe.utils import cint
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
from frappe.utils.nestedset import get_root_of
from frappe.model.document import Document
class SellingSettings(Document):
def on_update(self):
self.toggle_hide_tax_id()
def validate(self):
for key in ["cust_master_name", "campaign_naming_by", "customer_group", "territory",
"maintain_same_sales_rate", "editable_price_list_rate", "selling_price_list"]:
frappe.db.set_default(key, self.get(key, ""))
from erpnext.setup.doctype.naming_series.naming_series import set_by_naming_series
set_by_naming_series("Customer", "customer_name",
self.get("cust_master_name")=="Naming Series", hide_name_field=False)
def toggle_hide_tax_id(self):
self.hide_tax_id = cint(self.hide_tax_id)
# Make property setters to hide tax_id fields
for doctype in ("Sales Order", "Sales Invoice", "Delivery Note"):
make_property_setter(doctype, "tax_id", "hidden", self.hide_tax_id, "Check")
make_property_setter(doctype, "tax_id", "print_hide", self.hide_tax_id, "Check")
def set_default_customer_group_and_territory(self):
if not self.customer_group:
self.customer_group = get_root_of('Customer Group')
if not self.territory:
self.territory = get_root_of('Territory')
| gpl-3.0 |
unseenlaser/python-for-android | python-build/python-libs/gdata/build/lib/gdata/apps/groups/service.py | 137 | 10118 | #!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to manage groups, groups memembers and groups owners.
EmailSettingsService: Set various email settings.
"""
__author__ = '[email protected]'
import gdata.apps
import gdata.apps.service
import gdata.service
API_VER='2.0'
BASE_URL = '/a/feeds/group/' + API_VER + '/%s'
GROUP_MEMBER_URL = BASE_URL + '?member=%s'
GROUP_MEMBER_DIRECT_URL = GROUP_MEMBER_URL + '&directOnly=%s'
GROUP_ID_URL = BASE_URL + '/%s'
MEMBER_URL = BASE_URL + '/%s/member'
MEMBER_ID_URL = MEMBER_URL + '/%s'
OWNER_URL = BASE_URL + '/%s/owner'
OWNER_ID_URL = OWNER_URL + '/%s'
PERMISSION_OWNER = 'Owner'
PERMISSION_MEMBER = 'Member'
PERMISSION_DOMAIN = 'Domain'
PERMISSION_ANYONE = 'Anyone'
class GroupsService(gdata.apps.service.PropertyService):
"""Client for the Google Apps Groups service."""
def _ServiceUrl(self, service_type, is_existed, group_id, member_id, owner_email,
start_key, direct_only=None, domain=None):
if domain is None:
domain = self.domain
if service_type == 'group':
if group_id != '' and is_existed:
return GROUP_ID_URL % (domain, group_id)
if member_id != '':
if direct_only is not None:
return GROUP_MEMBER_DIRECT_URL % (domain, member_id,
self._Bool2Str(direct_only))
else:
return GROUP_MEMBER_URL % (domain, member_id)
if start_key != '':
return GROUP_START_URL % (domain, start_key)
return BASE_URL % (domain)
if service_type == 'member':
if member_id != '' and is_existed:
return MEMBER_ID_URL % (domain, group_id, member_id)
if start_key != '':
return MEMBER_START_URL % (domain, group_id, start_key)
return MEMBER_URL % (domain, group_id)
if service_type == 'owner':
if owner_email != '' and is_existed:
return OWNER_ID_URL % (domain, group_id, owner_email)
return OWNER_URL % (domain, group_id)
def _Bool2Str(self, b):
if b is None:
return None
return str(b is True).lower()
def _IsExisted(self, uri):
try:
properties = self._GetProperties(uri)
return True
except gdata.apps.service.AppsForYourDomainException, e:
if e.error_code == gdata.apps.service.ENTITY_DOES_NOT_EXIST:
return False
else:
raise e
def CreateGroup(self, group_id, group_name, description, email_permission):
"""Create a group.
Args:
group_id: The ID of the group (e.g. us-sales).
group_name: The name of the group.
description: A description of the group
email_permission: The subscription permission of the group.
Returns:
A dict containing the result of the create operation.
"""
uri = self._ServiceUrl('group', False, group_id, '', '', '', '')
properties = {}
properties['groupId'] = group_id
properties['groupName'] = group_name
properties['description'] = description
properties['emailPermission'] = email_permission
return self._PostProperties(uri, properties)
def UpdateGroup(self, group_id, group_name, description, email_permission):
"""Update a group's name, description and/or permission.
Args:
group_id: The ID of the group (e.g. us-sales).
group_name: The name of the group.
description: A description of the group
email_permission: The subscription permission of the group.
Returns:
A dict containing the result of the update operation.
"""
uri = self._ServiceUrl('group', True, group_id, '', '', '', '')
properties = {}
properties['groupId'] = group_id
properties['groupName'] = group_name
properties['description'] = description
properties['emailPermission'] = email_permission
return self._PutProperties(uri, properties)
def RetrieveGroup(self, group_id):
"""Retrieve a group based on its ID.
Args:
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('group', True, group_id, '', '', '', '')
return self._GetProperties(uri)
def RetrieveAllGroups(self):
"""Retrieve all groups in the domain.
Args:
None.
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('group', True, '', '', '', '', '')
return self._GetPropertiesList(uri)
def RetrieveGroups(self, member_id, direct_only=False):
"""Retrieve all groups that belong to the given member_id.
Args:
member_id: The member's email address (e.g. [email protected]).
direct_only: Boolean whether only return groups that this member directly belongs to.
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('group', True, '', member_id, '', '', direct_only)
return self._GetPropertiesList(uri)
def DeleteGroup(self, group_id):
"""Delete a group based on its ID.
Args:
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the delete operation.
"""
uri = self._ServiceUrl('group', True, group_id, '', '', '', '')
return self._DeleteProperties(uri)
def AddMemberToGroup(self, member_id, group_id):
"""Add a member to a group.
Args:
member_id: The member's email address (e.g. [email protected]).
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the add operation.
"""
uri = self._ServiceUrl('member', False, group_id, member_id, '', '', '')
properties = {}
properties['memberId'] = member_id
return self._PostProperties(uri, properties)
def IsMember(self, member_id, group_id):
"""Check whether the given member already exists in the given group
Args:
member_id: The member's email address (e.g. [email protected]).
group_id: The ID of the group (e.g. us-sales).
Returns:
True if the member exists in the group. False otherwise.
"""
uri = self._ServiceUrl('member', True, group_id, member_id, '', '', '')
return self._IsExisted(uri)
def RetrieveMember(self, member_id, group_id):
"""Retrieve the given member in the given group
Args:
member_id: The member's email address (e.g. [email protected]).
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('member', True, group_id, member_id, '', '', '')
return self._GetProperties(uri)
def RetrieveAllMembers(self, group_id):
"""Retrieve all members in the given group.
Args:
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('member', True, group_id, '', '', '', '')
return self._GetPropertiesList(uri)
def RemoveMemberFromGroup(self, member_id, group_id):
"""Remove the given member from the given group
Args:
group_id: The ID of the group (e.g. us-sales).
member_id: The member's email address (e.g. [email protected]).
Returns:
A dict containing the result of the remove operation.
"""
uri = self._ServiceUrl('member', True, group_id, member_id, '', '', '')
return self._DeleteProperties(uri)
def AddOwnerToGroup(self, owner_email, group_id):
"""Add an owner to a group.
Args:
owner_email: The email address of a group owner.
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the add operation.
"""
uri = self._ServiceUrl('owner', False, group_id, '', owner_email, '', '')
properties = {}
properties['email'] = owner_email
return self._PostProperties(uri, properties)
def IsOwner(self, owner_email, group_id):
"""Check whether the given member an owner of the given group.
Args:
owner_email: The email address of a group owner.
group_id: The ID of the group (e.g. us-sales).
Returns:
True if the member is an owner of the given group. False otherwise.
"""
uri = self._ServiceUrl('owner', True, group_id, '', owner_email, '', '')
return self._IsExisted(uri)
def RetrieveOwner(self, owner_email, group_id):
"""Retrieve the given owner in the given group
Args:
owner_email: The email address of a group owner.
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('owner', True, group_id, '', owner_email, '', '')
return self._GetProperties(uri)
def RetrieveAllOwners(self, group_id):
"""Retrieve all owners of the given group
Args:
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('owner', True, group_id, '', '', '', '')
return self._GetPropertiesList(uri)
def RemoveOwnerFromGroup(self, owner_email, group_id):
"""Remove the given owner from the given group
Args:
owner_email: The email address of a group owner.
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the remove operation.
"""
uri = self._ServiceUrl('owner', True, group_id, '', owner_email, '', '')
return self._DeleteProperties(uri)
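# Illustrative usage sketch (not part of the original module; the constructor
# arguments and login call are assumptions about gdata.apps.service):
#   service = GroupsService(email='admin@example.com', domain='example.com',
#                           password='secret')
#   service.ProgrammaticLogin()
#   service.CreateGroup('us-sales', 'US Sales', 'Sales mailing list',
#                       PERMISSION_MEMBER)
#   service.AddMemberToGroup('liz@example.com', 'us-sales')
#   service.RetrieveAllMembers('us-sales')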
| apache-2.0 |
tylerjereddy/scipy | scipy/special/tests/test_basic.py | 8 | 132780 | # this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
import itertools
import platform
import sys
import numpy as np
from numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp,
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_)
import pytest
from pytest import raises as assert_raises
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_approx_equal,
assert_, assert_allclose, assert_array_almost_equal_nulp,
suppress_warnings)
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk
from scipy.special._testutils import with_special_errors, \
assert_func_equal, FuncData
import math
class TestCephes:
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
def test_binom_nooverflow_8346(self):
# Test (binom(n, k) doesn't overflow prematurely */
dataset = [
(1000, 500, 2.70288240945436551e+299),
(1002, 501, 1.08007396880791225e+300),
(1004, 502, 4.31599279169058121e+300),
(1006, 503, 1.72468101616263781e+301),
(1008, 504, 6.89188009236419153e+301),
(1010, 505, 2.75402257948335448e+302),
(1012, 506, 1.10052048531923757e+303),
(1014, 507, 4.39774063758732849e+303),
(1016, 508, 1.75736486108312519e+304),
(1018, 509, 7.02255427788423734e+304),
(1020, 510, 2.80626776829962255e+305),
(1022, 511, 1.12140876377061240e+306),
(1024, 512, 4.48125455209897109e+306),
(1026, 513, 1.79075474304149900e+307),
(1028, 514, 7.15605105487789676e+307)
]
dataset = np.asarray(dataset)
FuncData(cephes.binom, dataset, (0, 1), 2, rtol=1e-12).check()
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
rtol=1e-13, atol=0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
assert_allclose(cephes.betaln(-100.3, 1e-200), cephes.gammaln(1e-200))
assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
rtol=1e-14, atol=0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
8.4231316935498957e-21, rtol=3e-12, atol=0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
# Each row holds (x, nu, lam, expected_value)
# These values were computed using Wolfram Alpha with
# CDF[NoncentralChiSquareDistribution[nu, lam], x]
values = np.array([
[25.00, 20.0, 400, 4.1210655112396197139e-57],
[25.00, 8.00, 250, 2.3988026526832425878e-29],
[0.001, 8.00, 40., 5.3761806201366039084e-24],
[0.010, 8.00, 40., 5.45396231055999457039e-20],
[20.00, 2.00, 107, 1.39390743555819597802e-9],
[22.50, 2.00, 107, 7.11803307138105870671e-9],
[25.00, 2.00, 107, 3.11041244829864897313e-8],
[3.000, 2.00, 1.0, 0.62064365321954362734],
[350.0, 300., 10., 0.93880128006276407710],
[100.0, 13.5, 10., 0.99999999650104210949],
[700.0, 20.0, 400, 0.99999999925680650105],
[150.0, 13.5, 10., 0.99999999999999983046],
[160.0, 13.5, 10., 0.99999999999999999518], # 1.0
])
cdf = cephes.chndtr(values[:, 0], values[:, 1], values[:, 2])
assert_allclose(cdf, values[:, 3], rtol=1e-12)
assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
def test_diric(self):
# Test behavior near multiples of 2pi. Regression test for issue
# described in gh-4001.
n_odd = [1, 5, 25]
x = np.array(2*np.pi + 5e-5).astype(np.float32)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
x = np.array(2*np.pi + 1e-15).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
if hasattr(np, 'float128'):
# No float128 available in 32-bit numpy
x = np.array(2*np.pi + 1e-12).astype(np.float128)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
n_even = [2, 4, 24]
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
# Test at some values not near a multiple of pi
x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
octave_result = [0.872677996249965, 0.539344662916632,
0.127322003750035, -0.206011329583298]
assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
def test_diric_broadcasting(self):
x = np.arange(5)
n = np.array([1, 3, 7])
assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0), 0.0)
def test_erf_symmetry(self):
x = 5.905732037710919
assert_equal(cephes.erf(x) + cephes.erf(-x), 0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0), 1.0)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
assert_equal(cephes.expm1(np.inf), np.inf)
assert_equal(cephes.expm1(-np.inf), -1)
assert_equal(cephes.expm1(np.nan), np.nan)
def test_expm1_complex(self):
expm1 = cephes.expm1
assert_equal(expm1(0 + 0j), 0 + 0j)
assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0))
assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf))
assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf))
assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf))
assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf))
assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan))
assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan))
assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0))
assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0))
assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan))
assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan))
assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan))
@pytest.mark.xfail(reason='The real part of expm1(z) bad at these points')
def test_expm1_complex_hard(self):
# The real part of this function is difficult to evaluate when
# z.real = -log(cos(z.imag)).
y = np.array([0.1, 0.2, 0.3, 5, 11, 20])
x = -np.log(np.cos(y))
z = x + 1j*y
# evaluate using mpmath.expm1 with dps=1000
expected = np.array([-5.5507901846769623e-17+0.10033467208545054j,
2.4289354732893695e-18+0.20271003550867248j,
4.5235500262585768e-17+0.30933624960962319j,
7.8234305217489006e-17-3.3805150062465863j,
-1.3685191953697676e-16-225.95084645419513j,
8.7175620481291045e-17+2.2371609442247422j])
found = cephes.expm1(z)
# this passes.
assert_array_almost_equal_nulp(found.imag, expected.imag, 3)
# this fails.
assert_array_almost_equal_nulp(found.real, expected.real, 20)
def test_fdtr(self):
assert_equal(cephes.fdtr(1, 1, 0), 0.0)
# Computed using Wolfram Alpha: CDF[FRatioDistribution[1e-6, 5], 10]
assert_allclose(cephes.fdtr(1e-6, 5, 10), 0.9999940790193488,
rtol=1e-12)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1, 1, 0), 1.0)
# Computed using Wolfram Alpha:
# 1 - CDF[FRatioDistribution[2, 1/10], 1e10]
assert_allclose(cephes.fdtrc(2, 0.1, 1e10), 0.27223784621293512,
rtol=1e-12)
def test_fdtri(self):
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
# From Wolfram Alpha:
# CDF[FRatioDistribution[1/10, 1], 3] = 0.8756751669632105666874...
p = 0.8756751669632105666874
assert_allclose(cephes.fdtri(0.1, 1, p), 3, rtol=1e-12)
@pytest.mark.xfail(reason='Returns nan on i686.')
def test_fdtri_mysterious_failure(self):
assert_allclose(cephes.fdtri(1, 1, 0.5), 1)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes.gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtr_inf(self):
assert_equal(cephes.gdtr(1,1,np.inf),1.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0), 1.0)
def test_kolmogp(self):
assert_equal(cephes._kolmogp(0), -0.0)
def test_kolmogc(self):
assert_equal(cephes._kolmogc(0), 0.0)
def test_kolmogci(self):
assert_equal(cephes._kolmogci(0), 0.0)
assert_(np.isnan(cephes._kolmogci(np.nan)))
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
log1p = cephes.log1p
assert_equal(log1p(0), 0.0)
assert_equal(log1p(-1), -np.inf)
assert_equal(log1p(-2), np.nan)
assert_equal(log1p(np.inf), np.inf)
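    # Complex log1p is expected to follow the C99 clog conventions for infinities
    # and NaNs, e.g. log1p(-inf + 1j) -> inf + pi*1j; the cases below spell them out.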
def test_log1p_complex(self):
log1p = cephes.log1p
c = complex
assert_equal(log1p(0 + 0j), 0 + 0j)
assert_equal(log1p(c(-1, 0)), c(-np.inf, 0))
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in multiply")
assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2))
assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan))
assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi))
assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0))
assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4))
assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4))
assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan))
assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan))
assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan))
assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan))
assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan))
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
        # Values from ACM TOMS 804 (derivative obtained by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1, 1, 1, 0), 0.0)
f = [0.5, 1, 1.5]
p = cephes.ncfdtr(2, 3, 1.5, f)
assert_allclose(cephes.ncfdtri(2, 3, 1.5, p), f)
def test_ncfdtridfd(self):
dfd = [1, 2, 3]
p = cephes.ncfdtr(2, dfd, 0.25, 15)
assert_allclose(cephes.ncfdtridfd(2, p, 0.25, 15), dfd)
def test_ncfdtridfn(self):
dfn = [0.1, 1, 2, 3, 1e4]
p = cephes.ncfdtr(dfn, 2, 0.25, 15)
assert_allclose(cephes.ncfdtridfn(p, 2, 0.25, 15), dfn, rtol=1e-5)
def test_ncfdtrinc(self):
nc = [0.5, 1.5, 2.0]
p = cephes.ncfdtr(2, 3, nc, 15)
assert_allclose(cephes.ncfdtrinc(2, 3, p, 15), nc)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_allclose(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
val = cephes.pdtr(0, 1)
assert_almost_equal(val, np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtr([0, 1, 2], 0)
assert_array_equal(val, [1, 1, 1])
def test_pdtrc(self):
val = cephes.pdtrc(0, 1)
assert_almost_equal(val, 1 - np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtrc([0, 1, 2], 0.0)
assert_array_equal(val, [0, 0, 0])
def test_pdtri(self):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "floating point number truncated to an integer")
cephes.pdtri(0.5,0.5)
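    # pdtrik inverts the Poisson CDF in k; since pdtr(k, m) == gammaincc(k + 1, m),
    # the first assert below recovers the probability via gammaincc.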
def test_pdtrik(self):
k = cephes.pdtrik(0.5, 1)
assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
# Edge case: m = 0 or very small.
k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
assert_array_equal(k, np.zeros((3, 3)))
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
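    # cephes.round rounds half-integers away from zero (3.5 -> 4.0, -3.5 -> -4.0),
    # unlike Python's built-in round(), which rounds halves to even.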
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovp(self):
assert_equal(cephes._smirnovp(1, .1), -1)
assert_equal(cephes._smirnovp(2, 0.75), -2*(0.25)**(2-1))
assert_equal(cephes._smirnovp(3, 0.75), -3*(0.25)**(3-1))
assert_(np.isnan(cephes._smirnovp(1, np.nan)))
def test_smirnovc(self):
assert_equal(cephes._smirnovc(1,.1),0.1)
assert_(np.isnan(cephes._smirnovc(1,np.nan)))
x10 = np.linspace(0, 1, 11, endpoint=True)
assert_almost_equal(cephes._smirnovc(3, x10), 1-cephes.smirnov(3, x10))
x4 = np.linspace(0, 1, 5, endpoint=True)
assert_almost_equal(cephes._smirnovc(4, x4), 1-cephes.smirnov(4, x4))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_smirnovci(self):
assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.4)),0.4)
assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.6)),0.6)
assert_(np.isnan(cephes._smirnovci(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_wofz(self):
z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
complex(-0.0000000234545,1.1234), complex(-3.,5.1),
complex(-53,30.1), complex(0.0,0.12345),
complex(11,1), complex(-22,-2), complex(9,-28),
complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
]
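        # Reference values of the Faddeeva function w(z) = exp(-z**2) * erfc(-1j*z);
        # they appear to have been computed with an arbitrary-precision package.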
w = [
complex(-3.78270245518980507452677445620103199303131110e-7,
0.000903861276433172057331093754199933411710053155),
complex(0.1764906227004816847297495349730234591778719532788,
-0.02146550539468457616788719893991501311573031095617),
complex(0.2410250715772692146133539023007113781272362309451,
0.06087579663428089745895459735240964093522265589350),
complex(0.30474420525691259245713884106959496013413834051768,
-0.20821893820283162728743734725471561394145872072738),
complex(7.317131068972378096865595229600561710140617977e34,
8.321873499714402777186848353320412813066170427e34),
complex(0.0615698507236323685519612934241429530190806818395,
-0.00676005783716575013073036218018565206070072304635),
complex(0.3960793007699874918961319170187598400134746631,
-5.593152259116644920546186222529802777409274656e-9),
complex(0.08217199226739447943295069917990417630675021771804,
-0.04701291087643609891018366143118110965272615832184),
complex(0.00457246000350281640952328010227885008541748668738,
-0.00804900791411691821818731763401840373998654987934),
complex(0.8746342859608052666092782112565360755791467973338452,
0.),
complex(0.00468190164965444174367477874864366058339647648741,
0.0510735563901306197993676329845149741675029197050),
complex(-0.0023193175200187620902125853834909543869428763219,
-0.025460054739731556004902057663500272721780776336),
complex(9.11463368405637174660562096516414499772662584e304,
3.97101807145263333769664875189354358563218932e305),
complex(-4.4927207857715598976165541011143706155432296e281,
-2.8019591213423077494444700357168707775769028e281),
complex(2.820947917809305132678577516325951485807107151e-6,
2.820947917668257736791638444590253942253354058e-6),
complex(2.82094791773878143474039725787438662716372268e-15,
2.82094791773878143474039725773333923127678361e-15)
]
assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry:
def test_airy(self):
        # Check the Airy functions (Ai, Ai', Bi, Bi') to 8 decimal places.
x = special.airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = special.airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = special.airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
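        # airye scales Ai, Ai' by exp(2/3 * z**1.5) and Bi, Bi' by
        # exp(-|Re(2/3 * z**1.5)|); rebuild the scaled values from airy() by hand.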
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_array_almost_equal(bi,bia,4)
bi = special.bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),10)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),4)
def test_ai_zeros_big(self):
z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
ai_z, aip_z, _, _ = special.airy(z)
ai_zp, aip_zp, _, _ = special.airy(zp)
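        # For large |z| the Airy functions oscillate with amplitude ~ |z|**(-1/4) and
        # their derivatives with ~ |z|**(1/4); divide by these envelopes before
        # checking that the values at the computed zeros are negligible.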
ai_envelope = 1/abs(z)**(1./4)
aip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
assert_allclose(aip_zx, aip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.1
assert_allclose(z[:6],
[-2.3381074105, -4.0879494441, -5.5205598281,
-6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
assert_allclose(zp[:6],
[-1.0187929716, -3.2481975822, -4.8200992112,
-6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)
def test_bi_zeros_big(self):
z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
_, _, bi_z, bip_z = special.airy(z)
_, _, bi_zp, bip_zp = special.airy(zp)
bi_envelope = 1/abs(z)**(1./4)
bip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
assert_allclose(bip_zx, bip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.2
assert_allclose(z[:6],
[-1.1737132227, -3.2710933028, -4.8307378417,
-6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
assert_allclose(zp[:6],
[-2.2944396826, -4.0731550891, -5.5123957297,
-6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
class TestAssocLaguerre:
def test_assoc_laguerre(self):
a1 = special.genlaguerre(11,1)
a2 = special.assoc_laguerre(.2,11,1)
assert_array_almost_equal(a2,a1(.2),8)
a2 = special.assoc_laguerre(1,11,1)
assert_array_almost_equal(a2,a1(1),8)
class TestBesselpoly:
def test_besselpoly(self):
pass
class TestKelvin:
def test_bei(self):
mbei = special.bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact
def test_beip(self):
mbeip = special.beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact
def test_ber(self):
mber = special.ber(2)
assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact
def test_berp(self):
mberp = special.berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact
def test_bei_zeros(self):
# Abramowitz & Stegun, Table 9.12
bi = special.bei_zeros(5)
assert_array_almost_equal(bi,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
def test_beip_zeros(self):
bip = special.beip_zeros(5)
assert_array_almost_equal(bip,array([3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),8)
def test_ber_zeros(self):
ber = special.ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = special.berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = special.kelvin(2)
assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
special.ker(2) + special.kei(2)*1j,
special.berp(2) + special.beip(2)*1j,
special.kerp(2) + special.keip(2)*1j),8)
def test_kei(self):
mkei = special.kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = special.keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = special.ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = special.kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = special.kei_zeros(5)
assert_array_almost_equal(kei,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = special.keip_zeros(5)
assert_array_almost_equal(keip,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = special.kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([3.77267,
# table from 1927 had 3.77320
# but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = special.ker_zeros(5)
assert_array_almost_equal(ker,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = special.kerp_zeros(5)
assert_array_almost_equal(kerp,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli:
def test_bernoulli(self):
brn = special.bernoulli(5)
assert_array_almost_equal(brn,array([1.0000,
-0.5000,
0.1667,
0.0000,
-0.0333,
0.0000]),4)
class TestBeta:
def test_beta(self):
bet = special.beta(2,4)
betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
assert_almost_equal(bet,betg,8)
def test_betaln(self):
betln = special.betaln(2,4)
bet = log(abs(special.beta(2,4)))
assert_almost_equal(betln,bet,8)
def test_betainc(self):
btinc = special.betainc(1,1,.2)
assert_almost_equal(btinc,0.2,8)
def test_betaincinv(self):
y = special.betaincinv(2,4,.5)
comp = special.betainc(2,4,y)
assert_almost_equal(comp,.5,5)
class TestCombinatorics:
def test_comb(self):
assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
assert_almost_equal(special.comb(10, 3), 120.)
assert_equal(special.comb(10, 3, exact=True), 120)
assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
assert_allclose([special.comb(20, k, exact=True) for k in range(21)],
special.comb(20, list(range(21))), atol=1e-15)
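        # With exact=True the computation is done with Python integers, so values
        # beyond the int64 range must come back without overflow or precision loss.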
ii = np.iinfo(int).max + 1
assert_equal(special.comb(ii, ii-1, exact=True), ii)
expected = 100891344545564193334812497256
assert_equal(special.comb(100, 50, exact=True), expected)
def test_comb_with_np_int64(self):
n = 70
k = 30
np_n = np.int64(n)
np_k = np.int64(k)
assert_equal(special.comb(np_n, np_k, exact=True),
special.comb(n, k, exact=True))
def test_comb_zeros(self):
assert_equal(special.comb(2, 3, exact=True), 0)
assert_equal(special.comb(-1, 3, exact=True), 0)
assert_equal(special.comb(2, -1, exact=True), 0)
assert_equal(special.comb(2, -1, exact=False), 0)
assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 120.])
def test_perm(self):
assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
assert_almost_equal(special.perm(10, 3), 720.)
assert_equal(special.perm(10, 3, exact=True), 720)
def test_perm_zeros(self):
assert_equal(special.perm(2, 3, exact=True), 0)
assert_equal(special.perm(-1, 3, exact=True), 0)
assert_equal(special.perm(2, -1, exact=True), 0)
assert_equal(special.perm(2, -1, exact=False), 0)
assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 720.])
class TestTrigonometric:
def test_cbrt(self):
cb = special.cbrt(27)
cbrl = 27**(1.0/3.0)
assert_approx_equal(cb,cbrl)
def test_cbrtmore(self):
cb1 = special.cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_almost_equal(cb1,cbrl1,8)
def test_cosdg(self):
cdg = special.cosdg(90)
cdgrl = cos(pi/2.0)
assert_almost_equal(cdg,cdgrl,8)
def test_cosdgmore(self):
cdgm = special.cosdg(30)
cdgmrl = cos(pi/6.0)
assert_almost_equal(cdgm,cdgmrl,8)
def test_cosm1(self):
cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_array_almost_equal(cs,csrl,8)
def test_cotdg(self):
ct = special.cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_almost_equal(ct,ctrl,8)
def test_cotdgmore(self):
ct1 = special.cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_almost_equal(ct1,ctrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.cotdg(45), 1.0, 14)
assert_almost_equal(special.cotdg(-45), -1.0, 14)
assert_almost_equal(special.cotdg(90), 0.0, 14)
assert_almost_equal(special.cotdg(-90), 0.0, 14)
assert_almost_equal(special.cotdg(135), -1.0, 14)
assert_almost_equal(special.cotdg(-135), 1.0, 14)
assert_almost_equal(special.cotdg(225), 1.0, 14)
assert_almost_equal(special.cotdg(-225), -1.0, 14)
assert_almost_equal(special.cotdg(270), 0.0, 14)
assert_almost_equal(special.cotdg(-270), 0.0, 14)
assert_almost_equal(special.cotdg(315), -1.0, 14)
assert_almost_equal(special.cotdg(-315), 1.0, 14)
assert_almost_equal(special.cotdg(765), 1.0, 14)
def test_sinc(self):
# the sinc implementation and more extensive sinc tests are in numpy
assert_array_equal(special.sinc([0]), 1)
assert_equal(special.sinc(0.0), 1.0)
def test_sindg(self):
sn = special.sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = special.sindg(30)
snmrl = sin(pi/6.0)
assert_almost_equal(snm,snmrl,8)
snm1 = special.sindg(45)
snmrl1 = sin(pi/4.0)
assert_almost_equal(snm1,snmrl1,8)
class TestTandg:
def test_tandg(self):
tn = special.tandg(30)
tnrl = tan(pi/6.0)
assert_almost_equal(tn,tnrl,8)
def test_tandgmore(self):
tnm = special.tandg(45)
tnmrl = tan(pi/4.0)
assert_almost_equal(tnm,tnmrl,8)
tnm1 = special.tandg(60)
tnmrl1 = tan(pi/3.0)
assert_almost_equal(tnm1,tnmrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.tandg(0), 0.0, 14)
assert_almost_equal(special.tandg(45), 1.0, 14)
assert_almost_equal(special.tandg(-45), -1.0, 14)
assert_almost_equal(special.tandg(135), -1.0, 14)
assert_almost_equal(special.tandg(-135), 1.0, 14)
assert_almost_equal(special.tandg(180), 0.0, 14)
assert_almost_equal(special.tandg(-180), 0.0, 14)
assert_almost_equal(special.tandg(225), 1.0, 14)
assert_almost_equal(special.tandg(-225), -1.0, 14)
assert_almost_equal(special.tandg(315), -1.0, 14)
assert_almost_equal(special.tandg(-315), 1.0, 14)
class TestEllip:
def test_ellipj_nan(self):
"""Regression test for #912."""
special.ellipj(0.5, np.nan)
def test_ellipj(self):
el = special.ellipj(0.2,0)
rel = [sin(0.2),cos(0.2),1.0,0.20]
assert_array_almost_equal(el,rel,13)
def test_ellipk(self):
elk = special.ellipk(.2)
assert_almost_equal(elk,1.659623598610528,11)
assert_equal(special.ellipkm1(0.0), np.inf)
assert_equal(special.ellipkm1(1.0), pi/2)
assert_equal(special.ellipkm1(np.inf), 0.0)
assert_equal(special.ellipkm1(np.nan), np.nan)
assert_equal(special.ellipkm1(-1), np.nan)
assert_allclose(special.ellipk(-10), 0.7908718902387385)
def test_ellipkinc(self):
elkinc = special.ellipkinc(pi/2,.2)
elk = special.ellipk(0.2)
assert_almost_equal(elkinc,elk,15)
alpha = 20*pi/180
phi = 45*pi/180
m = sin(alpha)**2
elkinc = special.ellipkinc(phi,m)
assert_almost_equal(elkinc,0.79398143,8)
# From pg. 614 of A & S
assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipkinc(pi/2, 2), np.nan)
assert_equal(special.ellipkinc(0, 0.5), 0.0)
assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)
def test_ellipkinc_2(self):
# Regression test for gh-3550
# ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
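        # Collect ten consecutive doubles, one ulp apart, straddling mbad.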
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipkinc(phi, mvals)
assert_array_almost_equal_nulp(f, np.full_like(f, 1.0259330100195334), 1)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipkinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, np.full_like(f1, 5.1296650500976675), 2)
def test_ellipkinc_singular(self):
# ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
xlog = np.logspace(-300, -17, 25)
xlin = np.linspace(1e-17, 0.1, 25)
xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-14)
        assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-14)
assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)
def test_ellipe(self):
ele = special.ellipe(.2)
assert_almost_equal(ele,1.4890350580958529,8)
assert_equal(special.ellipe(0.0), pi/2)
assert_equal(special.ellipe(1.0), 1.0)
assert_equal(special.ellipe(-np.inf), np.inf)
assert_equal(special.ellipe(np.nan), np.nan)
assert_equal(special.ellipe(2), np.nan)
assert_allclose(special.ellipe(-10), 3.6391380384177689)
def test_ellipeinc(self):
eleinc = special.ellipeinc(pi/2,.2)
ele = special.ellipe(0.2)
assert_almost_equal(eleinc,ele,14)
# pg 617 of A & S
alpha, phi = 52*pi/180,35*pi/180
m = sin(alpha)**2
eleinc = special.ellipeinc(phi,m)
assert_almost_equal(eleinc, 0.58823065, 8)
assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipeinc(pi/2, 2), np.nan)
assert_equal(special.ellipeinc(0, 0.5), 0.0)
assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)
def test_ellipeinc_2(self):
# Regression test for gh-3550
# ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipeinc(phi, mvals)
assert_array_almost_equal_nulp(f, np.full_like(f, 0.84442884574781019), 2)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipeinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, np.full_like(f1, 3.3471442287390509), 4)
class TestErf:
def test_erf(self):
er = special.erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr = array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def _check_variant_func(self, func, other_func, rtol, atol=0):
np.random.seed(1234)
n = 10000
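        # Heavy-tailed Pareto samples with random signs exercise both tiny and very
        # large real and complex arguments.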
x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
z = x + 1j*y
with np.errstate(all='ignore'):
w = other_func(z)
w_real = other_func(x).real
mask = np.isfinite(w)
w = w[mask]
z = z[mask]
mask = np.isfinite(w_real)
w_real = w_real[mask]
x = x[mask]
# test both real and complex variants
assert_func_equal(func, w, z, rtol=rtol, atol=atol)
assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
def test_erfc_consistent(self):
self._check_variant_func(
cephes.erfc,
lambda z: 1 - cephes.erf(z),
rtol=1e-12,
atol=1e-14 # <- the test function loses precision
)
def test_erfcx_consistent(self):
self._check_variant_func(
cephes.erfcx,
lambda z: np.exp(z*z) * cephes.erfc(z),
rtol=1e-12
)
def test_erfi_consistent(self):
self._check_variant_func(
cephes.erfi,
lambda z: -1j * cephes.erf(1j*z),
rtol=1e-12
)
def test_dawsn_consistent(self):
self._check_variant_func(
cephes.dawsn,
lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
rtol=1e-12
)
def test_erf_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -1, 1]
assert_allclose(special.erf(vals), expected, rtol=1e-15)
def test_erfc_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, 2, 0]
assert_allclose(special.erfc(vals), expected, rtol=1e-15)
def test_erfcx_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, np.inf, 0]
assert_allclose(special.erfcx(vals), expected, rtol=1e-15)
def test_erfi_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -np.inf, np.inf]
assert_allclose(special.erfi(vals), expected, rtol=1e-15)
def test_dawsn_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -0.0, 0.0]
assert_allclose(special.dawsn(vals), expected, rtol=1e-15)
def test_wofz_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j]
assert_allclose(special.wofz(vals), expected, rtol=1e-15)
class TestEuler:
def test_euler(self):
eu0 = special.euler(0)
eu1 = special.euler(1)
eu2 = special.euler(2) # just checking segfaults
assert_allclose(eu0, [1], rtol=1e-15)
assert_allclose(eu1, [1, 0], rtol=1e-15)
assert_allclose(eu2, [1, 0, -1], rtol=1e-15)
eu24 = special.euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145,2404879675441,
370371188237525,69348874393137901,
15514534163557086905]
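        # MathWorld tabulates |E_0|, |E_2|, ..., |E_24|; rebuild the signed sequence
        # (odd-index Euler numbers are zero and the even-index ones alternate in sign).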
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
with np.errstate(all='ignore'):
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
assert_almost_equal(errmax, 0.0, 14)
class TestExp:
def test_exp2(self):
ex = special.exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = special.exp2(2.5)
exmrl = 2**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_exp10(self):
ex = special.exp10(2)
exrl = 10**2
assert_approx_equal(ex,exrl)
def test_exp10more(self):
exm = special.exp10(2.5)
exmrl = 10**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_expm1(self):
ex = (special.expm1(2),special.expm1(3),special.expm1(4))
exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
assert_array_almost_equal(ex,exrl,8)
def test_expm1more(self):
ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))
exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
assert_array_almost_equal(ex1,exrl1,8)
class TestFactorialFunctions:
def test_factorial(self):
# Some known values, float math
assert_array_almost_equal(special.factorial(0), 1)
assert_array_almost_equal(special.factorial(1), 1)
assert_array_almost_equal(special.factorial(2), 2)
assert_array_almost_equal([6., 24., 120.],
special.factorial([3, 4, 5], exact=False))
assert_array_almost_equal(special.factorial([[5, 3], [4, 3]]),
[[120, 6], [24, 6]])
# Some known values, integer math
assert_equal(special.factorial(0, exact=True), 1)
assert_equal(special.factorial(1, exact=True), 1)
assert_equal(special.factorial(2, exact=True), 2)
assert_equal(special.factorial(5, exact=True), 120)
assert_equal(special.factorial(15, exact=True), 1307674368000)
# ndarray shape is maintained
assert_equal(special.factorial([7, 4, 15, 10], exact=True),
[5040, 24, 1307674368000, 3628800])
assert_equal(special.factorial([[5, 3], [4, 3]], True),
[[120, 6], [24, 6]])
# object arrays
assert_equal(special.factorial(np.arange(-3, 22), True),
special.factorial(np.arange(-3, 22), False))
# int64 array
assert_equal(special.factorial(np.arange(-3, 15), True),
special.factorial(np.arange(-3, 15), False))
# int32 array
assert_equal(special.factorial(np.arange(-3, 5), True),
special.factorial(np.arange(-3, 5), False))
# Consistent output for n < 0
for exact in (True, False):
assert_array_equal(0, special.factorial(-3, exact))
assert_array_equal([1, 2, 0, 0],
special.factorial([1, 2, -5, -4], exact))
for n in range(0, 22):
# Compare all with math.factorial
correct = math.factorial(n)
assert_array_equal(correct, special.factorial(n, True))
assert_array_equal(correct, special.factorial([n], True)[0])
assert_allclose(float(correct), special.factorial(n, False))
assert_allclose(float(correct), special.factorial([n], False)[0])
# Compare exact=True vs False, scalar vs array
assert_array_equal(special.factorial(n, True),
special.factorial(n, False))
assert_array_equal(special.factorial([n], True),
special.factorial([n], False))
@pytest.mark.parametrize('x, exact', [
(1, True),
(1, False),
(np.array(1), True),
(np.array(1), False),
])
def test_factorial_0d_return_type(self, x, exact):
assert np.isscalar(special.factorial(x, exact=exact))
def test_factorial2(self):
assert_array_almost_equal([105., 384., 945.],
special.factorial2([7, 8, 9], exact=False))
assert_equal(special.factorial2(7, exact=True), 105)
def test_factorialk(self):
assert_equal(special.factorialk(5, 1, exact=True), 120)
assert_equal(special.factorialk(5, 3, exact=True), 10)
@pytest.mark.parametrize('x, exact', [
(np.nan, True),
(np.nan, False),
(np.array([np.nan]), True),
(np.array([np.nan]), False),
])
def test_nan_inputs(self, x, exact):
result = special.factorial(x, exact=exact)
assert_(np.isnan(result))
# GH-13122: special.factorial() argument should be an array of integers.
    # On Python 3.10, math.factorial() rejects floats.
# On Python 3.9, a DeprecationWarning is emitted.
# A numpy array casts all integers to float if the array contains a
# single NaN.
@pytest.mark.skipif(sys.version_info >= (3, 10),
reason="Python 3.10+ math.factorial() requires int")
def test_mixed_nan_inputs(self):
x = np.array([np.nan, 1, 2, 3, np.nan])
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Using factorial\\(\\) with floats is deprecated")
result = special.factorial(x, exact=True)
assert_equal(np.array([np.nan, 1, 2, 6, np.nan]), result)
result = special.factorial(x, exact=False)
assert_equal(np.array([np.nan, 1, 2, 6, np.nan]), result)
class TestFresnel:
def test_fresnel(self):
frs = array(special.fresnel(.5))
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)
def test_fresnel_inf1(self):
frs = special.fresnel(np.inf)
assert_equal(frs, (0.5, 0.5))
def test_fresnel_inf2(self):
frs = special.fresnel(-np.inf)
assert_equal(frs, (-0.5, -0.5))
# values from pg 329 Table 7.11 of A & S
# slightly corrected in 4th decimal place
def test_fresnel_zeros(self):
szo, czo = special.fresnel_zeros(5)
assert_array_almost_equal(szo,
array([2.0093+0.2885j,
2.8335+0.2443j,
3.4675+0.2185j,
4.0026+0.2009j,
4.4742+0.1877j]),3)
assert_array_almost_equal(czo,
array([1.7437+0.3057j,
2.6515+0.2529j,
3.3204+0.2240j,
3.8757+0.2047j,
4.3611+0.1907j]),3)
vals1 = special.fresnel(szo)[0]
vals2 = special.fresnel(czo)[1]
assert_array_almost_equal(vals1,0,14)
assert_array_almost_equal(vals2,0,14)
def test_fresnelc_zeros(self):
szo, czo = special.fresnel_zeros(6)
frc = special.fresnelc_zeros(6)
assert_array_almost_equal(frc,czo,12)
def test_fresnels_zeros(self):
szo, czo = special.fresnel_zeros(5)
frs = special.fresnels_zeros(5)
assert_array_almost_equal(frs,szo,12)
class TestGamma:
def test_gamma(self):
gam = special.gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = special.gammaln(3)
lngam = log(special.gamma(3))
assert_almost_equal(gamln,lngam,8)
def test_gammainccinv(self):
gccinv = special.gammainccinv(.5,.5)
gcinv = special.gammaincinv(.5,.5)
assert_almost_equal(gccinv,gcinv,8)
@with_special_errors
def test_gammaincinv(self):
y = special.gammaincinv(.4,.4)
x = special.gammainc(.4,y)
assert_almost_equal(x,0.4,1)
y = special.gammainc(10, 0.05)
x = special.gammaincinv(10, 2.5715803516000736e-20)
assert_almost_equal(0.05, x, decimal=10)
assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_almost_equal(11.0, x, decimal=10)
@with_special_errors
def test_975(self):
# Regression test for ticket #975 -- switch point in algorithm
        # check that things work OK at the switch point, at the floats immediately
        # adjacent to it, and a bit further away
pts = [0.25,
np.nextafter(0.25, 0), 0.25 - 1e-12,
np.nextafter(0.25, 1), 0.25 + 1e-12]
for xp in pts:
y = special.gammaincinv(.4, xp)
x = special.gammainc(0.4, y)
assert_allclose(x, xp, rtol=1e-12)
def test_rgamma(self):
rgam = special.rgamma(8)
rlgam = 1/special.gamma(8)
assert_almost_equal(rgam,rlgam,8)
def test_infinity(self):
assert_(np.isinf(special.gamma(-1)))
assert_equal(special.rgamma(-1), 0)
class TestHankel:
def test_negv1(self):
assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
def test_hankel1(self):
hank1 = special.hankel1(1,.1)
hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
assert_almost_equal(hank1,hankrl,8)
def test_negv1e(self):
assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
def test_hankel1e(self):
hank1e = special.hankel1e(1,.1)
hankrle = special.hankel1(1,.1)*exp(-.1j)
assert_almost_equal(hank1e,hankrle,8)
def test_negv2(self):
assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
def test_hankel2(self):
hank2 = special.hankel2(1,.1)
hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
assert_almost_equal(hank2,hankrl2,8)
def test_neg2e(self):
assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
    def test_hankel2e(self):
        hank2e = special.hankel2e(1,.1)
        hankrl2e = special.hankel2(1,.1)*exp(.1j)
        assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper:
def test_h1vp(self):
h1 = special.h1vp(1,.1)
h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
assert_almost_equal(h1,h1real,8)
def test_h2vp(self):
h2 = special.h2vp(1,.1)
h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
assert_almost_equal(h2,h2real,8)
def test_hyp0f1(self):
# scalar input
assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
# float input, expected values match mpmath
x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
1.37789689539747, 1.60373685288480])
assert_allclose(x, expected, rtol=1e-12)
# complex input
x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
assert_allclose(x, expected.astype(complex), rtol=1e-12)
# test broadcasting
x1 = [0.5, 1.5, 2.5]
x2 = [0, 1, 0.5]
x = special.hyp0f1(x1, x2)
expected = [1.0, 1.8134302039235093, 1.21482702689997]
assert_allclose(x, expected, rtol=1e-12)
x = special.hyp0f1(np.row_stack([x1] * 2), x2)
assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
assert_raises(ValueError, special.hyp0f1,
np.row_stack([x1] * 3), [0, 1])
def test_hyp0f1_gh5764(self):
# Just checks the point that failed; there's a more systematic
# test in test_mpmath
res = special.hyp0f1(0.8, 0.5 + 0.5*1J)
# The expected value was generated using mpmath
assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665)
def test_hyp1f1(self):
hyp1 = special.hyp1f1(.1,.1,.3)
assert_almost_equal(hyp1, 1.3498588075760032,7)
# test contributed by Moritz Deger (2008-05-29)
# https://github.com/scipy/scipy/issues/1186 (Trac #659)
        # reference data obtained from Mathematica, as rows [a, b, x, M(a, b, x)]:
# produced with test_hyp1f1.nb
ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
[2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
[-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
[5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
[-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
[4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
[1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
[2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
[1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
[1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
[-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
[8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
[1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
[-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
[2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
[2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
[6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
[-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
[2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
[8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
[1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
[-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
[2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
[-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
[3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
[-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
[2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
[-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
[1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
[-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
[-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
[-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
[-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
[3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
[6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
[-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
[2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
[1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
[1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
[1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
[1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
[-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
[-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
[7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
[2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
[-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
[-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
[-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
[-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
[-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
[2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
[5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
[-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
[-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
[5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
[-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
[1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
[2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
[5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
[-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
[1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
[6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
[1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
[-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
[-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
[-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
[-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
[1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
[2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
[-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
[2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
[-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
[2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
[1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
[-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
[7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
[2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
[8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
[-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
[-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
[-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
[-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
[-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
[-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
[6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
[-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
[-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
[6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
[-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
[7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
[-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
[5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
[3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
[-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
[2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
[2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
[-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
[-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
[-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
[-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
for a,b,c,expected in ref_data:
result = special.hyp1f1(a,b,c)
assert_(abs(expected - result)/expected < 1e-4)
def test_hyp1f1_gh2957(self):
hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
assert_almost_equal(hyp1, hyp2, 12)
def test_hyp1f1_gh2282(self):
hyp = special.hyp1f1(0.5, 1.5, -1000)
assert_almost_equal(hyp, 0.028024956081989643, 12)
def test_hyp2f1(self):
# a collection of special cases taken from AMS 55
values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
[0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
[1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
[3, 3.5, 1.5, 0.2**2,
0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
[-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
[3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
[3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
[5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
[4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
# and some others
# ticket #424
[1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
# negative integer a or b, with c-a-b integer and x > 0.9
[-2,3,1,0.95,0.715],
[2,-3,1,0.95,-0.007],
[-6,3,1,0.95,0.0000810625],
[2,-5,1,0.95,-0.000029375],
# huge negative integers
(10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
(10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
]
for i, (a, b, c, x, v) in enumerate(values):
cv = special.hyp2f1(a, b, c, x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_hyperu(self):
val1 = special.hyperu(1,0.1,100)
assert_almost_equal(val1,0.0098153,7)
a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
a,b = asarray(a), asarray(b)
z = 0.5
hypu = special.hyperu(a,b,z)
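        # Kummer's U in terms of M (A&S 13.1.3):
        # U(a, b, z) = pi/sin(pi*b) * [ M(a, b, z) / (Gamma(1+a-b)*Gamma(b))
        #                               - z**(1-b) * M(1+a-b, 2-b, z) / (Gamma(a)*Gamma(2-b)) ]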
hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
(special.gamma(1+a-b)*special.gamma(b)) -
z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
/ (special.gamma(a)*special.gamma(2-b)))
assert_array_almost_equal(hypu,hprl,12)
def test_hyperu_gh2287(self):
assert_almost_equal(special.hyperu(1, 1.5, 20.2),
0.048360918656699191, 12)
class TestBessel:
def test_itj0y0(self):
it0 = array(special.itj0y0(.2))
assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
def test_it2j0y0(self):
it2 = array(special.it2j0y0(.2))
assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
def test_negv_iv(self):
assert_equal(special.iv(3,2), special.iv(-3,2))
def test_j0(self):
oz = special.j0(.1)
ozr = special.jn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_j1(self):
o1 = special.j1(.1)
o1r = special.jn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_jn(self):
jnnr = special.jn(1,.2)
assert_almost_equal(jnnr,0.099500832639235995,8)
def test_negv_jv(self):
assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = special.jv(v, x)
assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
def test_negv_jve(self):
assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
def test_jve(self):
jvexp = special.jve(1,.2)
assert_almost_equal(jvexp,0.099500832639235995,8)
jvexp1 = special.jve(1,.2+1j)
z = .2+1j
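        # jve removes the exponential growth off the real axis:
        # jve(v, z) = jv(v, z) * exp(-|Im(z)|).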
jvexpr = special.jv(1,z)*exp(-abs(z.imag))
assert_almost_equal(jvexp1,jvexpr,8)
def test_jn_zeros(self):
jn0 = special.jn_zeros(0,5)
jn1 = special.jn_zeros(1,5)
assert_array_almost_equal(jn0,array([2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),4)
assert_array_almost_equal(jn1,array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),4)
jn102 = special.jn_zeros(102,5)
assert_allclose(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = special.jn_zeros(301,5)
assert_allclose(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
def test_jn_zeros_slow(self):
jn0 = special.jn_zeros(0, 300)
assert_allclose(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_allclose(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_allclose(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = special.jn_zeros(10, 300)
assert_allclose(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_allclose(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_allclose(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = special.jn_zeros(3010,5)
assert_allclose(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
jn = special.jn
def jnp(n, x):
return (jn(n-1,x) - jn(n+1,x))/2
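        # jnjnp_zeros interleaves zeros of J_n and J_n'; per the loop below, t == 0
        # marks a zero of J_n and t == 1 a zero of J_n' (computed via the recurrence
        # J_n'(x) = (J_{n-1}(x) - J_{n+1}(x)) / 2 above).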
for nt in range(1, 30):
z, n, m, t = special.jnjnp_zeros(nt)
for zz, nn, tt in zip(z, n, t):
if tt == 0:
assert_allclose(jn(nn, zz), 0, atol=1e-6)
elif tt == 1:
assert_allclose(jnp(nn, zz), 0, atol=1e-6)
else:
raise AssertionError("Invalid t return for nt=%d" % nt)
def test_jnp_zeros(self):
jnp = special.jnp_zeros(1,5)
assert_array_almost_equal(jnp, array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),4)
jnp = special.jnp_zeros(443,5)
assert_allclose(special.jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = special.jnyn_zeros(1,5)
assert_array_almost_equal(jnz,(array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),5)
def test_jvp(self):
jvprim = special.jvp(2,2)
jv0 = (special.jv(1,2)-special.jv(3,2))/2
assert_almost_equal(jvprim,jv0,10)
def test_k0(self):
ozk = special.k0(.1)
ozkr = special.kv(0,.1)
assert_almost_equal(ozk,ozkr,8)
def test_k0e(self):
ozke = special.k0e(.1)
ozker = special.kve(0,.1)
assert_almost_equal(ozke,ozker,8)
def test_k1(self):
o1k = special.k1(.1)
o1kr = special.kv(1,.1)
assert_almost_equal(o1k,o1kr,8)
def test_k1e(self):
o1ke = special.k1e(.1)
o1ker = special.kve(1,.1)
assert_almost_equal(o1ke,o1ker,8)
def test_jacobi(self):
a = 5*np.random.random() - 1
b = 5*np.random.random() - 1
P0 = special.jacobi(0,a,b)
P1 = special.jacobi(1,a,b)
P2 = special.jacobi(2,a,b)
P3 = special.jacobi(3,a,b)
assert_array_almost_equal(P0.c,[1],13)
assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
def test_kn(self):
kn1 = special.kn(0,.2)
assert_almost_equal(kn1,1.7527038555281462,8)
def test_negv_kv(self):
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
def test_kv0(self):
kv0 = special.kv(0,.2)
assert_almost_equal(kv0, 1.7527038555281462, 10)
def test_kv1(self):
kv1 = special.kv(1,0.2)
assert_almost_equal(kv1, 4.775972543220472, 10)
def test_kv2(self):
kv2 = special.kv(2,0.2)
assert_almost_equal(kv2, 49.51242928773287, 10)
def test_kn_largeorder(self):
assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
def test_kv_largearg(self):
assert_equal(special.kv(0, 1e19), 0)
def test_negv_kve(self):
assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
def test_kve(self):
kve1 = special.kve(0,.2)
kv1 = special.kv(0,.2)*exp(.2)
assert_almost_equal(kve1,kv1,8)
z = .2+1j
kve2 = special.kve(0,z)
kv2 = special.kv(0,z)*exp(z)
assert_almost_equal(kve2,kv2,8)
def test_kvp_v0n1(self):
z = 2.2
assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
def test_kvp_n1(self):
v = 3.
z = 2.2
xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
x = special.kvp(v,z, n=1)
assert_almost_equal(xc, x, 10) # this function (kvp) is broken
def test_kvp_n2(self):
v = 3.
z = 2.2
xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
x = special.kvp(v, z, n=2)
assert_almost_equal(xc, x, 10)
def test_y0(self):
oz = special.y0(.1)
ozr = special.yn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_y1(self):
o1 = special.y1(.1)
o1r = special.yn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_y0_zeros(self):
yo,ypo = special.y0_zeros(2)
zo,zpo = special.y0_zeros(2,complex=1)
all = r_[yo,zo]
allval = r_[ypo,zpo]
assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)
assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)
def test_y1_zeros(self):
y1 = special.y1_zeros(1)
assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
def test_y1p_zeros(self):
y1p = special.y1p_zeros(1,complex=1)
assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
def test_yn_zeros(self):
an = special.yn_zeros(4,2)
assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
an = special.yn_zeros(443,5)
assert_allclose(an, [450.13573091578090314, 463.05692376675001542,
472.80651546418663566, 481.27353184725625838,
488.98055964441374646], rtol=1e-15)
def test_ynp_zeros(self):
ao = special.ynp_zeros(0,2)
assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
ao = special.ynp_zeros(43,5)
assert_allclose(special.yvp(43, ao), 0, atol=1e-15)
ao = special.ynp_zeros(443,5)
assert_allclose(special.yvp(443, ao), 0, atol=1e-9)
def test_ynp_zeros_large_order(self):
ao = special.ynp_zeros(443,5)
assert_allclose(special.yvp(443, ao), 0, atol=1e-14)
def test_yn(self):
yn2n = special.yn(1,.2)
assert_almost_equal(yn2n,-3.3238249881118471,8)
def test_negv_yv(self):
assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
def test_yv(self):
yv2 = special.yv(1,.2)
assert_almost_equal(yv2,-3.3238249881118471,8)
def test_negv_yve(self):
assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
def test_yve(self):
yve2 = special.yve(1,.2)
assert_almost_equal(yve2,-3.3238249881118471,8)
yve2r = special.yv(1,.2+1j)*exp(-1)
yve22 = special.yve(1,.2+1j)
assert_almost_equal(yve22,yve2r,8)
def test_yvp(self):
yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
yvp1 = special.yvp(2,.2)
assert_array_almost_equal(yvp1,yvpr,10)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
v = [-120, -100.3, -20., -10., -1., -.5, 0., 1., 12.49, 120., 301]
z = [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5, 700.6, 1300,
10003]
yield from itertools.product(v, z)
# check half-integers; these are problematic points at least
# for cephes/iv
yield from itertools.product(0.5 + arange(-60, 60), [3.5])
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert_(np.abs(c2) >= 1e300, (v, z))
elif np.isnan(c1):
assert_(c2.imag != 0, (v, z))
else:
assert_allclose(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_allclose(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
@pytest.mark.xfail(platform.machine() == 'ppc64le',
reason="fails on ppc64le")
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
@pytest.mark.xfail(platform.machine() == 'ppc64le',
reason="fails on ppc64le")
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
skipper = lambda v, z: (abs(v) > 50)
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
def test_iv_cephes_vs_amos(self):
with np.errstate(all='ignore'):
self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
@pytest.mark.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
v[imsk] = v[imsk].astype(int)
with np.errstate(all='ignore'):
c1 = special.iv(v, x)
c2 = special.iv(v, x+0j)
# deal with differences in the inf and zero cutoffs
c1[abs(c1) > 1e300] = np.inf
c2[abs(c2) > 1e300] = np.inf
c1[abs(c1) < 1e-300] = 0
c2[abs(c2) < 1e-300] = 0
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
def test_kv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_allclose(special.jv(3, 4), 0.43017147387562193)
assert_allclose(special.jv(301, 1300), 0.0183487151115275)
assert_allclose(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_allclose(special.jv(-1, 1), -0.4400505857449335)
assert_allclose(special.jv(-2, 1), 0.1149034849319005)
assert_allclose(special.yv(-1, 1), 0.7812128213002887)
assert_allclose(special.yv(-2, 1), -1.650682606816255)
assert_allclose(special.iv(-1, 1), 0.5651591039924851)
assert_allclose(special.iv(-2, 1), 0.1357476697670383)
assert_allclose(special.kv(-1, 1), 0.6019072301972347)
assert_allclose(special.kv(-2, 1), 1.624838898635178)
assert_allclose(special.jv(-0.5, 1), 0.43109886801837607952)
assert_allclose(special.yv(-0.5, 1), 0.6713967071418031)
assert_allclose(special.iv(-0.5, 1), 1.231200214592967)
assert_allclose(special.kv(-0.5, 1), 0.4610685044478945)
# amos
assert_allclose(special.jv(-1, 1+0j), -0.4400505857449335)
assert_allclose(special.jv(-2, 1+0j), 0.1149034849319005)
assert_allclose(special.yv(-1, 1+0j), 0.7812128213002887)
assert_allclose(special.yv(-2, 1+0j), -1.650682606816255)
assert_allclose(special.iv(-1, 1+0j), 0.5651591039924851)
assert_allclose(special.iv(-2, 1+0j), 0.1357476697670383)
assert_allclose(special.kv(-1, 1+0j), 0.6019072301972347)
assert_allclose(special.kv(-2, 1+0j), 1.624838898635178)
assert_allclose(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_allclose(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_allclose(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_allclose(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_allclose(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_allclose(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_allclose(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_allclose(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_allclose(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_allclose(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_allclose(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_allclose(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_allclose(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
assert_allclose(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_gh_7909(self):
assert_(special.kv(1.5, 0) == np.inf)
assert_(special.kve(1.5, 0) == np.inf)
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_allclose(special.iv(1, 700), 1.528500390233901e302)
assert_allclose(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_allclose(special.iv(-0.5, 1), 1.231200214592967)
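    # Reference series used by iv_series() below:
    #   I_v(z) = sum_{k >= 0} (z/2)**(v + 2*k) / (k! * Gamma(v + k + 1)).
    # The terms are accumulated in log space for numerical stability, and the
    # returned error estimate combines rounding error of the largest term
    # with ten times the magnitude of the last (truncation) term.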
def iv_series(self, v, z, n=200):
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_allclose(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_allclose(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_allclose(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre:
def test_laguerre(self):
lag0 = special.laguerre(0)
lag1 = special.laguerre(1)
lag2 = special.laguerre(2)
lag3 = special.laguerre(3)
lag4 = special.laguerre(4)
lag5 = special.laguerre(5)
assert_array_almost_equal(lag0.c,[1],13)
assert_array_almost_equal(lag1.c,[-1,1],13)
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
def test_genlaguerre(self):
k = 5*np.random.random() - 0.9
lag0 = special.genlaguerre(0,k)
lag1 = special.genlaguerre(1,k)
lag2 = special.genlaguerre(2,k)
lag3 = special.genlaguerre(3,k)
assert_equal(lag0.c,[1])
assert_equal(lag1.c,[-1,k+1])
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
# Base polynomials come from Abramowitz and Stegun
class TestLegendre:
def test_legendre(self):
leg0 = special.legendre(0)
leg1 = special.legendre(1)
leg2 = special.legendre(2)
leg3 = special.legendre(3)
leg4 = special.legendre(4)
leg5 = special.legendre(5)
assert_equal(leg0.c, [1])
assert_equal(leg1.c, [1,0])
assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13)
assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0)
class TestLambda:
def test_lmbda(self):
lam = special.lmbda(1,.1)
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p:
def test_log1p(self):
l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
l1prl = (log(11), log(12), log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
class TestLegendreFunctions:
def test_clpmn(self):
z = 0.5+0.3j
clp = special.clpmn(2, 2, z, 3)
assert_array_almost_equal(clp,
(array([[1.0000, z, 0.5*(3*z*z-1)],
[0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
[0.0000, 0.0000, 3*(z*z-1)]]),
array([[0.0000, 1.0000, 3*z],
[0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
[0.0000, 0.0000, 6*z]])),
7)
def test_clpmn_close_to_real_2(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x),
special.lpmv(m, n, x)]),
7)
def test_clpmn_close_to_real_3(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
7)
def test_clpmn_across_unit_circle(self):
eps = 1e-7
m = 1
n = 1
x = 1j
for type in [2, 3]:
assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
def test_inf(self):
for z in (1, -1):
for n in range(4):
for m in range(1, n):
lp = special.clpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
lp = special.lpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
def test_deriv_clpmn(self):
# data inside and outside of the unit circle
zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
1+1j, -1+1j, -1-1j, 1-1j]
m = 2
n = 3
for type in [2, 3]:
for z in zvals:
for h in [1e-3, 1e-3j]:
approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
- special.clpmn(m, n, z-0.5*h, type)[0])/h
assert_allclose(special.clpmn(m, n, z, type)[1],
approx_derivative,
rtol=1e-4)
def test_lpmn(self):
lp = special.lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([[1.00000,
0.50000,
-0.12500]]),
array([[0.00000,
1.00000,
1.50000]])),4)
def test_lpn(self):
lpnf = special.lpn(2,.5)
assert_array_almost_equal(lpnf,(array([1.00000,
0.50000,
-0.12500]),
array([0.00000,
1.00000,
1.50000])),4)
def test_lpmv(self):
lp = special.lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,7)
lp = special.lpmv(0,40,.001)
assert_almost_equal(lp,0.1252678976534484,7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
with np.errstate(all='ignore'):
lp = special.lpmv(-1,-1,.001)
assert_(lp != 0 or np.isnan(lp))
def test_lqmn(self):
lqmnf = special.lqmn(0,2,.5)
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_gt1(self):
"""algorithm for real arguments changes at 1.0001
test against analytical result for m=2, n=1
"""
x0 = 1.0001
delta = 0.00002
for x in (x0-delta, x0+delta):
lq = special.lqmn(2, 1, x)[0][-1, -1]
expected = 2/(x*x-1)
assert_almost_equal(lq, expected)
def test_lqmn_shape(self):
a, b = special.lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = special.lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
array([1.3333, 1.216, -0.8427])),4)
class TestMathieu:
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
special.mathieu_even_coef(2,5)
        # Q not defined; broken, and cannot figure out the proper reporting order
def test_mathieu_odd_coef(self):
# same problem as above
pass
class TestFresnelIntegral:
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq:
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([-0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder:
def test_pbdn_seq(self):
pb = special.pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
def test_pbdv(self):
special.pbdv(1,.2)
1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
assert_allclose(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_allclose(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_allclose(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma:
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = special.polygamma(2,1)
poly3 = special.polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
# Test polygamma(0, x) == psi(x)
x = [2, 3, 1.1e14]
assert_almost_equal(special.polygamma(0, x), special.psi(x))
# Test broadcasting
n = [0, 1, 2]
x = [0.5, 1.5, 2.5]
expected = [-1.9635100260214238, 0.93480220054467933,
-0.23620405164172739]
assert_almost_equal(special.polygamma(n, x), expected)
expected = np.row_stack([expected]*2)
assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
expected)
assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
expected)
class TestProCvSeq:
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi:
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian:
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati:
def test_riccati_jn(self):
N, x = 2, 0.2
S = np.empty((N, N))
for n in range(N):
j = special.spherical_jn(n, x)
jp = special.spherical_jn(n, x, derivative=True)
S[0,n] = x*j
S[1,n] = x*jp + j
assert_array_almost_equal(S, special.riccati_jn(n, x), 8)
def test_riccati_yn(self):
N, x = 2, 0.2
C = np.empty((N, N))
for n in range(N):
y = special.spherical_yn(n, x)
yp = special.spherical_yn(n, x, derivative=True)
C[0,n] = x*y
C[1,n] = x*yp + y
assert_array_almost_equal(C, special.riccati_yn(n, x), 8)
class TestRound:
def test_round(self):
rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
def test_sph_harm():
# Tests derived from tables in
# https://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
assert_array_almost_equal(sh(0,0,0,0),
0.5/sqrt(pi))
assert_array_almost_equal(sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi)) *
(sin(pi/4))**2.)
assert_array_almost_equal(sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
assert_array_almost_equal(sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi)) *
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
assert_array_almost_equal(sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi)) *
exp(0+2.*pi/4.*1j) *
sin(pi/3.)**2. *
(7.*cos(pi/3.)**2.-1))
assert_array_almost_equal(sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi)) *
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
def test_sph_harm_ufunc_loop_selection():
# see https://github.com/scipy/scipy/issues/4895
dt = np.dtype(np.complex128)
assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt)
assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt)
class TestStruve:
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
                assert_allclose(special.struve(v, z), value, rtol=0, atol=err,
                                err_msg=(v, z))
def test_some_values(self):
assert_allclose(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_allclose(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_allclose(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_allclose(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_allclose(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_allclose(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_allclose(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
def test_ch2_inf():
assert_equal(special.chdtr(0.7,np.inf), 1.0)
def test_chi2c_smalldf():
assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
def test_chi2_inv_smalldf():
assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
def test_agm_simple():
rtol = 1e-13
# Gauss's constant
assert_allclose(1/special.agm(1, np.sqrt(2)), 0.834626841674073186,
rtol=rtol)
# These values were computed using Wolfram Alpha, with the
# function ArithmeticGeometricMean[a, b].
agm13 = 1.863616783244897
agm15 = 2.604008190530940
agm35 = 3.936235503649555
assert_allclose(special.agm([[1], [3]], [1, 3, 5]),
[[1, agm13, agm15],
[agm13, 3, agm35]], rtol=rtol)
# Computed by the iteration formula using mpmath,
# with mpmath.mp.prec = 1000:
agm12 = 1.4567910310469068
assert_allclose(special.agm(1, 2), agm12, rtol=rtol)
assert_allclose(special.agm(2, 1), agm12, rtol=rtol)
assert_allclose(special.agm(-1, -2), -agm12, rtol=rtol)
assert_allclose(special.agm(24, 6), 13.458171481725614, rtol=rtol)
assert_allclose(special.agm(13, 123456789.5), 11111458.498599306,
rtol=rtol)
assert_allclose(special.agm(1e30, 1), 2.229223055945383e+28, rtol=rtol)
assert_allclose(special.agm(1e-22, 1), 0.030182566420169886, rtol=rtol)
assert_allclose(special.agm(1e150, 1e180), 2.229223055945383e+178,
rtol=rtol)
assert_allclose(special.agm(1e180, 1e-150), 2.0634722510162677e+177,
rtol=rtol)
assert_allclose(special.agm(1e-150, 1e-170), 3.3112619670463756e-152,
rtol=rtol)
fi = np.finfo(1.0)
assert_allclose(special.agm(fi.tiny, fi.max), 1.9892072050015473e+305,
rtol=rtol)
assert_allclose(special.agm(0.75*fi.max, fi.max), 1.564904312298045e+308,
rtol=rtol)
assert_allclose(special.agm(fi.tiny, 3*fi.tiny), 4.1466849866735005e-308,
rtol=rtol)
# zero, nan and inf cases.
assert_equal(special.agm(0, 0), 0)
assert_equal(special.agm(99, 0), 0)
assert_equal(special.agm(-1, 10), np.nan)
assert_equal(special.agm(0, np.inf), np.nan)
assert_equal(special.agm(np.inf, 0), np.nan)
assert_equal(special.agm(0, -np.inf), np.nan)
assert_equal(special.agm(-np.inf, 0), np.nan)
assert_equal(special.agm(np.inf, -np.inf), np.nan)
assert_equal(special.agm(-np.inf, np.inf), np.nan)
assert_equal(special.agm(1, np.nan), np.nan)
assert_equal(special.agm(np.nan, -1), np.nan)
assert_equal(special.agm(1, np.inf), np.inf)
assert_equal(special.agm(np.inf, 1), np.inf)
assert_equal(special.agm(-1, -np.inf), -np.inf)
assert_equal(special.agm(-np.inf, -1), -np.inf)
def test_legacy():
# Legacy behavior: truncating arguments to integers
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "floating point number truncated to an integer")
assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
@with_special_errors
def test_error_raising():
assert_raises(special.SpecialFunctionError, special.iv, 1, 1e99j)
def test_xlogy():
def xfunc(x, y):
with np.errstate(invalid='ignore'):
if x == 0 and not np.isnan(y):
return x
else:
return x*np.log(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
def test_xlog1py():
def xfunc(x, y):
with np.errstate(invalid='ignore'):
if x == 0 and not np.isnan(y):
return x
else:
return x * np.log1p(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
(1, 1e-30)], dtype=float)
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
def test_entr():
def xfunc(x):
if x < 0:
return -np.inf
else:
return -special.xlogy(x, x)
values = (0, 0.5, 1.0, np.inf)
signs = [-1, 1]
arr = []
for sgn, v in itertools.product(signs, values):
arr.append(sgn * v)
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z)
assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13)
def test_kl_div():
def xfunc(x, y):
if x < 0 or y < 0 or (y == 0 and x != 0):
# extension of natural domain to preserve convexity
return np.inf
elif np.isposinf(x) or np.isposinf(y):
# limits within the natural domain
return np.inf
elif x == 0:
return y
else:
return special.xlogy(x, x/y) - x + y
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13)
def test_rel_entr():
def xfunc(x, y):
if x > 0 and y > 0:
return special.xlogy(x, x/y)
elif x == 0 and y >= 0:
return 0
else:
return np.inf
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13)
def test_huber():
assert_equal(special.huber(-1, 1.5), np.inf)
assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))
def xfunc(delta, r):
if delta < 0:
return np.inf
elif np.abs(r) < delta:
return 0.5 * np.square(r)
else:
return delta * (np.abs(r) - 0.5 * delta)
z = np.random.randn(10, 2)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber():
def xfunc(delta, r):
if delta < 0:
return np.inf
elif (not delta) or (not r):
return 0
else:
return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)
z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13)
| bsd-3-clause |
leki75/ansible | lib/ansible/utils/module_docs_fragments/postgres.py | 143 | 2774 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Postgres documentation fragment
DOCUMENTATION = """
options:
login_user:
description:
- The username used to authenticate with
required: false
default: postgres
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: null
login_unix_socket:
description:
- Path to a Unix domain socket for local connections
required: false
default: null
port:
description:
- Database port to connect to.
required: false
default: 5432
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes.
- Default of C(prefer) matches libpq default.
required: false
default: prefer
choices: [disable, allow, prefer, require, verify-ca, verify-full]
version_added: '2.3'
ssl_rootcert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
required: false
default: null
version_added: '2.3'
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
and C(python-psycopg2) packages on the remote host before using this module.
- The ssl_rootcert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3.
requirements: [ psycopg2 ]
"""
| gpl-3.0 |
dscorbett/pygments | pygments/lexers/graph.py | 4 | 2756 | # -*- coding: utf-8 -*-
"""
pygments.lexers.graph
~~~~~~~~~~~~~~~~~~~~~
Lexers for graph query languages.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, this
from pygments.token import Keyword, Punctuation, Comment, Operator, Name,\
String, Number, Whitespace
__all__ = ['CypherLexer']
class CypherLexer(RegexLexer):
"""
For `Cypher Query Language
<https://neo4j.com/docs/developer-manual/3.3/cypher/>`_
For the Cypher version in Neo4j 3.3
.. versionadded:: 2.0
"""
name = 'Cypher'
aliases = ['cypher']
filenames = ['*.cyp', '*.cypher']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
include('comment'),
include('keywords'),
include('clauses'),
include('relations'),
include('strings'),
include('whitespace'),
include('barewords'),
],
'comment': [
(r'^.*//.*\n', Comment.Single),
],
'keywords': [
(r'(create|order|match|limit|set|skip|start|return|with|where|'
r'delete|foreach|not|by|true|false)\b', Keyword),
],
'clauses': [
# based on https://neo4j.com/docs/cypher-refcard/3.3/
(r'(all|any|as|asc|ascending|assert|call|case|create|'
r'create\s+index|create\s+unique|delete|desc|descending|'
r'distinct|drop\s+constraint\s+on|drop\s+index\s+on|end|'
r'ends\s+with|fieldterminator|foreach|in|is\s+node\s+key|'
r'is\s+null|is\s+unique|limit|load\s+csv\s+from|match|merge|none|'
r'not|null|on\s+match|on\s+create|optional\s+match|order\s+by|'
r'remove|return|set|skip|single|start|starts\s+with|then|union|'
r'union\s+all|unwind|using\s+periodic\s+commit|yield|where|when|'
r'with)\b', Keyword),
],
'relations': [
(r'(-\[)(.*?)(\]->)', bygroups(Operator, using(this), Operator)),
(r'(<-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
(r'(-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
(r'-->|<--|\[|\]', Operator),
(r'<|>|<>|=|<=|=>|\(|\)|\||:|,|;', Punctuation),
(r'[.*{}]', Punctuation),
],
'strings': [
(r'"(?:\\[tbnrf\'"\\]|[^\\"])*"', String),
(r'`(?:``|[^`])+`', Name.Variable),
],
'whitespace': [
(r'\s+', Whitespace),
],
'barewords': [
(r'[a-z]\w*', Name),
(r'\d+', Number),
],
}
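# Illustrative usage sketch (added for clarity, not part of the original
# module): running this file directly highlights a small Cypher query.
# HtmlFormatter is just one of several Pygments formatters that could be used.
if __name__ == "__main__":
    from pygments import highlight
    from pygments.formatters import HtmlFormatter

    _sample = 'MATCH (n:Person)-[:KNOWS]->(m) WHERE n.name = "Ann" RETURN m LIMIT 5'
    print(highlight(_sample, CypherLexer(), HtmlFormatter()))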
| bsd-2-clause |
Padavan/marinade | app/main.py | 1 | 9895 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSlot,SIGNAL,SLOT
import time, threading
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.initUI()
def initUI(self):
QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10))
self.setToolTip('This is <b>QWidget</b> widget')
#self.resize(400,400)
self.centralwidget = QtGui.QWidget(self)
#----------------------------------------------------------
self.tmr=Timer()
self.btn=QtGui.QPushButton('Start', self.centralwidget)
self.btn.setToolTip('This is a <b>QPushButton</b> widget')
self.btn.resize(self.btn.sizeHint())
self.btn.clicked.connect(self.toggle)
#self.timeText = QtGui.QLabel(self.centralwidget)
#self.timeText.setText("00:00")
self.timer=QtCore.QTimer()
self.timer.timeout.connect(self.countdown)
#self.timer.start(1000)
self.e=threading.Thread(target=self.timer.start(1000))
self.e.start()
#self.thread = QtCore.QThread()
#self.thread.timer.start(100)
#self.value=60
self.now=self.tmr.dumb()
#self.timer2=threading.Thread(target=self.tmr.start)
self.lcdNumber = QtGui.QLCDNumber(self.centralwidget)
self.lcdNumber.display(self.now)
#self.lcdNumber.connect(self.timer, SIGNAL(timeout()), self, SLOT(update()))
#self.lcdNumber.connect(self.timer,SIGNAL("timeout()"),self.lcdNumber,SLOT("count()"))
#----------------------------------------------------------
#"%02d:%02d" % divmod(self.now, 60)
self.hbox=QtGui.QHBoxLayout()
self.hbox.addStretch(1)
self.hbox.addWidget(self.btn)
self.hbox.addStretch(1)
self.vbox=QtGui.QVBoxLayout(self.centralwidget)
self.vbox.addWidget(self.lcdNumber)
self.vbox.addLayout(self.hbox)
self.setCentralWidget(self.centralwidget)
#qbtn = QtGui.QPushButton('Quit', self)
# #qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)
#qbtn.resize(qbtn.sizeHint())
#qbtn.move(50, 100)
#self.vbox = QtGui.QVBoxLayout()
#self.vbox.addWidget(self.timeText)
#self.vbox.addWidget(self.btn)
#self.setLayout(self.vbox)
exitAction = QtGui.QAction(QtGui.QIcon('exit.png'), '&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(QtGui.qApp.quit)
#menubar=self.menuBar()
#fileMenu = menubar.addMenu('&File')
#fileMenu.addAction(exitAction)
self.statusBar().showMessage('Ready')
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('Statusbar')
self.setWindowIcon(QtGui.QIcon('image/marisa_small.gif'))
self.show()
##------------------------------------------------TOOLBAR-------------------------------
self.menu = QtGui.QMenu()
self.startAction = QtGui.QAction("Start", self)
self.menu.addAction(self.startAction)
self.startAction.setShortcut('Ctrl+S')
self.startAction.triggered.connect(self.dummy)
self.stopAction = QtGui.QAction("Stop", self)
self.menu.addAction(self.stopAction)
self.optAction = QtGui.QAction("Preference", self)
self.menu.addAction(self.optAction)
self.optAction.triggered.connect(self.callPreferences)
#self.aboutWindowStart=AboutWindow()
self.aboutAction = QtGui.QAction("About", self)
self.menu.addAction(self.aboutAction)
self.aboutAction.triggered.connect(self.callWindow)
self.closeAction = QtGui.QAction("Quit", self)
self.menu.addAction(self.closeAction)
self.closeAction.setShortcut('Ctrl+Q')
self.closeAction.triggered.connect(QtGui.qApp.quit)
self.left_spacer = QtGui.QWidget()
self.left_spacer.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
self.tool_button = QtGui.QToolButton()
self.tool_button.setMenu(self.menu)
self.tool_button.setPopupMode(QtGui.QToolButton.InstantPopup)
self.tool_button.setToolTip("shittooltip")
self.tool_button.setFixedWidth(50)
self.tool_button.setIcon(QtGui.QIcon('image/gears.svg'))
self.tool_button_action = QtGui.QWidgetAction(self)
self.tool_button_action.setDefaultWidget(self.tool_button)
#exitAction = QtGui.QAction(QtGui.QIcon('image/marisa_small.gif'), 'Exit', self)
#exitAction.setShortcut('Ctrl+Q')
#exitAction.triggered.connect(QtGui.qApp.quit)
#exitAction.triggered.connect()
self.toolbar = self.addToolBar('Shit')
self.toolbar.addWidget(self.left_spacer)
self.toolbar.addAction(self.tool_button_action)
self.setGeometry(300, 300, 300, 300)
self.setWindowTitle('Marinade')
self.show()
def dummy(self):
print "dummy fun"
def callWindow(self):
print "callWindow method call"
self.aboutWindowStart=AboutWindow()
#self.aboutWindowStart.show()
def count(self):
self.display(self.value)
self.value = self.value-1
def callPreferences(self):
print "call preference method"
self.prefWindowStart=PrefWindow()
def countdown(self):
self.now=self.tmr.dumb()
print "countdoun %d" % self.now
#self.now2="%02d:%02d" % divmod(self.now, 60)
self.lcdNumber.display("%02d:%02d" % divmod(self.now, 60))
def toggle(self):
sender=self.sender()
if self.btn.text()=="Start":
self.statusBar().showMessage(sender.text() + ' was pressed')
self.btn.setText("Stop")
#self.tmr.start()
self.w=threading.Thread(target=self.tmr.start)
self.w.start()
else:
self.statusBar().showMessage(sender.text() + ' was pressed')
self.btn.setText("Start")
self.tmr.stop()
class AboutWindow(QtGui.QWidget):
def __init__(self):
super(AboutWindow, self).__init__()
self.initUI()
def initUI(self):
self.mainWidget = QtGui.QWidget(parent=None)
self.nameLabel = QtGui.QLabel('marinade', self.mainWidget)
self.nameLabel.setText("Marinade")
self.nameLabel.setFont(QtGui.QFont('SansSerif', 14))
#self.nameLabel.setAlignment(QtCore.Qt.AlignLeft)
self.versionLabel = QtGui.QLabel(self.mainWidget)
self.versionLabel.setText("Version 0.1")
self.pictureLabel = QtGui.QLabel(self.mainWidget)
self.pictureLabel.setPixmap(QtGui.QPixmap("image/marisa_small.gif"))
self.grid=QtGui.QGridLayout()
self.grid.setSpacing(10)
self.grid.addWidget(self.nameLabel, 0, 1, 0, 2 )
self.grid.addWidget(self.versionLabel, 1, 1, 1, 2)
self.grid.addWidget(self.pictureLabel, 0, 0, 0, 1)
self.group = QtGui.QGroupBox(self.mainWidget)
self.group.setTitle("MIT Public License")
self.license = QtGui.QTextEdit(self.group)
self.text=open('LICENSE').read()
self.license.setPlainText(self.text)
self.license.setReadOnly(True)
self.okbutton = QtGui.QPushButton("OK", self.mainWidget)
self.okbutton.clicked.connect(self.mainWidget.close)
self.hbox2=QtGui.QHBoxLayout()
self.hbox2.addStretch(1)
self.hbox2.addWidget(self.okbutton)
self.vbox2=QtGui.QVBoxLayout()
self.vbox2.addWidget(self.license)
self.group.setLayout(self.vbox2)
self.vbox=QtGui.QVBoxLayout()
self.vbox.addLayout(self.grid)
self.vbox.addWidget(self.group)
self.vbox.addLayout(self.hbox2)
self.mainWidget.setLayout(self.vbox)
self.mainWidget.resize(400, 400)
self.mainWidget.setWindowTitle('About')
self.mainWidget.setWindowIcon(QtGui.QIcon('image/marisa_small.gif'))
self.mainWidget.show()
#def okClicked(self):
#print "ok clicked"
#self.mainWidget.close()
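# Countdown helper used by MainWindow: start() loops once per second until the
# counter reaches zero or stop() is called (so the GUI runs it on a worker
# thread), stop() also resets the counter, and dumb() reports the ticks left.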
class Timer(object):
def __init__(self):
object.__init__(self)
self.minutes=5
self.count=self.minutes*1
self.is_state=False
def start(self):
self.is_state=False
while self.count > 0:
self.count-=1
time.sleep(1)
print self.is_state
print self.count
if self.is_state:
break
def stop(self):
self.minutes=5
self.count=5
self.is_state=True
def dumb(self):
return self.count
class PrefWindow(QtGui.QWidget):
def __init__(self):
super(PrefWindow, self).__init__()
self.initUI()
def initUI(self):
self.mainWidget = QtGui.QWidget(parent=None)
self.mainWidget.resize(400, 400)
self.mainWidget.setWindowTitle('Preferences')
self.mainWidget.setWindowIcon(QtGui.QIcon('image/marisa_small.gif'))
self.mainWidget.show()
def main():
app=QtGui.QApplication(sys.argv)
ex=MainWindow()
sys.exit(app.exec_())
if __name__=='__main__':
main()
| mit |
Ambuj-UF/ConCat-1.0 | src/Utils/Bio/Medline/__init__.py | 4 | 5384 | # Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to work with Medline from the NCBI.
Classes:
- Record A dictionary holding Medline data.
Functions:
- read Reads one Medline record
- parse Allows you to iterate over a bunch of Medline records
"""
__docformat__ = "restructuredtext en"
class Record(dict):
"""A dictionary holding information from a Medline record.
All data are stored under the mnemonic appearing in the Medline
file. These mnemonics have the following interpretations:
    ========= ==============================
    Mnemonic  Description
    --------- ------------------------------
    AB        Abstract
    CI        Copyright Information
    AD        Affiliation
    IRAD      Investigator Affiliation
    AID       Article Identifier
    AU        Author
    FAU       Full Author
    CN        Corporate Author
    DCOM      Date Completed
    DA        Date Created
    LR        Date Last Revised
    DEP       Date of Electronic Publication
    DP        Date of Publication
    EDAT      Entrez Date
    GS        Gene Symbol
    GN        General Note
    GR        Grant Number
    IR        Investigator Name
    FIR       Full Investigator Name
    IS        ISSN
    IP        Issue
    TA        Journal Title Abbreviation
    JT        Journal Title
    LA        Language
    LID       Location Identifier
    MID       Manuscript Identifier
    MHDA      MeSH Date
    MH        MeSH Terms
    JID       NLM Unique ID
    RF        Number of References
    OAB       Other Abstract
    OCI       Other Copyright Information
    OID       Other ID
    OT        Other Term
    OTO       Other Term Owner
    OWN       Owner
    PG        Pagination
    PS        Personal Name as Subject
    FPS       Full Personal Name as Subject
    PL        Place of Publication
    PHST      Publication History Status
    PST       Publication Status
    PT        Publication Type
    PUBM      Publishing Model
    PMC       PubMed Central Identifier
    PMID      PubMed Unique Identifier
    RN        Registry Number/EC Number
    NM        Substance Name
    SI        Secondary Source ID
    SO        Source
    SFM       Space Flight Mission
    STAT      Status
    SB        Subset
    TI        Title
    TT        Transliterated Title
    VI        Volume
    CON       Comment on
    CIN       Comment in
    EIN       Erratum in
    EFR       Erratum for
    CRI       Corrected and Republished in
    CRF       Corrected and Republished from
    PRIN      Partial retraction in
    PROF      Partial retraction of
    RPI       Republished in
    RPF       Republished from
    RIN       Retraction in
    ROF       Retraction of
    UIN       Update in
    UOF       Update of
    SPIN      Summary for patients in
    ORI       Original report in
    ========= ==============================
"""
def parse(handle):
"""Read Medline records one by one from the handle.
    The handle is either a Medline file, a file-like object, or a list
of lines describing one or more Medline records.
Typical usage::
from Bio import Medline
with open("mymedlinefile") as handle:
records = Medline.parse(handle)
            for record in records:
print(record['TI'])
"""
# TODO - Turn that into a working doctest
# These keys point to string values
textkeys = ("ID", "PMID", "SO", "RF", "NI", "JC", "TA", "IS", "CY", "TT",
"CA", "IP", "VI", "DP", "YR", "PG", "LID", "DA", "LR", "OWN",
"STAT", "DCOM", "PUBM", "DEP", "PL", "JID", "SB", "PMC",
"EDAT", "MHDA", "PST", "AB", "AD", "EA", "TI", "JT")
handle = iter(handle)
key = ""
record = Record()
for line in handle:
line = line.rstrip()
        if line[:6] == "      ":  # continuation line
if key == "MH":
# Multi-line MESH term, want to append to last entry in list
record[key][-1] += line[5:] # including space using line[5:]
else:
record[key].append(line[6:])
elif line:
key = line[:4].rstrip()
if key not in record:
record[key] = []
record[key].append(line[6:])
elif record:
# Join each list of strings into one string.
for key in record:
if key in textkeys:
record[key] = " ".join(record[key])
yield record
record = Record()
if record: # catch last one
for key in record:
if key in textkeys:
record[key] = " ".join(record[key])
yield record
def read(handle):
"""Read a single Medline record from the handle.
    The handle is either a Medline file, a file-like object, or a list
of lines describing a Medline record.
Typical usage:
>>> from Bio import Medline
>>> with open("mymedlinefile") as handle:
... record = Medline.read(handle)
... print(record['TI'])
"""
# TODO - Turn that into a working doctest
records = parse(handle)
return next(records)
| gpl-2.0 |
yanheven/horizon | openstack_dashboard/dashboards/project/loadbalancers/tables.py | 16 | 13632 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils import http
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
class AddPoolLink(tables.LinkAction):
name = "addpool"
verbose_name = _("Add Pool")
url = "horizon:project:loadbalancers:addpool"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool"),)
class AddVipLink(tables.LinkAction):
name = "addvip"
verbose_name = _("Add VIP")
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:addvip",
kwargs={'pool_id': pool.id})
return base_url
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
class AddMemberLink(tables.LinkAction):
name = "addmember"
verbose_name = _("Add Member")
url = "horizon:project:loadbalancers:addmember"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_member"),)
class AddMonitorLink(tables.LinkAction):
name = "addmonitor"
verbose_name = _("Add Monitor")
url = "horizon:project:loadbalancers:addmonitor"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_health_monitor"),)
class DeleteVipLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletevip"
policy_rules = (("network", "delete_vip"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete VIP",
u"Delete VIPs",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of VIP",
u"Scheduled deletion of VIPs",
count
)
def allowed(self, request, datum=None):
if datum and not datum.vip_id:
return False
return True
class DeletePoolLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletepool"
policy_rules = (("network", "delete_pool"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Pool",
u"Delete Pools",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Pool",
u"Scheduled deletion of Pools",
count
)
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
class DeleteMonitorLink(policy.PolicyTargetMixin,
tables.DeleteAction):
name = "deletemonitor"
policy_rules = (("network", "delete_health_monitor"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Monitor",
u"Delete Monitors",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Monitor",
u"Scheduled deletion of Monitors",
count
)
class DeleteMemberLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletemember"
policy_rules = (("network", "delete_member"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Member",
u"Delete Members",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Member",
u"Scheduled deletion of Members",
count
)
class UpdatePoolLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatepool"
verbose_name = _("Edit Pool")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_pool"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:updatepool",
kwargs={'pool_id': pool.id})
return base_url
class UpdateVipLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatevip"
verbose_name = _("Edit VIP")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:updatevip",
kwargs={'vip_id': pool.vip_id})
return base_url
def allowed(self, request, datum=None):
if datum and not datum.vip_id:
return False
return True
class UpdateMemberLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatemember"
verbose_name = _("Edit Member")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_member"),)
def get_link_url(self, member):
base_url = reverse("horizon:project:loadbalancers:updatemember",
kwargs={'member_id': member.id})
return base_url
class UpdateMonitorLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatemonitor"
verbose_name = _("Edit Monitor")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_health_monitor"),)
def get_link_url(self, monitor):
base_url = reverse("horizon:project:loadbalancers:updatemonitor",
kwargs={'monitor_id': monitor.id})
return base_url
def get_vip_link(pool):
if pool.vip_id:
return reverse("horizon:project:loadbalancers:vipdetails",
args=(http.urlquote(pool.vip_id),))
else:
return None
class AddPMAssociationLink(policy.PolicyTargetMixin,
tables.LinkAction):
name = "addassociation"
verbose_name = _("Associate Monitor")
url = "horizon:project:loadbalancers:addassociation"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool_health_monitor"),)
def allowed(self, request, datum=None):
try:
tenant_id = request.user.tenant_id
monitors = api.lbaas.pool_health_monitor_list(request,
tenant_id=tenant_id)
for m in monitors:
if m.id not in datum['health_monitors']:
return True
except Exception:
exceptions.handle(request,
_('Failed to retrieve health monitors.'))
return False
class DeletePMAssociationLink(policy.PolicyTargetMixin,
tables.LinkAction):
name = "deleteassociation"
verbose_name = _("Disassociate Monitor")
url = "horizon:project:loadbalancers:deleteassociation"
classes = ("ajax-modal", "btn-danger")
icon = "remove"
policy_rules = (("network", "delete_pool_health_monitor"),)
def allowed(self, request, datum=None):
if datum and not datum['health_monitors']:
return False
return True
class UpdatePoolsRow(tables.Row):
ajax = True
def get_data(self, request, pool_id):
pool = api.lbaas.pool_get(request, pool_id)
try:
vip = api.lbaas.vip_get(request, pool.vip_id)
pool.vip_name = vip.name
except Exception:
pool.vip_name = pool.vip_id
try:
subnet = api.neutron.subnet_get(request, pool.subnet_id)
pool.subnet_name = subnet.cidr
except Exception:
pool.subnet_name = pool.subnet_id
return pool
STATUS_CHOICES = (
("Active", True),
("Down", True),
("Error", False),
)
STATUS_DISPLAY_CHOICES = (
("Active", pgettext_lazy("Current status of a Pool",
u"Active")),
("Down", pgettext_lazy("Current status of a Pool",
u"Down")),
("Error", pgettext_lazy("Current status of a Pool",
u"Error")),
("Created", pgettext_lazy("Current status of a Pool",
u"Created")),
("Pending_Create", pgettext_lazy("Current status of a Pool",
u"Pending Create")),
("Pending_Update", pgettext_lazy("Current status of a Pool",
u"Pending Update")),
("Pending_Delete", pgettext_lazy("Current status of a Pool",
u"Pending Delete")),
("Inactive", pgettext_lazy("Current status of a Pool",
u"Inactive")),
)
class PoolsTable(tables.DataTable):
name = tables.Column("name_or_id",
verbose_name=_("Name"),
link="horizon:project:loadbalancers:pooldetails")
description = tables.Column('description', verbose_name=_("Description"))
provider = tables.Column('provider', verbose_name=_("Provider"),
filters=(lambda v: filters.default(v, _('N/A')),))
subnet_name = tables.Column('subnet_name', verbose_name=_("Subnet"))
protocol = tables.Column('protocol', verbose_name=_("Protocol"))
status = tables.Column('status',
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
vip_name = tables.Column('vip_name', verbose_name=_("VIP"),
link=get_vip_link)
class Meta(object):
name = "poolstable"
verbose_name = _("Pools")
status_columns = ["status"]
row_class = UpdatePoolsRow
table_actions = (AddPoolLink, DeletePoolLink)
row_actions = (UpdatePoolLink, AddVipLink, UpdateVipLink,
DeleteVipLink, AddPMAssociationLink,
DeletePMAssociationLink, DeletePoolLink)
def get_pool_link(member):
return reverse("horizon:project:loadbalancers:pooldetails",
args=(http.urlquote(member.pool_id),))
def get_member_link(member):
return reverse("horizon:project:loadbalancers:memberdetails",
args=(http.urlquote(member.id),))
class UpdateMemberRow(tables.Row):
ajax = True
def get_data(self, request, member_id):
member = api.lbaas.member_get(request, member_id)
try:
pool = api.lbaas.pool_get(request, member.pool_id)
member.pool_name = pool.name
except Exception:
member.pool_name = member.pool_id
return member
class MembersTable(tables.DataTable):
address = tables.Column('address',
verbose_name=_("IP Address"),
link=get_member_link,
attrs={'data-type': "ip"})
protocol_port = tables.Column('protocol_port',
verbose_name=_("Protocol Port"))
weight = tables.Column('weight',
verbose_name=_("Weight"))
pool_name = tables.Column('pool_name',
verbose_name=_("Pool"), link=get_pool_link)
status = tables.Column('status',
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
class Meta(object):
name = "memberstable"
verbose_name = _("Members")
status_columns = ["status"]
row_class = UpdateMemberRow
table_actions = (AddMemberLink, DeleteMemberLink)
row_actions = (UpdateMemberLink, DeleteMemberLink)
def get_monitor_details(monitor):
if monitor.type in ('HTTP', 'HTTPS'):
return ("%(http_method)s %(url_path)s => %(codes)s" %
{'http_method': monitor.http_method,
'url_path': monitor.url_path,
'codes': monitor.expected_codes})
else:
return _("-")
class MonitorsTable(tables.DataTable):
monitor_type = tables.Column(
"type", verbose_name=_("Monitor Type"),
link="horizon:project:loadbalancers:monitordetails")
delay = tables.Column("delay", verbose_name=_("Delay"))
timeout = tables.Column("timeout", verbose_name=_("Timeout"))
max_retries = tables.Column("max_retries", verbose_name=_("Max Retries"))
details = tables.Column(get_monitor_details, verbose_name=_("Details"))
class Meta(object):
name = "monitorstable"
verbose_name = _("Monitors")
table_actions = (AddMonitorLink, DeleteMonitorLink)
row_actions = (UpdateMonitorLink, DeleteMonitorLink)
| apache-2.0 |
achals/servo | tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/__init__.py | 1229 | 2323 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
import sys
from ..utils import default_etree
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - The xml.dom.minidom DOM implementation
"pulldom" - The xml.dom.pulldom event stream
"etree" - A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"lxml" - Optimized walker for lxml.etree
"genshi" - a Genshi stream
implementation - (Currently applies to the "etree" tree type only). A module
implementing the tree type e.g. xml.etree.ElementTree or
cElementTree."""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType in ("dom", "pulldom"):
name = "%s.%s" % (__name__, treeType)
__import__(name)
mod = sys.modules[name]
treeWalkerCache[treeType] = mod.TreeWalker
elif treeType == "genshi":
from . import genshistream
treeWalkerCache[treeType] = genshistream.TreeWalker
elif treeType == "lxml":
from . import lxmletree
treeWalkerCache[treeType] = lxmletree.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
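# Illustrative usage sketch (not part of the original module; assumes html5lib
# is installed and uses its standard token-dict format):
#
#     import html5lib
#     from html5lib import treewalkers
#
#     tree = html5lib.parse("<p>Hello</p>", treebuilder="etree")
#     TreeWalker = treewalkers.getTreeWalker("etree")
#     for token in TreeWalker(tree):
#         print(token)  # dicts such as {'type': 'StartTag', 'name': 'p', ...}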
| mpl-2.0 |
anggorodewanto/oppia | core/controllers/reader.py | 1 | 21891 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the Oppia exploration learner view."""
import json
import logging
import random
import jinja2
from core.controllers import base
from core.domain import classifier_services
from core.domain import collection_services
from core.domain import config_domain
from core.domain import dependency_registry
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import gadget_registry
from core.domain import interaction_registry
from core.domain import rating_services
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import rte_component_registry
from core.domain import summary_services
import feconf
import utils
MAX_SYSTEM_RECOMMENDATIONS = 4
DEFAULT_TWITTER_SHARE_MESSAGE_PLAYER = config_domain.ConfigProperty(
'default_twitter_share_message_player', {
'type': 'unicode',
},
'Default text for the Twitter share message for the learner view',
default_value=(
'Check out this interactive lesson from Oppia - a free, open-source '
'learning platform!'))
def require_playable(handler):
"""Decorator that checks if the user can play the given exploration."""
def test_can_play(self, exploration_id, **kwargs):
if exploration_id in feconf.DISABLED_EXPLORATION_IDS:
self.render_template(
'error/disabled_exploration.html', iframe_restriction=None)
return
# Checks if the user for the current session is logged in.
if rights_manager.Actor(self.user_id).can_play(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id):
return handler(self, exploration_id, **kwargs)
else:
raise self.PageNotFoundException
return test_can_play
def classify_string_classifier_rule(state, normalized_answer):
"""Run the classifier if no prediction has been made yet. Currently this
is behind a development flag.
"""
best_matched_answer_group = None
best_matched_answer_group_index = len(state.interaction.answer_groups)
best_matched_rule_spec_index = None
sc = classifier_services.StringClassifier()
training_examples = [
[doc, []] for doc in state.interaction.confirmed_unclassified_answers]
for (answer_group_index, answer_group) in enumerate(
state.interaction.answer_groups):
classifier_rule_spec_index = answer_group.get_classifier_rule_index()
if classifier_rule_spec_index is not None:
classifier_rule_spec = answer_group.rule_specs[
classifier_rule_spec_index]
else:
classifier_rule_spec = None
if classifier_rule_spec is not None:
training_examples.extend([
[doc, [str(answer_group_index)]]
for doc in classifier_rule_spec.inputs['training_data']])
if len(training_examples) > 0:
sc.load_examples(training_examples)
doc_ids = sc.add_examples_for_predicting([normalized_answer])
predicted_label = sc.predict_label_for_doc(doc_ids[0])
if (predicted_label !=
classifier_services.StringClassifier.DEFAULT_LABEL):
predicted_answer_group_index = int(predicted_label)
predicted_answer_group = state.interaction.answer_groups[
predicted_answer_group_index]
for rule_spec in predicted_answer_group.rule_specs:
if rule_spec.rule_type == exp_domain.CLASSIFIER_RULESPEC_STR:
best_matched_rule_spec_index = classifier_rule_spec_index
break
best_matched_answer_group = predicted_answer_group
best_matched_answer_group_index = predicted_answer_group_index
return {
'outcome': best_matched_answer_group.outcome.to_dict(),
'answer_group_index': best_matched_answer_group_index,
'rule_spec_index': best_matched_rule_spec_index,
}
else:
return None
return None
def classify(state, answer):
"""Classify the answer using the string classifier.
This should only be called if the string classifier functionality is
enabled, and the interaction is trainable.
    Normalizes the answer and classifies it if the interaction has a
classifier associated with it. Otherwise, classifies the answer to the
default outcome.
Returns a dict with the following keys:
'outcome': A dict representing the outcome of the answer group matched.
'answer_group_index': An index into the answer groups list indicating
which one was selected as the group which this answer belongs to.
This is equal to the number of answer groups if the default outcome
was matched.
        'rule_spec_index': An index into the rule specs list of the matched
            answer group, indicating which rule spec was matched. This is
            equal to 0 if the default outcome is selected.
When the default rule is matched, outcome is the default_outcome of the
state's interaction.
"""
assert feconf.ENABLE_STRING_CLASSIFIER
interaction_instance = interaction_registry.Registry.get_interaction_by_id(
state.interaction.id)
normalized_answer = interaction_instance.normalize_answer(answer)
response = None
if interaction_instance.is_string_classifier_trainable:
response = classify_string_classifier_rule(state, normalized_answer)
else:
raise Exception('No classifier found for interaction.')
if response is not None:
return response
elif state.interaction.default_outcome is not None:
return {
'outcome': state.interaction.default_outcome.to_dict(),
'answer_group_index': len(state.interaction.answer_groups),
'classification_certainty': 0.0,
'rule_spec_index': 0
}
raise Exception(
'Something has seriously gone wrong with the exploration. Oppia does '
'not know what to do with this answer. Please contact the '
'exploration owner.')
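# Illustrative sketch (hypothetical values) of the dict returned by classify()
# when the default outcome is matched:
#
#     {
#         'outcome': {...},                 # default_outcome.to_dict()
#         'answer_group_index': 3,          # == len(answer_groups)
#         'classification_certainty': 0.0,
#         'rule_spec_index': 0,
#     }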
class ExplorationPage(base.BaseHandler):
"""Page describing a single exploration."""
PAGE_NAME_FOR_CSRF = 'player'
@require_playable
def get(self, exploration_id):
"""Handles GET requests."""
version_str = self.request.get('v')
version = int(version_str) if version_str else None
# Note: this is an optional argument and will be None when the
# exploration is being played outside the context of a collection.
collection_id = self.request.get('collection_id')
try:
exploration = exp_services.get_exploration_by_id(
exploration_id, version=version)
except Exception as e:
raise self.PageNotFoundException(e)
collection_title = None
if collection_id:
try:
collection = collection_services.get_collection_by_id(
collection_id)
collection_title = collection.title
except Exception as e:
raise self.PageNotFoundException(e)
version = exploration.version
if not rights_manager.Actor(self.user_id).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.PageNotFoundException
is_iframed = (self.request.get('iframed') == 'true')
# TODO(sll): Cache these computations.
gadget_types = exploration.get_gadget_types()
interaction_ids = exploration.get_interaction_ids()
dependency_ids = (
interaction_registry.Registry.get_deduplicated_dependency_ids(
interaction_ids))
dependencies_html, additional_angular_modules = (
dependency_registry.Registry.get_deps_html_and_angular_modules(
dependency_ids))
gadget_templates = (
gadget_registry.Registry.get_gadget_html(gadget_types))
interaction_templates = (
rte_component_registry.Registry.get_html_for_all_components() +
interaction_registry.Registry.get_interaction_html(
interaction_ids))
self.values.update({
'GADGET_SPECS': gadget_registry.Registry.get_all_specs(),
'INTERACTION_SPECS': interaction_registry.Registry.get_all_specs(),
'DEFAULT_TWITTER_SHARE_MESSAGE_PLAYER': (
DEFAULT_TWITTER_SHARE_MESSAGE_PLAYER.value),
'additional_angular_modules': additional_angular_modules,
'can_edit': (
bool(self.username) and
self.username not in config_domain.BANNED_USERNAMES.value and
rights_manager.Actor(self.user_id).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id)
),
'dependencies_html': jinja2.utils.Markup(
dependencies_html),
'exploration_title': exploration.title,
'exploration_version': version,
'collection_id': collection_id,
'collection_title': collection_title,
'gadget_templates': jinja2.utils.Markup(gadget_templates),
'iframed': is_iframed,
'interaction_templates': jinja2.utils.Markup(
interaction_templates),
'is_private': rights_manager.is_exploration_private(
exploration_id),
# Note that this overwrites the value in base.py.
'meta_name': exploration.title,
# Note that this overwrites the value in base.py.
'meta_description': utils.capitalize_string(exploration.objective),
'nav_mode': feconf.NAV_MODE_EXPLORE,
})
if is_iframed:
self.render_template(
'player/exploration_player.html', iframe_restriction=None)
else:
self.render_template('player/exploration_player.html')
class ExplorationHandler(base.BaseHandler):
"""Provides the initial data for a single exploration."""
def get(self, exploration_id):
"""Populates the data on the individual exploration page."""
version = self.request.get('v')
version = int(version) if version else None
try:
exploration = exp_services.get_exploration_by_id(
exploration_id, version=version)
except Exception as e:
raise self.PageNotFoundException(e)
self.values.update({
'can_edit': (
self.user_id and
rights_manager.Actor(self.user_id).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id)),
'exploration': exploration.to_player_dict(),
'is_logged_in': bool(self.user_id),
'session_id': utils.generate_new_session_id(),
'version': exploration.version,
})
self.render_json(self.values)
class AnswerSubmittedEventHandler(base.BaseHandler):
"""Tracks a learner submitting an answer."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@require_playable
def post(self, exploration_id):
old_state_name = self.payload.get('old_state_name')
# The reader's answer.
answer = self.payload.get('answer')
# Parameters associated with the learner.
old_params = self.payload.get('params', {})
old_params['answer'] = answer
# The version of the exploration.
version = self.payload.get('version')
# The answer group and rule spec indexes, which will be used to get
# the rule spec string.
answer_group_index = self.payload.get('answer_group_index')
rule_spec_index = self.payload.get('rule_spec_index')
exploration = exp_services.get_exploration_by_id(
exploration_id, version=version)
old_interaction = exploration.states[old_state_name].interaction
if answer_group_index == len(old_interaction.answer_groups):
rule_spec_string = exp_domain.DEFAULT_RULESPEC_STR
else:
rule_spec_string = (
old_interaction.answer_groups[answer_group_index].rule_specs[
rule_spec_index].stringify_classified_rule())
old_interaction_instance = (
interaction_registry.Registry.get_interaction_by_id(
old_interaction.id))
normalized_answer = old_interaction_instance.normalize_answer(answer)
# TODO(sll): Should this also depend on `params`?
event_services.AnswerSubmissionEventHandler.record(
exploration_id, version, old_state_name, rule_spec_string,
old_interaction_instance.get_stats_log_html(
old_interaction.customization_args, normalized_answer))
self.render_json({})
class StateHitEventHandler(base.BaseHandler):
"""Tracks a learner hitting a new state."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@require_playable
def post(self, exploration_id):
"""Handles POST requests."""
new_state_name = self.payload.get('new_state_name')
exploration_version = self.payload.get('exploration_version')
session_id = self.payload.get('session_id')
# TODO(sll): why do we not record the value of this anywhere?
client_time_spent_in_secs = self.payload.get( # pylint: disable=unused-variable
'client_time_spent_in_secs')
old_params = self.payload.get('old_params')
# Record the state hit, if it is not the END state.
if new_state_name is not None:
event_services.StateHitEventHandler.record(
exploration_id, exploration_version, new_state_name,
session_id, old_params, feconf.PLAY_TYPE_NORMAL)
else:
logging.error('Unexpected StateHit event for the END state.')
class ClassifyHandler(base.BaseHandler):
"""Stateless handler that performs a classify() operation server-side and
returns the corresponding classification result, which is a dict containing
three keys:
'outcome': A dict representing the outcome of the answer group matched.
'answer_group_index': The index of the matched answer group.
'rule_spec_index': The index of the matched rule spec in the matched
answer group.
"""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@require_playable
def post(self, unused_exploration_id):
"""Handle POST requests.
Note: unused_exploration_id is needed because @require_playable needs 2
arguments.
"""
# A domain object representing the old state.
old_state = exp_domain.State.from_dict(self.payload.get('old_state'))
# The learner's raw answer.
answer = self.payload.get('answer')
# The learner's parameter values.
params = self.payload.get('params')
params['answer'] = answer
self.render_json(classify(old_state, answer))
class ReaderFeedbackHandler(base.BaseHandler):
"""Submits feedback from the reader."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@require_playable
def post(self, exploration_id):
"""Handles POST requests."""
state_name = self.payload.get('state_name')
subject = self.payload.get('subject', 'Feedback from a learner')
feedback = self.payload.get('feedback')
include_author = self.payload.get('include_author')
feedback_services.create_thread(
exploration_id,
state_name,
self.user_id if include_author else None,
subject,
feedback)
self.render_json(self.values)
class ExplorationStartEventHandler(base.BaseHandler):
"""Tracks a learner starting an exploration."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@require_playable
def post(self, exploration_id):
"""Handles POST requests."""
event_services.StartExplorationEventHandler.record(
exploration_id, self.payload.get('version'),
self.payload.get('state_name'),
self.payload.get('session_id'),
self.payload.get('params'),
feconf.PLAY_TYPE_NORMAL)
class ExplorationCompleteEventHandler(base.BaseHandler):
"""Tracks a learner completing an exploration.
The state name recorded should be a state with a terminal interaction.
"""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@require_playable
def post(self, exploration_id):
"""Handles POST requests."""
# This will be None if the exploration is not being played within the
# context of a collection.
collection_id = self.payload.get('collection_id')
user_id = self.user_id
event_services.CompleteExplorationEventHandler.record(
exploration_id,
self.payload.get('version'),
self.payload.get('state_name'),
self.payload.get('session_id'),
self.payload.get('client_time_spent_in_secs'),
self.payload.get('params'),
feconf.PLAY_TYPE_NORMAL)
if user_id and collection_id:
collection_services.record_played_exploration_in_collection_context(
user_id, collection_id, exploration_id)
class ExplorationMaybeLeaveHandler(base.BaseHandler):
"""Tracks a learner leaving an exploration without completing it.
The state name recorded should be a state with a non-terminal interaction.
"""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@require_playable
def post(self, exploration_id):
"""Handles POST requests."""
event_services.MaybeLeaveExplorationEventHandler.record(
exploration_id,
self.payload.get('version'),
self.payload.get('state_name'),
self.payload.get('session_id'),
self.payload.get('client_time_spent_in_secs'),
self.payload.get('params'),
feconf.PLAY_TYPE_NORMAL)
class RatingHandler(base.BaseHandler):
"""Records the rating of an exploration submitted by a user.
Note that this represents ratings submitted on completion of the
exploration.
"""
PAGE_NAME_FOR_CSRF = 'player'
@require_playable
def get(self, exploration_id):
"""Handles GET requests."""
self.values.update({
'overall_ratings':
rating_services.get_overall_ratings_for_exploration(
exploration_id),
'user_rating': (
rating_services.get_user_specific_rating_for_exploration(
self.user_id, exploration_id) if self.user_id else None)
})
self.render_json(self.values)
@base.require_user
def put(self, exploration_id):
"""Handles PUT requests for submitting ratings at the end of an
exploration.
"""
user_rating = self.payload.get('user_rating')
rating_services.assign_rating_to_exploration(
self.user_id, exploration_id, user_rating)
self.render_json({})
class RecommendationsHandler(base.BaseHandler):
"""Provides recommendations to be displayed at the end of explorations.
Which explorations are provided depends on whether the exploration was
played within the context of a collection and whether the user is logged in.
If both are true, then the explorations are suggested from the collection,
if there are upcoming explorations for the learner to complete.
"""
@require_playable
def get(self, exploration_id):
"""Handles GET requests."""
collection_id = self.request.get('collection_id')
include_system_recommendations = self.request.get(
'include_system_recommendations')
try:
author_recommended_exp_ids = json.loads(self.request.get(
'stringified_author_recommended_ids'))
except Exception:
raise self.PageNotFoundException
auto_recommended_exp_ids = []
if self.user_id and collection_id:
next_exp_ids_in_collection = (
collection_services.get_next_exploration_ids_to_complete_by_user( # pylint: disable=line-too-long
self.user_id, collection_id))
auto_recommended_exp_ids = list(
set(next_exp_ids_in_collection) -
set(author_recommended_exp_ids))
elif include_system_recommendations:
system_chosen_exp_ids = (
recommendations_services.get_exploration_recommendations(
exploration_id))
filtered_exp_ids = list(
set(system_chosen_exp_ids) -
set(author_recommended_exp_ids))
auto_recommended_exp_ids = random.sample(
filtered_exp_ids,
min(MAX_SYSTEM_RECOMMENDATIONS, len(filtered_exp_ids)))
self.values.update({
'summaries': (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
author_recommended_exp_ids + auto_recommended_exp_ids)),
})
self.render_json(self.values)
| apache-2.0 |
halvertoluke/edx-platform | lms/djangoapps/courseware/features/annotatable.py | 158 | 3492 | import textwrap
from lettuce import world, steps
from nose.tools import assert_in, assert_equals
from common import i_am_registered_for_the_course, visit_scenario_item
DATA_TEMPLATE = textwrap.dedent("""\
<annotatable>
<instructions>Instruction text</instructions>
<p>{}</p>
</annotatable>
""")
ANNOTATION_TEMPLATE = textwrap.dedent("""\
Before {0}.
<annotation title="region {0}" body="Comment {0}" highlight="yellow" problem="{0}">
Region Contents {0}
</annotation>
After {0}.
""")
PROBLEM_TEMPLATE = textwrap.dedent("""\
<problem max_attempts="1" weight="">
<annotationresponse>
<annotationinput>
<title>Question {number}</title>
<text>Region Contents {number}</text>
<comment>What number is this region?</comment>
<comment_prompt>Type your response below:</comment_prompt>
<tag_prompt>What number is this region?</tag_prompt>
<options>
{options}
</options>
</annotationinput>
</annotationresponse>
<solution>
This problem is checking region {number}
</solution>
</problem>
""")
OPTION_TEMPLATE = """<option choice="{correctness}">{number}</option>"""
def _correctness(choice, target):
if choice == target:
return "correct"
elif abs(choice - target) == 1:
return "partially-correct"
else:
return "incorrect"
@steps
class AnnotatableSteps(object):
def __init__(self):
self.annotations_count = None
self.active_problem = None
def define_component(self, step, count):
r"""that a course has an annotatable component with (?P<count>\d+) annotations$"""
count = int(count)
coursenum = 'test_course'
i_am_registered_for_the_course(step, coursenum)
world.scenario_dict['ANNOTATION_VERTICAL'] = world.ItemFactory(
parent_location=world.scenario_dict['SECTION'].location,
category='vertical',
display_name="Test Annotation Vertical"
)
world.scenario_dict['ANNOTATABLE'] = world.ItemFactory(
parent_location=world.scenario_dict['ANNOTATION_VERTICAL'].location,
category='annotatable',
display_name="Test Annotation Module",
data=DATA_TEMPLATE.format("\n".join(ANNOTATION_TEMPLATE.format(i) for i in xrange(count)))
)
self.annotations_count = count
def view_component(self, step):
r"""I view the annotatable component$"""
visit_scenario_item('ANNOTATABLE')
def check_rendered(self, step):
r"""the annotatable component has rendered$"""
world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Annotatable]").data("initialized")')
annotatable_text = world.css_find('.xblock-student_view[data-type=Annotatable]').first.text
assert_in("Instruction text", annotatable_text)
for i in xrange(self.annotations_count):
assert_in("Region Contents {}".format(i), annotatable_text)
def count_passages(self, step, count):
r"""the annotatable component has (?P<count>\d+) highlighted passages$"""
count = int(count)
assert_equals(len(world.css_find('.annotatable-span')), count)
assert_equals(len(world.css_find('.annotatable-span.highlight')), count)
assert_equals(len(world.css_find('.annotatable-span.highlight-yellow')), count)
# This line is required by @steps in order to actually bind the step
# regexes
AnnotatableSteps()
| agpl-3.0 |
bbockelm/root | interpreter/llvm/src/utils/DSAextract.py | 124 | 3350 | #! /usr/bin/python
#this is a script to extract given named nodes from a dot file, with
#the associated edges. An edge is kept iff for edge x -> y
# x and y are both nodes specified to be kept.
#known issues: if a line contains '->' and is not an edge line
#problems will occur. If node labels do not begin with
#Node this also will not work. Since this is designed to work
#on DSA dot output and not general dot files this is ok.
#If you want to use this on other files rename the node labels
#to Node[.*] with a script or something. This also relies on
#the length of a node name being 13 characters (as it is in all
#DSA dot output files)
#Note that the name of the node can be any substring of the actual
#name in the dot file. Thus if you say specify COLLAPSED
#as a parameter this script will pull out all COLLAPSED
#nodes in the file
#Specifying escape characters in the name like \n also will not work,
#as Python will make it \\n; this is currently not handled
#currently the script prints the names it is searching for
#to STDOUT, so you can check to see if they are what you intend
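#Example invocation (illustrative; file names are assumed):
# ./DSAextract.py callgraph.dot extracted.dot COLLAPSED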
import re
import string
import sys
if len(sys.argv) < 3:
print 'usage is ./DSAextract <dot_file_to_modify> \
<output_file> [list of nodes to extract]'
#open the input file
input = open(sys.argv[1], 'r')
#construct a set of node names
node_name_set = set()
for name in sys.argv[3:]:
node_name_set |= set([name])
#construct a list of compiled regular expressions from the
#node_name_set
regexp_list = []
for name in node_name_set:
regexp_list.append(re.compile(name))
#used to see what kind of line we are on
nodeexp = re.compile('Node')
#used to check to see if the current line is an edge line
arrowexp = re.compile('->')
node_set = set()
#read the file one line at a time
buffer = input.readline()
while buffer != '':
#filter out the unnecessary checks on all the edge lines
if not arrowexp.search(buffer):
#check to see if this is a node we are looking for
for regexp in regexp_list:
#if this name is for the current node, add the dot variable name
#for the node (it will be Node(hex number)) to our set of nodes
if regexp.search(buffer):
node_set |= set([re.split('\s+',buffer,2)[1]])
break
buffer = input.readline()
#test code
#print '\n'
print node_name_set
#print node_set
#open the output file
output = open(sys.argv[2], 'w')
#start the second pass over the file
input = open(sys.argv[1], 'r')
buffer = input.readline()
while buffer != '':
#there are three types of lines we are looking for
#1) node lines, 2) edge lines 3) support lines (like page size, etc)
#is this an edge line?
	#note that this is not completely robust; if a non-edge line
	#for some reason contains -> it will be misidentified
#hand edit the file if this happens
if arrowexp.search(buffer):
#check to make sure that both nodes are in the node list
#if they are print this to output
nodes = arrowexp.split(buffer)
nodes[0] = string.strip(nodes[0])
nodes[1] = string.strip(nodes[1])
if nodes[0][:13] in node_set and \
nodes[1][:13] in node_set:
output.write(buffer)
elif nodeexp.search(buffer): #this is a node line
node = re.split('\s+', buffer,2)[1]
if node in node_set:
output.write(buffer)
else: #this is a support line
output.write(buffer)
buffer = input.readline()
| lgpl-2.1 |
kalxas/QGIS | python/plugins/processing/algs/gdal/sieve.py | 15 | 5672 | # -*- coding: utf-8 -*-
"""
***************************************************************************
sieve.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class sieve(GdalAlgorithm):
INPUT = 'INPUT'
THRESHOLD = 'THRESHOLD'
EIGHT_CONNECTEDNESS = 'EIGHT_CONNECTEDNESS'
NO_MASK = 'NO_MASK'
MASK_LAYER = 'MASK_LAYER'
EXTRA = 'EXTRA'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterNumber(self.THRESHOLD,
self.tr('Threshold'),
type=QgsProcessingParameterNumber.Integer,
minValue=0,
defaultValue=10))
self.addParameter(QgsProcessingParameterBoolean(self.EIGHT_CONNECTEDNESS,
self.tr('Use 8-connectedness'),
defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.NO_MASK,
self.tr('Do not use the default validity mask for the input band'),
defaultValue=False))
self.addParameter(QgsProcessingParameterRasterLayer(self.MASK_LAYER,
self.tr('Validity mask'),
optional=True))
extra_param = QgsProcessingParameterString(self.EXTRA,
self.tr('Additional command-line parameters'),
defaultValue=None,
optional=True)
extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(extra_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Sieved')))
def name(self):
return 'sieve'
def displayName(self):
return self.tr('Sieve')
def group(self):
return self.tr('Raster analysis')
def groupId(self):
return 'rasteranalysis'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'sieve.png'))
def commandName(self):
return 'gdal_sieve'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
arguments = [
'-st',
str(self.parameterAsInt(parameters, self.THRESHOLD, context)),
]
if self.parameterAsBoolean(parameters, self.EIGHT_CONNECTEDNESS, context):
arguments.append('-8')
else:
arguments.append('-4')
if self.parameterAsBoolean(parameters, self.NO_MASK, context):
arguments.append('-nomask')
mask = self.parameterAsRasterLayer(parameters, self.MASK_LAYER, context)
if mask:
arguments.append('-mask')
arguments.append(mask.source())
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
self.setOutputValue(self.OUTPUT, out)
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
extra = self.parameterAsString(parameters, self.EXTRA, context)
arguments.append(extra)
raster = self.parameterAsRasterLayer(parameters, self.INPUT, context)
if raster is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
arguments.append(raster.source())
arguments.append(out)
return [self.commandName() + ('.bat' if isWindows() else '.py'), GdalUtils.escapeAndJoin(arguments)]
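# Illustrative example (paths and values assumed): with THRESHOLD=10,
# EIGHT_CONNECTEDNESS=False, no mask and a .tif destination, the generated
# console command resembles:
#   gdal_sieve.py -st 10 -4 -of GTiff /path/input.tif /path/sieved.tif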
| gpl-2.0 |
lepricon49/CouchPotatoServer | libs/enzyme/mkv.py | 17 | 30471 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <[email protected]>
# Copyright 2003-2006 Thomas Schueppel <[email protected]>
# Copyright 2003-2006 Dirk Meyer <[email protected]>
# Copyright 2003-2006 Jason Tackaberry <[email protected]>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from exceptions import ParseError
from struct import unpack
import core
import logging
import re
__all__ = ['Parser']
# get logging object
log = logging.getLogger(__name__)
# Main IDs for the Matroska streams
MATROSKA_VIDEO_TRACK = 0x01
MATROSKA_AUDIO_TRACK = 0x02
MATROSKA_SUBTITLES_TRACK = 0x11
MATROSKA_HEADER_ID = 0x1A45DFA3
MATROSKA_TRACKS_ID = 0x1654AE6B
MATROSKA_CUES_ID = 0x1C53BB6B
MATROSKA_SEGMENT_ID = 0x18538067
MATROSKA_SEGMENT_INFO_ID = 0x1549A966
MATROSKA_CLUSTER_ID = 0x1F43B675
MATROSKA_VOID_ID = 0xEC
MATROSKA_CRC_ID = 0xBF
MATROSKA_TIMECODESCALE_ID = 0x2AD7B1
MATROSKA_DURATION_ID = 0x4489
MATROSKA_CRC32_ID = 0xBF
MATROSKA_TIMECODESCALE_ID = 0x2AD7B1
MATROSKA_MUXING_APP_ID = 0x4D80
MATROSKA_WRITING_APP_ID = 0x5741
MATROSKA_CODEC_ID = 0x86
MATROSKA_CODEC_PRIVATE_ID = 0x63A2
MATROSKA_FRAME_DURATION_ID = 0x23E383
MATROSKA_VIDEO_SETTINGS_ID = 0xE0
MATROSKA_VIDEO_WIDTH_ID = 0xB0
MATROSKA_VIDEO_HEIGHT_ID = 0xBA
MATROSKA_VIDEO_INTERLACED_ID = 0x9A
MATROSKA_VIDEO_DISPLAY_WIDTH_ID = 0x54B0
MATROSKA_VIDEO_DISPLAY_HEIGHT_ID = 0x54BA
MATROSKA_AUDIO_SETTINGS_ID = 0xE1
MATROSKA_AUDIO_SAMPLERATE_ID = 0xB5
MATROSKA_AUDIO_CHANNELS_ID = 0x9F
MATROSKA_TRACK_UID_ID = 0x73C5
MATROSKA_TRACK_NUMBER_ID = 0xD7
MATROSKA_TRACK_TYPE_ID = 0x83
MATROSKA_TRACK_LANGUAGE_ID = 0x22B59C
MATROSKA_TRACK_OFFSET = 0x537F
MATROSKA_TRACK_FLAG_DEFAULT_ID = 0x88
MATROSKA_TRACK_FLAG_ENABLED_ID = 0xB9
MATROSKA_TITLE_ID = 0x7BA9
MATROSKA_DATE_UTC_ID = 0x4461
MATROSKA_NAME_ID = 0x536E
MATROSKA_CHAPTERS_ID = 0x1043A770
MATROSKA_CHAPTER_UID_ID = 0x73C4
MATROSKA_EDITION_ENTRY_ID = 0x45B9
MATROSKA_CHAPTER_ATOM_ID = 0xB6
MATROSKA_CHAPTER_TIME_START_ID = 0x91
MATROSKA_CHAPTER_TIME_END_ID = 0x92
MATROSKA_CHAPTER_FLAG_ENABLED_ID = 0x4598
MATROSKA_CHAPTER_DISPLAY_ID = 0x80
MATROSKA_CHAPTER_LANGUAGE_ID = 0x437C
MATROSKA_CHAPTER_STRING_ID = 0x85
MATROSKA_ATTACHMENTS_ID = 0x1941A469
MATROSKA_ATTACHED_FILE_ID = 0x61A7
MATROSKA_FILE_DESC_ID = 0x467E
MATROSKA_FILE_NAME_ID = 0x466E
MATROSKA_FILE_MIME_TYPE_ID = 0x4660
MATROSKA_FILE_DATA_ID = 0x465C
MATROSKA_SEEKHEAD_ID = 0x114D9B74
MATROSKA_SEEK_ID = 0x4DBB
MATROSKA_SEEKID_ID = 0x53AB
MATROSKA_SEEK_POSITION_ID = 0x53AC
MATROSKA_TAGS_ID = 0x1254C367
MATROSKA_TAG_ID = 0x7373
MATROSKA_TARGETS_ID = 0x63C0
MATROSKA_TARGET_TYPE_VALUE_ID = 0x68CA
MATROSKA_TARGET_TYPE_ID = 0x63CA
MATRSOKA_TAGS_TRACK_UID_ID = 0x63C5
MATRSOKA_TAGS_EDITION_UID_ID = 0x63C9
MATRSOKA_TAGS_CHAPTER_UID_ID = 0x63C4
MATRSOKA_TAGS_ATTACHMENT_UID_ID = 0x63C6
MATROSKA_SIMPLE_TAG_ID = 0x67C8
MATROSKA_TAG_NAME_ID = 0x45A3
MATROSKA_TAG_LANGUAGE_ID = 0x447A
MATROSKA_TAG_STRING_ID = 0x4487
MATROSKA_TAG_BINARY_ID = 0x4485
# See mkv spec for details:
# http://www.matroska.org/technical/specs/index.html
# Map to convert to well known codes
# http://haali.cs.msu.ru/mkv/codecs.pdf
FOURCCMap = {
'V_THEORA': 'THEO',
'V_SNOW': 'SNOW',
'V_MPEG4/ISO/ASP': 'MP4V',
'V_MPEG4/ISO/AVC': 'AVC1',
'V_MPEGH/ISO/HEVC': 'HEVC',
'A_AC3': 0x2000,
'A_MPEG/L3': 0x0055,
'A_MPEG/L2': 0x0050,
'A_MPEG/L1': 0x0050,
'A_DTS': 0x2001,
'A_PCM/INT/LIT': 0x0001,
'A_PCM/FLOAT/IEEE': 0x003,
'A_TTA1': 0x77a1,
'A_WAVPACK4': 0x5756,
'A_VORBIS': 0x6750,
'A_FLAC': 0xF1AC,
'A_AAC': 0x00ff,
'A_AAC/': 0x00ff
}
def matroska_date_to_datetime(date):
"""
Converts a date in Matroska's date format to a python datetime object.
Returns the given date string if it could not be converted.
"""
# From the specs:
# The fields with dates should have the following format: YYYY-MM-DD
# HH:MM:SS.MSS [...] To store less accuracy, you remove items starting
# from the right. To store only the year, you would use, "2004". To store
# a specific day such as May 1st, 2003, you would use "2003-05-01".
format = re.split(r'([-:. ])', '%Y-%m-%d %H:%M:%S.%f')
while format:
try:
return datetime.strptime(date, ''.join(format))
except ValueError:
format = format[:-2]
return date
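# Illustrative examples (assumed inputs): '2003-05-01' parses to
# datetime(2003, 5, 1), a bare '2004' parses to datetime(2004, 1, 1), and a
# string matching none of the progressively shortened formats is returned
# unchanged.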
def matroska_bps_to_bitrate(bps):
"""
Tries to convert a free-form bps string into a bitrate (bits per second).
"""
m = re.search('([\d.]+)\s*(\D.*)', bps)
if m:
bps, suffix = m.groups()
if 'kbit' in suffix:
return float(bps) * 1024
elif 'kbyte' in suffix:
return float(bps) * 1024 * 8
elif 'byte' in suffix:
return float(bps) * 8
elif 'bps' in suffix or 'bit' in suffix:
return float(bps)
if bps.replace('.', '').isdigit():
if float(bps) < 30000:
# Assume kilobits and convert to bps
return float(bps) * 1024
return float(bps)
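# Illustrative examples (assumed inputs): '128 kbit/s' yields 128 * 1024 =
# 131072.0 bits per second, a bare '192' (below 30000) is assumed to be
# kilobits and yields 196608.0, and '2 byte/s' yields 16.0.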
# Used to convert the official matroska tag names (only lower-cased) to core
# attributes. tag name -> attr, filter
TAGS_MAP = {
# From Media core
u'title': ('title', None),
u'subtitle': ('caption', None),
u'comment': ('comment', None),
u'url': ('url', None),
u'artist': ('artist', None),
u'keywords': ('keywords', lambda s: [word.strip() for word in s.split(',')]),
u'composer_nationality': ('country', None),
u'date_released': ('datetime', None),
u'date_recorded': ('datetime', None),
u'date_written': ('datetime', None),
# From Video core
u'encoder': ('encoder', None),
u'bps': ('bitrate', matroska_bps_to_bitrate),
u'part_number': ('trackno', int),
u'total_parts': ('trackof', int),
u'copyright': ('copyright', None),
u'genre': ('genre', None),
u'actor': ('actors', None),
u'written_by': ('writer', None),
u'producer': ('producer', None),
u'production_studio': ('studio', None),
u'law_rating': ('rating', None),
u'summary': ('summary', None),
u'synopsis': ('synopsis', None),
}
class EbmlEntity:
"""
This is class that is responsible to handle one Ebml entity as described in
the Matroska/Ebml spec
"""
def __init__(self, inbuf):
# Compute the EBML id
# Set the CRC len to zero
self.crc_len = 0
# Now loop until we find an entity without CRC
try:
self.build_entity(inbuf)
except IndexError:
raise ParseError()
while self.get_id() == MATROSKA_CRC32_ID:
self.crc_len += self.get_total_len()
inbuf = inbuf[self.get_total_len():]
self.build_entity(inbuf)
def build_entity(self, inbuf):
self.compute_id(inbuf)
if self.id_len == 0:
log.error(u'EBML entity not found, bad file format')
raise ParseError()
self.entity_len, self.len_size = self.compute_len(inbuf[self.id_len:])
self.entity_data = inbuf[self.get_header_len() : self.get_total_len()]
self.ebml_length = self.entity_len
self.entity_len = min(len(self.entity_data), self.entity_len)
# if the data size is 8 or less, it could be a numeric value
self.value = 0
if self.entity_len <= 8:
for pos, shift in zip(range(self.entity_len), range((self.entity_len - 1) * 8, -1, -8)):
self.value |= ord(self.entity_data[pos]) << shift
def add_data(self, data):
maxlen = self.ebml_length - len(self.entity_data)
if maxlen <= 0:
return
self.entity_data += data[:maxlen]
self.entity_len = len(self.entity_data)
def compute_id(self, inbuf):
self.id_len = 0
if len(inbuf) < 1:
return 0
first = ord(inbuf[0])
if first & 0x80:
self.id_len = 1
self.entity_id = first
elif first & 0x40:
if len(inbuf) < 2:
return 0
self.id_len = 2
self.entity_id = ord(inbuf[0]) << 8 | ord(inbuf[1])
elif first & 0x20:
if len(inbuf) < 3:
return 0
self.id_len = 3
self.entity_id = (ord(inbuf[0]) << 16) | (ord(inbuf[1]) << 8) | \
(ord(inbuf[2]))
elif first & 0x10:
if len(inbuf) < 4:
return 0
self.id_len = 4
self.entity_id = (ord(inbuf[0]) << 24) | (ord(inbuf[1]) << 16) | \
(ord(inbuf[2]) << 8) | (ord(inbuf[3]))
self.entity_str = inbuf[0:self.id_len]
def compute_len(self, inbuf):
if not inbuf:
return 0, 0
i = num_ffs = 0
len_mask = 0x80
len = ord(inbuf[0])
while not len & len_mask:
i += 1
len_mask >>= 1
if i >= 8:
return 0, 0
len &= len_mask - 1
if len == len_mask - 1:
num_ffs += 1
for p in range(i):
len = (len << 8) | ord(inbuf[p + 1])
if len & 0xff == 0xff:
num_ffs += 1
if num_ffs == i + 1:
len = 0
return len, i + 1
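        # Worked example (illustrative): a leading byte of 0x82 has the 0x80
        # marker bit set, so the length descriptor is one byte long and the
        # payload length is 0x82 & 0x7F == 2, i.e. compute_len returns (2, 1).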
def get_crc_len(self):
return self.crc_len
def get_value(self):
return self.value
def get_float_value(self):
if len(self.entity_data) == 4:
return unpack('!f', self.entity_data)[0]
elif len(self.entity_data) == 8:
return unpack('!d', self.entity_data)[0]
return 0.0
def get_data(self):
return self.entity_data
def get_utf8(self):
return unicode(self.entity_data, 'utf-8', 'replace')
def get_str(self):
return unicode(self.entity_data, 'ascii', 'replace')
def get_id(self):
return self.entity_id
def get_str_id(self):
return self.entity_str
def get_len(self):
return self.entity_len
def get_total_len(self):
return self.entity_len + self.id_len + self.len_size
def get_header_len(self):
return self.id_len + self.len_size
class Matroska(core.AVContainer):
"""
Matroska video and audio parser. If at least one video stream is
detected it will set the type to MEDIA_AV.
"""
def __init__(self, file):
core.AVContainer.__init__(self)
self.samplerate = 1
self.file = file
# Read enough that we're likely to get the full seekhead (FIXME: kludge)
buffer = file.read(2000)
if len(buffer) == 0:
# Regular File end
raise ParseError()
# Check the Matroska header
header = EbmlEntity(buffer)
if header.get_id() != MATROSKA_HEADER_ID:
raise ParseError()
log.debug(u'HEADER ID found %08X' % header.get_id())
self.mime = 'video/x-matroska'
self.type = 'Matroska'
self.has_idx = False
self.objects_by_uid = {}
# Now get the segment
self.segment = segment = EbmlEntity(buffer[header.get_total_len():])
# Record file offset of segment data for seekheads
self.segment.offset = header.get_total_len() + segment.get_header_len()
if segment.get_id() != MATROSKA_SEGMENT_ID:
log.debug(u'SEGMENT ID not found %08X' % segment.get_id())
return
log.debug(u'SEGMENT ID found %08X' % segment.get_id())
try:
for elem in self.process_one_level(segment):
if elem.get_id() == MATROSKA_SEEKHEAD_ID:
self.process_elem(elem)
except ParseError:
pass
if not self.has_idx:
log.warning(u'File has no index')
self._set('corrupt', True)
def process_elem(self, elem):
elem_id = elem.get_id()
log.debug(u'BEGIN: process element %r' % hex(elem_id))
if elem_id == MATROSKA_SEGMENT_INFO_ID:
duration = 0
scalecode = 1000000.0
for ielem in self.process_one_level(elem):
ielem_id = ielem.get_id()
if ielem_id == MATROSKA_TIMECODESCALE_ID:
scalecode = ielem.get_value()
elif ielem_id == MATROSKA_DURATION_ID:
duration = ielem.get_float_value()
elif ielem_id == MATROSKA_TITLE_ID:
self.title = ielem.get_utf8()
elif ielem_id == MATROSKA_DATE_UTC_ID:
timestamp = unpack('!q', ielem.get_data())[0] / 10.0 ** 9
# Date is offset 2001-01-01 00:00:00 (timestamp 978307200.0)
self.timestamp = int(timestamp + 978307200)
self.length = duration * scalecode / 1000000000.0
elif elem_id == MATROSKA_TRACKS_ID:
self.process_tracks(elem)
elif elem_id == MATROSKA_CHAPTERS_ID:
self.process_chapters(elem)
elif elem_id == MATROSKA_ATTACHMENTS_ID:
self.process_attachments(elem)
elif elem_id == MATROSKA_SEEKHEAD_ID:
self.process_seekhead(elem)
elif elem_id == MATROSKA_TAGS_ID:
self.process_tags(elem)
elif elem_id == MATROSKA_CUES_ID:
self.has_idx = True
log.debug(u'END: process element %r' % hex(elem_id))
return True
def process_seekhead(self, elem):
for seek_elem in self.process_one_level(elem):
if seek_elem.get_id() != MATROSKA_SEEK_ID:
continue
for sub_elem in self.process_one_level(seek_elem):
if sub_elem.get_id() == MATROSKA_SEEKID_ID:
if sub_elem.get_value() == MATROSKA_CLUSTER_ID:
# Not interested in these.
return
elif sub_elem.get_id() == MATROSKA_SEEK_POSITION_ID:
self.file.seek(self.segment.offset + sub_elem.get_value())
buffer = self.file.read(100)
try:
elem = EbmlEntity(buffer)
except ParseError:
continue
# Fetch all data necessary for this element.
elem.add_data(self.file.read(elem.ebml_length))
self.process_elem(elem)
def process_tracks(self, tracks):
tracksbuf = tracks.get_data()
index = 0
while index < tracks.get_len():
trackelem = EbmlEntity(tracksbuf[index:])
log.debug (u'ELEMENT %X found' % trackelem.get_id())
self.process_track(trackelem)
index += trackelem.get_total_len() + trackelem.get_crc_len()
def process_one_level(self, item):
buf = item.get_data()
index = 0
while index < item.get_len():
if len(buf[index:]) == 0:
break
elem = EbmlEntity(buf[index:])
yield elem
index += elem.get_total_len() + elem.get_crc_len()
def set_track_defaults(self, track):
track.language = 'eng'
def process_track(self, track):
# Collapse generator into a list since we need to iterate over it
# twice.
elements = [x for x in self.process_one_level(track)]
track_type = [x.get_value() for x in elements if x.get_id() == MATROSKA_TRACK_TYPE_ID]
if not track_type:
log.debug(u'Bad track: no type id found')
return
track_type = track_type[0]
track = None
if track_type == MATROSKA_VIDEO_TRACK:
log.debug(u'Video track found')
track = self.process_video_track(elements)
elif track_type == MATROSKA_AUDIO_TRACK:
log.debug(u'Audio track found')
track = self.process_audio_track(elements)
elif track_type == MATROSKA_SUBTITLES_TRACK:
log.debug(u'Subtitle track found')
track = core.Subtitle()
self.set_track_defaults(track)
track.id = len(self.subtitles)
self.subtitles.append(track)
for elem in elements:
self.process_track_common(elem, track)
def process_track_common(self, elem, track):
elem_id = elem.get_id()
if elem_id == MATROSKA_TRACK_LANGUAGE_ID:
track.language = elem.get_str()
log.debug(u'Track language found: %r' % track.language)
elif elem_id == MATROSKA_NAME_ID:
track.title = elem.get_utf8()
elif elem_id == MATROSKA_TRACK_NUMBER_ID:
track.trackno = elem.get_value()
elif elem_id == MATROSKA_TRACK_FLAG_ENABLED_ID:
track.enabled = bool(elem.get_value())
elif elem_id == MATROSKA_TRACK_FLAG_DEFAULT_ID:
track.default = bool(elem.get_value())
elif elem_id == MATROSKA_CODEC_ID:
track.codec = elem.get_str()
elif elem_id == MATROSKA_CODEC_PRIVATE_ID:
track.codec_private = elem.get_data()
elif elem_id == MATROSKA_TRACK_UID_ID:
self.objects_by_uid[elem.get_value()] = track
def process_video_track(self, elements):
track = core.VideoStream()
# Defaults
track.codec = u'Unknown'
track.fps = 0
self.set_track_defaults(track)
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_CODEC_ID:
track.codec = elem.get_str()
elif elem_id == MATROSKA_FRAME_DURATION_ID:
try:
track.fps = 1 / (pow(10, -9) * (elem.get_value()))
except ZeroDivisionError:
pass
elif elem_id == MATROSKA_VIDEO_SETTINGS_ID:
d_width = d_height = None
for settings_elem in self.process_one_level(elem):
settings_elem_id = settings_elem.get_id()
if settings_elem_id == MATROSKA_VIDEO_WIDTH_ID:
track.width = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_HEIGHT_ID:
track.height = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_DISPLAY_WIDTH_ID:
d_width = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_DISPLAY_HEIGHT_ID:
d_height = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_INTERLACED_ID:
value = int(settings_elem.get_value())
self._set('interlaced', value)
if None not in [d_width, d_height]:
track.aspect = float(d_width) / d_height
else:
self.process_track_common(elem, track)
# convert codec information
# http://haali.cs.msu.ru/mkv/codecs.pdf
if track.codec in FOURCCMap:
track.codec = FOURCCMap[track.codec]
elif '/' in track.codec and track.codec.split('/')[0] + '/' in FOURCCMap:
track.codec = FOURCCMap[track.codec.split('/')[0] + '/']
elif track.codec.endswith('FOURCC') and len(track.codec_private or '') == 40:
track.codec = track.codec_private[16:20]
elif track.codec.startswith('V_REAL/'):
track.codec = track.codec[7:]
elif track.codec.startswith('V_'):
# FIXME: add more video codecs here
track.codec = track.codec[2:]
track.id = len(self.video)
self.video.append(track)
return track
def process_audio_track(self, elements):
track = core.AudioStream()
track.codec = u'Unknown'
self.set_track_defaults(track)
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_CODEC_ID:
track.codec = elem.get_str()
elif elem_id == MATROSKA_AUDIO_SETTINGS_ID:
for settings_elem in self.process_one_level(elem):
settings_elem_id = settings_elem.get_id()
if settings_elem_id == MATROSKA_AUDIO_SAMPLERATE_ID:
track.samplerate = settings_elem.get_float_value()
elif settings_elem_id == MATROSKA_AUDIO_CHANNELS_ID:
track.channels = settings_elem.get_value()
else:
self.process_track_common(elem, track)
if track.codec in FOURCCMap:
track.codec = FOURCCMap[track.codec]
elif '/' in track.codec and track.codec.split('/')[0] + '/' in FOURCCMap:
track.codec = FOURCCMap[track.codec.split('/')[0] + '/']
elif track.codec.startswith('A_'):
track.codec = track.codec[2:]
track.id = len(self.audio)
self.audio.append(track)
return track
def process_chapters(self, chapters):
elements = self.process_one_level(chapters)
for elem in elements:
if elem.get_id() == MATROSKA_EDITION_ENTRY_ID:
buf = elem.get_data()
index = 0
while index < elem.get_len():
sub_elem = EbmlEntity(buf[index:])
if sub_elem.get_id() == MATROSKA_CHAPTER_ATOM_ID:
self.process_chapter_atom(sub_elem)
index += sub_elem.get_total_len() + sub_elem.get_crc_len()
def process_chapter_atom(self, atom):
elements = self.process_one_level(atom)
chap = core.Chapter()
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_CHAPTER_TIME_START_ID:
# Scale timecode to seconds (float)
chap.pos = elem.get_value() / 1000000 / 1000.0
elif elem_id == MATROSKA_CHAPTER_FLAG_ENABLED_ID:
chap.enabled = elem.get_value()
elif elem_id == MATROSKA_CHAPTER_DISPLAY_ID:
# Matroska supports multiple (chapter name, language) pairs for
# each chapter, so chapter names can be internationalized. This
# logic will only take the last one in the list.
for display_elem in self.process_one_level(elem):
if display_elem.get_id() == MATROSKA_CHAPTER_STRING_ID:
chap.name = display_elem.get_utf8()
elif elem_id == MATROSKA_CHAPTER_UID_ID:
self.objects_by_uid[elem.get_value()] = chap
log.debug(u'Chapter %r found', chap.name)
chap.id = len(self.chapters)
self.chapters.append(chap)
def process_attachments(self, attachments):
buf = attachments.get_data()
index = 0
while index < attachments.get_len():
elem = EbmlEntity(buf[index:])
if elem.get_id() == MATROSKA_ATTACHED_FILE_ID:
self.process_attachment(elem)
index += elem.get_total_len() + elem.get_crc_len()
def process_attachment(self, attachment):
elements = self.process_one_level(attachment)
name = desc = mimetype = ""
data = None
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_FILE_NAME_ID:
name = elem.get_utf8()
elif elem_id == MATROSKA_FILE_DESC_ID:
desc = elem.get_utf8()
elif elem_id == MATROSKA_FILE_MIME_TYPE_ID:
mimetype = elem.get_data()
elif elem_id == MATROSKA_FILE_DATA_ID:
data = elem.get_data()
# Right now we only support attachments that could be cover images.
# Make a guess to see if this attachment is a cover image.
if mimetype.startswith("image/") and u"cover" in (name + desc).lower() and data:
self.thumbnail = data
log.debug(u'Attachment %r found' % name)
def process_tags(self, tags):
# Tags spec: http://www.matroska.org/technical/specs/tagging/index.html
# Iterate over Tags children. Tags element children is a
# Tag element (whose children are SimpleTags) and a Targets element
# whose children specific what objects the tags apply to.
for tag_elem in self.process_one_level(tags):
# Start a new dict to hold all SimpleTag elements.
tags_dict = core.Tags()
# A list of target uids this tags dict applies too. If empty,
# tags are global.
targets = []
for sub_elem in self.process_one_level(tag_elem):
if sub_elem.get_id() == MATROSKA_SIMPLE_TAG_ID:
self.process_simple_tag(sub_elem, tags_dict)
elif sub_elem.get_id() == MATROSKA_TARGETS_ID:
# Targets element: if there is no uid child (track uid,
# chapter uid, etc.) then the tags dict applies to the
# whole file (top-level Media object).
for target_elem in self.process_one_level(sub_elem):
target_elem_id = target_elem.get_id()
if target_elem_id in (MATRSOKA_TAGS_TRACK_UID_ID, MATRSOKA_TAGS_EDITION_UID_ID,
MATRSOKA_TAGS_CHAPTER_UID_ID, MATRSOKA_TAGS_ATTACHMENT_UID_ID):
targets.append(target_elem.get_value())
elif target_elem_id == MATROSKA_TARGET_TYPE_VALUE_ID:
# Target types not supported for now. (Unclear how this
# would fit with kaa.metadata.)
pass
if targets:
# Assign tags to all listed uids
for target in targets:
try:
self.objects_by_uid[target].tags.update(tags_dict)
self.tags_to_attributes(self.objects_by_uid[target], tags_dict)
except KeyError:
log.warning(u'Tags assigned to unknown/unsupported target uid %d', target)
else:
self.tags.update(tags_dict)
self.tags_to_attributes(self, tags_dict)
def process_simple_tag(self, simple_tag_elem, tags_dict):
"""
Returns a dict representing the Tag element.
"""
name = lang = value = children = None
binary = False
for elem in self.process_one_level(simple_tag_elem):
elem_id = elem.get_id()
if elem_id == MATROSKA_TAG_NAME_ID:
name = elem.get_utf8().lower()
elif elem_id == MATROSKA_TAG_STRING_ID:
value = elem.get_utf8()
elif elem_id == MATROSKA_TAG_BINARY_ID:
value = elem.get_data()
binary = True
elif elem_id == MATROSKA_TAG_LANGUAGE_ID:
lang = elem.get_utf8()
elif elem_id == MATROSKA_SIMPLE_TAG_ID:
if children is None:
children = core.Tags()
self.process_simple_tag(elem, children)
if children:
# Convert ourselves to a Tags object.
children.value = value
children.langcode = lang
value = children
else:
if name.startswith('date_'):
# Try to convert date to a datetime object.
value = matroska_date_to_datetime(value)
value = core.Tag(value, lang, binary)
if name in tags_dict:
# Multiple items of this tag name.
if not isinstance(tags_dict[name], list):
# Convert to a list
tags_dict[name] = [tags_dict[name]]
# Append to list
tags_dict[name].append(value)
else:
tags_dict[name] = value
def tags_to_attributes(self, obj, tags):
# Convert tags to core attributes.
for name, tag in tags.items():
if isinstance(tag, dict):
# Nested tags dict, recurse.
self.tags_to_attributes(obj, tag)
continue
elif name not in TAGS_MAP:
continue
attr, filter = TAGS_MAP[name]
if attr not in obj._keys and attr not in self._keys:
# Tag is not in any core attribute for this object or global,
# so skip.
continue
# Pull value out of Tag object or list of Tag objects.
value = [item.value for item in tag] if isinstance(tag, list) else tag.value
if filter:
try:
value = [filter(item) for item in value] if isinstance(value, list) else filter(value)
except Exception, e:
log.warning(u'Failed to convert tag to core attribute: %r', e)
# Special handling for tv series recordings. The 'title' tag
# can be used for both the series and the episode name. The
# same is true for trackno which may refer to the season
# and the episode number. Therefore, if we find these
# attributes already set we try some guessing.
if attr == 'trackno' and getattr(self, attr) is not None:
# delete trackno and save season and episode
self.season = self.trackno
self.episode = value
self.trackno = None
continue
if attr == 'title' and getattr(self, attr) is not None:
# store current value of title as series and use current
# value of title as title
self.series = self.title
if attr in obj._keys:
setattr(obj, attr, value)
else:
setattr(self, attr, value)
Parser = Matroska
| gpl-3.0 |
sukisuki/mvn | mvn/examples/kalman.py | 2 | 6229 | #! /usr/bin/env python
print 'starting'
import os
import sys
import numpy
import matplotlib
#matplotlib.use('cairo')
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import MultipleLocator
import pylab
from mvn import Mvn
from mvn.matrix import Matrix
import mvn.plotTools
from collections import OrderedDict
colors = OrderedDict([
['Actual' , [1, 1, 0]],
['Updated' , [0, 0, 1]],
['Noise' , [1, 0, 0]],
['Updated+Noise', [1, 0, 1]],
['Measurement' , [0, 1, 0]],
['Filter Result', [0, 1, 1]],
])
actualParams = {
'marker':'*',
'markersize':20,
'color':colors['Actual'],
}
otherParams = {
'minalpha':0.5,
'slope':0.333
}
class Publisher(object):
def __init__(self, targetDir, formats=('png','svg')):
self.n = 0
self.formats = formats
self.targetDir = targetDir
try:
os.stat(self.targetDir)
except OSError:
os.mkdir(self.targetDir)
def publish(self, fig):
for format in self.formats:
fig.savefig(
"%s/%0.3d.%s" % (self.targetDir, self.n, format),
format=format
)
self.n += 1
def seed(path):
if len(sys.argv) > 1:
seed = int(sys.argv[1])
else:
seed = numpy.random.randint(10000)
print 'seed: %d' % seed
numpy.random.seed(seed)
open('%s/seed' % path, 'w').write(str(seed))
def drawLegend(ax):
patch = lambda color:matplotlib.patches.Ellipse(
[0, 0],
width=0, height=0, facecolor=color
)
patches = [patch(color) for [name,color] in colors.iteritems()]
ax.legend(
patches, list(colors.keys()),
loc='lower center',
ncol = 2
)
def newAx(fig, transform = Matrix.eye(2)):
fig.clear()
axgrid = GridSpec(1, 1)
#get axes
ax = pylab.subplot(
axgrid[:, :],
projection = 'custom',
transform = transform,
)
ax.autoscale(False)
# ax.set_xticks(numpy.arange(-10., 35., 5.))
# ax.set_yticks(numpy.arange(-10., 35., 5.))
ax.set_xlim([-5, 20])
ax.set_ylim([-5, 10])
ax.xaxis.set_major_locator(MultipleLocator(5))
ax.grid('on')
drawLegend(ax)
return ax
if __name__ == '__main__':
if not os.path.exists('kalman'):
os.mkdir('kalman')
## figure setup
#directory for resulting figures
path = 'kalman'
#seed the rng so results are reproducible.
seed(path)
#create publisher
P = Publisher(path)
#create figure
fig = pylab.figure(figsize = (6, 6))
## kalman filter parameters
#the actual, hidden state
actual = numpy.array([[0, 5]])
#the sensor
sensor = Mvn(vectors = [[1, 0], [0, 1]],var = [1, numpy.inf])
#the system noise
noise = Mvn(vectors = [[1, 0], [0, 1]], var = numpy.array([0.5, 1])**2)
#the shear transform to move the system forward
transform = Matrix([[1, 0], [0.5, 1]])
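    # with a state row [position, velocity], right-multiplying by this matrix gives [position + 0.5*velocity, velocity]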
filtered = sensor.measure(actual)
## initial plot
ax = newAx(fig)
#plot the initial actual position
ax.plot(actual[:, 0], actual[:, 1], **actualParams)
ax.set_title('Kalman Filtering: Start')
pylab.xlabel('Position')
pylab.ylabel('Velocity')
P.publish(fig)
    #measure the actual position, and plot the measurement
filtered.plot(facecolor=colors['Filter Result'], **otherParams)
ax.set_title('Initialize to first measurement')
pylab.xlabel('Position')
pylab.ylabel('Velocity')
P.publish(fig)
for n in range(6):
        ## plot immediately after the step forward
#create a transformed axis
ax = newAx(fig)#,transform)
#update the system
actual = actual*transform
filtered = filtered*transform
#plot the updated system
ax.plot(actual[:, 0], actual[:, 1], **actualParams)
filtered.plot(facecolor=colors['Updated'], **otherParams)
ax.set_title('Update')
pylab.xlabel('Position')
pylab.ylabel('Velocity')
P.publish(fig)
#realign the axes
ax = newAx(fig)
#re-plot the filter result
filtered.plot(facecolor=colors['Updated'], **otherParams)
#add noise and plot the actual and filtered values
actual_noise = noise+actual
filtered_noise = noise+filtered
actual_noise.plot(facecolor = colors['Noise'], **otherParams)
filtered_noise.plot(facecolor = colors['Noise'], **otherParams)
        # sample the position of the actual distribution, to find its new position
ax.plot(actual[:, 0], actual[:, 1], **actualParams)
actual=actual_noise.sample()
ax.plot(actual[:, 0], actual[:, 1], **actualParams)
ax.set_title('Add process noise')
pylab.xlabel('Position')
pylab.ylabel('Velocity')
P.publish(fig)
ax = newAx(fig)
filtered = filtered_noise
ax.plot(actual[:, 0], actual[:, 1], **actualParams)
filtered.plot(facecolor=colors['Updated+Noise'], **otherParams)
ax.set_title('Add process noise')
pylab.xlabel('Position')
pylab.ylabel('Velocity')
P.publish(fig)
measure=sensor.measure(actual)
measure.plot(facecolor = colors['Measurement'], **otherParams)
ax.set_title('Measure')
P.publish(fig)
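        # merge the prediction with the measurement; the Mvn class uses '&' for this Bayesian update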
filtered = filtered&measure
filtered.plot(facecolor = colors['Filter Result'], **otherParams)
ax.set_title('Merge')
pylab.xlabel('Position')
pylab.ylabel('Velocity')
P.publish(fig)
ax = newAx(fig)
ax.plot(actual[:, 0], actual[:, 1], **actualParams)
filtered.plot(facecolor=colors['Filter Result'], **otherParams)
pylab.xlabel('Position')
pylab.ylabel('Velocity')
ax.set_title('Merge')
P.publish(fig)
# os.system('convert -limit memory 32 -delay 100 %s/*.png kalman.gif' % path)
os.system('convert -delay 150 %s/*.png kalman.gif' % path)
| bsd-2-clause |
elbruno/Blog | 20190521 Python FaceRecognition/Labs2.py | 2 | 1799 | import face_recognition
import cv2
import numpy as np
def LoadFaces():
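    # Build reference encodings for the known faces from sample images on disk.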
bruno_image = face_recognition.load_image_file("d:\Faces\Bruno1.jpg")
bruno_face_encoding = face_recognition.face_encodings(bruno_image)[0]
valentino_image = face_recognition.load_image_file("d:\Faces\Valen1.jpg")
valentino_face_encoding = face_recognition.face_encodings(valentino_image)[0]
known_face_encodings = [
bruno_face_encoding,
valentino_face_encoding
]
known_face_names = [
"Bruno",
"Valentino"
]
return known_face_encodings, known_face_names;
video_capture = cv2.VideoCapture(0)
known_face_encodings, known_face_names = LoadFaces()
while True:
ret, frame = video_capture.read()
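    # OpenCV captures frames in BGR order; reversing the channel axis gives the RGB layout face_recognition expects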
rgb_frame = frame[:, :, ::-1]
face_locations = face_recognition.face_locations(rgb_frame)
face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
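        # draw a bounding box around the face and a filled label bar with the matched name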
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
cv2.rectangle(frame, (left, bottom - 25), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.7, (255, 255, 255), 1)
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows() | gpl-2.0 |
TechInvestLab/dot15926 | editor_qt/iso15926/kb/other_defs.py | 1 | 2658 | """
.15925 Editor
Copyright 2014 TechInvestLab.ru [email protected]
.15925 Editor is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3.0 of the License, or (at your option) any later version.
.15925 Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with .15925 Editor.
"""
ns_pcardl_rdf = 'http://posccaesar.org/rdl/'
ns_pcardl_sparql = 'http://posccaesar.org/rdl/'
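# local names of PCA RDL annotation properties; full URIs are formed by appending them to the namespaces above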
annlist_pcardl = [
'hasIdPCA', 'hasDesignation', 'hasDesignationAltern', 'hasDesignationAbbrev',
'hasDefinition', 'hasDefinitionAdapted', 'hasStatus', 'hasCreationDate',
'hasCreator', 'hasDeleted', 'hasSubmitter', 'hasSubmitterOrg', 'hasRegistrar',
'hasRegistrarAuth', 'hasStewardship', 'hasStewardshipContact', 'hasNote',
'hasNoteAdmin', 'hasNoteExplanatory', 'hasNoteExample', 'hasNoteChange',
'hasNoteIssue', 'defaultRdsId',
]
pca_rdf_designation = ns_pcardl_rdf + 'hasDesignation'
pca_rdf_definition = ns_pcardl_rdf + 'hasDefinition'
pca_sparql_designation = ns_pcardl_sparql + 'hasDesignation'
pca_sparql_definition = ns_pcardl_sparql + 'hasDefinition'
labels_pca_rdf = [pca_rdf_designation, pca_rdf_definition]
labels_pca_sparql = [pca_sparql_designation, pca_sparql_definition]
labels_pca = labels_pca_rdf + labels_pca_sparql
ns_rdswip = 'http://rdl.rdlfacade.org/data#'
annlist_meta = [
'annUniqueName', 'annTextDefinition', 'annSource', 'annNotes',
'annAdministrativeNote', 'annExplanatoryComment', 'annChangeDescription',
'annRule', 'annAccessCode', 'annURI', 'annUniqueNumber', 'annSynonym',
'annCreationDate', 'annEffectiveDate', 'annLastChangeDate', 'annRegistrationStatus',
'annStewardshipContact', 'annStewardshipOrganization', 'annSubmissionContact',
'annSubmittingOrganization', 'annUnresolvedIssues', 'annSymbol', 'annOperator',
'annFirstOperand', 'annSecondOperand', 'annFactor_Prefix', 'annExponent'
]
ns_old_part4 = 'http://rds.posccaesar.org/2009/10/OWL/ISO-15926-4_2007#'
annlist_old_part4 = ['spreadsheet']
ns_old_part6 = 'http://rds.posccaesar.org/2008/02/OWL/ISO-15926-6_2008_Draft#'
annlist_old_part6 = ['designation', 'definition', 'source', 'notes']
ns_til = 'http://techinvestlab.ru/meta#'
annlist_til = ['label_ru', 'label_en']
| lgpl-3.0 |
hibooboo2/python-agent | cattle/plugins/host_info/memory.py | 2 | 1502 | import platform
class MemoryCollector(object):
def __init__(self):
self.key_map = {'memtotal': 'memTotal',
'memfree': 'memFree',
                        'memavailable': 'memAvailable',
'buffers': 'buffers',
'cached': 'cached',
'swapcached': 'swapCached',
'active': 'active',
'inactive': 'inactive',
'swaptotal': 'swapTotal',
'swapfree': 'swapFree'
}
self.unit = 1024.0
def _get_meminfo_data(self):
with open('/proc/meminfo') as f:
return f.readlines()
def _parse_linux_meminfo(self):
data = {k: None for k in self.key_map.values()}
# /proc/meminfo file has all values in kB
mem_data = self._get_meminfo_data()
for line in mem_data:
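            # each line looks like 'MemTotal:       16318468 kB'; keep the field name and the numeric value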
line_list = line.split(':')
key_lower = line_list[0].lower()
possible_mem_value = line_list[1].strip().split(' ')[0]
if self.key_map.get(key_lower):
converted_mem_val = float(possible_mem_value)/self.unit
data[self.key_map[key_lower]] = round(converted_mem_val, 3)
return data
def key_name(self):
return "memoryInfo"
def get_data(self):
if platform.system() == 'Linux':
return self._parse_linux_meminfo()
else:
return {}
| apache-2.0 |
srajag/nova | nova/tests/api/openstack/compute/contrib/test_rescue.py | 5 | 7244 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
import webob
from nova import compute
from nova import exception
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
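# No-op stand-ins for the compute API's rescue/unrescue methods, stubbed in via setUp() below.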
def rescue(self, context, instance, rescue_password=None,
rescue_image_ref=None):
pass
def unrescue(self, context, instance):
pass
class RescueTest(test.NoDBTestCase):
def setUp(self):
super(RescueTest, self).setUp()
def fake_compute_get(*args, **kwargs):
uuid = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
return {'id': 1, 'uuid': uuid}
self.stubs.Set(compute.api.API, "get", fake_compute_get)
self.stubs.Set(compute.api.API, "rescue", rescue)
self.stubs.Set(compute.api.API, "unrescue", unrescue)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Rescue'])
self.app = fakes.wsgi_app(init_only=('servers',))
def test_rescue_from_locked_server(self):
def fake_rescue_from_locked_server(self, context,
instance, rescue_password=None, rescue_image_ref=None):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.stubs.Set(compute.api.API,
'rescue',
fake_rescue_from_locked_server)
body = {"rescue": {"adminPass": "AABBCC112233"}}
req = webob.Request.blank('/v2/fake/servers/test_inst/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 409)
def test_rescue_with_preset_password(self):
body = {"rescue": {"adminPass": "AABBCC112233"}}
req = webob.Request.blank('/v2/fake/servers/test_inst/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_json = jsonutils.loads(resp.body)
self.assertEqual("AABBCC112233", resp_json['adminPass'])
def test_rescue_generates_password(self):
body = dict(rescue=None)
req = webob.Request.blank('/v2/fake/servers/test_inst/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_json = jsonutils.loads(resp.body)
self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
def test_rescue_of_rescued_instance(self):
body = dict(rescue=None)
def fake_rescue(*args, **kwargs):
raise exception.InstanceInvalidState('fake message')
self.stubs.Set(compute.api.API, "rescue", fake_rescue)
req = webob.Request.blank('/v2/fake/servers/test_inst/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 409)
def test_unrescue(self):
body = dict(unrescue=None)
req = webob.Request.blank('/v2/fake/servers/test_inst/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_unrescue_from_locked_server(self):
def fake_unrescue_from_locked_server(self, context,
instance):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.stubs.Set(compute.api.API,
'unrescue',
fake_unrescue_from_locked_server)
body = dict(unrescue=None)
req = webob.Request.blank('/v2/fake/servers/test_inst/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 409)
def test_unrescue_of_active_instance(self):
body = dict(unrescue=None)
def fake_unrescue(*args, **kwargs):
raise exception.InstanceInvalidState('fake message')
self.stubs.Set(compute.api.API, "unrescue", fake_unrescue)
req = webob.Request.blank('/v2/fake/servers/test_inst/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 409)
def test_rescue_raises_unrescuable(self):
body = dict(rescue=None)
def fake_rescue(*args, **kwargs):
raise exception.InstanceNotRescuable('fake message')
self.stubs.Set(compute.api.API, "rescue", fake_rescue)
req = webob.Request.blank('/v2/fake/servers/test_inst/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
@mock.patch('nova.compute.api.API.rescue')
def test_rescue_raises_not_implemented(self, rescue_mock):
body = dict(rescue=None)
def fake_rescue(*args, **kwargs):
raise NotImplementedError('not implemented')
rescue_mock.side_effect = fake_rescue
req = webob.Request.blank('/v2/fake/servers/test_inst/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 501)
@mock.patch('nova.compute.api.API.unrescue')
def test_unrescue_raises_not_implemented(self, unrescue_mock):
body = dict(unrescue=None)
def fake_unrescue(*args, **kwargs):
raise NotImplementedError('not implemented')
unrescue_mock.side_effect = fake_unrescue
req = webob.Request.blank('/v2/fake/servers/test_inst/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 501)
| apache-2.0 |
mechaxl/mixer | setup.py | 2 | 2027 | #!/usr/bin/env python
""" mixer -- Generate tests data.
mixer -- Description
"""
import re
import sys
from os import path as op
from setuptools import setup
def _read(fname):
try:
return open(op.join(op.dirname(__file__), fname)).read()
except IOError:
return ''
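# Pull project metadata (license, name, version) out of mixer/__init__.py without importing the package.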
_meta = _read('mixer/__init__.py')
_license = re.search(r'^__license__\s*=\s*"(.*)"', _meta, re.M).group(1)
_project = re.search(r'^__project__\s*=\s*"(.*)"', _meta, re.M).group(1)
_version = re.search(r'^__version__\s*=\s*"(.*)"', _meta, re.M).group(1)
install_requires = [
l for l in _read('requirements.txt').split('\n')
if l and not l.startswith('#')]
tests_require = [
l for l in _read('requirements-tests.txt').split('\n')
if l and not l.startswith('#')]
# FIXME: Fix fake-factory installation
if sys.version_info < (2, 7, 0):
install_requires.append('importlib')
setup(
name=_project,
version=_version,
license=_license,
description=_read('DESCRIPTION'),
long_description=_read('README.rst'),
platforms=('Any'),
keywords = "django flask sqlalchemy testing mock stub mongoengine data".split(), # noqa
author='Kirill Klenov',
author_email='[email protected]',
url='http://github.com/klen/mixer',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Natural Language :: Russian',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
'Topic :: Utilities',
],
packages=['mixer', 'mixer.backend'],
include_package_data=True,
install_requires=install_requires,
tests_require=tests_require,
test_suite='tests',
)
# lint_ignore=F0401
| bsd-3-clause |
bascht/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/as.py | 61 | 2989 | """SCons.Tool.as
Tool-specific initialization for as, the generic Posix assembler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/as.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Defaults
import SCons.Tool
import SCons.Util
assemblers = ['as']
ASSuffixes = ['.s', '.asm', '.ASM']
ASPPSuffixes = ['.spp', '.SPP', '.sx']
if SCons.Util.case_sensitive_suffixes('.s', '.S'):
ASPPSuffixes.extend(['.S'])
else:
ASSuffixes.extend(['.S'])
def generate(env):
"""Add Builders and construction variables for as to an Environment."""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in ASSuffixes:
static_obj.add_action(suffix, SCons.Defaults.ASAction)
shared_obj.add_action(suffix, SCons.Defaults.ASAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
for suffix in ASPPSuffixes:
static_obj.add_action(suffix, SCons.Defaults.ASPPAction)
shared_obj.add_action(suffix, SCons.Defaults.ASPPAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
env['AS'] = env.Detect(assemblers) or 'as'
env['ASFLAGS'] = SCons.Util.CLVar('')
env['ASCOM'] = '$AS $ASFLAGS -o $TARGET $SOURCES'
env['ASPPFLAGS'] = '$ASFLAGS'
env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES'
def exists(env):
return env.Detect(assemblers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
yewang15215/django | tests/template_tests/filter_tests/test_addslashes.py | 473 | 1202 | from django.template.defaultfilters import addslashes
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class AddslashesTests(SimpleTestCase):
@setup({'addslashes01': '{% autoescape off %}{{ a|addslashes }} {{ b|addslashes }}{% endautoescape %}'})
def test_addslashes01(self):
output = self.engine.render_to_string('addslashes01', {"a": "<a>'", "b": mark_safe("<a>'")})
self.assertEqual(output, r"<a>\' <a>\'")
@setup({'addslashes02': '{{ a|addslashes }} {{ b|addslashes }}'})
def test_addslashes02(self):
output = self.engine.render_to_string('addslashes02', {"a": "<a>'", "b": mark_safe("<a>'")})
self.assertEqual(output, r"<a>\' <a>\'")
class FunctionTests(SimpleTestCase):
def test_quotes(self):
self.assertEqual(
addslashes('"double quotes" and \'single quotes\''),
'\\"double quotes\\" and \\\'single quotes\\\'',
)
def test_backslashes(self):
self.assertEqual(addslashes(r'\ : backslashes, too'), '\\\\ : backslashes, too')
def test_non_string_input(self):
self.assertEqual(addslashes(123), '123')
| bsd-3-clause |
anant-dev/django | tests/files/tests.py | 277 | 11286 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import os
import struct
import tempfile
import unittest
from io import BytesIO, StringIO
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.files.move import file_move_safe
from django.core.files.temp import NamedTemporaryFile
from django.core.files.uploadedfile import SimpleUploadedFile, UploadedFile
from django.test import mock
from django.utils import six
from django.utils._os import upath
try:
from PIL import Image
except ImportError:
Image = None
else:
from django.core.files import images
class FileTests(unittest.TestCase):
def test_unicode_uploadedfile_name(self):
uf = UploadedFile(name='¿Cómo?', content_type='text')
self.assertIs(type(repr(uf)), str)
def test_unicode_file_name(self):
f = File(None, 'djángö')
self.assertIs(type(repr(f)), str)
def test_context_manager(self):
orig_file = tempfile.TemporaryFile()
base_file = File(orig_file)
with base_file as f:
self.assertIs(base_file, f)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
self.assertTrue(orig_file.closed)
def test_namedtemporaryfile_closes(self):
"""
The symbol django.core.files.NamedTemporaryFile is assigned as
a different class on different operating systems. In
any case, the result should minimally mock some of the API of
tempfile.NamedTemporaryFile from the Python standard library.
"""
tempfile = NamedTemporaryFile()
self.assertTrue(hasattr(tempfile, "closed"))
self.assertFalse(tempfile.closed)
tempfile.close()
self.assertTrue(tempfile.closed)
def test_file_mode(self):
# Should not set mode to None if it is not present.
# See #14681, stdlib gzip module crashes if mode is set to None
file = SimpleUploadedFile("mode_test.txt", b"content")
self.assertFalse(hasattr(file, 'mode'))
gzip.GzipFile(fileobj=file)
def test_file_iteration(self):
"""
File objects should yield lines when iterated over.
Refs #22107.
"""
file = File(BytesIO(b'one\ntwo\nthree'))
self.assertEqual(list(file), [b'one\n', b'two\n', b'three'])
def test_file_iteration_windows_newlines(self):
"""
#8149 - File objects with \r\n line endings should yield lines
when iterated over.
"""
f = File(BytesIO(b'one\r\ntwo\r\nthree'))
self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_mac_newlines(self):
"""
#8149 - File objects with \r line endings should yield lines
when iterated over.
"""
f = File(BytesIO(b'one\rtwo\rthree'))
self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])
def test_file_iteration_mixed_newlines(self):
f = File(BytesIO(b'one\rtwo\nthree\r\nfour'))
self.assertEqual(list(f), [b'one\r', b'two\n', b'three\r\n', b'four'])
def test_file_iteration_with_unix_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\ntwo\nthree'))
# Set chunk size to create a boundary after \n:
# b'one\n...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\n', b'two\n', b'three'])
def test_file_iteration_with_windows_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\r\ntwo\r\nthree'))
# Set chunk size to create a boundary between \r and \n:
# b'one\r\n...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_with_mac_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\rtwo\rthree'))
# Set chunk size to create a boundary after \r:
# b'one\r...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])
def test_file_iteration_with_text(self):
f = File(StringIO('one\ntwo\nthree'))
self.assertEqual(list(f), ['one\n', 'two\n', 'three'])
def test_seekable(self):
"""
File.seekable() should be available on Python 3.
"""
with tempfile.TemporaryFile() as temp:
temp.write(b"contents\n")
test_file = File(temp, name="something.txt")
if six.PY2:
self.assertFalse(hasattr(test_file, 'seekable'))
if six.PY3:
self.assertTrue(hasattr(test_file, 'seekable'))
self.assertTrue(test_file.seekable())
class NoNameFileTestCase(unittest.TestCase):
"""
Other examples of unnamed files may be tempfile.SpooledTemporaryFile or
urllib.urlopen()
"""
def test_noname_file_default_name(self):
self.assertEqual(File(BytesIO(b'A file with no name')).name, None)
def test_noname_file_get_size(self):
self.assertEqual(File(BytesIO(b'A file with no name')).size, 19)
class ContentFileTestCase(unittest.TestCase):
def test_content_file_default_name(self):
self.assertEqual(ContentFile(b"content").name, None)
def test_content_file_custom_name(self):
"""
Test that the constructor of ContentFile accepts 'name' (#16590).
"""
name = "I can have a name too!"
self.assertEqual(ContentFile(b"content", name=name).name, name)
def test_content_file_input_type(self):
"""
Test that ContentFile can accept both bytes and unicode and that the
retrieved content is of the same type.
"""
self.assertIsInstance(ContentFile(b"content").read(), bytes)
if six.PY3:
self.assertIsInstance(ContentFile("español").read(), six.text_type)
else:
self.assertIsInstance(ContentFile("español").read(), bytes)
class DimensionClosingBug(unittest.TestCase):
"""
Test that get_image_dimensions() properly closes files (#8817)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_not_closing_of_files(self):
"""
Open files passed into get_image_dimensions() should stay opened.
"""
empty_io = BytesIO()
try:
images.get_image_dimensions(empty_io)
finally:
self.assertTrue(not empty_io.closed)
@unittest.skipUnless(Image, "Pillow not installed")
def test_closing_of_filenames(self):
"""
        get_image_dimensions() called with a filename should close the file.
"""
# We need to inject a modified open() builtin into the images module
# that checks if the file was closed properly if the function is
# called with a filename instead of an file object.
# get_image_dimensions will call our catching_open instead of the
# regular builtin one.
class FileWrapper(object):
_closed = []
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return getattr(self.f, name)
def close(self):
self._closed.append(True)
self.f.close()
def catching_open(*args):
return FileWrapper(open(*args))
images.open = catching_open
try:
images.get_image_dimensions(os.path.join(os.path.dirname(upath(__file__)), "test1.png"))
finally:
del images.open
self.assertTrue(FileWrapper._closed)
class InconsistentGetImageDimensionsBug(unittest.TestCase):
"""
Test that get_image_dimensions() works properly after various calls
using a file handler (#11158)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_multiple_calls(self):
"""
Multiple calls of get_image_dimensions() should return the same size.
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "test.png")
with open(img_path, 'rb') as fh:
image = images.ImageFile(fh)
image_pil = Image.open(fh)
size_1 = images.get_image_dimensions(image)
size_2 = images.get_image_dimensions(image)
self.assertEqual(image_pil.size, size_1)
self.assertEqual(size_1, size_2)
@unittest.skipUnless(Image, "Pillow not installed")
def test_bug_19457(self):
"""
Regression test for #19457
get_image_dimensions fails on some pngs, while Image.size is working good on them
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "magic.png")
size = images.get_image_dimensions(img_path)
with open(img_path, 'rb') as fh:
self.assertEqual(size, Image.open(fh).size)
@unittest.skipUnless(Image, "Pillow not installed")
class GetImageDimensionsTests(unittest.TestCase):
def test_invalid_image(self):
"""
get_image_dimensions() should return (None, None) for the dimensions of
invalid images (#24441).
brokenimg.png is not a valid image and it has been generated by:
$ echo "123" > brokenimg.png
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "brokenimg.png")
with open(img_path, 'rb') as fh:
size = images.get_image_dimensions(fh)
self.assertEqual(size, (None, None))
def test_valid_image(self):
"""
get_image_dimensions() should catch struct.error while feeding the PIL
Image parser (#24544).
Emulates the Parser feed error. Since the error is raised on every feed
attempt, the resulting image size should be invalid: (None, None).
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "test.png")
with mock.patch('PIL.ImageFile.Parser.feed', side_effect=struct.error):
with open(img_path, 'rb') as fh:
size = images.get_image_dimensions(fh)
self.assertEqual(size, (None, None))
class FileMoveSafeTests(unittest.TestCase):
def test_file_move_overwrite(self):
handle_a, self.file_a = tempfile.mkstemp()
handle_b, self.file_b = tempfile.mkstemp()
# file_move_safe should raise an IOError exception if destination file exists and allow_overwrite is False
self.assertRaises(IOError, lambda: file_move_safe(self.file_a, self.file_b, allow_overwrite=False))
# should allow it and continue on if allow_overwrite is True
self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True))
os.close(handle_a)
os.close(handle_b)
class SpooledTempTests(unittest.TestCase):
def test_in_memory_spooled_temp(self):
with tempfile.SpooledTemporaryFile() as temp:
temp.write(b"foo bar baz quux\n")
django_file = File(temp, name="something.txt")
self.assertEqual(django_file.size, 17)
def test_written_spooled_temp(self):
with tempfile.SpooledTemporaryFile(max_size=4) as temp:
temp.write(b"foo bar baz quux\n")
django_file = File(temp, name="something.txt")
self.assertEqual(django_file.size, 17)
| bsd-3-clause |
achals/servo | tests/wpt/web-platform-tests/tools/py/testing/test_iniconfig.py | 162 | 8194 | import py
import pytest
from py._iniconfig import IniConfig, ParseError, __all__ as ALL
from py._iniconfig import iscommentline
from textwrap import dedent
def pytest_generate_tests(metafunc):
if 'input' in metafunc.funcargnames:
for name, (input, expected) in check_tokens.items():
metafunc.addcall(id=name, funcargs={
'input': input,
'expected': expected,
})
elif hasattr(metafunc.function, 'multi'):
kwargs = metafunc.function.multi.kwargs
names, values = zip(*kwargs.items())
values = cartesian_product(*values)
for p in values:
metafunc.addcall(funcargs=dict(zip(names, p)))
def cartesian_product(L,*lists):
# copied from http://bit.ly/cyIXjn
if not lists:
for x in L:
yield (x,)
else:
for x in L:
for y in cartesian_product(lists[0],*lists[1:]):
yield (x,)+y
check_tokens = {
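    # maps a test name to (ini source, expected list of (lineno, section, name, value) tokens)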
'section': (
'[section]',
[(0, 'section', None, None)]
),
'value': (
'value = 1',
[(0, None, 'value', '1')]
),
'value in section': (
'[section]\nvalue=1',
[(0, 'section', None, None), (1, 'section', 'value', '1')]
),
'value with continuation': (
'names =\n Alice\n Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'value with aligned continuation': (
'names = Alice\n'
' Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'blank line':(
'[section]\n\nvalue=1',
[(0, 'section', None, None), (2, 'section', 'value', '1')]
),
'comment': (
'# comment',
[]
),
'comment on value': (
'value = 1',
[(0, None, 'value', '1')]
),
'comment on section': (
'[section] #comment',
[(0, 'section', None, None)]
),
'comment2': (
'; comment',
[]
),
'comment2 on section': (
'[section] ;comment',
[(0, 'section', None, None)]
),
'pseudo section syntax in value': (
'name = value []',
[(0, None, 'name', 'value []')]
),
'assignment in value': (
'value = x = 3',
[(0, None, 'value', 'x = 3')]
),
'use of colon for name-values': (
'name: y',
[(0, None, 'name', 'y')]
),
'use of colon without space': (
'value:y=5',
[(0, None, 'value', 'y=5')]
),
'equality gets precedence': (
'value=xyz:5',
[(0, None, 'value', 'xyz:5')]
),
}
def parse(input):
# only for testing purposes - _parse() does not use state except path
ini = object.__new__(IniConfig)
ini.path = "sample"
return ini._parse(input.splitlines(True))
def parse_a_error(input):
return py.test.raises(ParseError, parse, input)
def test_tokenize(input, expected):
parsed = parse(input)
assert parsed == expected
def test_parse_empty():
parsed = parse("")
assert not parsed
ini = IniConfig("sample", "")
assert not ini.sections
def test_ParseError():
e = ParseError("filename", 0, "hello")
assert str(e) == "filename:1: hello"
def test_continuation_needs_perceeding_token():
excinfo = parse_a_error(' Foo')
assert excinfo.value.lineno == 0
def test_continuation_cant_be_after_section():
excinfo = parse_a_error('[section]\n Foo')
assert excinfo.value.lineno == 1
def test_section_cant_be_empty():
excinfo = parse_a_error('[]')
@py.test.mark.multi(line=[
'!!',
])
def test_error_on_weird_lines(line):
parse_a_error(line)
def test_iniconfig_from_file(tmpdir):
path = tmpdir/'test.txt'
path.write('[metadata]\nname=1')
config = IniConfig(path=path)
assert list(config.sections) == ['metadata']
config = IniConfig(path, "[diff]")
assert list(config.sections) == ['diff']
py.test.raises(TypeError, "IniConfig(data=path.read())")
def test_iniconfig_section_first(tmpdir):
excinfo = py.test.raises(ParseError, """
IniConfig("x", data='name=1')
""")
assert excinfo.value.msg == "no section header defined"
def test_iniconig_section_duplicate_fails():
excinfo = py.test.raises(ParseError, r"""
IniConfig("x", data='[section]\n[section]')
""")
assert 'duplicate section' in str(excinfo.value)
def test_iniconfig_duplicate_key_fails():
excinfo = py.test.raises(ParseError, r"""
IniConfig("x", data='[section]\nname = Alice\nname = bob')
""")
assert 'duplicate name' in str(excinfo.value)
def test_iniconfig_lineof():
config = IniConfig("x.ini", data=
'[section]\n'
'value = 1\n'
'[section2]\n'
'# comment\n'
'value =2'
)
assert config.lineof('missing') is None
assert config.lineof('section') == 1
assert config.lineof('section2') == 3
assert config.lineof('section', 'value') == 2
assert config.lineof('section2','value') == 5
assert config['section'].lineof('value') == 2
assert config['section2'].lineof('value') == 5
def test_iniconfig_get_convert():
config= IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
assert config.get('section', 'int') == '1'
assert config.get('section', 'int', convert=int) == 1
def test_iniconfig_get_missing():
config= IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
assert config.get('section', 'missing', default=1) == 1
assert config.get('section', 'missing') is None
def test_section_get():
config = IniConfig("x", data='[section]\nvalue=1')
section = config['section']
assert section.get('value', convert=int) == 1
assert section.get('value', 1) == "1"
assert section.get('missing', 2) == 2
def test_missing_section():
config = IniConfig("x", data='[section]\nvalue=1')
py.test.raises(KeyError,'config["other"]')
def test_section_getitem():
config = IniConfig("x", data='[section]\nvalue=1')
assert config['section']['value'] == '1'
assert config['section']['value'] == '1'
def test_section_iter():
config = IniConfig("x", data='[section]\nvalue=1')
names = list(config['section'])
assert names == ['value']
items = list(config['section'].items())
assert items==[('value', '1')]
def test_config_iter():
config = IniConfig("x.ini", data=dedent('''
[section1]
value=1
[section2]
value=2
'''))
l = list(config)
assert len(l) == 2
assert l[0].name == 'section1'
assert l[0]['value'] == '1'
assert l[1].name == 'section2'
assert l[1]['value'] == '2'
def test_config_contains():
config = IniConfig("x.ini", data=dedent('''
[section1]
value=1
[section2]
value=2
'''))
assert 'xyz' not in config
assert 'section1' in config
assert 'section2' in config
def test_iter_file_order():
config = IniConfig("x.ini", data="""
[section2] #cpython dict ordered before section
value = 1
value2 = 2 # dict ordered before value
[section]
a = 1
b = 2
""")
l = list(config)
secnames = [x.name for x in l]
assert secnames == ['section2', 'section']
assert list(config['section2']) == ['value', 'value2']
assert list(config['section']) == ['a', 'b']
def test_example_pypirc():
config = IniConfig("pypirc", data=dedent('''
[distutils]
index-servers =
pypi
other
[pypi]
repository: <repository-url>
username: <username>
password: <password>
[other]
repository: http://example.com/pypi
username: <username>
password: <password>
'''))
distutils, pypi, other = list(config)
assert distutils["index-servers"] == "pypi\nother"
assert pypi['repository'] == '<repository-url>'
assert pypi['username'] == '<username>'
assert pypi['password'] == '<password>'
assert ['repository', 'username', 'password'] == list(other)
def test_api_import():
assert ALL == ['IniConfig', 'ParseError']
@pytest.mark.parametrize("line", [
"#qwe",
" #qwe",
";qwe",
" ;qwe",
])
def test_iscommentline_true(line):
assert iscommentline(line)
| mpl-2.0 |
nhomar/odoo-mirror | addons/l10n_pl/__openerp__.py | 63 | 2148 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Grzegorz Grzelak [email protected]
# All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Poland - Accounting',
'version' : '1.0',
'author' : 'Grzegorz Grzelak (OpenGLOBE)',
'website': 'http://www.openglobe.pl',
'category' : 'Localization/Account Charts',
'description': """
This is the module to manage the accounting chart and taxes for Poland in OpenERP.
==================================================================================
This is the module for creating the model chart of accounts, taxes, tax areas and
tax registers. The module also sets up accounts for the purchase and sale of goods,
assuming that all goods are traded wholesale.
This module is intended for odoo 8.0.
Internal OpenGLOBE version number 1.01
""",
'depends' : ['account', 'base_iban', 'base_vat', 'account_chart'],
'demo' : [],
'data' : ['account_tax_code.xml',
'account_chart.xml',
'account_tax.xml',
'fiscal_position.xml',
'country_pl.xml',
'l10n_chart_pl_wizard.xml'
],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
siemens/thrift | test/py/SerializationTest.py | 2 | 15256 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from ThriftTest.ttypes import *
from DebugProtoTest.ttypes import CompactProtoTestStruct, Empty
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TCompactProtocol, TJSONProtocol
from thrift.TSerialization import serialize, deserialize
import sys
import unittest
class AbstractTest(unittest.TestCase):
def setUp(self):
self.v1obj = VersioningTestV1(
begin_in_both=12345,
old_string='aaa',
end_in_both=54321,
)
self.v2obj = VersioningTestV2(
begin_in_both=12345,
newint=1,
newbyte=2,
newshort=3,
newlong=4,
newdouble=5.0,
newstruct=Bonk(message="Hello!", type=123),
newlist=[7,8,9],
newset=set([42,1,8]),
newmap={1:2,2:3},
newstring="Hola!",
end_in_both=54321,
)
self.bools = Bools(im_true=True, im_false=False)
self.bools_flipped = Bools(im_true=False, im_false=True)
self.large_deltas = LargeDeltas (
b1=self.bools,
b10=self.bools_flipped,
b100=self.bools,
check_true=True,
b1000=self.bools_flipped,
check_false=False,
vertwo2000=VersioningTestV2(newstruct=Bonk(message='World!', type=314)),
a_set2500=set(['lazy', 'brown', 'cow']),
vertwo3000=VersioningTestV2(newset=set([2, 3, 5, 7, 11])),
big_numbers=[2**8, 2**16, 2**31-1, -(2**31-1)]
)
self.compact_struct = CompactProtoTestStruct(
a_byte = 127,
a_i16=32000,
a_i32=1000000000,
a_i64=0xffffffffff,
a_double=5.6789,
a_string="my string",
true_field=True,
false_field=False,
empty_struct_field=Empty(),
byte_list=[-127, -1, 0, 1, 127],
i16_list=[-1, 0, 1, 0x7fff],
i32_list= [-1, 0, 0xff, 0xffff, 0xffffff, 0x7fffffff],
i64_list=[-1, 0, 0xff, 0xffff, 0xffffff, 0xffffffff, 0xffffffffff, 0xffffffffffff, 0xffffffffffffff, 0x7fffffffffffffff],
double_list=[0.1, 0.2, 0.3],
string_list=["first", "second", "third"],
boolean_list=[True, True, True, False, False, False],
struct_list=[Empty(), Empty()],
byte_set=set([-127, -1, 0, 1, 127]),
i16_set=set([-1, 0, 1, 0x7fff]),
i32_set=set([1, 2, 3]),
i64_set=set([-1, 0, 0xff, 0xffff, 0xffffff, 0xffffffff, 0xffffffffff, 0xffffffffffff, 0xffffffffffffff, 0x7fffffffffffffff]),
double_set=set([0.1, 0.2, 0.3]),
string_set=set(["first", "second", "third"]),
boolean_set=set([True, False]),
#struct_set=set([Empty()]), # unhashable instance
byte_byte_map={1 : 2},
i16_byte_map={1 : 1, -1 : 1, 0x7fff : 1},
i32_byte_map={1 : 1, -1 : 1, 0x7fffffff : 1},
i64_byte_map={0 : 1, 1 : 1, -1 : 1, 0x7fffffffffffffff : 1},
double_byte_map={-1.1 : 1, 1.1 : 1},
string_byte_map={"first" : 1, "second" : 2, "third" : 3, "" : 0},
boolean_byte_map={True : 1, False: 0},
byte_i16_map={1 : 1, 2 : -1, 3 : 0x7fff},
byte_i32_map={1 : 1, 2 : -1, 3 : 0x7fffffff},
byte_i64_map={1 : 1, 2 : -1, 3 : 0x7fffffffffffffff},
byte_double_map={1 : 0.1, 2 : -0.1, 3 : 1000000.1},
byte_string_map={1 : "", 2 : "blah", 3 : "loooooooooooooong string"},
byte_boolean_map={1 : True, 2 : False},
#list_byte_map # unhashable
#set_byte_map={set([1, 2, 3]) : 1, set([0, 1]) : 2, set([]) : 0}, # unhashable
#map_byte_map # unhashable
byte_map_map={0 : {}, 1 : {1 : 1}, 2 : {1 : 1, 2 : 2}},
byte_set_map={0 : set([]), 1 : set([1]), 2 : set([1, 2])},
byte_list_map={0 : [], 1 : [1], 2 : [1, 2]},
)
self.nested_lists_i32x2 = NestedListsI32x2(
[
[ 1, 1, 2 ],
[ 2, 7, 9 ],
[ 3, 5, 8 ]
]
)
self.nested_lists_i32x3 = NestedListsI32x3(
[
[
[ 2, 7, 9 ],
[ 3, 5, 8 ]
],
[
[ 1, 1, 2 ],
[ 1, 4, 9 ]
]
]
)
self.nested_mixedx2 = NestedMixedx2( int_set_list=[
set([1,2,3]),
set([1,4,9]),
set([1,2,3,5,8,13,21]),
set([-1, 0, 1])
],
# note, the sets below are sets of chars, since the strings are iterated
map_int_strset={ 10:set('abc'), 20:set('def'), 30:set('GHI') },
map_int_strset_list=[
{ 10:set('abc'), 20:set('def'), 30:set('GHI') },
{ 100:set('lmn'), 200:set('opq'), 300:set('RST') },
{ 1000:set('uvw'), 2000:set('wxy'), 3000:set('XYZ') }
]
)
self.nested_lists_bonk = NestedListsBonk(
[
[
[
Bonk(message='inner A first', type=1),
Bonk(message='inner A second', type=1)
],
[
Bonk(message='inner B first', type=2),
Bonk(message='inner B second', type=2)
]
]
]
)
self.list_bonks = ListBonks(
[
Bonk(message='inner A', type=1),
Bonk(message='inner B', type=2),
Bonk(message='inner C', type=0)
]
)
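  # Round-trip helpers: write the object to an in-memory transport with the configured protocol factory, then read it back.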
def _serialize(self, obj):
trans = TTransport.TMemoryBuffer()
prot = self.protocol_factory.getProtocol(trans)
obj.write(prot)
return trans.getvalue()
def _deserialize(self, objtype, data):
prot = self.protocol_factory.getProtocol(TTransport.TMemoryBuffer(data))
ret = objtype()
ret.read(prot)
return ret
def testForwards(self):
obj = self._deserialize(VersioningTestV2, self._serialize(self.v1obj))
self.assertEquals(obj.begin_in_both, self.v1obj.begin_in_both)
self.assertEquals(obj.end_in_both, self.v1obj.end_in_both)
def testBackwards(self):
obj = self._deserialize(VersioningTestV1, self._serialize(self.v2obj))
self.assertEquals(obj.begin_in_both, self.v2obj.begin_in_both)
self.assertEquals(obj.end_in_both, self.v2obj.end_in_both)
def testSerializeV1(self):
obj = self._deserialize(VersioningTestV1, self._serialize(self.v1obj))
self.assertEquals(obj, self.v1obj)
def testSerializeV2(self):
obj = self._deserialize(VersioningTestV2, self._serialize(self.v2obj))
self.assertEquals(obj, self.v2obj)
def testBools(self):
self.assertNotEquals(self.bools, self.bools_flipped)
self.assertNotEquals(self.bools, self.v1obj)
obj = self._deserialize(Bools, self._serialize(self.bools))
self.assertEquals(obj, self.bools)
obj = self._deserialize(Bools, self._serialize(self.bools_flipped))
self.assertEquals(obj, self.bools_flipped)
rep = repr(self.bools)
self.assertTrue(len(rep) > 0)
def testLargeDeltas(self):
# test large field deltas (meaningful in CompactProto only)
obj = self._deserialize(LargeDeltas, self._serialize(self.large_deltas))
self.assertEquals(obj, self.large_deltas)
rep = repr(self.large_deltas)
self.assertTrue(len(rep) > 0)
def testNestedListsI32x2(self):
obj = self._deserialize(NestedListsI32x2, self._serialize(self.nested_lists_i32x2))
self.assertEquals(obj, self.nested_lists_i32x2)
rep = repr(self.nested_lists_i32x2)
self.assertTrue(len(rep) > 0)
def testNestedListsI32x3(self):
obj = self._deserialize(NestedListsI32x3, self._serialize(self.nested_lists_i32x3))
self.assertEquals(obj, self.nested_lists_i32x3)
rep = repr(self.nested_lists_i32x3)
self.assertTrue(len(rep) > 0)
def testNestedMixedx2(self):
obj = self._deserialize(NestedMixedx2, self._serialize(self.nested_mixedx2))
self.assertEquals(obj, self.nested_mixedx2)
rep = repr(self.nested_mixedx2)
self.assertTrue(len(rep) > 0)
def testNestedListsBonk(self):
obj = self._deserialize(NestedListsBonk, self._serialize(self.nested_lists_bonk))
self.assertEquals(obj, self.nested_lists_bonk)
rep = repr(self.nested_lists_bonk)
self.assertTrue(len(rep) > 0)
def testListBonks(self):
obj = self._deserialize(ListBonks, self._serialize(self.list_bonks))
self.assertEquals(obj, self.list_bonks)
rep = repr(self.list_bonks)
self.assertTrue(len(rep) > 0)
def testCompactStruct(self):
# test large field deltas (meaningful in CompactProto only)
obj = self._deserialize(CompactProtoTestStruct, self._serialize(self.compact_struct))
self.assertEquals(obj, self.compact_struct)
rep = repr(self.compact_struct)
self.assertTrue(len(rep) > 0)
def testIntegerLimits(self):
if (sys.version_info[0] == 2 and sys.version_info[1] <= 6):
print('Skipping testIntegerLimits for Python 2.6')
return
bad_values = [CompactProtoTestStruct(a_byte=128), CompactProtoTestStruct(a_byte=-129),
CompactProtoTestStruct(a_i16=32768), CompactProtoTestStruct(a_i16=-32769),
CompactProtoTestStruct(a_i32=2147483648), CompactProtoTestStruct(a_i32=-2147483649),
CompactProtoTestStruct(a_i64=9223372036854775808), CompactProtoTestStruct(a_i64=-9223372036854775809)
]
for value in bad_values:
self.assertRaises(Exception, self._serialize, value)
class NormalBinaryTest(AbstractTest):
protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
class AcceleratedBinaryTest(AbstractTest):
protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
class CompactProtocolTest(AbstractTest):
protocol_factory = TCompactProtocol.TCompactProtocolFactory()
class JSONProtocolTest(AbstractTest):
protocol_factory = TJSONProtocol.TJSONProtocolFactory()
class AcceleratedFramedTest(unittest.TestCase):
def testSplit(self):
"""Test FramedTransport and BinaryProtocolAccelerated
Tests that TBinaryProtocolAccelerated and TFramedTransport
play nicely together when a read spans a frame"""
protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
bigstring = "".join(chr(byte) for byte in range(ord("a"), ord("z")+1))
databuf = TTransport.TMemoryBuffer()
prot = protocol_factory.getProtocol(databuf)
prot.writeI32(42)
prot.writeString(bigstring)
prot.writeI16(24)
data = databuf.getvalue()
cutpoint = len(data) // 2
parts = [ data[:cutpoint], data[cutpoint:] ]
framed_buffer = TTransport.TMemoryBuffer()
framed_writer = TTransport.TFramedTransport(framed_buffer)
for part in parts:
framed_writer.write(part)
framed_writer.flush()
self.assertEquals(len(framed_buffer.getvalue()), len(data) + 8)
# Recreate framed_buffer so we can read from it.
framed_buffer = TTransport.TMemoryBuffer(framed_buffer.getvalue())
framed_reader = TTransport.TFramedTransport(framed_buffer)
prot = protocol_factory.getProtocol(framed_reader)
self.assertEqual(prot.readI32(), 42)
self.assertEqual(prot.readString(), bigstring)
self.assertEqual(prot.readI16(), 24)
class SerializersTest(unittest.TestCase):
def testSerializeThenDeserialize(self):
obj = Xtruct2(i32_thing=1,
struct_thing=Xtruct(string_thing="foo"))
s1 = serialize(obj)
for i in range(10):
self.assertEquals(s1, serialize(obj))
objcopy = Xtruct2()
deserialize(objcopy, serialize(obj))
self.assertEquals(obj, objcopy)
obj = Xtruct(string_thing="bar")
objcopy = Xtruct()
deserialize(objcopy, serialize(obj))
self.assertEquals(obj, objcopy)
# test booleans
obj = Bools(im_true=True, im_false=False)
objcopy = Bools()
deserialize(objcopy, serialize(obj))
self.assertEquals(obj, objcopy)
# test enums
for num, name in Numberz._VALUES_TO_NAMES.items():
obj = Bonk(message='enum Numberz value %d is string %s' % (num, name), type=num)
objcopy = Bonk()
deserialize(objcopy, serialize(obj))
self.assertEquals(obj, objcopy)
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(NormalBinaryTest))
suite.addTest(loader.loadTestsFromTestCase(AcceleratedBinaryTest))
suite.addTest(loader.loadTestsFromTestCase(CompactProtocolTest))
suite.addTest(loader.loadTestsFromTestCase(JSONProtocolTest))
suite.addTest(loader.loadTestsFromTestCase(AcceleratedFramedTest))
suite.addTest(loader.loadTestsFromTestCase(SerializersTest))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.