#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <[email protected]>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel ([email protected])"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import stat
import errno
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# WindowsError (1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (WindowsError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
_open = builtins.open # Since 'open' is TarFile.open
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar  \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname"))
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000 # symbolic link
S_IFREG = 0o100000 # regular file
S_IFBLK = 0o060000 # block device
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFIFO = 0o010000 # fifo
TSUID = 0o4000 # set UID on execution
TSGID = 0o2000 # set GID on execution
TSVTX = 0o1000 # reserved
TUREAD = 0o400 # read by owner
TUWRITE = 0o200 # write by owner
TUEXEC = 0o100 # execute/search by owner
TGREAD = 0o040 # read by group
TGWRITE = 0o020 # write by group
TGEXEC = 0o010 # execute/search by group
TOREAD = 0o004 # read by other
TOWRITE = 0o002 # write by other
TOEXEC = 0o001 # execute/search by other
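# For example, the conventional permission mode 0o755 decomposes as
# TUREAD|TUWRITE|TUEXEC | TGREAD|TGEXEC | TOREAD|TOEXEC.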
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
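# Example round trip: stn("foo", 8, "ascii", "strict") -> b"foo\0\0\0\0\0",
# and nts(b"foo\0\0\0\0\0", "ascii", "strict") -> "foo".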
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0o200):
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0
for i in range(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
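# Example: nti(b"0000644\x00") -> 420 (i.e. 0o644).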
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = bytearray()
for i in range(digits - 1):
s.insert(0, n & 0o377)
n >>= 8
s.insert(0, 0o200)
return s
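# Example: itn(0o644) -> b"0000644\x00". Values that do not fit into
# digits-1 octal digits are emitted in the base-256 form with a leading
# 0o200 byte (GNU format only).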
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
while True:
buf = src.read(16*1024)
if not buf:
break
dst.write(buf)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadble tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile(object):
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream(object):
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
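        # Gzip member header (RFC 1952): magic \037\213, method \010 (deflate),
        # flags \010 (FNAME set), 4-byte mtime, XFL \002, OS \377 (unknown).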
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
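        # Header flag bits: 1 FTEXT, 2 FHCRC, 4 FEXTRA, 8 FNAME, 16 FCOMMENT.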
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\037\213\010"):
return "gz"
if self.buf.startswith(b"BZh91"):
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = b""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
data = self.bz2obj.decompress(raw)
self.buf += data
x += len(data)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
def seekable(self):
if not hasattr(self.fileobj, "seekable"):
# XXX gzip.GzipFile and bz2.BZ2File
return True
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
#class _FileInFile
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
tarinfo.sparse)
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = b""
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = b""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = b""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
# XXX TextIOWrapper uses the read1() method.
read1 = read
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
while True:
buf = self.fileobj.read(self.blocksize)
self.buffer += buf
if not buf or b"\n" in buf:
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = b""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
# Test number fields for values that exceed the field limit or values
        # that are better stored as floats (e.g. a high-resolution mtime).
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
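        # Splice the checksum (six octal digits plus NUL) into the chksum
        # field at offset 148 of the 512-byte block (512 - 364 == 148).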
buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf8")
if binary:
# Try to restore the original byte representation of `value'.
                # Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
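            # e.g. keyword=b"path", value=b"foo": l is 10, p converges to 12,
            # yielding the record b"12 path=foo\n" (12 bytes, length included).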
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
        # Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
    # implement the following operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
        # implementations to store them as raw binary strings if the
        # translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf8"
# Parse pax header information. A record looks like that:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
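        # e.g. b"30 mtime=1350580499.335241001\n" is one complete 30-byte record.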
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf8", "utf8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf8", "utf8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed, when TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
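    # Illustrative use (the archive name is hypothetical):
    #     tar = TarFile.open("sample.tar.gz", "r:gz")
    #     tar.extractall()
    #     tar.close()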
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
extfileobj = fileobj is not None
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
if not extfileobj and fileobj is not None:
fileobj.close()
if fileobj is None:
raise
raise ReadError("not a gzip file")
except:
if not extfileobj and fileobj is not None:
fileobj.close()
raise
t._extfileobj = extfileobj
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
        # Backward slashes are converted to forward slashes;
        # absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
                # The inode is added only if it is valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print(filemode(tarinfo.mode), end=' ')
print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid), end=' ')
if tarinfo.ischr() or tarinfo.isblk():
print("%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)), end=' ')
else:
print("%10d" % tarinfo.size, end=' ')
print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6], end=' ')
print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
if verbose:
if tarinfo.issym():
print("->", tarinfo.linkname, end=' ')
if tarinfo.islnk():
print("link to", tarinfo.linkname, end=' ')
print()
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object, if it returns None the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
f = bltn_open(name, "rb")
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid irritation about the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set_attrs directories, as we will do that further down
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except EnvironmentError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
        # and build the destination pathname, replacing
        # forward slashes with platform specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
target = bltn_open(targetpath, "wb")
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
target.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
        except symlink_exception:
            try:
                self._extract_member(self._find_link_target(tarinfo),
                                     targetpath)
            except KeyError:
                raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError as e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter(object):
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def __next__(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
next = __next__ # for Python 2.x
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
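# ---------------------------------------------------------------------------
# Editorial illustration (not part of the original distlib backport): a
# minimal, hypothetical sketch of how the TarFile API defined above is
# typically used.  The archive and directory names are placeholders.
def _example_roundtrip(archive_path="example.tar.gz", source_dir="payload",
                       dest_dir="unpacked"):
    """Create a gzip-compressed archive, list its members and extract it."""
    # "w:gz" selects gzip compression; "w:bz2" would select bzip2.
    with TarFile.open(archive_path, "w:gz") as tar:
        tar.add(source_dir, arcname=os.path.basename(source_dir))
    # "r:*" lets open() autodetect the compression method on read.
    with TarFile.open(archive_path, "r:*") as tar:
        for member in tar.getmembers():
            print("%s (%d bytes)" % (member.name, member.size))
        tar.extractall(path=dest_dir)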
|
apache-2.0
|
RonEld/mbed
|
tools/export/makefile/__init__.py
|
4
|
7853
|
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os.path import splitext, basename, relpath, join, abspath, dirname,\
exists
from os import remove
import sys
from subprocess import check_output, CalledProcessError, Popen, PIPE
import shutil
from jinja2.exceptions import TemplateNotFound
from tools.export.exporters import Exporter, filter_supported
from tools.utils import NotSupportedException
from tools.targets import TARGET_MAP
class Makefile(Exporter):
"""Generic Makefile template that mimics the behavior of the python build
system
"""
DOT_IN_RELATIVE_PATH = True
MBED_CONFIG_HEADER_SUPPORTED = True
POST_BINARY_WHITELIST = set([
"MCU_NRF51Code.binary_hook",
"TEENSY3_1Code.binary_hook"
])
def generate(self):
"""Generate the makefile
Note: subclasses should not need to override this method
"""
if not self.resources.linker_script:
raise NotSupportedException("No linker script found.")
self.resources.win_to_unix()
to_be_compiled = [splitext(src)[0] + ".o" for src in
self.resources.s_sources +
self.resources.c_sources +
self.resources.cpp_sources]
libraries = [self.prepare_lib(basename(lib)) for lib
in self.resources.libraries]
sys_libs = [self.prepare_sys_lib(lib) for lib
in self.toolchain.sys_libs]
ctx = {
'name': self.project_name,
'to_be_compiled': to_be_compiled,
'object_files': self.resources.objects,
'include_paths': list(set(self.resources.inc_dirs)),
'library_paths': self.resources.lib_dirs,
'linker_script': self.resources.linker_script,
'libraries': libraries,
'ld_sys_libs': sys_libs,
'hex_files': self.resources.hex_files,
'vpath': (["../../.."]
if (basename(dirname(dirname(self.export_dir)))
== "projectfiles")
else [".."]),
'cc_cmd': " ".join(["\'" + part + "\'" for part
in ([basename(self.toolchain.cc[0])] +
self.toolchain.cc[1:])]),
'cppc_cmd': " ".join(["\'" + part + "\'" for part
in ([basename(self.toolchain.cppc[0])] +
self.toolchain.cppc[1:])]),
'asm_cmd': " ".join(["\'" + part + "\'" for part
in ([basename(self.toolchain.asm[0])] +
self.toolchain.asm[1:])]),
'ld_cmd': "\'" + basename(self.toolchain.ld[0]) + "\'",
'elf2bin_cmd': "\'" + basename(self.toolchain.elf2bin) + "\'",
'link_script_ext': self.toolchain.LINKER_EXT,
'link_script_option': self.LINK_SCRIPT_OPTION,
'user_library_flag': self.USER_LIBRARY_FLAG,
}
if hasattr(self.toolchain, "preproc"):
ctx['pp_cmd'] = " ".join(["\'" + part + "\'" for part
in ([basename(self.toolchain.preproc[0])] +
self.toolchain.preproc[1:] +
self.toolchain.ld[1:])])
else:
ctx['pp_cmd'] = None
for key in ['include_paths', 'library_paths', 'linker_script',
'hex_files']:
if isinstance(ctx[key], list):
ctx[key] = [ctx['vpath'][0] + "/" + t for t in ctx[key]]
else:
ctx[key] = ctx['vpath'][0] + "/" + ctx[key]
if "../." not in ctx["include_paths"]:
ctx["include_paths"] += ['../.']
for key in ['include_paths', 'library_paths', 'hex_files',
'to_be_compiled']:
ctx[key] = sorted(ctx[key])
ctx.update(self.flags)
for templatefile in \
['makefile/%s_%s.tmpl' % (self.TEMPLATE,
self.target.lower())] + \
['makefile/%s_%s.tmpl' % (self.TEMPLATE,
label.lower()) for label
in self.toolchain.target.extra_labels] +\
['makefile/%s.tmpl' % self.TEMPLATE]:
try:
self.gen_file(templatefile, ctx, 'Makefile')
break
except TemplateNotFound:
pass
else:
raise NotSupportedException("This make tool is in development")
@staticmethod
def build(project_name, log_name="build_log.txt", cleanup=True):
""" Build Make project """
# > Make -j
cmd = ["make", "-j"]
# Build the project
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
ret_code = p.returncode
out_string = "=" * 10 + "STDOUT" + "=" * 10 + "\n"
out_string += out
out_string += "=" * 10 + "STDERR" + "=" * 10 + "\n"
out_string += err
if ret_code == 0:
out_string += "SUCCESS"
else:
out_string += "FAILURE"
        print(out_string)
if log_name:
# Write the output to the log file
with open(log_name, 'w+') as f:
f.write(out_string)
# Cleanup the exported and built files
if cleanup:
remove("Makefile")
remove(log_name)
# legacy .build directory cleaned if exists
if exists('.build'):
shutil.rmtree('.build')
if exists('BUILD'):
shutil.rmtree('BUILD')
if ret_code != 0:
# Seems like something went wrong.
return -1
else:
return 0
class GccArm(Makefile):
"""GCC ARM specific makefile target"""
TARGETS = filter_supported("GCC_ARM", Makefile.POST_BINARY_WHITELIST)
NAME = 'Make-GCC-ARM'
TEMPLATE = 'make-gcc-arm'
TOOLCHAIN = "GCC_ARM"
LINK_SCRIPT_OPTION = "-T"
USER_LIBRARY_FLAG = "-L"
@staticmethod
def prepare_lib(libname):
return "-l:" + libname
@staticmethod
def prepare_sys_lib(libname):
return "-l" + libname
class Armc5(Makefile):
"""ARM Compiler 5 specific makefile target"""
TARGETS = filter_supported("ARM", Makefile.POST_BINARY_WHITELIST)
NAME = 'Make-ARMc5'
TEMPLATE = 'make-armc5'
TOOLCHAIN = "ARM"
LINK_SCRIPT_OPTION = "--scatter"
USER_LIBRARY_FLAG = "--userlibpath "
@staticmethod
def prepare_lib(libname):
return libname
@staticmethod
def prepare_sys_lib(libname):
return libname
class IAR(Makefile):
"""IAR specific makefile target"""
TARGETS = filter_supported("IAR", Makefile.POST_BINARY_WHITELIST)
NAME = 'Make-IAR'
TEMPLATE = 'make-iar'
TOOLCHAIN = "IAR"
LINK_SCRIPT_OPTION = "--config"
USER_LIBRARY_FLAG = "-L"
@staticmethod
def prepare_lib(libname):
if "lib" == libname[:3]:
libname = libname[3:]
return "-l" + splitext(libname)[0]
@staticmethod
def prepare_sys_lib(libname):
if "lib" == libname[:3]:
libname = libname[3:]
return "-l" + splitext(libname)[0]
|
apache-2.0
|
marknca/cling
|
dependencies/boto3/dynamodb/transform.py
|
8
|
12264
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from collections import Mapping, MutableSequence
from boto3.dynamodb.types import TypeSerializer, TypeDeserializer
from boto3.dynamodb.conditions import ConditionBase
from boto3.dynamodb.conditions import ConditionExpressionBuilder
from boto3.docs.utils import DocumentModifiedShape
def register_high_level_interface(base_classes, **kwargs):
base_classes.insert(0, DynamoDBHighLevelResource)
def copy_dynamodb_params(params, **kwargs):
return copy.deepcopy(params)
class DynamoDBHighLevelResource(object):
def __init__(self, *args, **kwargs):
super(DynamoDBHighLevelResource, self).__init__(*args, **kwargs)
# Apply handler that creates a copy of the user provided dynamodb
# item such that it can be modified.
self.meta.client.meta.events.register(
'provide-client-params.dynamodb',
copy_dynamodb_params,
unique_id='dynamodb-create-params-copy'
)
self._injector = TransformationInjector()
# Apply the handler that generates condition expressions including
# placeholders.
self.meta.client.meta.events.register(
'before-parameter-build.dynamodb',
self._injector.inject_condition_expressions,
unique_id='dynamodb-condition-expression')
# Apply the handler that serializes the request from python
# types to dynamodb types.
self.meta.client.meta.events.register(
'before-parameter-build.dynamodb',
self._injector.inject_attribute_value_input,
unique_id='dynamodb-attr-value-input')
# Apply the handler that deserializes the response from dynamodb
# types to python types.
self.meta.client.meta.events.register(
'after-call.dynamodb',
self._injector.inject_attribute_value_output,
unique_id='dynamodb-attr-value-output')
# Apply the documentation customizations to account for
# the transformations.
attr_value_shape_docs = DocumentModifiedShape(
'AttributeValue',
new_type='valid DynamoDB type',
new_description=(
'- The value of the attribute. The valid value types are '
'listed in the '
':ref:`DynamoDB Reference Guide<ref_valid_dynamodb_types>`.'
),
new_example_value=(
'\'string\'|123|Binary(b\'bytes\')|True|None|set([\'string\'])'
'|set([123])|set([Binary(b\'bytes\')])|[]|{}')
)
key_expression_shape_docs = DocumentModifiedShape(
'KeyExpression',
new_type=(
'condition from :py:class:`boto3.dynamodb.conditions.Key` '
'method'
),
new_description=(
'The condition(s) a key(s) must meet. Valid conditions are '
'listed in the '
':ref:`DynamoDB Reference Guide<ref_dynamodb_conditions>`.'
),
new_example_value='Key(\'mykey\').eq(\'myvalue\')'
)
con_expression_shape_docs = DocumentModifiedShape(
'ConditionExpression',
new_type=(
'condition from :py:class:`boto3.dynamodb.conditions.Attr` '
'method'
),
new_description=(
'The condition(s) an attribute(s) must meet. Valid conditions '
'are listed in the '
':ref:`DynamoDB Reference Guide<ref_dynamodb_conditions>`.'
),
new_example_value='Attr(\'myattribute\').eq(\'myvalue\')'
)
self.meta.client.meta.events.register(
'docs.*.dynamodb.*.complete-section',
attr_value_shape_docs.replace_documentation_for_matching_shape,
unique_id='dynamodb-attr-value-docs')
self.meta.client.meta.events.register(
'docs.*.dynamodb.*.complete-section',
key_expression_shape_docs.replace_documentation_for_matching_shape,
unique_id='dynamodb-key-expression-docs')
self.meta.client.meta.events.register(
'docs.*.dynamodb.*.complete-section',
con_expression_shape_docs.replace_documentation_for_matching_shape,
unique_id='dynamodb-cond-expression-docs')
class TransformationInjector(object):
"""Injects the transformations into the user provided parameters."""
def __init__(self, transformer=None, condition_builder=None,
serializer=None, deserializer=None):
self._transformer = transformer
if transformer is None:
self._transformer = ParameterTransformer()
self._condition_builder = condition_builder
if condition_builder is None:
self._condition_builder = ConditionExpressionBuilder()
self._serializer = serializer
if serializer is None:
self._serializer = TypeSerializer()
self._deserializer = deserializer
if deserializer is None:
self._deserializer = TypeDeserializer()
def inject_condition_expressions(self, params, model, **kwargs):
"""Injects the condition expression transformation into the parameters
This injection includes transformations for ConditionExpression shapes
and KeyExpression shapes. It also handles any placeholder names and
values that are generated when transforming the condition expressions.
"""
self._condition_builder.reset()
generated_names = {}
generated_values = {}
# Create and apply the Condition Expression transformation.
transformation = ConditionExpressionTransformation(
self._condition_builder,
placeholder_names=generated_names,
placeholder_values=generated_values,
is_key_condition=False
)
self._transformer.transform(
params, model.input_shape, transformation,
'ConditionExpression')
# Create and apply the Key Condition Expression transformation.
transformation = ConditionExpressionTransformation(
self._condition_builder,
placeholder_names=generated_names,
placeholder_values=generated_values,
is_key_condition=True
)
self._transformer.transform(
params, model.input_shape, transformation,
'KeyExpression')
expr_attr_names_input = 'ExpressionAttributeNames'
expr_attr_values_input = 'ExpressionAttributeValues'
        # Now that all of the condition expression transformations are done,
# update the placeholder dictionaries in the request.
if expr_attr_names_input in params:
params[expr_attr_names_input].update(generated_names)
else:
if generated_names:
params[expr_attr_names_input] = generated_names
if expr_attr_values_input in params:
params[expr_attr_values_input].update(generated_values)
else:
if generated_values:
params[expr_attr_values_input] = generated_values
def inject_attribute_value_input(self, params, model, **kwargs):
"""Injects DynamoDB serialization into parameter input"""
self._transformer.transform(
params, model.input_shape, self._serializer.serialize,
'AttributeValue')
def inject_attribute_value_output(self, parsed, model, **kwargs):
"""Injects DynamoDB deserialization into responses"""
self._transformer.transform(
parsed, model.output_shape, self._deserializer.deserialize,
'AttributeValue')
class ConditionExpressionTransformation(object):
"""Provides a transformation for condition expressions
The ``ParameterTransformer`` class can call this class directly
to transform the condition expressions in the parameters provided.
"""
def __init__(self, condition_builder, placeholder_names,
placeholder_values, is_key_condition=False):
self._condition_builder = condition_builder
self._placeholder_names = placeholder_names
self._placeholder_values = placeholder_values
self._is_key_condition = is_key_condition
def __call__(self, value):
if isinstance(value, ConditionBase):
# Create a conditional expression string with placeholders
# for the provided condition.
built_expression = self._condition_builder.build_expression(
value, is_key_condition=self._is_key_condition)
self._placeholder_names.update(
built_expression.attribute_name_placeholders)
self._placeholder_values.update(
built_expression.attribute_value_placeholders)
return built_expression.condition_expression
        # Use the user provided value if it is not a ConditionBase object.
return value
class ParameterTransformer(object):
"""Transforms the input to and output from botocore based on shape"""
def transform(self, params, model, transformation, target_shape):
"""Transforms the dynamodb input to or output from botocore
It applies a specified transformation whenever a specific shape name
is encountered while traversing the parameters in the dictionary.
:param params: The parameters structure to transform.
:param model: The operation model.
:param transformation: The function to apply the parameter
:param target_shape: The name of the shape to apply the
transformation to
"""
self._transform_parameters(
model, params, transformation, target_shape)
def _transform_parameters(self, model, params, transformation,
target_shape):
type_name = model.type_name
if type_name in ['structure', 'map', 'list']:
getattr(self, '_transform_%s' % type_name)(
model, params, transformation, target_shape)
def _transform_structure(self, model, params, transformation,
target_shape):
if not isinstance(params, Mapping):
return
for param in params:
if param in model.members:
member_model = model.members[param]
member_shape = member_model.name
if member_shape == target_shape:
params[param] = transformation(params[param])
else:
self._transform_parameters(
member_model, params[param], transformation,
target_shape)
def _transform_map(self, model, params, transformation, target_shape):
if not isinstance(params, Mapping):
return
value_model = model.value
value_shape = value_model.name
for key, value in params.items():
if value_shape == target_shape:
params[key] = transformation(value)
else:
self._transform_parameters(
value_model, params[key], transformation, target_shape)
def _transform_list(self, model, params, transformation, target_shape):
if not isinstance(params, MutableSequence):
return
member_model = model.member
member_shape = member_model.name
for i, item in enumerate(params):
if member_shape == target_shape:
params[i] = transformation(item)
else:
self._transform_parameters(
member_model, params[i], transformation, target_shape)
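# Editorial sketch (not part of boto3): ParameterTransformer only requires
# objects exposing ``type_name``, ``name`` and, depending on the type,
# ``members``, ``value`` or ``member``.  The toy shapes below are stand-ins
# for botocore shape models and show how a target shape is transformed in
# place while the surrounding structure is traversed.
class _ToyShape(object):
    def __init__(self, name, type_name, members=None, value=None, member=None):
        self.name = name
        self.type_name = type_name
        self.members = members or {}
        self.value = value
        self.member = member
def _example_transform():
    attr_value = _ToyShape('AttributeValue', 'string')
    item_map = _ToyShape('ItemMap', 'map', value=attr_value)
    input_shape = _ToyShape('PutItemInput', 'structure',
                            members={'Item': item_map})
    params = {'Item': {'color': 'red', 'count': 3}}
    # Upper-case every value whose shape name is 'AttributeValue'.
    ParameterTransformer().transform(
        params, input_shape, lambda value: str(value).upper(), 'AttributeValue')
    return params  # {'Item': {'color': 'RED', 'count': '3'}}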
|
apache-2.0
|
jpkrohling/jaeger
|
plugin/storage/es/esRollover.py
|
2
|
11705
|
#!/usr/bin/env python3
import ast
import curator
import elasticsearch
import logging
import os
import requests
import ssl
import subprocess
import sys
import re
from requests.auth import HTTPBasicAuth
ARCHIVE_INDEX = 'jaeger-span-archive'
ROLLBACK_CONDITIONS = '{"max_age": "2d"}'
UNIT = 'days'
UNIT_COUNT = 2
SHARDS = 5
REPLICAS = 1
ILM_POLICY_NAME = 'jaeger-ilm-policy'
TIMEOUT=120
def main():
if len(sys.argv) != 3:
print(
'USAGE: [INDEX_PREFIX=(default "")] [ARCHIVE=(default false)] ... {} ACTION http://HOSTNAME[:PORT]'.format(
sys.argv[0]))
print('ACTION ... one of:')
print('\tinit - creates indices and aliases')
print('\trollover - rollover to new write index')
print('\tlookback - removes old indices from read alias')
print('HOSTNAME ... specifies which Elasticsearch hosts URL to search and delete indices from.')
print('INDEX_PREFIX ... specifies index prefix.')
print('ARCHIVE ... handle archive indices (default false).')
print('ES_USERNAME ... The username required by Elasticsearch.')
print('ES_PASSWORD ... The password required by Elasticsearch.')
print('ES_TLS ... enable TLS (default false).')
print('ES_TLS_CA ... Path to TLS CA file.')
print('ES_TLS_CERT ... Path to TLS certificate file.')
print('ES_TLS_KEY ... Path to TLS key file.')
print('ES_USE_ILM .. Use ILM to manage jaeger indices.')
print('ES_ILM_POLICY_NAME .. The name of the ILM policy to use if ILM is active.')
print('ES_TLS_SKIP_HOST_VERIFY ... (insecure) Skip server\'s certificate chain and host name verification.')
print(
'ES_VERSION ... The major Elasticsearch version. If not specified, the value will be auto-detected from Elasticsearch.')
print('init configuration:')
print('\tSHARDS ... the number of shards per index in Elasticsearch (default {}).'.format(SHARDS))
print('\tREPLICAS ... the number of replicas per index in Elasticsearch (default {}).'.format(REPLICAS))
print('rollover configuration:')
    print('\tCONDITIONS ... conditions used to rollover to a new write index (default \'{}\').'.format(
        ROLLBACK_CONDITIONS))
print('lookback configuration:')
print(
'\tUNIT ... used with lookback to remove indices from read alias e.g. ..., days, weeks, months, years (default {}).'.format(
UNIT))
print('\tUNIT_COUNT ... count of UNITs (default {}).'.format(UNIT_COUNT))
print('TIMEOUT ... number of seconds to wait for master node response (default {}).'.format(TIMEOUT))
sys.exit(1)
timeout = int(os.getenv("TIMEOUT", TIMEOUT))
client = create_client(os.getenv("ES_USERNAME"), os.getenv("ES_PASSWORD"), str2bool(os.getenv("ES_TLS", 'false')),
os.getenv("ES_TLS_CA"), os.getenv("ES_TLS_CERT"), os.getenv("ES_TLS_KEY"),
str2bool(os.getenv("ES_TLS_SKIP_HOST_VERIFY", 'false')), timeout)
prefix = os.getenv('INDEX_PREFIX', '')
if prefix != '':
prefix += '-'
action = sys.argv[1]
if str2bool(os.getenv('ARCHIVE', 'false')):
write_alias = prefix + ARCHIVE_INDEX + '-write'
read_alias = prefix + ARCHIVE_INDEX + '-read'
perform_action(action, client, write_alias, read_alias, prefix + 'jaeger-span-archive', 'jaeger-span', prefix)
else:
write_alias = prefix + 'jaeger-span-write'
read_alias = prefix + 'jaeger-span-read'
perform_action(action, client, write_alias, read_alias, prefix + 'jaeger-span', 'jaeger-span', prefix)
write_alias = prefix + 'jaeger-service-write'
read_alias = prefix + 'jaeger-service-read'
perform_action(action, client, write_alias, read_alias, prefix + 'jaeger-service', 'jaeger-service', prefix)
def perform_action(action, client, write_alias, read_alias, index_to_rollover, template_name, prefix):
if action == 'init':
shards = os.getenv('SHARDS', SHARDS)
replicas = os.getenv('REPLICAS', REPLICAS)
esVersion = get_version(client)
use_ilm = str2bool(os.getenv("ES_USE_ILM", 'false'))
ilm_policy_name = os.getenv('ES_ILM_POLICY_NAME', ILM_POLICY_NAME)
if esVersion == 7:
if use_ilm:
check_if_ilm_policy_exists(ilm_policy_name)
else:
if use_ilm:
sys.exit("ILM is supported only for ES version 7+")
create_index_template(fix_mapping(template_name, esVersion, shards, replicas, prefix.rstrip("-"), use_ilm, ilm_policy_name),
prefix + template_name)
index = index_to_rollover + '-000001'
create_index(client, index)
if is_alias_empty(client, read_alias):
create_aliases(client, read_alias, index, use_ilm)
if is_alias_empty(client, write_alias):
create_aliases(client, write_alias, index, use_ilm)
elif action == 'rollover':
cond = ast.literal_eval(os.getenv('CONDITIONS', ROLLBACK_CONDITIONS))
rollover(client, write_alias, read_alias, cond)
elif action == 'lookback':
read_alias_lookback(client, write_alias, read_alias, os.getenv('UNIT', UNIT),
int(os.getenv('UNIT_COUNT', UNIT_COUNT)))
else:
print('Unrecognized action {}'.format(action))
sys.exit(1)
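# Editorial sketch (not part of the original script): the 'rollover' action
# above reads its conditions from the CONDITIONS environment variable as a
# Python literal.  A minimal, hypothetical illustration of that parsing:
def _example_parse_conditions(raw=None):
    """Parse rollover conditions the same way perform_action() does."""
    if raw is None:
        raw = os.getenv('CONDITIONS', ROLLBACK_CONDITIONS)
    # e.g. {'max_age': '2d'} by default; CONDITIONS='{"max_age": "2d",
    # "max_docs": 100000}' would also be accepted.
    return ast.literal_eval(raw)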
def create_index_template(template, template_name):
print('Creating index template {}'.format(template_name))
headers = {'Content-Type': 'application/json'}
s = get_request_session(os.getenv("ES_USERNAME"), os.getenv("ES_PASSWORD"), str2bool(os.getenv("ES_TLS", 'false')),
os.getenv("ES_TLS_CA"), os.getenv("ES_TLS_CERT"), os.getenv("ES_TLS_KEY"),
os.getenv("ES_TLS_SKIP_HOST_VERIFY", 'false'))
r = s.put(sys.argv[2] + '/_template/' + template_name, headers=headers, data=template)
print(r.text)
r.raise_for_status()
def create_index(client, name):
"""
Create archive index
"""
print('Creating index {}'.format(name))
create = curator.CreateIndex(client=client, name=name, ignore_existing=True)
create.do_action()
def create_aliases(client, alias_name, archive_index_name, use_ilm):
""""
Create read write aliases
"""
ilo = curator.IndexList(client)
ilo.filter_by_regex(kind='regex', value='^' + archive_index_name + '$')
for index in ilo.working_list():
print("Adding index {} to alias {}".format(index, alias_name))
if re.search(r'write', alias_name) and use_ilm:
alias = curator.Alias(client=client, name=alias_name, extra_settings={'is_write_index': True})
else:
alias = curator.Alias(client=client, name=alias_name)
alias.add(ilo)
alias.do_action()
def is_alias_empty(client, alias_name):
""""
Checks whether alias is empty or not
"""
ilo = curator.IndexList(client)
ilo.filter_by_alias(aliases=alias_name)
if len(ilo.working_list()) > 0:
print("Alias {} is not empty. Not adding indices to it.".format(alias_name))
return False
return True
def rollover(client, write_alias, read_alias, conditions):
"""
Rollover to new index and put it into read alias
"""
print("Rollover {}, based on conditions {}".format(write_alias, conditions))
roll = curator.Rollover(client=client, name=write_alias, conditions=conditions)
roll.do_action()
ilo = curator.IndexList(client)
ilo.filter_by_alias(aliases=[write_alias])
alias = curator.Alias(client=client, name=read_alias)
for index in ilo.working_list():
print("Adding index {} to alias {}".format(index, read_alias))
alias.add(ilo)
alias.do_action()
def read_alias_lookback(client, write_alias, read_alias, unit, unit_count):
"""
This is used to mimic --es.max-span-age - The maximum lookback for spans in Elasticsearch
by removing old indices from read alias
"""
ilo = curator.IndexList(client)
ilo.filter_by_alias(aliases=[read_alias])
ilo.filter_by_alias(aliases=[write_alias], exclude=True)
ilo.filter_by_age(source='creation_date', direction='older', unit=unit, unit_count=unit_count)
empty_list(ilo, 'No indices to remove from alias {}'.format(read_alias))
for index in ilo.working_list():
print("Removing index {} from alias {}".format(index, read_alias))
alias = curator.Alias(client=client, name=read_alias)
alias.remove(ilo)
alias.do_action()
def str2bool(v):
return v.lower() in ('true', '1')
def fix_mapping(template_name, esVersion, shards, replicas, indexPrefix, use_ilm, ilm_policy_name):
output = subprocess.Popen(['esmapping-generator', '--mapping', template_name, '--es-version', str(esVersion),
'--shards', str(shards), '--replicas',
str(replicas), '--index-prefix', indexPrefix,
'--use-ilm', str(use_ilm), '--ilm-policy-name', ilm_policy_name],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
mapping, stderr = output.communicate()
if output.returncode != 0:
sys.exit(mapping)
return mapping
def empty_list(ilo, error_msg):
try:
ilo.empty_list_check()
except curator.NoIndices:
print(error_msg)
sys.exit(0)
def get_request_session(username, password, tls, ca, cert, key, skipHostVerify):
session = requests.Session()
if ca is not None:
session.verify = ca
elif skipHostVerify:
session.verify = False
if username is not None and password is not None:
session.auth = HTTPBasicAuth(username, password)
elif tls:
session.verify = ca
session.cert = (cert, key)
return session
def get_version(client):
esVersion = os.getenv('ES_VERSION')
if esVersion is None or esVersion == '':
esVersion = client.info()['version']['number'][0]
print('Detected ElasticSearch Version {}'.format(esVersion))
esVersion = int(esVersion)
return esVersion
def create_client(username, password, tls, ca, cert, key, skipHostVerify, timeout):
context = ssl.create_default_context()
if ca is not None:
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=ca)
elif skipHostVerify:
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
if username is not None and password is not None:
return elasticsearch.Elasticsearch(sys.argv[2:], http_auth=(username, password), ssl_context=context, timeout=timeout)
elif tls:
context.load_cert_chain(certfile=cert, keyfile=key)
return elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context, timeout=timeout)
else:
return elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context, timeout=timeout)
def check_if_ilm_policy_exists(ilm_policy):
""""
Checks whether ilm is created in Elasticsearch
"""
s = get_request_session(os.getenv("ES_USERNAME"), os.getenv("ES_PASSWORD"), str2bool(os.getenv("ES_TLS", 'false')),
os.getenv("ES_TLS_CA"), os.getenv("ES_TLS_CERT"), os.getenv("ES_TLS_KEY"),
os.getenv("ES_TLS_SKIP_HOST_VERIFY", 'false'))
r = s.get(sys.argv[2] + '/_ilm/policy/' + ilm_policy)
if r.status_code != 200:
sys.exit("ILM policy '{}' doesn't exist in Elasticsearch. Please create it and rerun init".format(ilm_policy))
if __name__ == "__main__":
logging.getLogger().setLevel(logging.DEBUG)
main()
|
apache-2.0
|
Akrog/cinder
|
cinder/manager.py
|
1
|
4996
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base Manager class.
Managers are responsible for a certain aspect of the system. It is a logical
grouping of code relating to a portion of the system. In general other
components should be using the manager to make changes to the components that
it is responsible for.
For example, other components that need to deal with volumes in some way,
should do so by calling methods on the VolumeManager instead of directly
changing fields in the database. This allows us to keep all of the code
relating to volumes in the same place.
We have adopted a basic strategy of Smart managers and dumb data, which means
rather than attaching methods to data objects, components should call manager
methods that act on the data.
Methods on managers that can be executed locally should be called directly. If
a particular method must execute on a remote host, this should be done via rpc
to the service that wraps the manager.
Managers should be responsible for most of the db access, and
non-implementation specific data. Anything implementation specific that can't
be generalized should be done by the Driver.
In general, we prefer to have one manager with multiple drivers for different
implementations, but sometimes it makes sense to have multiple managers. You
can think of it this way: Abstract different overall strategies at the manager
level (FlatNetwork vs VlanNetwork), and different implementations at the driver
level (LinuxNetDriver vs CiscoNetDriver).
Managers will often provide methods for initial setup of a host or periodic
tasks to a wrapping service.
This module provides Manager, a base class for managers.
"""
from oslo_config import cfg
import oslo_messaging as messaging
from cinder.db import base
from cinder.openstack.common import log as logging
from cinder.openstack.common import periodic_task
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import version
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Manager(base.Base, periodic_task.PeriodicTasks):
# Set RPC API version to 1.0 by default.
RPC_API_VERSION = '1.0'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, host=None, db_driver=None):
if not host:
host = CONF.host
self.host = host
self.additional_endpoints = []
super(Manager, self).__init__(db_driver)
def periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
def init_host(self):
"""Handle initialization if this is a standalone service.
Child classes should override this method.
"""
pass
def service_version(self, context):
return version.version_string()
def service_config(self, context):
config = {}
for key in CONF:
config[key] = CONF.get(key, None)
return config
class SchedulerDependentManager(Manager):
"""Periodically send capability updates to the Scheduler services.
Services that need to update the Scheduler of their capabilities
should derive from this class. Otherwise they can derive from
manager.Manager directly. Updates are only sent after
update_service_capabilities is called with non-None values.
"""
def __init__(self, host=None, db_driver=None, service_name='undefined'):
self.last_capabilities = None
self.service_name = service_name
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
super(SchedulerDependentManager, self).__init__(host, db_driver)
def update_service_capabilities(self, capabilities):
"""Remember these capabilities to send on next periodic update."""
self.last_capabilities = capabilities
@periodic_task.periodic_task
def _publish_service_capabilities(self, context):
"""Pass data back to the scheduler at a periodic interval."""
if self.last_capabilities:
LOG.debug('Notifying Schedulers of capabilities ...')
self.scheduler_rpcapi.update_service_capabilities(
context,
self.service_name,
self.host,
self.last_capabilities)
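# Editorial sketch (not part of cinder): a hypothetical manager that reports
# capabilities to the scheduler through the base classes above.  The
# capability keys are placeholders, not a real driver contract.
class _ExampleCapabilityManager(SchedulerDependentManager):
    def init_host(self):
        # Remember what the backend can do; the periodic task
        # _publish_service_capabilities() in the parent class pushes it to
        # the scheduler on the next tick.
        self.update_service_capabilities({
            'volume_backend_name': 'example_backend',
            'free_capacity_gb': 100,
        })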
|
apache-2.0
|
vrenaville/partner-contact
|
base_location_geonames_import/wizard/geonames_import.py
|
21
|
5910
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Base Location Geonames Import module for OpenERP
# Copyright (C) 2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <[email protected]>
# Copyright (C) 2014 Agile Business Group (http://www.agilebg.com)
# @author Lorenzo Battistini <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
from openerp.exceptions import Warning
import requests
import tempfile
import StringIO
import zipfile
import os
import logging
try:
import unicodecsv
except ImportError:
unicodecsv = None
logger = logging.getLogger(__name__)
class BetterZipGeonamesImport(models.TransientModel):
_name = 'better.zip.geonames.import'
_description = 'Import Better Zip from Geonames'
_rec_name = 'country_id'
country_id = fields.Many2one('res.country', 'Country', required=True)
@api.model
def transform_city_name(self, city, country):
"""Override it for transforming city name (if needed)
:param city: Original city name
:param country: Country record
:return: Transformed city name
"""
return city
@api.model
def _domain_search_better_zip(self, row, country):
return [('name', '=', row[1]),
('city', '=', self.transform_city_name(row[2], country)),
('country_id', '=', country.id)]
@api.model
def _prepare_better_zip(self, row, country):
state = self.select_or_create_state(row, country)
vals = {
'name': row[1],
'city': self.transform_city_name(row[2], country),
'state_id': state.id,
'country_id': country.id,
}
return vals
@api.model
def create_better_zip(self, row, country):
if row[0] != country.code:
raise Warning(
_("The country code inside the file (%s) doesn't "
"correspond to the selected country (%s).")
% (row[0], country.code))
logger.debug('ZIP = %s - City = %s' % (row[1], row[2]))
if row[1] and row[2]:
zip_model = self.env['res.better.zip']
zips = zip_model.search(self._domain_search_better_zip(
row, country))
if zips:
return zips[0]
else:
vals = self._prepare_better_zip(row, country)
if vals:
return zip_model.create(vals)
else:
return False
@api.model
def select_or_create_state(
self, row, country, code_row_index=4, name_row_index=3):
states = self.env['res.country.state'].search([
('country_id', '=', country.id),
('code', '=', row[code_row_index]),
])
if len(states) > 1:
raise Warning(
_("Too many states with code %s for country %s")
% (row[code_row_index], country.code))
if len(states) == 1:
return states[0]
else:
return self.env['res.country.state'].create({
'name': row[name_row_index],
'code': row[code_row_index],
'country_id': country.id
})
@api.one
def run_import(self):
zip_model = self.env['res.better.zip']
country_code = self.country_id.code
config_url = self.env['ir.config_parameter'].get_param(
'geonames.url',
default='http://download.geonames.org/export/zip/%s.zip')
url = config_url % country_code
logger.info('Starting to download %s' % url)
res_request = requests.get(url)
if res_request.status_code != requests.codes.ok:
raise Warning(
_('Got an error %d when trying to download the file %s.')
% (res_request.status_code, url))
# Store current record list
zips_to_delete = zip_model.search(
[('country_id', '=', self.country_id.id)])
f_geonames = zipfile.ZipFile(StringIO.StringIO(res_request.content))
tempdir = tempfile.mkdtemp(prefix='openerp')
f_geonames.extract('%s.txt' % country_code, tempdir)
logger.info('The geonames zipfile has been decompressed')
data_file = open(os.path.join(tempdir, '%s.txt' % country_code), 'r')
data_file.seek(0)
logger.info('Starting to create the better zip entries')
        for row in unicodecsv.reader(
                data_file, encoding='utf-8', delimiter='\t'):
zip = self.create_better_zip(row, self.country_id)
if zip in zips_to_delete:
zips_to_delete -= zip
data_file.close()
if zips_to_delete:
zips_to_delete.unlink()
logger.info('%d better zip entries deleted for country %s' %
(len(zips_to_delete), self.country_id.name))
logger.info(
'The wizard to create better zip entries from geonames '
'has been successfully completed.')
return True
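# Editorial sketch (not part of the module): the wizard above downloads
# http://download.geonames.org/export/zip/<COUNTRY>.zip and reads its
# tab-separated rows (country code, zip, city, state name, state code, ...).
# A minimal, hypothetical way to perform the same fetch outside the ORM:
def _example_fetch_geonames_rows(country_code='FR'):
    url = 'http://download.geonames.org/export/zip/%s.zip' % country_code
    res = requests.get(url)
    res.raise_for_status()
    archive = zipfile.ZipFile(StringIO.StringIO(res.content))
    rows = []
    for line in archive.read('%s.txt' % country_code).splitlines():
        # fields[0]=country code, [1]=zip, [2]=city, [3]=state, [4]=state code
        rows.append(line.decode('utf-8').split('\t'))
    return rows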
|
agpl-3.0
|
stackforge/python-tackerclient
|
tackerclient/tacker/v1_0/nfvo/nsd.py
|
2
|
3324
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from oslo_serialization import jsonutils
from tackerclient.i18n import _
from tackerclient.tacker import v1_0 as tackerV10
_NSD = "nsd"
class ListNSD(tackerV10.ListCommand):
"""List NSDs that belong to a given tenant."""
resource = _NSD
list_columns = ['id', 'name', 'template_source', 'description']
def get_parser(self, prog_name):
parser = super(ListNSD, self).get_parser(prog_name)
parser.add_argument(
'--template-source',
help=_("List NSD with specified template source. Available \
options are 'onboared' (default), 'inline' or 'all'"),
action='store',
default='onboarded')
return parser
def args2search_opts(self, parsed_args):
search_opts = super(ListNSD, self).args2search_opts(parsed_args)
template_source = parsed_args.template_source
if parsed_args.template_source:
search_opts.update({'template_source': template_source})
return search_opts
class ShowNSD(tackerV10.ShowCommand):
"""Show information of a given NSD."""
resource = _NSD
class CreateNSD(tackerV10.CreateCommand):
"""Create a NSD."""
resource = _NSD
remove_output_fields = ["attributes"]
def add_known_arguments(self, parser):
parser.add_argument('--nsd-file', help='Specify NSD file',
required=True)
parser.add_argument(
'name', metavar='NAME',
help='Set a name for the NSD')
parser.add_argument(
'--description',
help='Set a description for the NSD')
def args2body(self, parsed_args):
body = {self.resource: {}}
nsd = None
with open(parsed_args.nsd_file) as f:
nsd = yaml.safe_load(f.read())
tackerV10.update_dict(parsed_args, body[self.resource],
['tenant_id', 'name', 'description'])
if nsd:
body[self.resource]['attributes'] = {'nsd': nsd}
return body
class DeleteNSD(tackerV10.DeleteCommand):
"""Delete a given NSD."""
resource = _NSD
class ShowTemplateNSD(tackerV10.ShowCommand):
"""Show template of a given NSD."""
resource = _NSD
def run(self, parsed_args):
self.log.debug('run(%s)', parsed_args)
template = None
data = self.get_data(parsed_args)
try:
attributes_index = data[0].index('attributes')
attributes_json = data[1][attributes_index]
template = jsonutils.loads(attributes_json).get('nsd', None)
except (IndexError, TypeError, ValueError) as e:
self.log.debug('Data handling error: %s', str(e))
print(template or _('Unable to display NSD template!'))
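# Editorial sketch (not part of the client): CreateNSD.args2body() above
# wraps the parsed YAML template under attributes['nsd'].  A hypothetical
# stand-alone illustration of the request body it builds:
def _example_nsd_body(nsd_file, name, description=None):
    with open(nsd_file) as f:
        nsd = yaml.safe_load(f.read())
    body = {_NSD: {'name': name, 'attributes': {'nsd': nsd}}}
    if description:
        body[_NSD]['description'] = description
    return body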
|
apache-2.0
|
veger/ansible
|
lib/ansible/utils/module_docs_fragments/shell_common.py
|
79
|
1833
|
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    # common shell documentation fragment
DOCUMENTATION = """
options:
remote_tmp:
description:
- Temporary directory to use on targets when executing tasks.
default: '~/.ansible/tmp'
env: [{name: ANSIBLE_REMOTE_TEMP}, {name: ANSIBLE_REMOTE_TMP}]
ini:
- section: defaults
key: remote_tmp
vars:
- name: ansible_remote_tmp
system_tmpdirs:
description:
- "List of valid system temporary directories for Ansible to choose when it cannot use
``remote_tmp``, normally due to permission issues. These must be world readable, writable,
and executable."
default: [ /var/tmp, /tmp ]
type: list
env: [{name: ANSIBLE_SYSTEM_TMPDIRS}]
ini:
- section: defaults
key: system_tmpdirs
vars:
- name: ansible_system_tmpdirs
async_dir:
description:
- Directory in which ansible will keep async job information
default: '~/.ansible_async'
env: [{name: ANSIBLE_ASYNC_DIR}]
ini:
- section: defaults
key: async_dir
vars:
- name: ansible_async_dir
environment:
type: dict
default: {}
description:
- dictionary of environment variables and their values to use when executing commands.
admin_users:
type: list
default: ['root', 'toor']
description:
- list of users to be expected to have admin privileges. This is used by the controller to
determine how to share temporary files between the remote user and the become user.
env:
- name: ANSIBLE_ADMIN_USERS
ini:
- section: defaults
key: admin_users
vars:
- name: ansible_admin_users
"""
|
gpl-3.0
|
ionelmc/python-cogen
|
examples/cogen-chat/ChatApp/chatapp/config/environment.py
|
4
|
1470
|
"""Pylons environment configuration"""
import os
from mako.lookup import TemplateLookup
from pylons import config
import chatapp.lib.app_globals as app_globals
import chatapp.lib.helpers
from chatapp.config.routing import make_map
def load_environment(global_conf, app_conf):
"""Configure the Pylons environment via the ``pylons.config``
object
"""
# Pylons paths
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
paths = dict(root=root,
controllers=os.path.join(root, 'controllers'),
static_files=os.path.join(root, 'public'),
templates=[os.path.join(root, 'templates')])
# Initialize config with the basic options
config.init_app(global_conf, app_conf, package='chatapp', paths=paths)
config['routes.map'] = make_map()
config['pylons.app_globals'] = app_globals.Globals()
config['pylons.h'] = chatapp.lib.helpers
# Create the Mako TemplateLookup, with the default auto-escaping
config['pylons.app_globals'].mako_lookup = TemplateLookup(
directories=paths['templates'], input_encoding='utf-8',
imports=['from webhelpers.html import escape'],
default_filters=['escape'], output_encoding='utf-8',
module_directory=os.path.join(app_conf['cache_dir'], 'templates'),
)
# CONFIGURATION OPTIONS HERE (note: all config options will override
# any Pylons config options)
|
mit
|
debugtalk/ApiTestEngine
|
httprunner/testcase.py
|
1
|
13449
|
import inspect
from typing import Text, Any, Union, Callable
from httprunner.models import (
TConfig,
TStep,
TRequest,
MethodEnum,
TestCase,
)
class Config(object):
def __init__(self, name: Text):
self.__name = name
self.__variables = {}
self.__base_url = ""
self.__verify = False
self.__export = []
self.__weight = 1
caller_frame = inspect.stack()[1]
self.__path = caller_frame.filename
@property
def name(self) -> Text:
return self.__name
@property
def path(self) -> Text:
return self.__path
@property
def weight(self) -> int:
return self.__weight
def variables(self, **variables) -> "Config":
self.__variables.update(variables)
return self
def base_url(self, base_url: Text) -> "Config":
self.__base_url = base_url
return self
def verify(self, verify: bool) -> "Config":
self.__verify = verify
return self
def export(self, *export_var_name: Text) -> "Config":
self.__export.extend(export_var_name)
return self
def locust_weight(self, weight: int) -> "Config":
self.__weight = weight
return self
def perform(self) -> TConfig:
return TConfig(
name=self.__name,
base_url=self.__base_url,
verify=self.__verify,
variables=self.__variables,
export=list(set(self.__export)),
path=self.__path,
weight=self.__weight,
)
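# Editorial sketch (not part of httprunner): the Config builder above is
# designed to be chained; a minimal, hypothetical example of assembling a
# testcase config (URL and variable names are placeholders).
def _example_config() -> TConfig:
    return (
        Config("demo testcase")
        .variables(user_id=1001, token="abc")
        .base_url("https://example.com")
        .verify(False)
        .export("token")
        .perform()
    )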
class StepRequestValidation(object):
def __init__(self, step_context: TStep):
self.__step_context = step_context
def assert_equal(
self, jmes_path: Text, expected_value: Any, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"equal": [jmes_path, expected_value, message]}
)
return self
def assert_not_equal(
self, jmes_path: Text, expected_value: Any, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"not_equal": [jmes_path, expected_value, message]}
)
return self
def assert_greater_than(
self, jmes_path: Text, expected_value: Union[int, float], message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"greater_than": [jmes_path, expected_value, message]}
)
return self
def assert_less_than(
self, jmes_path: Text, expected_value: Union[int, float], message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"less_than": [jmes_path, expected_value, message]}
)
return self
def assert_greater_or_equals(
self, jmes_path: Text, expected_value: Union[int, float], message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"greater_or_equals": [jmes_path, expected_value, message]}
)
return self
def assert_less_or_equals(
self, jmes_path: Text, expected_value: Union[int, float], message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"less_or_equals": [jmes_path, expected_value, message]}
)
return self
def assert_length_equal(
self, jmes_path: Text, expected_value: int, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"length_equal": [jmes_path, expected_value, message]}
)
return self
def assert_length_greater_than(
self, jmes_path: Text, expected_value: int, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"length_greater_than": [jmes_path, expected_value, message]}
)
return self
def assert_length_less_than(
self, jmes_path: Text, expected_value: int, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"length_less_than": [jmes_path, expected_value, message]}
)
return self
def assert_length_greater_or_equals(
self, jmes_path: Text, expected_value: int, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"length_greater_or_equals": [jmes_path, expected_value, message]}
)
return self
def assert_length_less_or_equals(
self, jmes_path: Text, expected_value: int, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"length_less_or_equals": [jmes_path, expected_value, message]}
)
return self
def assert_string_equals(
self, jmes_path: Text, expected_value: Any, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"string_equals": [jmes_path, expected_value, message]}
)
return self
def assert_startswith(
self, jmes_path: Text, expected_value: Text, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"startswith": [jmes_path, expected_value, message]}
)
return self
def assert_endswith(
self, jmes_path: Text, expected_value: Text, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"endswith": [jmes_path, expected_value, message]}
)
return self
def assert_regex_match(
self, jmes_path: Text, expected_value: Text, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"regex_match": [jmes_path, expected_value, message]}
)
return self
def assert_contains(
self, jmes_path: Text, expected_value: Any, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"contains": [jmes_path, expected_value, message]}
)
return self
def assert_contained_by(
self, jmes_path: Text, expected_value: Any, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"contained_by": [jmes_path, expected_value, message]}
)
return self
def assert_type_match(
self, jmes_path: Text, expected_value: Any, message: Text = ""
) -> "StepRequestValidation":
self.__step_context.validators.append(
{"type_match": [jmes_path, expected_value, message]}
)
return self
def perform(self) -> TStep:
return self.__step_context
class StepRequestExtraction(object):
def __init__(self, step_context: TStep):
self.__step_context = step_context
def with_jmespath(self, jmes_path: Text, var_name: Text) -> "StepRequestExtraction":
self.__step_context.extract[var_name] = jmes_path
return self
# def with_regex(self):
# # TODO: extract response html with regex
# pass
#
# def with_jsonpath(self):
# # TODO: extract response json with jsonpath
# pass
def validate(self) -> StepRequestValidation:
return StepRequestValidation(self.__step_context)
def perform(self) -> TStep:
return self.__step_context
class RequestWithOptionalArgs(object):
def __init__(self, step_context: TStep):
self.__step_context = step_context
def with_params(self, **params) -> "RequestWithOptionalArgs":
self.__step_context.request.params.update(params)
return self
def with_headers(self, **headers) -> "RequestWithOptionalArgs":
self.__step_context.request.headers.update(headers)
return self
def with_cookies(self, **cookies) -> "RequestWithOptionalArgs":
self.__step_context.request.cookies.update(cookies)
return self
def with_data(self, data) -> "RequestWithOptionalArgs":
self.__step_context.request.data = data
return self
def with_json(self, req_json) -> "RequestWithOptionalArgs":
self.__step_context.request.req_json = req_json
return self
def set_timeout(self, timeout: float) -> "RequestWithOptionalArgs":
self.__step_context.request.timeout = timeout
return self
def set_verify(self, verify: bool) -> "RequestWithOptionalArgs":
self.__step_context.request.verify = verify
return self
def set_allow_redirects(self, allow_redirects: bool) -> "RequestWithOptionalArgs":
self.__step_context.request.allow_redirects = allow_redirects
return self
def upload(self, **file_info) -> "RequestWithOptionalArgs":
self.__step_context.request.upload.update(file_info)
return self
def teardown_hook(
self, hook: Text, assign_var_name: Text = None
) -> "RequestWithOptionalArgs":
if assign_var_name:
self.__step_context.teardown_hooks.append({assign_var_name: hook})
else:
self.__step_context.teardown_hooks.append(hook)
return self
def extract(self) -> StepRequestExtraction:
return StepRequestExtraction(self.__step_context)
def validate(self) -> StepRequestValidation:
return StepRequestValidation(self.__step_context)
def perform(self) -> TStep:
return self.__step_context
class RunRequest(object):
def __init__(self, name: Text):
self.__step_context = TStep(name=name)
def with_variables(self, **variables) -> "RunRequest":
self.__step_context.variables.update(variables)
return self
def setup_hook(self, hook: Text, assign_var_name: Text = None) -> "RunRequest":
if assign_var_name:
self.__step_context.setup_hooks.append({assign_var_name: hook})
else:
self.__step_context.setup_hooks.append(hook)
return self
def get(self, url: Text) -> RequestWithOptionalArgs:
self.__step_context.request = TRequest(method=MethodEnum.GET, url=url)
return RequestWithOptionalArgs(self.__step_context)
def post(self, url: Text) -> RequestWithOptionalArgs:
self.__step_context.request = TRequest(method=MethodEnum.POST, url=url)
return RequestWithOptionalArgs(self.__step_context)
def put(self, url: Text) -> RequestWithOptionalArgs:
self.__step_context.request = TRequest(method=MethodEnum.PUT, url=url)
return RequestWithOptionalArgs(self.__step_context)
def head(self, url: Text) -> RequestWithOptionalArgs:
self.__step_context.request = TRequest(method=MethodEnum.HEAD, url=url)
return RequestWithOptionalArgs(self.__step_context)
def delete(self, url: Text) -> RequestWithOptionalArgs:
self.__step_context.request = TRequest(method=MethodEnum.DELETE, url=url)
return RequestWithOptionalArgs(self.__step_context)
def options(self, url: Text) -> RequestWithOptionalArgs:
self.__step_context.request = TRequest(method=MethodEnum.OPTIONS, url=url)
return RequestWithOptionalArgs(self.__step_context)
def patch(self, url: Text) -> RequestWithOptionalArgs:
self.__step_context.request = TRequest(method=MethodEnum.PATCH, url=url)
return RequestWithOptionalArgs(self.__step_context)
class StepRefCase(object):
def __init__(self, step_context: TStep):
self.__step_context = step_context
def teardown_hook(self, hook: Text, assign_var_name: Text = None) -> "StepRefCase":
if assign_var_name:
self.__step_context.teardown_hooks.append({assign_var_name: hook})
else:
self.__step_context.teardown_hooks.append(hook)
return self
def export(self, *var_name: Text) -> "StepRefCase":
self.__step_context.export.extend(var_name)
return self
def perform(self) -> TStep:
return self.__step_context
class RunTestCase(object):
def __init__(self, name: Text):
self.__step_context = TStep(name=name)
def with_variables(self, **variables) -> "RunTestCase":
self.__step_context.variables.update(variables)
return self
def setup_hook(self, hook: Text, assign_var_name: Text = None) -> "RunTestCase":
if assign_var_name:
self.__step_context.setup_hooks.append({assign_var_name: hook})
else:
self.__step_context.setup_hooks.append(hook)
return self
def call(self, testcase: Callable) -> StepRefCase:
self.__step_context.testcase = testcase
return StepRefCase(self.__step_context)
def perform(self) -> TStep:
return self.__step_context
class Step(object):
def __init__(
self,
step_context: Union[
StepRequestValidation,
StepRequestExtraction,
RequestWithOptionalArgs,
RunTestCase,
StepRefCase,
],
):
self.__step_context = step_context.perform()
@property
def request(self) -> TRequest:
return self.__step_context.request
@property
def testcase(self) -> TestCase:
return self.__step_context.testcase
def perform(self) -> TStep:
return self.__step_context
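# Illustrative usage sketch (the URL, header and jmespath values below are assumptions, not
# part of this module): the builder classes above are meant to be chained into a testcase
# definition, e.g.
#
#     config = Config("demo testcase").base_url("https://example.com").verify(False)
#     step = Step(
#         RunRequest("get index")
#         .with_variables(token="abc123")
#         .get("/index")
#         .with_headers(Accept="application/json")
#         .validate()
#         .assert_equal("status_code", 200)
#     )
#
# Step.perform() then yields the underlying TStep and Config.perform() the TConfig that a
# runner can execute.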
|
mit
|
vv1133/home_web
|
tests/model_fields/test_imagefield.py
|
54
|
16032
|
from __future__ import absolute_import
import os
import shutil
from django.core.exceptions import ImproperlyConfigured
from django.core.files import File
from django.core.files.images import ImageFile
from django.test import TestCase
from django.utils._os import upath
from django.utils.unittest import skipIf
try:
from .models import Image
except ImproperlyConfigured:
Image = None
if Image:
from .models import (Person, PersonWithHeight, PersonWithHeightAndWidth,
PersonDimensionsFirst, PersonTwoImages, TestImageFieldFile)
from .models import temp_storage_dir
else:
# Pillow not available, create dummy classes (tests will be skipped anyway)
class Person():
pass
PersonWithHeight = PersonWithHeightAndWidth = PersonDimensionsFirst = Person
PersonTwoImages = Person
class ImageFieldTestMixin(object):
"""
Mixin class to provide common functionality to ImageField test classes.
"""
# Person model to use for tests.
PersonModel = PersonWithHeightAndWidth
# File class to use for file instances.
File = ImageFile
def setUp(self):
"""
Creates a pristine temp directory (or deletes and recreates if it
already exists) that the model uses as its storage directory.
Sets up two ImageFile instances for use in tests.
"""
if os.path.exists(temp_storage_dir):
shutil.rmtree(temp_storage_dir)
os.mkdir(temp_storage_dir)
file_path1 = os.path.join(os.path.dirname(upath(__file__)), "4x8.png")
self.file1 = self.File(open(file_path1, 'rb'))
file_path2 = os.path.join(os.path.dirname(upath(__file__)), "8x4.png")
self.file2 = self.File(open(file_path2, 'rb'))
def tearDown(self):
"""
Removes temp directory and all its contents.
"""
shutil.rmtree(temp_storage_dir)
def check_dimensions(self, instance, width, height,
field_name='mugshot'):
"""
Asserts that the given width and height values match both the
field's height and width attributes and the height and width fields
(if defined) the image field is caching to.
Note, this method will check for dimension fields named by adding
"_width" or "_height" to the name of the ImageField. So, the
models used in these tests must have their fields named
accordingly.
By default, we check the field named "mugshot", but this can be
specified by passing the field_name parameter.
"""
field = getattr(instance, field_name)
# Check height/width attributes of field.
if width is None and height is None:
self.assertRaises(ValueError, getattr, field, 'width')
self.assertRaises(ValueError, getattr, field, 'height')
else:
self.assertEqual(field.width, width)
self.assertEqual(field.height, height)
# Check height/width fields of model, if defined.
width_field_name = field_name + '_width'
if hasattr(instance, width_field_name):
self.assertEqual(getattr(instance, width_field_name), width)
height_field_name = field_name + '_height'
if hasattr(instance, height_field_name):
self.assertEqual(getattr(instance, height_field_name), height)
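# Illustrative only (model and field names here are assumptions, not part of this module):
# the "<field>_width" / "<field>_height" convention checked above corresponds to a model
# declared roughly like
#
#     class PersonWithHeightAndWidth(models.Model):
#         name = models.CharField(max_length=50)
#         mugshot = models.ImageField(upload_to='tests',
#                                     height_field='mugshot_height',
#                                     width_field='mugshot_width')
#         mugshot_height = models.PositiveSmallIntegerField(null=True)
#         mugshot_width = models.PositiveSmallIntegerField(null=True)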
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldTests(ImageFieldTestMixin, TestCase):
"""
Tests for ImageField that don't need to be run with each of the
different test model classes.
"""
def test_equal_notequal_hash(self):
"""
Bug #9786: Ensure '==' and '!=' work correctly.
Bug #9508: make sure hash() works as expected (equal items must
hash to the same value).
"""
# Create two Persons with different mugshots.
p1 = self.PersonModel(name="Joe")
p1.mugshot.save("mug", self.file1)
p2 = self.PersonModel(name="Bob")
p2.mugshot.save("mug", self.file2)
self.assertEqual(p1.mugshot == p2.mugshot, False)
self.assertEqual(p1.mugshot != p2.mugshot, True)
# Test again with an instance fetched from the db.
p1_db = self.PersonModel.objects.get(name="Joe")
self.assertEqual(p1_db.mugshot == p2.mugshot, False)
self.assertEqual(p1_db.mugshot != p2.mugshot, True)
# Instance from db should match the local instance.
self.assertEqual(p1_db.mugshot == p1.mugshot, True)
self.assertEqual(hash(p1_db.mugshot), hash(p1.mugshot))
self.assertEqual(p1_db.mugshot != p1.mugshot, False)
def test_instantiate_missing(self):
"""
If the underlying file is unavailable, the object can still be
instantiated without error.
"""
p = self.PersonModel(name="Joan")
p.mugshot.save("shot", self.file1)
p = self.PersonModel.objects.get(name="Joan")
path = p.mugshot.path
shutil.move(path, path + '.moved')
p2 = self.PersonModel.objects.get(name="Joan")
def test_delete_when_missing(self):
"""
Bug #8175: correctly delete an object where the file no longer
exists on the file system.
"""
p = self.PersonModel(name="Fred")
p.mugshot.save("shot", self.file1)
os.remove(p.mugshot.path)
p.delete()
def test_size_method(self):
"""
Bug #8534: FileField.size should not leave the file open.
"""
p = self.PersonModel(name="Joan")
p.mugshot.save("shot", self.file1)
# Get a "clean" model instance
p = self.PersonModel.objects.get(name="Joan")
# It won't have an opened file.
self.assertEqual(p.mugshot.closed, True)
# After asking for the size, the file should still be closed.
_ = p.mugshot.size
self.assertEqual(p.mugshot.closed, True)
def test_pickle(self):
"""
Tests that ImageField can be pickled, unpickled, and that the
image of the unpickled version is the same as the original.
"""
import pickle
p = Person(name="Joe")
p.mugshot.save("mug", self.file1)
dump = pickle.dumps(p)
p2 = Person(name="Bob")
p2.mugshot = self.file1
loaded_p = pickle.loads(dump)
self.assertEqual(p.mugshot, loaded_p.mugshot)
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldTwoDimensionsTests(ImageFieldTestMixin, TestCase):
"""
Tests behavior of an ImageField and its dimensions fields.
"""
def test_constructor(self):
"""
Tests assigning an image field through the model's constructor.
"""
p = self.PersonModel(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
p.save()
self.check_dimensions(p, 4, 8)
def test_image_after_constructor(self):
"""
Tests behavior when image is not passed in constructor.
"""
p = self.PersonModel(name='Joe')
# TestImageField value will default to being an instance of its
# attr_class, a TestImageFieldFile, with name == None, which will
# cause it to evaluate as False.
self.assertEqual(isinstance(p.mugshot, TestImageFieldFile), True)
self.assertEqual(bool(p.mugshot), False)
# Test setting a fresh created model instance.
p = self.PersonModel(name='Joe')
p.mugshot = self.file1
self.check_dimensions(p, 4, 8)
def test_create(self):
"""
Tests assigning an image in Manager.create().
"""
p = self.PersonModel.objects.create(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
def test_default_value(self):
"""
Tests that the default value for an ImageField is an instance of
the field's attr_class (TestImageFieldFile in this case) with no
name (name set to None).
"""
p = self.PersonModel()
self.assertEqual(isinstance(p.mugshot, TestImageFieldFile), True)
self.assertEqual(bool(p.mugshot), False)
def test_assignment_to_None(self):
"""
Tests that assigning ImageField to None clears dimensions.
"""
p = self.PersonModel(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
# If image assigned to None, dimension fields should be cleared.
p.mugshot = None
self.check_dimensions(p, None, None)
p.mugshot = self.file2
self.check_dimensions(p, 8, 4)
def test_field_save_and_delete_methods(self):
"""
Tests assignment using the field's save method and deletion using
the field's delete method.
"""
p = self.PersonModel(name='Joe')
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8)
# A new file should update dimensions.
p.mugshot.save("mug", self.file2)
self.check_dimensions(p, 8, 4)
# Field and dimensions should be cleared after a delete.
p.mugshot.delete(save=False)
self.assertEqual(p.mugshot, None)
self.check_dimensions(p, None, None)
def test_dimensions(self):
"""
Checks that dimensions are updated correctly in various situations.
"""
p = self.PersonModel(name='Joe')
# Dimensions should get set if file is saved.
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8)
# Test dimensions after fetching from database.
p = self.PersonModel.objects.get(name='Joe')
# Bug 11084: Dimensions should not get recalculated if file is
# coming from the database. We test this by checking if the file
# was opened.
self.assertEqual(p.mugshot.was_opened, False)
self.check_dimensions(p, 4, 8)
# After checking dimensions on the image field, the file will have
# opened.
self.assertEqual(p.mugshot.was_opened, True)
# Dimensions should now be cached, and if we reset was_opened and
# check dimensions again, the file should not have opened.
p.mugshot.was_opened = False
self.check_dimensions(p, 4, 8)
self.assertEqual(p.mugshot.was_opened, False)
# If we assign a new image to the instance, the dimensions should
# update.
p.mugshot = self.file2
self.check_dimensions(p, 8, 4)
# Dimensions were recalculated, and hence file should have opened.
self.assertEqual(p.mugshot.was_opened, True)
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldNoDimensionsTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField with no dimension fields.
"""
PersonModel = Person
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldOneDimensionTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField with one dimensions field.
"""
PersonModel = PersonWithHeight
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldDimensionsFirstTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField where the dimensions fields are
defined before the ImageField.
"""
PersonModel = PersonDimensionsFirst
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldUsingFileTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField when assigning it a File instance
rather than an ImageFile instance.
"""
PersonModel = PersonDimensionsFirst
File = File
@skipIf(Image is None, "PIL is required to test ImageField")
class TwoImageFieldTests(ImageFieldTestMixin, TestCase):
"""
Tests a model with two ImageFields.
"""
PersonModel = PersonTwoImages
def test_constructor(self):
p = self.PersonModel(mugshot=self.file1, headshot=self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
p.save()
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
def test_create(self):
p = self.PersonModel.objects.create(mugshot=self.file1,
headshot=self.file2)
self.check_dimensions(p, 4, 8)
self.check_dimensions(p, 8, 4, 'headshot')
def test_assignment(self):
p = self.PersonModel()
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.mugshot = self.file1
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.headshot = self.file2
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# Clear the ImageFields one at a time.
p.mugshot = None
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
p.headshot = None
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
def test_field_save_and_delete_methods(self):
p = self.PersonModel(name='Joe')
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.headshot.save("head", self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# We can use save=True when deleting the image field with null=True
# dimension fields and the other field has an image.
p.headshot.delete(save=True)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.mugshot.delete(save=False)
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
def test_dimensions(self):
"""
Checks that dimensions are updated correctly in various situations.
"""
p = self.PersonModel(name='Joe')
# Dimensions should get set for the saved file.
p.mugshot.save("mug", self.file1)
p.headshot.save("head", self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# Test dimensions after fetching from database.
p = self.PersonModel.objects.get(name='Joe')
# Bug 11084: Dimensions should not get recalculated if file is
# coming from the database. We test this by checking if the file
# was opened.
self.assertEqual(p.mugshot.was_opened, False)
self.assertEqual(p.headshot.was_opened, False)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# After checking dimensions on the image fields, the files will
# have been opened.
self.assertEqual(p.mugshot.was_opened, True)
self.assertEqual(p.headshot.was_opened, True)
# Dimensions should now be cached, and if we reset was_opened and
# check dimensions again, the file should not have opened.
p.mugshot.was_opened = False
p.headshot.was_opened = False
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
self.assertEqual(p.mugshot.was_opened, False)
self.assertEqual(p.headshot.was_opened, False)
# If we assign a new image to the instance, the dimensions should
# update.
p.mugshot = self.file2
p.headshot = self.file1
self.check_dimensions(p, 8, 4, 'mugshot')
self.check_dimensions(p, 4, 8, 'headshot')
# Dimensions were recalculated, and hence file should have opened.
self.assertEqual(p.mugshot.was_opened, True)
self.assertEqual(p.headshot.was_opened, True)
|
bsd-3-clause
|
drnextgis/QGIS
|
python/plugins/processing/algs/qgis/SaveSelectedFeatures.py
|
5
|
2493
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
SaveSelectedFeatures.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class SaveSelectedFeatures(GeoAlgorithm):
OUTPUT_LAYER = 'OUTPUT_LAYER'
INPUT_LAYER = 'INPUT_LAYER'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Save selected features')
self.group, self.i18n_group = self.trAlgorithm('Vector general tools')
self.addParameter(ParameterVector(self.INPUT_LAYER,
self.tr('Input layer')))
self.addOutput(OutputVector(self.OUTPUT_LAYER,
self.tr('Selection')))
def processAlgorithm(self, progress):
inputFilename = self.getParameterValue(self.INPUT_LAYER)
output = self.getOutputFromName(self.OUTPUT_LAYER)
vectorLayer = dataobjects.getObjectFromUri(inputFilename)
writer = output.getVectorWriter(vectorLayer.fields(),
vectorLayer.wkbType(), vectorLayer.crs())
features = vector.features(vectorLayer)
total = 100.0 / len(features)
for current, feat in enumerate(features):
writer.addFeature(feat)
progress.setPercentage(int(current * total))
del writer
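# Illustrative only (the algorithm id and file paths are assumptions): with the Processing
# framework loaded, an algorithm like this is typically invoked from the QGIS console as
#
#     import processing
#     processing.runalg('qgis:saveselectedfeatures', '/tmp/input.shp', '/tmp/selection.shp')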
|
gpl-2.0
|
elbeardmorez/quodlibet
|
quodlibet/quodlibet/player/__init__.py
|
1
|
1901
|
# -*- coding: utf-8 -*-
# Copyright 2007 Joe Wreschnig
# 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import importlib
from quodlibet import util
from quodlibet.compat import swap_to_string, text_type
@swap_to_string
class PlayerError(Exception):
"""Error raised by player loading/initialization and emitted by the
error signal during playback.
Both short_desc and long_desc are meant for displaying in the UI.
They should be unicode.
"""
def __init__(self, short_desc, long_desc=None):
self.short_desc = short_desc
self.long_desc = long_desc
def __str__(self):
return self.short_desc + (
u"\n" + self.long_desc if self.long_desc else u"")
def __bytes__(self):
return text_type(self).encode('utf-8')
def __repr__(self):
return "%s(%r, %r)" % (
            type(self).__name__, self.short_desc, self.long_desc)
def init_player(backend_name, librarian):
"""Loads the specified backend and initializes it.
Returns a BasePlayer implementation instance.
Raises PlayerError in case of an error.
"""
backend = init_backend(backend_name)
return backend.init(librarian)
def init_backend(backend_name):
"""Imports the player backend module for the given name.
Raises PlayerError if the import fails.
The returned module provides the following functions:
init(librarian) -> new player instance
"""
modulename = "quodlibet.player." + backend_name
try:
backend = importlib.import_module(modulename)
except ImportError as e:
util.reraise(PlayerError, str(e))
else:
return backend
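# Illustrative only (the backend name and librarian object are placeholders): callers are
# expected to do something like
#
#     try:
#         device = init_player("nullbe", librarian)
#     except PlayerError as e:
#         print(e.short_desc)
#
# where init_backend() imports quodlibet.player.nullbe and its init() returns the backend's
# BasePlayer implementation.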
|
gpl-2.0
|
AlexOugh/horizon
|
openstack_dashboard/dashboards/settings/password/forms.py
|
63
|
3043
|
# Copyright 2013 Centrin Data Systems Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.forms import ValidationError # noqa
from django import http
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import functions as utils
from horizon.utils import validators
from openstack_dashboard import api
class PasswordForm(forms.SelfHandlingForm):
current_password = forms.CharField(
label=_("Current password"),
widget=forms.PasswordInput(render_value=False))
new_password = forms.RegexField(
label=_("New password"),
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
error_messages={'invalid':
validators.password_validator_msg()})
confirm_password = forms.CharField(
label=_("Confirm new password"),
widget=forms.PasswordInput(render_value=False))
no_autocomplete = True
def clean(self):
'''Check to make sure password fields match.'''
data = super(forms.Form, self).clean()
if 'new_password' in data:
if data['new_password'] != data.get('confirm_password', None):
raise ValidationError(_('Passwords do not match.'))
return data
# We have to protect the entire "data" dict because it contains the
# oldpassword and newpassword strings.
@sensitive_variables('data')
def handle(self, request, data):
user_is_editable = api.keystone.keystone_can_edit_user()
if user_is_editable:
try:
api.keystone.user_update_own_password(request,
data['current_password'],
data['new_password'])
response = http.HttpResponseRedirect(settings.LOGOUT_URL)
msg = _("Password changed. Please log in again to continue.")
utils.add_logout_reason(request, response, msg)
return response
except Exception:
exceptions.handle(request,
_('Unable to change password.'))
return False
else:
messages.error(request, _('Changing password is not supported.'))
return False
|
apache-2.0
|
EndPointCorp/interactivespaces-python-api
|
scripts/ispaces-relaunch.py
|
1
|
29285
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append("../")
sys.path.append("/home/galadmin/src/interactivespaces-python-api/")
import time
import json
import pprint
import urllib2
import argparse
import requests
import commands
import eventlet
import subprocess
import ConfigParser
from interactivespaces import Master
from interactivespaces import LiveActivityGroup
from termcolor import colored
from subprocess import CalledProcessError
from interactivespaces.exception import ControllerNotFoundException
# MasterException is caught in controller_connected() below; it is assumed here to live in
# the same interactivespaces.exception module as ControllerNotFoundException
from interactivespaces.exception import MasterException
eventlet.monkey_patch()
__report_indent = [0]
def debug(fn):
def wrap(*params, **kwargs):
call = wrap.callcount = wrap.callcount + 1
indent = ' ' * __report_indent[0]
fc = "%s(%s)" % (fn.__name__, ', '.join(
[a.__repr__() for a in params] +
["%s = %s" % (a, repr(b)) for a, b in kwargs.items()]
))
#print("%s%s called [#%s]" % (indent, fc, call))
__report_indent[0] += 1
ret = fn(*params, **kwargs)
__report_indent[0] -= 1
#print("%s%s returned %s [#%s]" % (indent, fc, repr(ret), call))
return ret
wrap.callcount = 0
return wrap
class InteractiveSpacesRelaunch(object):
@debug
def __init__(self, config_path, relaunch_options):
self.config_path = config_path
self.init_config(relaunch_options)
self.master = Master(host=self.host,
port=self.port,
logfile_path=self.log_path)
self.relaunch_container = []
self.stopped = False
self.activated = False
# Parsing arguments takes precedence over config file options
self.relaunch_live_activities = True
self.relaunch_controllers = False
self.relaunch_master = False
if relaunch_options['no_live_activities']:
print "Performing relaunch without relaunching live activities"
self.relaunch_live_activities = False
else:
self.relaunch_live_activities = True
if relaunch_options['full_relaunch'] or relaunch_options['full']:
print colored("Performing full relaunch.", 'white', attrs=['bold'])
self.relaunch_controllers = True
self.relaunch_master = True
if relaunch_options['master_only']:
print colored("Performing relaunch of master only", 'white', attrs=['bold'])
self.relaunch_controllers = False
self.relaunch_master = True
self.relaunch_live_activities = False
if relaunch_options['controllers_only']:
print colored("Performing relaunch of controllers only", 'white', attrs=['bold'])
self.relaunch_controllers = True
self.relaunch_master = False
self.relaunch_live_activities = False
if relaunch_options['live_activity_groups']:
self.relaunch_sequence = relaunch_options['live_activity_groups'].split(',')
if len(self.relaunch_sequence) == 0:
print colored("Relaunch sequence is empty")
sys.exit(1)
print colored("Live activity groups to be relaunched: %s" % self.relaunch_sequence, 'white', attrs=['bold'])
if relaunch_options['status']:
print colored("Getting status of IS stack", 'white', attrs=['bold'])
else:
print colored("This is what's going to be launched:", 'white', attrs=['bold'])
print "Controllers: %s" % self.relaunch_controllers
print "Master: %s" % self.relaunch_master
print "Live activities: %s" % self.relaunch_live_activities
if self.relaunch_live_activities:
print "Live activity groups: " + colored("%s" % (',').join(self.relaunch_sequence), 'magenta')
@debug
def init_config(self, relaunch_options):
self.config = ConfigParser.RawConfigParser()
self.config.read(self.config_path)
self.host = self.config.get('master', 'host')
self.port = self.config.get('master', 'port')
self.shutdown_attempts = self.config.getint('relaunch','shutdown_attempts')
self.startup_attempts = self.config.getint('relaunch','startup_attempts')
self.relaunch_sequence = self.config.get('relaunch','relaunch_sequence').split(',')
self.interval_between_attempts = self.config.getint('relaunch','interval_between_attempts')
self.relaunch_controllers = self.config.getint('relaunch','relaunch_controllers')
self.relaunch_master = self.config.getint('relaunch', 'relaunch_master')
self.master_stop_command = self.config.get('master', 'stop_command')
self.master_launch_command = self.config.get('master', 'launch_command')
self.master_destroy_tmux_command = self.config.get('master', 'destroy_tmux_command')
self.log_path = self.config.get('global', 'logfile_path')
self.ssh_command = self.config.get('global', 'ssh_command')
self.pp = pprint.PrettyPrinter(indent=4)
self.controllers_data = self.init_controllers_config(relaunch_options)
@debug
def create_uri_for(self, location):
return "http://" + self.host + ":" + self.port + location
@debug
def init_controllers_config(self, relaunch_options):
config = {}
if relaunch_options['controllers']:
controllers_list = relaunch_options['controllers'].split(',')
else:
controllers_list = self.config.get('global', 'controllers_list').split(',')
print colored("Controller(s) to relaunch: %s" % (',').join(controllers_list), 'green')
for controller_name in controllers_list:
config[controller_name] = {}
config[controller_name]['name'] = self.config.get(controller_name, 'name')
config[controller_name]['hostname'] = self.config.get(controller_name, 'hostname')
config[controller_name]['stop_command'] = self.config.get(controller_name, 'stop_command')
config[controller_name]['launch_command'] = self.config.get(controller_name, 'launch_command')
config[controller_name]['pid_command'] = self.config.get(controller_name, 'pid_command')
config[controller_name]['destroy_tmux_command'] = self.config.get(controller_name, 'destroy_tmux_command')
return config
@debug
def stop_controller(self, controller_name):
"""
@summary: stops a remote controller process
@rtype: string containing output of stop command
"""
command = "%s %s '%s'" % (self.ssh_command, controller_name, self.controllers_data[controller_name]['stop_command'])
cmd_process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
output = cmd_process.communicate()[0].replace('\n', '').split(' ')
return output
@debug
def destroy_tmux_session(self, controller_name):
"""
@summary: destroys tmux session of a controller
@rtype: string with command output
"""
command = "%s %s '%s'" % (self.ssh_command, controller_name, self.controllers_data[controller_name]['destroy_tmux_command'])
cmd_process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
output = cmd_process.communicate()[0].replace('\n', '').split(' ')
return output
@debug
def start_controller(self, controller_name):
"""
@summary: starts controller (most likely a tmux session)
@rtype: string containing start output
"""
command = "%s %s '%s'" % (self.ssh_command, controller_name, self.controllers_data[controller_name]['launch_command'])
cmd_process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
output = cmd_process.communicate()[0].replace('\n', '').split(' ')
return output
@debug
def controller_connected(self, controller_name):
"""
@summary: returns True only if the controller is known to the master, ENABLED and RUNNING - False
otherwise (ispaces controllers are tricky, so any lookup failure counts as not connected)
"""
try:
controller = self.master.get_space_controller({'space_controller_name' : controller_name,
'space_controller_mode' : 'ENABLED',
'space_controller_state': 'RUNNING'})
return True
except ControllerNotFoundException, e :
return False
except MasterException, e:
return False
except urllib2.HTTPError,e :
print colored("Failed to communicate with master (%s) - is it running?" % e, 'red')
@debug
def connect_controller(self, controller_name):
"""
@summary: connects the controller through the Master API and refreshes its status.
After that it will wait for the controller to appear as connected and running;
if the controller does not appear as connected, this method will finally return False
@rtype: bool
"""
timeout = self.config.getint('relaunch', 'controllers_timeout')
try:
controller = self.master.get_space_controller({'space_controller_name': controller_name})
except urllib2.HTTPError, e:
print colored("Failed to connect to master (%s) - is it running?" % e, 'red')
sys.exit(1)
controller.send_connect()
controller.send_status_refresh()
for wait in xrange(0, timeout):
if self.controller_connected(controller_name):
print colored("Controller '%s' is connected" % controller_name, 'green')
return True
else:
print colored('.', 'red'),
time.sleep(1)
sys.stdout.flush()
print colored("Could not connect controller '%s' in specified timeout" % controller_name, 'red')
print colored("Check if controller exists, started and whether the name is unique", 'red')
return False
@debug
def controller_tmux_session_exists(self, controller_name):
"""
@summary: returns True if tmux session of a controller exists - False otherwise
@rtype: bool
"""
try:
cmd = 'ssh %s -t "tmux ls | grep ISController"' % controller_name
out = subprocess.check_call(cmd, shell=True, stdout=subprocess.PIPE)
return True
except CalledProcessError, e:
print "tmux session ISController does not exist on %s" % controller_name
return False
@debug
def relaunch_master_process(self):
"""
@summary: relaunches master process and returns True as soon as the Master API is reachable
@rtype: bool
"""
cmd_process = subprocess.Popen(self.master_stop_command, shell=True, stdout=subprocess.PIPE)
cmd_process = subprocess.Popen(self.master_destroy_tmux_command, shell=True, stdout=subprocess.PIPE)
self.simple_wait('kill_master_process', 5)
cmd_process = subprocess.Popen(self.master_launch_command, shell=True, stdout=subprocess.PIPE)
if self.api_wait('start_master',
timeout=120,
url=self.create_uri_for(self.config.get('master', 'verify_url'))
):
            return True
        print colored('Could not restart Interactivespaces Master within specified timeout', 'red')
        sys.exit(1)
@debug
def verify_controllers_tmux_sessions(self):
"""
@summary: checks all controllers' tmux sessions and revives them if they don't exist
"""
for controller_name, controller_data in self.controllers_data.iteritems():
if self.controller_tmux_session_exists(controller_name):
print colored("Tmux session ISController on %s exists" % controller_name, 'green')
else:
print colored("Tmux session ISController on %s does not exist - reviving" % controller_name, 'red')
self.start_controller(controller_name)
print "Connecting controller %s on %s" % (controller_data['name'], controller_name)
self.connect_controller(controller_data['name'])
self.controllers_data[controller_name]['connected'] = self.controller_connected(controller_data['name'])
@debug
def assert_controllers_api_statuses(self):
"""
@summary: checks whether all controllers are running by connecting them and making an
assert on every one of them through the Master API.
@rtype: bool
"""
for controller_name, controller_data in self.controllers_data.iteritems():
self.connect_controller(controller_data['name'])
if self.controller_connected(controller_data['name']):
self.controllers_data[controller_name]['connected'] = True
else:
self.controllers_data[controller_name]['connected'] = False
for controller_name, controller_data in self.controllers_data.iteritems():
if self.controllers_data[controller_name]['connected']:
pass
else:
return False
return True
@debug
def verify_controllers_connected(self):
print "Waiting for all controllers to come up with status: " + colored('RUNNING', 'green')
timeout = self.config.get('relaunch', 'controllers_timeout')
for blah in xrange(0, int(timeout)):
if self.assert_controllers_api_statuses():
return True
else:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(1)
print colored('Waiting timed out :(', 'red')
return False
@debug
def connect_controllers(self):
"""
@summary: should verify all connections and try to revive them if they're not connected
After that controllers should be visible as "Connected" in Master API.
@rtype: bool
"""
self.verify_controllers_tmux_sessions()
return self.verify_controllers_connected()
@debug
def controllers_connected(self):
"""
@summary: Iterates over all controllers and makes sure they're connected
"""
if self.connect_controllers():
return True
else:
help = self.produce_controllers_tmux_help()
print "Could not connect all controllers - use '--help'. Start and stop commands are here: %s" % help
return False
@debug
def produce_controllers_tmux_help(self):
help = {}
controllers_list = self.config.get('global', 'controllers_list').split(',')
for controller_name in controllers_list:
help[controller_name] = {}
help[controller_name]['stop_command'] = self.config.get(controller_name, 'stop_command')
help[controller_name]['launch_command'] = self.config.get(controller_name, 'launch_command')
return help
@debug
def prepare_container(self):
"""
@summary: prepares a list of live activity API objects to relaunch
@rtype: bool
"""
print "Adding live activity groups to relaunch queue: "
for live_activity_group_name in self.relaunch_sequence:
print colored(" %s " % live_activity_group_name, 'magenta'),
sys.stdout.flush()
try:
live_activity_group = self.master.get_live_activity_group(
{'live_activity_group_name' : live_activity_group_name})
except urllib2.HTTPError, e:
print colored("Failed to communicate with master (%s)- is it running?" % e, 'red')
sys.exit(1)
self.relaunch_container.append(live_activity_group)
print ""
return True
@debug
def assert_api_url_status(self, url, http_status_code=200, timeout=5):
"""
@summary: method used to make asserts on an API. It returns true if API under `url`
returned `http_status_code` within specified `timeout` with JSON that's parseable
@rtype: bool
"""
try:
with eventlet.Timeout(timeout):
try:
url = url.strip('"')
#print "Getting URL %s" % url
response = requests.get(url)
print 'HTTP %s' % response.status_code,
except Exception:
return False
if response.status_code == http_status_code:
try:
json.loads(response.content)
print colored(' API JSON is valid. ', 'green'),
except Exception:
print colored(' JSON returned by API is not parseable ', 'red'),
return False
return True
else:
return False
except eventlet.timeout.Timeout:
print colored('%s timed out' % url, 'red')
return False
@debug
def simple_wait(self, info, timeout):
"""
@summary: short synchronous sleep with an informational message, used to give the remote
side time to free file descriptors, e.g. after:
- destroying tmuxes
- killing the java process
"""
print "Waiting for " + colored(self.interval_between_attempts, 'green') + " second(s) for " + colored(info, 'green')
time.sleep(self.interval_between_attempts)
@debug
def api_wait(self, info, timeout=None, url=None):
print "Waiting for the webservice to come up with: " + colored('200 OK', 'green')
for blah in xrange(0, timeout):
if self.assert_api_url_status(url):
print colored("Service is up (%s)" % info, 'green')
sys.stdout.flush()
return True
else:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(1)
print colored('Waiting timed out :(', 'red')
return False
@debug
def shutdown_all_activities(self):
while not self.stopped and self.shutdown_attempts >= 0:
self.shutdown()
self.status_refresh()
self.shutdown_attempts -= 1
self.stopped = self.check_if_stopped()
if self.stopped:
return True
else:
print colored("Shutdown attempts left %s" % self.shutdown_attempts, 'red')
sys.stdout.flush()
@debug
def activate_all_live_activity_groups(self):
while self.startup_attempts >= 0:
self.set_state()
self.status_refresh()
self.relaunched = self.check_if_activated()
if self.relaunched:
return True
else:
print colored("Startup attempts left %s" % self.startup_attempts, 'red')
self.startup_attempts -= 1
@debug
def loop_till_finished(self):
'''
@summary: first make sure we're stopped, then make sure we're activated
'''
if self.controllers_connected() and self.shutdown_all_activities() and self.activate_all_live_activity_groups():
return True
else:
return False
@debug
def get_statuses(self):
"""
@summary: checks for live activities statuses and returns them
@rtype: dict
"""
statuses = {}
for live_activity_group_name in self.relaunch_sequence:
sys.stdout.flush()
live_activity_group = self.master.get_live_activity_group(
{'live_activity_group_name' : live_activity_group_name})
if type(live_activity_group) == LiveActivityGroup:
for live_activity in live_activity_group.live_activities():
statuses[live_activity.name()] = live_activity.status()
else:
print "Live activity group not found %s - please make sure that relaunch sequence contains existing group names" % live_activity_group_name
sys.exit(1)
return statuses
@debug
def check_if_stopped(self):
"""
@summary: returns True if all live activities were stopped - False otherwise
@rtype: bool
"""
timeout = self.config.getint('relaunch', 'live_activities_timeout')
print colored("Waiting for live activities to stop", "green")
for wait in xrange(0, timeout):
time.sleep(1)
statuses = self.get_statuses()
statuses = {k: v for k, v in statuses.iteritems() if v != 'READY' }
statuses = {k: v for k, v in statuses.iteritems() if v != 'DOESNT_EXIST' }
if statuses:
print colored(".", 'red'),
sys.stdout.flush()
else:
print colored("All activities have been succesfully shutdown", 'green')
return True
print ""
print colored("Giving up - following live activities could not be shut down:", 'red')
self.pp.pprint(statuses)
return False
@debug
def check_if_activated(self):
"""
@summary: returns True if all activities reached status "ACTIVE" or "RUNNING". It will poll and wait for a while
for each activity
@rtype: bool
"""
timeout = self.config.getint('relaunch', 'live_activities_timeout')
print colored("Waiting for live activities to start", "green")
for wait in xrange(0, timeout):
statuses = self.get_statuses()
for live_activity in [live_activity for live_activity in statuses.keys() if statuses[live_activity] == 'ACTIVE']:
statuses.pop(live_activity)
for live_activity in [live_activity for live_activity in statuses.keys() if statuses[live_activity] == 'RUNNING']:
statuses.pop(live_activity)
if statuses:
print colored(".", 'red'),
sys.stdout.flush()
time.sleep(1)
else:
print colored("All activities activated", 'green')
return True
print colored("Following live activities could not get activated:", 'red')
self.pp.pprint(statuses)
return False
@debug
def shutdown(self):
"""
@summary: shuts down all activities from the .relaunch_container - in addition it will clean each live activity's tmp directory
@rtype: bool
"""
print "Attempting shutdown of live activities:"
for live_activity_group in self.relaunch_container:
live_activities = live_activity_group.live_activities()
for live_activity in live_activities:
print colored(" %s " % live_activity.name(), 'magenta'),
sys.stdout.flush()
live_activity.send_shutdown()
live_activity.send_clean_tmp()
print ""
return True
@debug
def status_refresh(self):
"""
@summary: sends 'status' refresh request to live activity groups that
were listed in .relaunch_container
"""
for live_activity_group in self.relaunch_container:
live_activity_group.send_status_refresh()
@debug
def set_state(self):
"""
@summary: sets the activated/running state of all live activity groups by deploying, configuring and then activating or starting them up
"""
print colored("Attempting (D)eploy -> (C)onfigure -> (S)tartup -> (A)ctivate of live activity groups:", 'green')
for live_activity_group in self.relaunch_container:
print colored(" %s " % live_activity_group.name(), 'magenta'),
sys.stdout.flush()
print colored("D", 'blue'),
sys.stdout.flush()
live_activity_group.send_deploy()
print colored("C", 'blue'),
sys.stdout.flush()
live_activity_group.send_configure()
desired_state = self.config.get('relaunch_states', live_activity_group.name())
if desired_state == 'activate':
live_activity_group.send_activate()
print colored("A", 'blue')
sys.stdout.flush()
elif desired_state == 'startup':
live_activity_group.send_startup()
print colored("S", 'blue')
sys.stdout.flush()
else:
live_activity_group.send_activate()
print colored("A", 'blue')
sys.stdout.flush()
print ""
@debug
def relaunch_controllers_processes(self):
"""
@summary: stops controller processes (most likely tmux sessions) and starts them afterwards.
No asserts are made in the API
@rtype: bool
"""
for controller_name, controller_data in self.controllers_data.iteritems():
self.stop_controller(controller_name)
self.destroy_tmux_session(controller_name)
print "Connecting controller %s on %s" % (controller_data['name'], controller_name)
self.simple_wait("Waiting for controllers to free file descriptors", 3)
print colored("Launching controllers", 'green')
for controller_name, controller_data in self.controllers_data.iteritems():
self.start_controller(controller_name)
self.simple_wait("Waiting for controllers to come up", 3)
for controller_name, controller_data in self.controllers_data.iteritems():
self.connect_controller(controller_data['name'])
self.controllers_data[controller_name]['connected'] = self.controller_connected(controller_data['name'])
return True
@debug
def get_status(self):
"""
@summary: gets live activities statuses
"""
print colored('Live activities', 'green')
self.pp.pprint(self.get_statuses())
print colored('Space controllers state', 'green')
self.assert_controllers_api_statuses()
@debug
def relaunch(self):
if self.relaunch_master:
self.relaunch_master_process()
if self.relaunch_controllers:
self.relaunch_controllers_processes()
if self.relaunch_live_activities:
self.prepare_container()
if self.loop_till_finished() == True:
print colored("Successfully relaunched ispaces", 'green', attrs=['bold'])
sys.exit(0)
else:
print colored("Exiting: could not relaunch ispaces - look for errors in %s and 'ugly' interface" % self.log_path, 'red')
sys.exit(1)
if __name__ == '__main__':
"""
@summary: parse all arguments, unset TMUX variable to be able to use this tool from a tmux session and
also check whether user is asking for a status or a relaunch
"""
os.unsetenv('TMUX')
parser = argparse.ArgumentParser(description='Relaunch interactivespaces')
parser.add_argument("--full-relaunch", help="Additionally relaunch controllers and master process", action="store_true")
parser.add_argument("--full", help="Alias for --full-relaunch", action="store_true")
parser.add_argument("--master-only", help="Relaunch the master process only - remember to relaunch controllers after that", action="store_true")
parser.add_argument("--controllers-only", help="Relaunch the controllers only", action="store_true")
parser.add_argument("--controllers", help="Comma separated of controllers to restart e.g. : --controllers=42-a,42-b,42-c (works with --controllers-only only)")
parser.add_argument("--no-live-activities", help="Don't relaunch live activities", action="store_true")
parser.add_argument("--config", help="Provide path to config file - /home/galadmin/etc/ispaces-client.conf by default")
parser.add_argument("--live-activity-groups", help="Provide quoted, comma-delimited names of live activity groups to manage e.g. --live-activity-groups='Touchscreen Browser','Media Services' ")
parser.add_argument("--status", help="Print current status of managed live activities.", action="store_true")
args = parser.parse_args()
relaunch_options = { 'full_relaunch': args.full_relaunch,
'full': args.full,
'master_only': args.master_only,
'controllers_only': args.controllers_only,
'controllers': args.controllers,
'no_live_activities': args.no_live_activities,
'live_activity_groups': args.live_activity_groups,
'status': args.status
}
if args.config:
config_path = args.config
else:
config_path = '/home/galadmin/etc/ispaces-client.conf'
if os.path.isfile(config_path):
ir = InteractiveSpacesRelaunch(config_path, relaunch_options)
if relaunch_options['status']:
ir.get_status()
else:
ir.relaunch()
else:
print "Could not open config file %s" % config_path
|
apache-2.0
|
GoogleCloudPlatform/python-compat-runtime
|
appengine-vmruntime/vmruntime/tests/wsgi_config_test.py
|
1
|
2403
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import unittest
from mock import patch
from vmruntime import wsgi_config
from vmruntime.tests import wsgi_test
from werkzeug import test
from werkzeug import wrappers
@wrappers.Request.application
def salutation_world(request):
salutation = request.args.get('salutation', 'Hello')
return wrappers.Response('%s World!' % salutation)
def goodbye_world_middleware(app):
def goodbye_wrapper(wsgi_env, start_response):
wsgi_env['QUERY_STRING'] = 'salutation=Goodbye'
return app(wsgi_env, start_response)
return goodbye_wrapper
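# Illustrative only (not part of this test module): the same factory pattern shown in
# goodbye_world_middleware extends to any WSGI middleware, e.g. one that tags responses
# with an extra header:
#
#     def add_header_middleware(app):
#         def wrapper(wsgi_env, start_response):
#             def patched_start_response(status, headers, exc_info=None):
#                 headers.append(('X-Wrapped', 'true'))
#                 return start_response(status, headers, exc_info)
#             return app(wsgi_env, patched_start_response)
#         return wrapper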
class AppConfigTestCase(unittest.TestCase):
def test_app_for_script(self):
with patch.object(wsgi_config,
'get_add_middleware_from_appengine_config',
return_value=None):
app = wsgi_config.app_for_script(
wsgi_test.script_path('salutation_world',
test_name=__name__))
client = test.Client(app, wrappers.Response)
response = client.get('/?salutation=Hello')
self.assertEqual(response.status_code, httplib.OK)
self.assertEqual(response.data, 'Hello World!')
def test_app_for_script_with_middleware(self):
with patch.object(wsgi_config,
'get_add_middleware_from_appengine_config',
return_value=goodbye_world_middleware):
app = wsgi_config.app_for_script(
wsgi_test.script_path('salutation_world',
test_name=__name__))
client = test.Client(app, wrappers.Response)
response = client.get('/?salutation=Hello')
self.assertEqual(response.status_code, httplib.OK)
self.assertEqual(response.data, 'Goodbye World!')
|
apache-2.0
|
redaktor/pHash
|
bindings/python/test/add_mvptree_dct.py
|
3
|
3616
|
#!/usr/bin/python -OO
# -*- coding: iso-8859-15 -*-
#
#
# pHash, the open source perceptual hash library
# Copyright (C) 2009 Aetilius, Inc.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Loic Jaquemet - [email protected]
#
import pHash
import locale,logging,os,sys,time
from os.path import join
def distancefunc(pa,pb):
# pa.hash is a void * pointer.
# we need to cast it into ulong64* AND get its value
d = pHash.ph_hamming_distance(pHash.ulong64Ptr_value(pHash.voidToULong64(pa.hash)), pHash.ulong64Ptr_value(pHash.voidToULong64(pb.hash)))
return d
def main(argv):
'''
'''
logging.basicConfig(level=logging.DEBUG)
print pHash.ph_about()
if (len(argv) < 2):
print "not enough input arguments"
print "usage: %s directory dbname [radius] [knearest] [threshold]"%( sys.argv[0])
return -1
dir_name = argv[0]#/* name of files in directory of query images */
filename = argv[1]#/* name of file to save db */
mvpfile=pHash.MVPFile()
mvpfile.filename = filename
pHash.my_set_callback(mvpfile,distancefunc)
mvpfile.hash_type = pHash.UINT64ARRAY
nbfiles = 0
print "dir name: %s"%( dir_name)
nbfiles=0
files=None
for root, dirs, filest in os.walk(dir_name):
nbfiles=len(filest)
files=[os.path.join(root,f) for f in filest]
files.sort()
print "nbfiles = %d"%( nbfiles)
#allocate a list of nbfiles elements # hashlist = (DP**)malloc(nbfiles*sizeof(DP*));
hashlist=pHash.DPptrArray(nbfiles)
if ( hashlist is None):
print "mem alloc error"
return -3
count=0
tmphash=0x00000000
for i in range(0,nbfiles):
tmpdp=pHash.ph_malloc_datapoint(mvpfile.hash_type)
if (tmpdp is None):
print "mem alloc error"
return -4
tmpdp.thisown=0
hashlist[count]=tmpdp
#useless malloc, we use copy_
#hashlist[count].hash=pHash.new_ulong64Ptr()
#if (hashlist[count].hash is None):
# print "mem alloc error"
# return -5
print "file[%d] = %s"%( i, files[i])
ret=pHash.ph_dct_imagehash(files[i])
if (type(ret) is int):
print "unable to get hash"
hashlist[count].hash=None
pHash.ph_free_datapoint(hashlist[count])
continue
(res,tmphash)=ret
hashlist[count].id = files[i]
hashlist[count].hash=pHash.copy_ulong64Ptr(tmphash)
hashlist[count].hash_length = 1
count+=1
#
print "add files to file %s"%(filename)
nbsaved=0
ret = pHash.ph_add_mvptree(mvpfile, hashlist.cast(), count)
if (type(ret) is int):
print "error on ph_add_mvptree"
return -6
(res,nbsaved)=ret
print "number saved %d out of %d, ret code %d"%( nbsaved,count,res)
# freeeee. we need to add %newobject to ph_readfilesnames
#for i in range(0,nbfiles):
# free(files[i])
#
files=None
for i in range(0,nbfiles):
hashlist[i].hash = None
pHash.ph_free_datapoint(hashlist[i])
hashlist=None
return 0
if __name__ == '__main__':
main(sys.argv[1:])
|
gpl-3.0
|
dsuch/sec-wall
|
code/tests/test_core.py
|
1
|
7669
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at gefira.pl>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import re
from datetime import datetime, timedelta
from logging import makeLogRecord
from uuid import uuid4
# nose
from nose.tools import assert_true, eq_
# sec-wall
from secwall.core import AuthResult, InvocationContext, LoggingFormatter, \
SecurityException, SecWallException, version_info, version
def test_core():
""" Tests info global to the module.
"""
eq_(version_info, ('1', '1'))
eq_(version, '1.1')
def test_exceptions():
""" Tests sec-wall specific exceptions.
"""
assert_true(issubclass(SecWallException, Exception))
assert_true(issubclass(SecurityException, SecWallException))
description = uuid4().hex
e = SecurityException(description)
eq_(e.description, description)
def test_auth_result_nonzero():
""" Tests AuthResult in boolean contexts.
"""
# It's False by default.
a1 = AuthResult()
eq_(False, bool(a1))
a2 = AuthResult(True)
eq_(True, bool(a2))
def test_auth_result_properties():
""" Tests that AuthResult's properties can be read correctly.
"""
# Check the defaults first.
a1 = AuthResult()
eq_(False, a1.status)
eq_('-1', a1.code)
eq_('', a1.description)
status, code, description = [uuid4().hex for x in range(3)]
a2 = AuthResult(status, code, description)
eq_(status, a2.status)
eq_(code, a2.code)
eq_(description, a2.description)
def test_auth_result_repr():
""" Tests the AuthResult's __repr__ output.
"""
at_pattern = '\w*'
status, code, description = [uuid4().hex for x in range(3)]
auth_info = {b'abc':b'def'}
a1 = AuthResult(status, code, description)
a1.auth_info = auth_info
r = repr(a1)
pattern = '<AuthResult at {0} status={1} code={2} description={3} auth_info={{abc: def}}\n>'
pattern = pattern.format(at_pattern, status, code, description)
regexp = re.compile(pattern)
assert_true(regexp.match(r) is not None, (pattern, r))
def test_logging_formatter():
""" Makes sure that the logging formatter prepends messages
with the expected string.
"""
lf = LoggingFormatter()
_msg = uuid4().hex
d = {'msg': _msg}
record = makeLogRecord(d)
msg = lf.format(record)
eq_(msg, 'sec-wall {0}'.format(_msg))
def test_invocation_context_init_parameters():
""" Makes sure the parameters passed into InocationContext.__init___
are being assigned to the instance correctly.
"""
(_instance_name, _instance_unique, _message_number, _proc_start,
_proc_end, _ext_start, _ext_end, _env, _url_config, _client_cert,
_data, _remote_address, _auth_result, _config_type, _path_info,
_query_string, _client_address, _request_method) = [uuid4().hex for x in range(18)]
ctx = InvocationContext(_instance_name, _instance_unique,
_message_number, _proc_start, _proc_end, _ext_start, _ext_end,
_env, _url_config, _client_cert, _data, _remote_address,
_auth_result, _config_type, _path_info, _query_string,
_client_address, _request_method)
eq_(ctx.instance_name, _instance_name)
eq_(ctx.instance_unique, _instance_unique)
eq_(ctx.message_number, _message_number)
eq_(ctx.proc_start, _proc_start)
eq_(ctx.proc_end, _proc_end)
eq_(ctx.ext_start, _ext_start)
eq_(ctx.ext_end, _ext_end)
eq_(ctx.env, _env)
eq_(ctx.url_config, _url_config)
eq_(ctx.client_cert, _client_cert)
eq_(ctx.data, _data)
eq_(ctx.remote_address, _remote_address)
eq_(ctx.auth_result, _auth_result)
eq_(ctx.config_type, _config_type)
eq_(ctx.path_info, _path_info)
eq_(ctx.query_string, _query_string)
eq_(ctx.client_address, _client_address)
eq_(ctx.request_method, _request_method)
eq_(ctx.stop_watch_format, '{0.seconds}.{0.microseconds:06d}')
eq_(ctx.invocation_id, '{0}/{1}/{2}'.format(_instance_name, _instance_unique,
_message_number))
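# A worked example of the stop-watch format asserted above, assuming only the
# stdlib timedelta imported at the top of this module:
#   '{0.seconds}.{0.microseconds:06d}'.format(timedelta(seconds=3, microseconds=9017))
#   -> '3.009017'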
def test_invocation_context_format_log_message():
""" Tests the correctness of formatting of logging messages.
"""
_auth1 = AuthResult(True)
_auth2 = AuthResult(False, uuid4().hex)
for _auth_result in _auth1, _auth2:
for _needs_details in True, False:
_now = datetime.now()
_start_to_ext_start = timedelta(seconds=1, microseconds=129)
_ext_took = timedelta(seconds=3, microseconds=9017)
_ext_end_to_proc_end = timedelta(seconds=7, microseconds=3511)
_proc_start = _now
_proc_end = _now + _start_to_ext_start + _ext_took + _ext_end_to_proc_end
_ext_start = _now + _start_to_ext_start
_ext_end = _now + _start_to_ext_start + _ext_took
_env = {'HTTP_USER_AGENT':uuid4().hex, 'SERVER_SOFTWARE':uuid4().hex,
'SERVER_NAME':uuid4().hex, 'SERVER_PORT':uuid4().hex}
_code = uuid4().hex
(_instance_name, _instance_unique, _message_number, _url_config, _client_cert,
_data, _remote_address, _config_type, _path_info,
_query_string, _client_address, _request_method) = [uuid4().hex for x in range(12)]
ctx = InvocationContext(_instance_name, _instance_unique,
_message_number, _proc_start, _proc_end, _ext_start, _ext_end,
_env, _url_config, _client_cert, _data, _remote_address,
_auth_result, _config_type, _path_info, _query_string,
_client_address, _request_method)
msg = ctx.format_log_message(_code, _needs_details)
if _needs_details:
(invocation_id, code, proc_start, remote_address, req_info,
secwall_overhead, ext_overhead, proc_total, auth_result,
auth_code, http_user_agent, server_software, server_name, server_port,
config_type, data) = msg.split(';')
else:
(invocation_id, code, proc_start, remote_address, req_info,
secwall_overhead, ext_overhead, proc_total, auth_result,
auth_code) = msg.split(';')
eq_(invocation_id, ctx.invocation_id)
eq_(code, _code)
eq_(proc_start, str(_proc_start))
eq_(remote_address, _remote_address)
eq_(req_info, _request_method + ' ' + _path_info + _query_string)
_proc_total = _proc_end - _proc_start
_ext_overhead = _ext_end - _ext_start
_secwall_overhead = _proc_total - _ext_overhead
eq_(proc_total, str(_proc_total.seconds) + '.' + str(_proc_total.microseconds).zfill(6))
eq_(ext_overhead, str(_ext_overhead.seconds) + '.' + str(_ext_overhead.microseconds).zfill(6))
eq_(secwall_overhead, str(_secwall_overhead.seconds) + '.' + str(_secwall_overhead.microseconds).zfill(6))
if _auth_result:
eq_(auth_result, '0')
else:
eq_(auth_result, '1')
eq_(auth_code, _auth_result.code)
if _needs_details:
eq_(http_user_agent, '"{0}"'.format(_env.get('HTTP_USER_AGENT')))
eq_(server_software, _env.get('SERVER_SOFTWARE'))
eq_(server_name, _env.get('SERVER_NAME'))
eq_(server_port, _env.get('SERVER_PORT'))
eq_(config_type, _config_type)
eq_(data, _data)
|
gpl-3.0
|
PW-Sat2/PWSat2OBC
|
integration_tests/experiment_file/payload.py
|
1
|
2312
|
from parsec import count, joint
from base import pid, label_as, field, to_dict
from parsing import byte, uint16
from parsec import Parser, Value
from emulator.beacon_parser import eps_controller_a_telemetry_parser, eps_controller_b_telemetry_parser, \
error_counting_telemetry, experiment_telemetry_parser, mcu_temperature_parser
from emulator.beacon_parser.parser import BitArrayParser
from math import ceil
PayloadWhoAmI = pid(0x30) >> count(byte, 1)
PayloadWhoAmI >>= label_as('Payload Who Am I')
PayloadHousekeeping = pid(0x34) >> joint(
field('INT 3V3D', uint16),
field('OBC 3V3D', uint16),
).bind(to_dict)
PayloadHousekeeping >>= label_as('Payload Housekeeping')
##########################################################################
class PayloadOBCTelemetryParser:
def __init__(self):
self.storage = {}
def write(self, category, name, value):
if category not in self.storage.keys():
self.storage[category] = {}
self.storage[category][name] = value
def GetParsers(self, reader, store):
return [error_counting_telemetry.ErrorCountingTelemetry(reader, store),
experiment_telemetry_parser.ExperimentTelemetryParser(reader, store),
mcu_temperature_parser.McuTemperatureParser(reader, store),
eps_controller_a_telemetry_parser.EPSControllerATelemetryParser(reader, store),
eps_controller_b_telemetry_parser.EPSControllerBTelemetryParser(reader, store)]
def GetSize(self):
parsers = self.GetParsers(None, self)
size = 0
for parser in parsers:
size = size + parser.get_bit_count()
return int(ceil(size / 8.0))
@Parser
def PayloadParser(text, index):
telemetry_parser = PayloadOBCTelemetryParser()
size = telemetry_parser.GetSize()
if index + size <= len(text):
part = text[index: index + size]
parser = BitArrayParser(telemetry_parser, part, telemetry_parser)
parser.parse()
return Value.success(index + size, telemetry_parser.storage)
else:
return Value.failure(index, 'Decode failed')
PayloadObcTelemetry = pid(0x36) >> PayloadParser
PayloadObcTelemetry >>= label_as('Payload Obc Telemetry')
|
agpl-3.0
|
conwayje/ase-python
|
ase/units.py
|
5
|
1828
|
from math import pi, sqrt
# Constants from Konrad Hinsen's PhysicalQuantities module (1986 CODATA):
_c = 299792458. # speed of light, m/s
_mu0 = 4.e-7 * pi # permeability of vacuum
_eps0 = 1 / _mu0 / _c**2 # permittivity of vacuum
_Grav = 6.67259e-11 # gravitational constant
_hplanck = 6.6260755e-34 # Planck constant, J s
_hbar = _hplanck / (2 * pi) # Planck constant / 2pi, J s
_e = 1.60217733e-19 # elementary charge
_me = 9.1093897e-31 # electron mass
_mp = 1.6726231e-27 # proton mass
_Nav = 6.0221367e23 # Avogadro number
_k = 1.380658e-23 # Boltzmann constant, J/K
_amu = 1.6605402e-27 # atomic mass unit, kg
Ang = Angstrom = 1.0
nm = 10.0
Bohr = 4e10 * pi * _eps0 * _hbar**2 / _me / _e**2 # Bohr radius
eV = 1.0
Hartree = _me * _e**3 / 16 / pi**2 / _eps0**2 / _hbar**2
kJ = 1000.0 / _e
kcal = 4.184 * kJ
mol = _Nav
Rydberg = 0.5 * Hartree
Ry = Rydberg
Ha = Hartree
second = 1e10 * sqrt(_e / _amu)
fs = 1e-15 * second
kB = _k / _e # Boltzmann constant, eV/K
Pascal = (1 / _e) / 1e30 # J/m^3
GPa = 1e9 * Pascal
Debye = 1.0 / 1e11 / _e / _c
alpha = _e**2 / (4 * pi * _eps0) / _hbar / _c # fine structure constant
# Derived atomic units that have no assigned name:
_aut = _hbar / (alpha**2 * _me * _c**2) # atomic unit of time, s
_auv = _e**2 / _hbar / (4 * pi * _eps0) # atomic unit of velocity, m/s
_auf = alpha**3 * _me**2 * _c**3 / _hbar # atomic unit of force, N
_aup = alpha**5 * _me**4 * _c**5 / _hbar**3 # atomic unit of pressure, Pa
AUT = second * _aut
# SI units
m = 1e10 * Ang # metre
kg = 1. / _amu # kilogram
s = second # second
A = 1.0 / _e / s # ampere
# derived
J = kJ / 1000 # Joule = kg * m**2 / s**2
C = 1.0 / _e # Coulomb = A * s
del pi, sqrt
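# Minimal usage sketch (illustrative only, assuming the Angstrom/eV/amu-based
# unit system defined above): multiplying a number by a unit expresses it in
# the base units, so the printed values below are in Angstrom and eV.
if __name__ == '__main__':
    print(1 * nm)        # 10.0 (one nanometre in Angstrom)
    print(Hartree)       # ~27.211 (Hartree energy in eV)
    print(kcal / mol)    # ~0.0434 (kcal/mol in eV)
    print(300 * kB)      # ~0.02585 (thermal energy at 300 K, in eV)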
|
gpl-2.0
|
gunan/tensorflow
|
tensorflow/python/util/keyword_args.py
|
190
|
1657
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keyword args functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.util import decorator_utils
def keyword_args_only(func):
"""Decorator for marking specific function accepting keyword args only.
This decorator raises a `ValueError` if the input `func` is called with any
non-keyword args. This prevents the caller from providing the arguments in
wrong order.
Args:
func: The function or method needed to be decorated.
Returns:
Decorated function or method.
Raises:
ValueError: If `func` is not callable.
"""
decorator_utils.validate_callable(func, "keyword_args_only")
@functools.wraps(func)
def new_func(*args, **kwargs):
"""Keyword args only wrapper."""
if args:
raise ValueError(
"Must use keyword args to call {}.".format(func.__name__))
return func(**kwargs)
return new_func
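# Minimal usage sketch of the decorator above (the `add` function is
# hypothetical and added only for illustration):
if __name__ == "__main__":

  @keyword_args_only
  def add(a, b):
    return a + b

  print(add(a=1, b=2))  # -> 3
  try:
    add(1, 2)  # positional args are rejected by the wrapper
  except ValueError as e:
    print(e)  # -> Must use keyword args to call add.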
|
apache-2.0
|
glwu/python-for-android
|
python-modules/twisted/twisted/conch/test/test_transport.py
|
49
|
73648
|
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for ssh/transport.py and the classes therein.
"""
try:
import pyasn1
except ImportError:
pyasn1 = None
try:
import Crypto.Cipher.DES3
except ImportError:
Crypto = None
if pyasn1 is not None and Crypto is not None:
dependencySkip = None
from twisted.conch.ssh import transport, common, keys, factory
from twisted.conch.test import keydata
else:
if pyasn1 is None:
dependencySkip = "can't run w/o PyASN1"
elif Crypto is None:
dependencySkip = "can't run w/o PyCrypto"
class transport: # fictional modules to make classes work
class SSHTransportBase: pass
class SSHServerTransport: pass
class SSHClientTransport: pass
class factory:
class SSHFactory:
pass
from twisted.trial import unittest
from twisted.internet import defer
from twisted.protocols import loopback
from twisted.python import randbytes
from twisted.python.reflect import qual
from twisted.python.hashlib import md5, sha1
from twisted.conch.ssh import service
from twisted.test import proto_helpers
from twisted.conch.error import ConchError
class MockTransportBase(transport.SSHTransportBase):
"""
A base class for the client and server protocols. Stores the messages
it receives instead of ignoring them.
@ivar errors: a list of tuples: (reasonCode, description)
@ivar unimplementeds: a list of integers: sequence number
@ivar debugs: a list of tuples: (alwaysDisplay, message, lang)
@ivar ignoreds: a list of strings: ignored data
"""
def connectionMade(self):
"""
Set up instance variables.
"""
transport.SSHTransportBase.connectionMade(self)
self.errors = []
self.unimplementeds = []
self.debugs = []
self.ignoreds = []
self.gotUnsupportedVersion = None
def _unsupportedVersionReceived(self, remoteVersion):
"""
Intercept unsupported version call.
@type remoteVersion: C{str}
"""
self.gotUnsupportedVersion = remoteVersion
return transport.SSHTransportBase._unsupportedVersionReceived(self, remoteVersion)
def receiveError(self, reasonCode, description):
"""
Store any errors received.
@type reasonCode: C{int}
@type description: C{str}
"""
self.errors.append((reasonCode, description))
def receiveUnimplemented(self, seqnum):
"""
Store any unimplemented packet messages.
@type seqnum: C{int}
"""
self.unimplementeds.append(seqnum)
def receiveDebug(self, alwaysDisplay, message, lang):
"""
Store any debug messages.
@type alwaysDisplay: C{bool}
@type message: C{str}
@type lang: C{str}
"""
self.debugs.append((alwaysDisplay, message, lang))
def ssh_IGNORE(self, packet):
"""
Store any ignored data.
@type packet: C{str}
"""
self.ignoreds.append(packet)
class MockCipher(object):
"""
A mocked-up version of twisted.conch.ssh.transport.SSHCiphers.
"""
outCipType = 'test'
encBlockSize = 6
inCipType = 'test'
decBlockSize = 6
inMACType = 'test'
outMACType = 'test'
verifyDigestSize = 1
usedEncrypt = False
usedDecrypt = False
outMAC = (None, '', '', 1)
inMAC = (None, '', '', 1)
keys = ()
def encrypt(self, x):
"""
Called to encrypt the packet. Simply record that encryption was used
and return the data unchanged.
"""
self.usedEncrypt = True
if (len(x) % self.encBlockSize) != 0:
raise RuntimeError("length %i modulo blocksize %i is not 0: %i" %
(len(x), self.encBlockSize, len(x) % self.encBlockSize))
return x
def decrypt(self, x):
"""
Called to decrypt the packet. Simply record that decryption was used
and return the data unchanged.
"""
self.usedDecrypt = True
if (len(x) % self.decBlockSize) != 0:
raise RuntimeError("length %i modulo blocksize %i is not 0: %i" %
(len(x), self.decBlockSize, len(x) % self.decBlockSize))
return x
def makeMAC(self, outgoingPacketSequence, payload):
"""
Make a Message Authentication Code by sending the character value of
the outgoing packet.
"""
return chr(outgoingPacketSequence)
def verify(self, incomingPacketSequence, packet, macData):
"""
Verify the Message Authentication Code by checking that the packet
sequence number is the same.
"""
return chr(incomingPacketSequence) == macData
def setKeys(self, ivOut, keyOut, ivIn, keyIn, macIn, macOut):
"""
Record the keys.
"""
self.keys = (ivOut, keyOut, ivIn, keyIn, macIn, macOut)
class MockCompression:
"""
A mocked-up compression, based on the zlib interface. Instead of
compressing, it reverses the data and adds a 0x66 byte to the end.
"""
def compress(self, payload):
return payload[::-1] # reversed
def decompress(self, payload):
return payload[:-1][::-1]
def flush(self, kind):
return '\x66'
class MockService(service.SSHService):
"""
A mocked-up service, based on twisted.conch.ssh.service.SSHService.
@ivar started: True if this service has been started.
@ivar stopped: True if this service has been stopped.
"""
name = "MockService"
started = False
stopped = False
protocolMessages = {0xff: "MSG_TEST", 71: "MSG_fiction"}
def logPrefix(self):
return "MockService"
def serviceStarted(self):
"""
Record that the service was started.
"""
self.started = True
def serviceStopped(self):
"""
Record that the service was stopped.
"""
self.stopped = True
def ssh_TEST(self, packet):
"""
A message that this service responds to.
"""
self.transport.sendPacket(0xff, packet)
class MockFactory(factory.SSHFactory):
"""
A mocked-up factory based on twisted.conch.ssh.factory.SSHFactory.
"""
services = {
'ssh-userauth': MockService}
def getPublicKeys(self):
"""
Return the public keys that authenticate this server.
"""
return {
'ssh-rsa': keys.Key.fromString(keydata.publicRSA_openssh),
'ssh-dsa': keys.Key.fromString(keydata.publicDSA_openssh)}
def getPrivateKeys(self):
"""
Return the private keys that authenticate this server.
"""
return {
'ssh-rsa': keys.Key.fromString(keydata.privateRSA_openssh),
'ssh-dsa': keys.Key.fromString(keydata.privateDSA_openssh)}
def getPrimes(self):
"""
Return the Diffie-Hellman primes that can be used for the
diffie-hellman-group-exchange-sha1 key exchange.
"""
return {
1024: ((2, transport.DH_PRIME),),
2048: ((3, transport.DH_PRIME),),
4096: ((5, 7),)}
class MockOldFactoryPublicKeys(MockFactory):
"""
The old SSHFactory returned mappings from key names to strings from
getPublicKeys(). We return those here for testing.
"""
def getPublicKeys(self):
"""
We used to map key types to public key blobs as strings.
"""
keys = MockFactory.getPublicKeys(self)
for name, key in keys.items()[:]:
keys[name] = key.blob()
return keys
class MockOldFactoryPrivateKeys(MockFactory):
"""
The old SSHFactory returned mappings from key names to PyCrypto key
objects from getPrivateKeys(). We return those here for testing.
"""
def getPrivateKeys(self):
"""
We used to map key types to PyCrypto key objects.
"""
keys = MockFactory.getPrivateKeys(self)
for name, key in keys.items()[:]:
keys[name] = key.keyObject
return keys
class TransportTestCase(unittest.TestCase):
"""
Base class for transport test cases.
"""
klass = None
if Crypto is None:
skip = "cannot run w/o PyCrypto"
if pyasn1 is None:
skip = "cannot run w/o PyASN1"
def setUp(self):
self.transport = proto_helpers.StringTransport()
self.proto = self.klass()
self.packets = []
def secureRandom(len):
"""
Return a consistent entropy value
"""
return '\x99' * len
self.oldSecureRandom = randbytes.secureRandom
randbytes.secureRandom = secureRandom
def stubSendPacket(messageType, payload):
self.packets.append((messageType, payload))
self.proto.makeConnection(self.transport)
# we just let the kex packet go into the transport
self.proto.sendPacket = stubSendPacket
def tearDown(self):
randbytes.secureRandom = self.oldSecureRandom
self.oldSecureRandom = None
class BaseSSHTransportTestCase(TransportTestCase):
"""
Test TransportBase. It implements the non-server/client specific
parts of the SSH transport protocol.
"""
klass = MockTransportBase
def test_sendVersion(self):
"""
Test that the first thing sent over the connection is the version
string.
"""
# the other setup was done in the setup method
self.assertEquals(self.transport.value().split('\r\n', 1)[0],
"SSH-2.0-Twisted")
def test_sendPacketPlain(self):
"""
Test that plain (unencrypted, uncompressed) packets are sent
correctly. The format is::
uint32 length (including type and padding length)
byte padding length
byte type
bytes[length-padding length-2] data
bytes[padding length] padding
"""
proto = MockTransportBase()
proto.makeConnection(self.transport)
self.transport.clear()
message = ord('A')
payload = 'BCDEFG'
proto.sendPacket(message, payload)
value = self.transport.value()
self.assertEquals(value, '\x00\x00\x00\x0c\x04ABCDEFG\x99\x99\x99\x99')
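# Worked decoding of the expected bytes asserted above, following the packet
# layout described in the docstring (the padding comes from the stubbed
# secureRandom that always returns '\x99'):
#   \x00\x00\x00\x0c  uint32 length = 12 (padding-length byte + type + data + padding)
#   \x04              padding length = 4
#   A                 message type (ord('A'))
#   BCDEFG            payload (6 bytes)
#   \x99\x99\x99\x99  4 bytes of padding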
def test_sendPacketEncrypted(self):
"""
Test that packets sent while encryption is enabled are sent
correctly. The whole packet should be encrypted.
"""
proto = MockTransportBase()
proto.makeConnection(self.transport)
proto.currentEncryptions = testCipher = MockCipher()
message = ord('A')
payload = 'BC'
self.transport.clear()
proto.sendPacket(message, payload)
self.assertTrue(testCipher.usedEncrypt)
value = self.transport.value()
self.assertEquals(value, '\x00\x00\x00\x08\x04ABC\x99\x99\x99\x99\x01')
def test_sendPacketCompressed(self):
"""
Test that packets sent while compression is enabled are sent
correctly. The packet type and data should be encrypted.
"""
proto = MockTransportBase()
proto.makeConnection(self.transport)
proto.outgoingCompression = MockCompression()
self.transport.clear()
proto.sendPacket(ord('A'), 'B')
value = self.transport.value()
self.assertEquals(
value,
'\x00\x00\x00\x0c\x08BA\x66\x99\x99\x99\x99\x99\x99\x99\x99')
def test_sendPacketBoth(self):
"""
Test that packets sent while compression and encryption are
enabled are sent correctly. The packet type and data should be
compressed and then the whole packet should be encrypted.
"""
proto = MockTransportBase()
proto.makeConnection(self.transport)
proto.currentEncryptions = testCipher = MockCipher()
proto.outgoingCompression = MockCompression()
message = ord('A')
payload = 'BC'
self.transport.clear()
proto.sendPacket(message, payload)
value = self.transport.value()
self.assertEquals(
value,
'\x00\x00\x00\x0e\x09CBA\x66\x99\x99\x99\x99\x99\x99\x99\x99\x99'
'\x01')
def test_getPacketPlain(self):
"""
Test that packets are retrieved correctly out of the buffer when
no encryption is enabled.
"""
proto = MockTransportBase()
proto.makeConnection(self.transport)
self.transport.clear()
proto.sendPacket(ord('A'), 'BC')
proto.buf = self.transport.value() + 'extra'
self.assertEquals(proto.getPacket(), 'ABC')
self.assertEquals(proto.buf, 'extra')
def test_getPacketEncrypted(self):
"""
Test that encrypted packets are retrieved correctly.
See test_sendPacketEncrypted.
"""
proto = MockTransportBase()
proto.sendKexInit = lambda: None # don't send packets
proto.makeConnection(self.transport)
self.transport.clear()
proto.currentEncryptions = testCipher = MockCipher()
proto.sendPacket(ord('A'), 'BCD')
value = self.transport.value()
proto.buf = value[:MockCipher.decBlockSize]
self.assertEquals(proto.getPacket(), None)
self.assertTrue(testCipher.usedDecrypt)
self.assertEquals(proto.first, '\x00\x00\x00\x0e\x09A')
proto.buf += value[MockCipher.decBlockSize:]
self.assertEquals(proto.getPacket(), 'ABCD')
self.assertEquals(proto.buf, '')
def test_getPacketCompressed(self):
"""
Test that compressed packets are retrieved correctly. See
test_sendPacketCompressed.
"""
proto = MockTransportBase()
proto.makeConnection(self.transport)
self.transport.clear()
proto.outgoingCompression = MockCompression()
proto.incomingCompression = proto.outgoingCompression
proto.sendPacket(ord('A'), 'BCD')
proto.buf = self.transport.value()
self.assertEquals(proto.getPacket(), 'ABCD')
def test_getPacketBoth(self):
"""
Test that compressed and encrypted packets are retrieved correctly.
See test_sendPacketBoth.
"""
proto = MockTransportBase()
proto.sendKexInit = lambda: None
proto.makeConnection(self.transport)
self.transport.clear()
proto.currentEncryptions = testCipher = MockCipher()
proto.outgoingCompression = MockCompression()
proto.incomingCompression = proto.outgoingCompression
proto.sendPacket(ord('A'), 'BCDEFG')
proto.buf = self.transport.value()
self.assertEquals(proto.getPacket(), 'ABCDEFG')
def test_ciphersAreValid(self):
"""
Test that all the supportedCiphers are valid.
"""
ciphers = transport.SSHCiphers('A', 'B', 'C', 'D')
iv = key = '\x00' * 16
for cipName in self.proto.supportedCiphers:
self.assertTrue(ciphers._getCipher(cipName, iv, key))
def test_sendKexInit(self):
"""
Test that the KEXINIT (key exchange initiation) message is sent
correctly. Payload::
bytes[16] cookie
string key exchange algorithms
string public key algorithms
string outgoing ciphers
string incoming ciphers
string outgoing MACs
string incoming MACs
string outgoing compressions
string incoming compressions
bool first packet follows
uint32 0
"""
value = self.transport.value().split('\r\n', 1)[1]
self.proto.buf = value
packet = self.proto.getPacket()
self.assertEquals(packet[0], chr(transport.MSG_KEXINIT))
self.assertEquals(packet[1:17], '\x99' * 16)
(kex, pubkeys, ciphers1, ciphers2, macs1, macs2, compressions1,
compressions2, languages1, languages2,
buf) = common.getNS(packet[17:], 10)
self.assertEquals(kex, ','.join(self.proto.supportedKeyExchanges))
self.assertEquals(pubkeys, ','.join(self.proto.supportedPublicKeys))
self.assertEquals(ciphers1, ','.join(self.proto.supportedCiphers))
self.assertEquals(ciphers2, ','.join(self.proto.supportedCiphers))
self.assertEquals(macs1, ','.join(self.proto.supportedMACs))
self.assertEquals(macs2, ','.join(self.proto.supportedMACs))
self.assertEquals(compressions1,
','.join(self.proto.supportedCompressions))
self.assertEquals(compressions2,
','.join(self.proto.supportedCompressions))
self.assertEquals(languages1, ','.join(self.proto.supportedLanguages))
self.assertEquals(languages2, ','.join(self.proto.supportedLanguages))
self.assertEquals(buf, '\x00' * 5)
def test_sendDebug(self):
"""
Test that debug messages are sent correctly. Payload::
bool always display
string debug message
string language
"""
self.proto.sendDebug("test", True, 'en')
self.assertEquals(
self.packets,
[(transport.MSG_DEBUG,
"\x01\x00\x00\x00\x04test\x00\x00\x00\x02en")])
def test_receiveDebug(self):
"""
Test that debug messages are received correctly. See test_sendDebug.
"""
self.proto.dispatchMessage(
transport.MSG_DEBUG,
'\x01\x00\x00\x00\x04test\x00\x00\x00\x02en')
self.assertEquals(self.proto.debugs, [(True, 'test', 'en')])
def test_sendIgnore(self):
"""
Test that ignored messages are sent correctly. Payload::
string ignored data
"""
self.proto.sendIgnore("test")
self.assertEquals(
self.packets, [(transport.MSG_IGNORE,
'\x00\x00\x00\x04test')])
def test_receiveIgnore(self):
"""
Test that ignored messages are received correctly. See
test_sendIgnore.
"""
self.proto.dispatchMessage(transport.MSG_IGNORE, 'test')
self.assertEquals(self.proto.ignoreds, ['test'])
def test_sendUnimplemented(self):
"""
Test that unimplemented messages are sent correctly. Payload::
uint32 sequence number
"""
self.proto.sendUnimplemented()
self.assertEquals(
self.packets, [(transport.MSG_UNIMPLEMENTED,
'\x00\x00\x00\x00')])
def test_receiveUnimplemented(self):
"""
Test that unimplemented messages are received correctly. See
test_sendUnimplemented.
"""
self.proto.dispatchMessage(transport.MSG_UNIMPLEMENTED,
'\x00\x00\x00\xff')
self.assertEquals(self.proto.unimplementeds, [255])
def test_sendDisconnect(self):
"""
Test that disconnection messages are sent correctly. Payload::
uint32 reason code
string reason description
string language
"""
disconnected = [False]
def stubLoseConnection():
disconnected[0] = True
self.transport.loseConnection = stubLoseConnection
self.proto.sendDisconnect(0xff, "test")
self.assertEquals(
self.packets,
[(transport.MSG_DISCONNECT,
"\x00\x00\x00\xff\x00\x00\x00\x04test\x00\x00\x00\x00")])
self.assertTrue(disconnected[0])
def test_receiveDisconnect(self):
"""
Test that disconnection messages are received correctly. See
test_sendDisconnect.
"""
disconnected = [False]
def stubLoseConnection():
disconnected[0] = True
self.transport.loseConnection = stubLoseConnection
self.proto.dispatchMessage(transport.MSG_DISCONNECT,
'\x00\x00\x00\xff\x00\x00\x00\x04test')
self.assertEquals(self.proto.errors, [(255, 'test')])
self.assertTrue(disconnected[0])
def test_dataReceived(self):
"""
Test that dataReceived parses packets and dispatches them to
ssh_* methods.
"""
kexInit = [False]
def stubKEXINIT(packet):
kexInit[0] = True
self.proto.ssh_KEXINIT = stubKEXINIT
self.proto.dataReceived(self.transport.value())
self.assertTrue(self.proto.gotVersion)
self.assertEquals(self.proto.ourVersionString,
self.proto.otherVersionString)
self.assertTrue(kexInit[0])
def test_service(self):
"""
Test that the transport can set the running service and dispatches
packets to the service's packetReceived method.
"""
service = MockService()
self.proto.setService(service)
self.assertEquals(self.proto.service, service)
self.assertTrue(service.started)
self.proto.dispatchMessage(0xff, "test")
self.assertEquals(self.packets, [(0xff, "test")])
service2 = MockService()
self.proto.setService(service2)
self.assertTrue(service2.started)
self.assertTrue(service.stopped)
self.proto.connectionLost(None)
self.assertTrue(service2.stopped)
def test_avatar(self):
"""
Test that the transport notifies the avatar of disconnections.
"""
disconnected = [False]
def logout():
disconnected[0] = True
self.proto.logoutFunction = logout
self.proto.avatar = True
self.proto.connectionLost(None)
self.assertTrue(disconnected[0])
def test_isEncrypted(self):
"""
Test that the transport accurately reflects its encrypted status.
"""
self.assertFalse(self.proto.isEncrypted('in'))
self.assertFalse(self.proto.isEncrypted('out'))
self.assertFalse(self.proto.isEncrypted('both'))
self.proto.currentEncryptions = MockCipher()
self.assertTrue(self.proto.isEncrypted('in'))
self.assertTrue(self.proto.isEncrypted('out'))
self.assertTrue(self.proto.isEncrypted('both'))
self.proto.currentEncryptions = transport.SSHCiphers('none', 'none',
'none', 'none')
self.assertFalse(self.proto.isEncrypted('in'))
self.assertFalse(self.proto.isEncrypted('out'))
self.assertFalse(self.proto.isEncrypted('both'))
self.assertRaises(TypeError, self.proto.isEncrypted, 'bad')
def test_isVerified(self):
"""
Test that the transport accurately reflects its verified status.
"""
self.assertFalse(self.proto.isVerified('in'))
self.assertFalse(self.proto.isVerified('out'))
self.assertFalse(self.proto.isVerified('both'))
self.proto.currentEncryptions = MockCipher()
self.assertTrue(self.proto.isVerified('in'))
self.assertTrue(self.proto.isVerified('out'))
self.assertTrue(self.proto.isVerified('both'))
self.proto.currentEncryptions = transport.SSHCiphers('none', 'none',
'none', 'none')
self.assertFalse(self.proto.isVerified('in'))
self.assertFalse(self.proto.isVerified('out'))
self.assertFalse(self.proto.isVerified('both'))
self.assertRaises(TypeError, self.proto.isVerified, 'bad')
def test_loseConnection(self):
"""
Test that loseConnection sends a disconnect message and closes the
connection.
"""
disconnected = [False]
def stubLoseConnection():
disconnected[0] = True
self.transport.loseConnection = stubLoseConnection
self.proto.loseConnection()
self.assertEquals(self.packets[0][0], transport.MSG_DISCONNECT)
self.assertEquals(self.packets[0][1][3],
chr(transport.DISCONNECT_CONNECTION_LOST))
def test_badVersion(self):
"""
Test that the transport disconnects when it receives a bad version.
"""
def testBad(version):
self.packets = []
self.proto.gotVersion = False
disconnected = [False]
def stubLoseConnection():
disconnected[0] = True
self.transport.loseConnection = stubLoseConnection
for c in version + '\r\n':
self.proto.dataReceived(c)
self.assertTrue(disconnected[0])
self.assertEquals(self.packets[0][0], transport.MSG_DISCONNECT)
self.assertEquals(
self.packets[0][1][3],
chr(transport.DISCONNECT_PROTOCOL_VERSION_NOT_SUPPORTED))
testBad('SSH-1.5-OpenSSH')
testBad('SSH-3.0-Twisted')
testBad('GET / HTTP/1.1')
def test_dataBeforeVersion(self):
"""
Test that the transport ignores data sent before the version string.
"""
proto = MockTransportBase()
proto.makeConnection(proto_helpers.StringTransport())
data = ("""here's some stuff beforehand
here's some other stuff
""" + proto.ourVersionString + "\r\n")
[proto.dataReceived(c) for c in data]
self.assertTrue(proto.gotVersion)
self.assertEquals(proto.otherVersionString, proto.ourVersionString)
def test_compatabilityVersion(self):
"""
Test that the transport treats the compatibility version (1.99)
as equivalent to version 2.0.
"""
proto = MockTransportBase()
proto.makeConnection(proto_helpers.StringTransport())
proto.dataReceived("SSH-1.99-OpenSSH\n")
self.assertTrue(proto.gotVersion)
self.assertEquals(proto.otherVersionString, "SSH-1.99-OpenSSH")
def test_supportedVersionsAreAllowed(self):
"""
If an unusual SSH version is received and is included in
C{supportedVersions}, an unsupported version error is not emitted.
"""
proto = MockTransportBase()
proto.supportedVersions = ("9.99", )
proto.makeConnection(proto_helpers.StringTransport())
proto.dataReceived("SSH-9.99-OpenSSH\n")
self.assertFalse(proto.gotUnsupportedVersion)
def test_unsupportedVersionsCallUnsupportedVersionReceived(self):
"""
If an unusual SSH version is received and is not included in
C{supportedVersions}, an unsupported version error is emitted.
"""
proto = MockTransportBase()
proto.supportedVersions = ("2.0", )
proto.makeConnection(proto_helpers.StringTransport())
proto.dataReceived("SSH-9.99-OpenSSH\n")
self.assertEquals("9.99", proto.gotUnsupportedVersion)
def test_badPackets(self):
"""
Test that the transport disconnects with an error when it receives
bad packets.
"""
def testBad(packet, error=transport.DISCONNECT_PROTOCOL_ERROR):
self.packets = []
self.proto.buf = packet
self.assertEquals(self.proto.getPacket(), None)
self.assertEquals(len(self.packets), 1)
self.assertEquals(self.packets[0][0], transport.MSG_DISCONNECT)
self.assertEquals(self.packets[0][1][3], chr(error))
testBad('\xff' * 8) # big packet
testBad('\x00\x00\x00\x05\x00BCDE') # length not modulo blocksize
oldEncryptions = self.proto.currentEncryptions
self.proto.currentEncryptions = MockCipher()
testBad('\x00\x00\x00\x08\x06AB123456', # bad MAC
transport.DISCONNECT_MAC_ERROR)
self.proto.currentEncryptions.decrypt = lambda x: x[:-1]
testBad('\x00\x00\x00\x08\x06BCDEFGHIJK') # bad decryption
self.proto.currentEncryptions = oldEncryptions
self.proto.incomingCompression = MockCompression()
def stubDecompress(payload):
raise Exception('bad compression')
self.proto.incomingCompression.decompress = stubDecompress
testBad('\x00\x00\x00\x04\x00BCDE', # bad decompression
transport.DISCONNECT_COMPRESSION_ERROR)
self.flushLoggedErrors()
def test_unimplementedPackets(self):
"""
Test that unimplemented packet types cause MSG_UNIMPLEMENTED packets
to be sent.
"""
seqnum = self.proto.incomingPacketSequence
def checkUnimplemented(seqnum=seqnum):
self.assertEquals(self.packets[0][0],
transport.MSG_UNIMPLEMENTED)
self.assertEquals(self.packets[0][1][3], chr(seqnum))
self.proto.packets = []
seqnum += 1
self.proto.dispatchMessage(40, '')
checkUnimplemented()
transport.messages[41] = 'MSG_fiction'
self.proto.dispatchMessage(41, '')
checkUnimplemented()
self.proto.dispatchMessage(60, '')
checkUnimplemented()
self.proto.setService(MockService())
self.proto.dispatchMessage(70, '')
checkUnimplemented()
self.proto.dispatchMessage(71, '')
checkUnimplemented()
def test_getKey(self):
"""
Test that _getKey generates the correct keys.
"""
self.proto.sessionID = 'EF'
k1 = sha1('AB' + 'CD' + 'K' + self.proto.sessionID).digest()
k2 = sha1('ABCD' + k1).digest()
self.assertEquals(self.proto._getKey('K', 'AB', 'CD'), k1 + k2)
def test_multipleClasses(self):
"""
Test that multiple instances have distinct states.
"""
proto = self.proto
proto.dataReceived(self.transport.value())
proto.currentEncryptions = MockCipher()
proto.outgoingCompression = MockCompression()
proto.incomingCompression = MockCompression()
proto.setService(MockService())
proto2 = MockTransportBase()
proto2.makeConnection(proto_helpers.StringTransport())
proto2.sendIgnore('')
self.failIfEquals(proto.gotVersion, proto2.gotVersion)
self.failIfEquals(proto.transport, proto2.transport)
self.failIfEquals(proto.outgoingPacketSequence,
proto2.outgoingPacketSequence)
self.failIfEquals(proto.incomingPacketSequence,
proto2.incomingPacketSequence)
self.failIfEquals(proto.currentEncryptions,
proto2.currentEncryptions)
self.failIfEquals(proto.service, proto2.service)
class ServerAndClientSSHTransportBaseCase:
"""
Tests that need to be run on both the server and the client.
"""
def checkDisconnected(self, kind=None):
"""
Helper function to check if the transport disconnected.
"""
if kind is None:
kind = transport.DISCONNECT_PROTOCOL_ERROR
self.assertEquals(self.packets[-1][0], transport.MSG_DISCONNECT)
self.assertEquals(self.packets[-1][1][3], chr(kind))
def connectModifiedProtocol(self, protoModification,
kind=None):
"""
Helper function to connect a modified protocol to the test protocol
and test for disconnection.
"""
if kind is None:
kind = transport.DISCONNECT_KEY_EXCHANGE_FAILED
proto2 = self.klass()
protoModification(proto2)
proto2.makeConnection(proto_helpers.StringTransport())
self.proto.dataReceived(proto2.transport.value())
if kind:
self.checkDisconnected(kind)
return proto2
def test_disconnectIfCantMatchKex(self):
"""
Test that the transport disconnects if it can't match the key
exchange
"""
def blankKeyExchanges(proto2):
proto2.supportedKeyExchanges = []
self.connectModifiedProtocol(blankKeyExchanges)
def test_disconnectIfCantMatchKeyAlg(self):
"""
Like test_disconnectIfCantMatchKex, but for the key algorithm.
"""
def blankPublicKeys(proto2):
proto2.supportedPublicKeys = []
self.connectModifiedProtocol(blankPublicKeys)
def test_disconnectIfCantMatchCompression(self):
"""
Like test_disconnectIfCantMatchKex, but for the compression.
"""
def blankCompressions(proto2):
proto2.supportedCompressions = []
self.connectModifiedProtocol(blankCompressions)
def test_disconnectIfCantMatchCipher(self):
"""
Like test_disconnectIfCantMatchKex, but for the encryption.
"""
def blankCiphers(proto2):
proto2.supportedCiphers = []
self.connectModifiedProtocol(blankCiphers)
def test_disconnectIfCantMatchMAC(self):
"""
Like test_disconnectIfCantMatchKex, but for the MAC.
"""
def blankMACs(proto2):
proto2.supportedMACs = []
self.connectModifiedProtocol(blankMACs)
class ServerSSHTransportTestCase(ServerAndClientSSHTransportBaseCase,
TransportTestCase):
"""
Tests for the SSHServerTransport.
"""
klass = transport.SSHServerTransport
def setUp(self):
TransportTestCase.setUp(self)
self.proto.factory = MockFactory()
self.proto.factory.startFactory()
def tearDown(self):
TransportTestCase.tearDown(self)
self.proto.factory.stopFactory()
del self.proto.factory
def test_KEXINIT(self):
"""
Test that receiving a KEXINIT packet sets up the correct values on the
server.
"""
self.proto.dataReceived( 'SSH-2.0-Twisted\r\n\x00\x00\x01\xd4\t\x14'
'\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99'
'\x99\x00\x00\x00=diffie-hellman-group1-sha1,diffie-hellman-g'
'roup-exchange-sha1\x00\x00\x00\x0fssh-dss,ssh-rsa\x00\x00\x00'
'\x85aes128-ctr,aes128-cbc,aes192-ctr,aes192-cbc,aes256-ctr,ae'
's256-cbc,cast128-ctr,cast128-cbc,blowfish-ctr,blowfish-cbc,3d'
'es-ctr,3des-cbc\x00\x00\x00\x85aes128-ctr,aes128-cbc,aes192-c'
'tr,aes192-cbc,aes256-ctr,aes256-cbc,cast128-ctr,cast128-cbc,b'
'lowfish-ctr,blowfish-cbc,3des-ctr,3des-cbc\x00\x00\x00\x12hma'
'c-md5,hmac-sha1\x00\x00\x00\x12hmac-md5,hmac-sha1\x00\x00\x00'
'\tnone,zlib\x00\x00\x00\tnone,zlib\x00\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x00\x00\x99\x99\x99\x99\x99\x99\x99\x99'
'\x99')
self.assertEquals(self.proto.kexAlg,
'diffie-hellman-group1-sha1')
self.assertEquals(self.proto.keyAlg,
'ssh-dss')
self.assertEquals(self.proto.outgoingCompressionType,
'none')
self.assertEquals(self.proto.incomingCompressionType,
'none')
ne = self.proto.nextEncryptions
self.assertEquals(ne.outCipType, 'aes128-ctr')
self.assertEquals(ne.inCipType, 'aes128-ctr')
self.assertEquals(ne.outMACType, 'hmac-md5')
self.assertEquals(ne.inMACType, 'hmac-md5')
def test_ignoreGuessPacketKex(self):
"""
The client is allowed to send a guessed key exchange packet
after it sends the KEXINIT packet. However, if the key exchanges
do not match, that guess packet must be ignored. This tests that
the packet is ignored in the case of the key exchange method not
matching.
"""
kexInitPacket = '\x00' * 16 + (
''.join([common.NS(x) for x in
[','.join(y) for y in
[self.proto.supportedKeyExchanges[::-1],
self.proto.supportedPublicKeys,
self.proto.supportedCiphers,
self.proto.supportedCiphers,
self.proto.supportedMACs,
self.proto.supportedMACs,
self.proto.supportedCompressions,
self.proto.supportedCompressions,
self.proto.supportedLanguages,
self.proto.supportedLanguages]]])) + (
'\xff\x00\x00\x00\x00')
self.proto.ssh_KEXINIT(kexInitPacket)
self.assertTrue(self.proto.ignoreNextPacket)
self.proto.ssh_DEBUG("\x01\x00\x00\x00\x04test\x00\x00\x00\x00")
self.assertTrue(self.proto.ignoreNextPacket)
self.proto.ssh_KEX_DH_GEX_REQUEST_OLD('\x00\x00\x08\x00')
self.assertFalse(self.proto.ignoreNextPacket)
self.assertEquals(self.packets, [])
self.proto.ignoreNextPacket = True
self.proto.ssh_KEX_DH_GEX_REQUEST('\x00\x00\x08\x00' * 3)
self.assertFalse(self.proto.ignoreNextPacket)
self.assertEquals(self.packets, [])
def test_ignoreGuessPacketKey(self):
"""
Like test_ignoreGuessPacketKex, but for an incorrectly guessed
public key format.
"""
kexInitPacket = '\x00' * 16 + (
''.join([common.NS(x) for x in
[','.join(y) for y in
[self.proto.supportedKeyExchanges,
self.proto.supportedPublicKeys[::-1],
self.proto.supportedCiphers,
self.proto.supportedCiphers,
self.proto.supportedMACs,
self.proto.supportedMACs,
self.proto.supportedCompressions,
self.proto.supportedCompressions,
self.proto.supportedLanguages,
self.proto.supportedLanguages]]])) + (
'\xff\x00\x00\x00\x00')
self.proto.ssh_KEXINIT(kexInitPacket)
self.assertTrue(self.proto.ignoreNextPacket)
self.proto.ssh_DEBUG("\x01\x00\x00\x00\x04test\x00\x00\x00\x00")
self.assertTrue(self.proto.ignoreNextPacket)
self.proto.ssh_KEX_DH_GEX_REQUEST_OLD('\x00\x00\x08\x00')
self.assertFalse(self.proto.ignoreNextPacket)
self.assertEquals(self.packets, [])
self.proto.ignoreNextPacket = True
self.proto.ssh_KEX_DH_GEX_REQUEST('\x00\x00\x08\x00' * 3)
self.assertFalse(self.proto.ignoreNextPacket)
self.assertEquals(self.packets, [])
def test_KEXDH_INIT(self):
"""
Test that the KEXDH_INIT packet causes the server to send a
KEXDH_REPLY with the server's public key and a signature.
"""
self.proto.supportedKeyExchanges = ['diffie-hellman-group1-sha1']
self.proto.supportedPublicKeys = ['ssh-rsa']
self.proto.dataReceived(self.transport.value())
e = pow(transport.DH_GENERATOR, 5000,
transport.DH_PRIME)
self.proto.ssh_KEX_DH_GEX_REQUEST_OLD(common.MP(e))
y = common.getMP('\x00\x00\x00\x40' + '\x99' * 64)[0]
f = common._MPpow(transport.DH_GENERATOR, y, transport.DH_PRIME)
sharedSecret = common._MPpow(e, y, transport.DH_PRIME)
h = sha1()
h.update(common.NS(self.proto.ourVersionString) * 2)
h.update(common.NS(self.proto.ourKexInitPayload) * 2)
h.update(common.NS(self.proto.factory.publicKeys['ssh-rsa'].blob()))
h.update(common.MP(e))
h.update(f)
h.update(sharedSecret)
exchangeHash = h.digest()
signature = self.proto.factory.privateKeys['ssh-rsa'].sign(
exchangeHash)
self.assertEquals(
self.packets,
[(transport.MSG_KEXDH_REPLY,
common.NS(self.proto.factory.publicKeys['ssh-rsa'].blob())
+ f + common.NS(signature)),
(transport.MSG_NEWKEYS, '')])
def test_KEX_DH_GEX_REQUEST_OLD(self):
"""
Test that the KEX_DH_GEX_REQUEST_OLD message causes the server
to reply with a KEX_DH_GEX_GROUP message with the correct
Diffie-Hellman group.
"""
self.proto.supportedKeyExchanges = [
'diffie-hellman-group-exchange-sha1']
self.proto.supportedPublicKeys = ['ssh-rsa']
self.proto.dataReceived(self.transport.value())
self.proto.ssh_KEX_DH_GEX_REQUEST_OLD('\x00\x00\x04\x00')
self.assertEquals(
self.packets,
[(transport.MSG_KEX_DH_GEX_GROUP,
common.MP(transport.DH_PRIME) + '\x00\x00\x00\x01\x02')])
self.assertEquals(self.proto.g, 2)
self.assertEquals(self.proto.p, transport.DH_PRIME)
def test_KEX_DH_GEX_REQUEST_OLD_badKexAlg(self):
"""
Test that if the server receives a KEX_DH_GEX_REQUEST_OLD message
and the key exchange algorithm is not 'diffie-hellman-group1-sha1' or
'diffie-hellman-group-exchange-sha1', we raise a ConchError.
"""
self.proto.kexAlg = None
self.assertRaises(ConchError, self.proto.ssh_KEX_DH_GEX_REQUEST_OLD,
None)
def test_KEX_DH_GEX_REQUEST(self):
"""
Test that the KEX_DH_GEX_REQUEST message causes the server to reply
with a KEX_DH_GEX_GROUP message with the correct Diffie-Hellman
group.
"""
self.proto.supportedKeyExchanges = [
'diffie-hellman-group-exchange-sha1']
self.proto.supportedPublicKeys = ['ssh-rsa']
self.proto.dataReceived(self.transport.value())
self.proto.ssh_KEX_DH_GEX_REQUEST('\x00\x00\x04\x00\x00\x00\x08\x00' +
'\x00\x00\x0c\x00')
self.assertEquals(
self.packets,
[(transport.MSG_KEX_DH_GEX_GROUP,
common.MP(transport.DH_PRIME) + '\x00\x00\x00\x01\x03')])
self.assertEquals(self.proto.g, 3)
self.assertEquals(self.proto.p, transport.DH_PRIME)
def test_KEX_DH_GEX_INIT_after_REQUEST(self):
"""
Test that the KEX_DH_GEX_INIT message after the client sends
KEX_DH_GEX_REQUEST causes the server to send a KEX_DH_GEX_REPLY message
with a public key and signature.
"""
self.test_KEX_DH_GEX_REQUEST()
e = pow(self.proto.g, 3, self.proto.p)
y = common.getMP('\x00\x00\x00\x80' + '\x99' * 128)[0]
f = common._MPpow(self.proto.g, y, self.proto.p)
sharedSecret = common._MPpow(e, y, self.proto.p)
h = sha1()
h.update(common.NS(self.proto.ourVersionString) * 2)
h.update(common.NS(self.proto.ourKexInitPayload) * 2)
h.update(common.NS(self.proto.factory.publicKeys['ssh-rsa'].blob()))
h.update('\x00\x00\x04\x00\x00\x00\x08\x00\x00\x00\x0c\x00')
h.update(common.MP(self.proto.p))
h.update(common.MP(self.proto.g))
h.update(common.MP(e))
h.update(f)
h.update(sharedSecret)
exchangeHash = h.digest()
self.proto.ssh_KEX_DH_GEX_INIT(common.MP(e))
self.assertEquals(
self.packets[1],
(transport.MSG_KEX_DH_GEX_REPLY,
common.NS(self.proto.factory.publicKeys['ssh-rsa'].blob()) +
f + common.NS(self.proto.factory.privateKeys['ssh-rsa'].sign(
exchangeHash))))
def test_KEX_DH_GEX_INIT_after_REQUEST_OLD(self):
"""
Test that the KEX_DH_GEX_INIT message after the client sends
KEX_DH_GEX_REQUEST_OLD causes the server to send a KEX_DH_GEX_REPLY
message with a public key and signature.
"""
self.test_KEX_DH_GEX_REQUEST_OLD()
e = pow(self.proto.g, 3, self.proto.p)
y = common.getMP('\x00\x00\x00\x80' + '\x99' * 128)[0]
f = common._MPpow(self.proto.g, y, self.proto.p)
sharedSecret = common._MPpow(e, y, self.proto.p)
h = sha1()
h.update(common.NS(self.proto.ourVersionString) * 2)
h.update(common.NS(self.proto.ourKexInitPayload) * 2)
h.update(common.NS(self.proto.factory.publicKeys['ssh-rsa'].blob()))
h.update('\x00\x00\x04\x00')
h.update(common.MP(self.proto.p))
h.update(common.MP(self.proto.g))
h.update(common.MP(e))
h.update(f)
h.update(sharedSecret)
exchangeHash = h.digest()
self.proto.ssh_KEX_DH_GEX_INIT(common.MP(e))
self.assertEquals(
self.packets[1:],
[(transport.MSG_KEX_DH_GEX_REPLY,
common.NS(self.proto.factory.publicKeys['ssh-rsa'].blob()) +
f + common.NS(self.proto.factory.privateKeys['ssh-rsa'].sign(
exchangeHash))),
(transport.MSG_NEWKEYS, '')])
def test_keySetup(self):
"""
Test that _keySetup sets up the next encryption keys.
"""
self.proto.nextEncryptions = MockCipher()
self.proto._keySetup('AB', 'CD')
self.assertEquals(self.proto.sessionID, 'CD')
self.proto._keySetup('AB', 'EF')
self.assertEquals(self.proto.sessionID, 'CD')
self.assertEquals(self.packets[-1], (transport.MSG_NEWKEYS, ''))
newKeys = [self.proto._getKey(c, 'AB', 'EF') for c in 'ABCDEF']
self.assertEquals(
self.proto.nextEncryptions.keys,
(newKeys[1], newKeys[3], newKeys[0], newKeys[2], newKeys[5],
newKeys[4]))
def test_NEWKEYS(self):
"""
Test that NEWKEYS transitions the keys in nextEncryptions to
currentEncryptions.
"""
self.test_KEXINIT()
self.proto.nextEncryptions = transport.SSHCiphers('none', 'none',
'none', 'none')
self.proto.ssh_NEWKEYS('')
self.assertIdentical(self.proto.currentEncryptions,
self.proto.nextEncryptions)
self.assertIdentical(self.proto.outgoingCompression, None)
self.assertIdentical(self.proto.incomingCompression, None)
self.proto.outgoingCompressionType = 'zlib'
self.proto.ssh_NEWKEYS('')
self.failIfIdentical(self.proto.outgoingCompression, None)
self.proto.incomingCompressionType = 'zlib'
self.proto.ssh_NEWKEYS('')
self.failIfIdentical(self.proto.incomingCompression, None)
def test_SERVICE_REQUEST(self):
"""
Test that the SERVICE_REQUEST message requests and starts a
service.
"""
self.proto.ssh_SERVICE_REQUEST(common.NS('ssh-userauth'))
self.assertEquals(self.packets, [(transport.MSG_SERVICE_ACCEPT,
common.NS('ssh-userauth'))])
self.assertEquals(self.proto.service.name, 'MockService')
def test_disconnectNEWKEYSData(self):
"""
Test that NEWKEYS disconnects if it receives data.
"""
self.proto.ssh_NEWKEYS("bad packet")
self.checkDisconnected()
def test_disconnectSERVICE_REQUESTBadService(self):
"""
Test that SERVICE_REQUESTS disconnects if an unknown service is
requested.
"""
self.proto.ssh_SERVICE_REQUEST(common.NS('no service'))
self.checkDisconnected(transport.DISCONNECT_SERVICE_NOT_AVAILABLE)
class ClientSSHTransportTestCase(ServerAndClientSSHTransportBaseCase,
TransportTestCase):
"""
Tests for SSHClientTransport.
"""
klass = transport.SSHClientTransport
def test_KEXINIT(self):
"""
Test that receiving a KEXINIT packet sets up the correct values on the
client. The way algorithms are picked is that the first item in the
client's list that is also in the server's list is chosen.
"""
self.proto.dataReceived( 'SSH-2.0-Twisted\r\n\x00\x00\x01\xd4\t\x14'
'\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99'
'\x99\x00\x00\x00=diffie-hellman-group1-sha1,diffie-hellman-g'
'roup-exchange-sha1\x00\x00\x00\x0fssh-dss,ssh-rsa\x00\x00\x00'
'\x85aes128-ctr,aes128-cbc,aes192-ctr,aes192-cbc,aes256-ctr,ae'
's256-cbc,cast128-ctr,cast128-cbc,blowfish-ctr,blowfish-cbc,3d'
'es-ctr,3des-cbc\x00\x00\x00\x85aes128-ctr,aes128-cbc,aes192-c'
'tr,aes192-cbc,aes256-ctr,aes256-cbc,cast128-ctr,cast128-cbc,b'
'lowfish-ctr,blowfish-cbc,3des-ctr,3des-cbc\x00\x00\x00\x12hma'
'c-md5,hmac-sha1\x00\x00\x00\x12hmac-md5,hmac-sha1\x00\x00\x00'
'\tzlib,none\x00\x00\x00\tzlib,none\x00\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x00\x00\x99\x99\x99\x99\x99\x99\x99\x99'
'\x99')
self.assertEquals(self.proto.kexAlg,
'diffie-hellman-group-exchange-sha1')
self.assertEquals(self.proto.keyAlg,
'ssh-rsa')
self.assertEquals(self.proto.outgoingCompressionType,
'none')
self.assertEquals(self.proto.incomingCompressionType,
'none')
ne = self.proto.nextEncryptions
self.assertEquals(ne.outCipType, 'aes256-ctr')
self.assertEquals(ne.inCipType, 'aes256-ctr')
self.assertEquals(ne.outMACType, 'hmac-sha1')
self.assertEquals(ne.inMACType, 'hmac-sha1')
def verifyHostKey(self, pubKey, fingerprint):
"""
Mock version of SSHClientTransport.verifyHostKey.
"""
self.calledVerifyHostKey = True
self.assertEquals(pubKey, self.blob)
self.assertEquals(fingerprint.replace(':', ''),
md5(pubKey).hexdigest())
return defer.succeed(True)
def setUp(self):
TransportTestCase.setUp(self)
self.blob = keys.Key.fromString(keydata.publicRSA_openssh).blob()
self.privObj = keys.Key.fromString(keydata.privateRSA_openssh)
self.calledVerifyHostKey = False
self.proto.verifyHostKey = self.verifyHostKey
def test_notImplementedClientMethods(self):
"""
verifyHostKey() should return a Deferred which fails with a
NotImplementedError exception. connectionSecure() should raise
NotImplementedError().
"""
self.assertRaises(NotImplementedError, self.klass().connectionSecure)
def _checkRaises(f):
f.trap(NotImplementedError)
d = self.klass().verifyHostKey(None, None)
return d.addCallback(self.fail).addErrback(_checkRaises)
def test_KEXINIT_groupexchange(self):
"""
Test that a KEXINIT packet with a group-exchange key exchange results
in a KEX_DH_GEX_REQUEST_OLD message.
"""
self.proto.supportedKeyExchanges = [
'diffie-hellman-group-exchange-sha1']
self.proto.dataReceived(self.transport.value())
self.assertEquals(self.packets, [(transport.MSG_KEX_DH_GEX_REQUEST_OLD,
'\x00\x00\x08\x00')])
def test_KEXINIT_group1(self):
"""
Like test_KEXINIT_groupexchange, but for the group-1 key exchange.
"""
self.proto.supportedKeyExchanges = ['diffie-hellman-group1-sha1']
self.proto.dataReceived(self.transport.value())
self.assertEquals(common.MP(self.proto.x)[5:], '\x99' * 64)
self.assertEquals(self.packets,
[(transport.MSG_KEXDH_INIT, self.proto.e)])
def test_KEXINIT_badKexAlg(self):
"""
Test that the client raises a ConchError if it receives a
KEXINIT message but doesn't have a key exchange algorithm that we
understand.
"""
self.proto.supportedKeyExchanges = ['diffie-hellman-group2-sha1']
data = self.transport.value().replace('group1', 'group2')
self.assertRaises(ConchError, self.proto.dataReceived, data)
def test_KEXDH_REPLY(self):
"""
Test that the KEXDH_REPLY message verifies the server.
"""
self.test_KEXINIT_group1()
sharedSecret = common._MPpow(transport.DH_GENERATOR,
self.proto.x, transport.DH_PRIME)
h = sha1()
h.update(common.NS(self.proto.ourVersionString) * 2)
h.update(common.NS(self.proto.ourKexInitPayload) * 2)
h.update(common.NS(self.blob))
h.update(self.proto.e)
h.update('\x00\x00\x00\x01\x02') # f
h.update(sharedSecret)
exchangeHash = h.digest()
def _cbTestKEXDH_REPLY(value):
self.assertIdentical(value, None)
self.assertEquals(self.calledVerifyHostKey, True)
self.assertEquals(self.proto.sessionID, exchangeHash)
signature = self.privObj.sign(exchangeHash)
d = self.proto.ssh_KEX_DH_GEX_GROUP(
(common.NS(self.blob) + '\x00\x00\x00\x01\x02' +
common.NS(signature)))
d.addCallback(_cbTestKEXDH_REPLY)
return d
def test_KEX_DH_GEX_GROUP(self):
"""
Test that the KEX_DH_GEX_GROUP message results in a
KEX_DH_GEX_INIT message with the client's Diffie-Hellman public key.
"""
self.test_KEXINIT_groupexchange()
self.proto.ssh_KEX_DH_GEX_GROUP(
'\x00\x00\x00\x01\x0f\x00\x00\x00\x01\x02')
self.assertEquals(self.proto.p, 15)
self.assertEquals(self.proto.g, 2)
self.assertEquals(common.MP(self.proto.x)[5:], '\x99' * 40)
self.assertEquals(self.proto.e,
common.MP(pow(2, self.proto.x, 15)))
self.assertEquals(self.packets[1:], [(transport.MSG_KEX_DH_GEX_INIT,
self.proto.e)])
def test_KEX_DH_GEX_REPLY(self):
"""
Test that the KEX_DH_GEX_REPLY message results in a verified
server.
"""
self.test_KEX_DH_GEX_GROUP()
sharedSecret = common._MPpow(3, self.proto.x, self.proto.p)
h = sha1()
h.update(common.NS(self.proto.ourVersionString) * 2)
h.update(common.NS(self.proto.ourKexInitPayload) * 2)
h.update(common.NS(self.blob))
h.update('\x00\x00\x08\x00\x00\x00\x00\x01\x0f\x00\x00\x00\x01\x02')
h.update(self.proto.e)
h.update('\x00\x00\x00\x01\x03') # f
h.update(sharedSecret)
exchangeHash = h.digest()
def _cbTestKEX_DH_GEX_REPLY(value):
self.assertIdentical(value, None)
self.assertEquals(self.calledVerifyHostKey, True)
self.assertEquals(self.proto.sessionID, exchangeHash)
signature = self.privObj.sign(exchangeHash)
d = self.proto.ssh_KEX_DH_GEX_REPLY(
common.NS(self.blob) +
'\x00\x00\x00\x01\x03' +
common.NS(signature))
d.addCallback(_cbTestKEX_DH_GEX_REPLY)
return d
def test_keySetup(self):
"""
Test that _keySetup sets up the next encryption keys.
"""
self.proto.nextEncryptions = MockCipher()
self.proto._keySetup('AB', 'CD')
self.assertEquals(self.proto.sessionID, 'CD')
self.proto._keySetup('AB', 'EF')
self.assertEquals(self.proto.sessionID, 'CD')
self.assertEquals(self.packets[-1], (transport.MSG_NEWKEYS, ''))
newKeys = [self.proto._getKey(c, 'AB', 'EF') for c in 'ABCDEF']
self.assertEquals(self.proto.nextEncryptions.keys,
(newKeys[0], newKeys[2], newKeys[1], newKeys[3],
newKeys[4], newKeys[5]))
def test_NEWKEYS(self):
"""
Test that NEWKEYS transitions the keys from nextEncryptions to
currentEncryptions.
"""
self.test_KEXINIT()
secure = [False]
def stubConnectionSecure():
secure[0] = True
self.proto.connectionSecure = stubConnectionSecure
self.proto.nextEncryptions = transport.SSHCiphers('none', 'none',
'none', 'none')
self.proto.ssh_NEWKEYS('')
self.failIfIdentical(self.proto.currentEncryptions,
self.proto.nextEncryptions)
self.proto.nextEncryptions = MockCipher()
self.proto._keySetup('AB', 'EF')
self.assertIdentical(self.proto.outgoingCompression, None)
self.assertIdentical(self.proto.incomingCompression, None)
self.assertIdentical(self.proto.currentEncryptions,
self.proto.nextEncryptions)
self.assertTrue(secure[0])
self.proto.outgoingCompressionType = 'zlib'
self.proto.ssh_NEWKEYS('')
self.failIfIdentical(self.proto.outgoingCompression, None)
self.proto.incomingCompressionType = 'zlib'
self.proto.ssh_NEWKEYS('')
self.failIfIdentical(self.proto.incomingCompression, None)
def test_SERVICE_ACCEPT(self):
"""
Test that the SERVICE_ACCEPT packet starts the requested service.
"""
self.proto.instance = MockService()
self.proto.ssh_SERVICE_ACCEPT('\x00\x00\x00\x0bMockService')
self.assertTrue(self.proto.instance.started)
def test_requestService(self):
"""
Test that requesting a service sends a SERVICE_REQUEST packet.
"""
self.proto.requestService(MockService())
self.assertEquals(self.packets, [(transport.MSG_SERVICE_REQUEST,
'\x00\x00\x00\x0bMockService')])
def test_disconnectKEXDH_REPLYBadSignature(self):
"""
Test that KEXDH_REPLY disconnects if the signature is bad.
"""
self.test_KEXDH_REPLY()
self.proto._continueKEXDH_REPLY(None, self.blob, 3, "bad signature")
self.checkDisconnected(transport.DISCONNECT_KEY_EXCHANGE_FAILED)
def test_disconnectGEX_REPLYBadSignature(self):
"""
Like test_disconnectKEXDH_REPLYBadSignature, but for DH_GEX_REPLY.
"""
self.test_KEX_DH_GEX_REPLY()
self.proto._continueGEX_REPLY(None, self.blob, 3, "bad signature")
self.checkDisconnected(transport.DISCONNECT_KEY_EXCHANGE_FAILED)
def test_disconnectNEWKEYSData(self):
"""
Test that NEWKEYS disconnects if it receives data.
"""
self.proto.ssh_NEWKEYS("bad packet")
self.checkDisconnected()
def test_disconnectSERVICE_ACCEPT(self):
"""
Test that SERVICE_ACCEPT disconnects if the accepted protocol is
different from the asked-for protocol.
"""
self.proto.instance = MockService()
self.proto.ssh_SERVICE_ACCEPT('\x00\x00\x00\x03bad')
self.checkDisconnected()
class SSHCiphersTestCase(unittest.TestCase):
"""
Tests for the SSHCiphers helper class.
"""
if Crypto is None:
skip = "cannot run w/o PyCrypto"
if pyasn1 is None:
skip = "cannot run w/o PyASN1"
def test_init(self):
"""
Test that the initializer sets up the SSHCiphers object.
"""
ciphers = transport.SSHCiphers('A', 'B', 'C', 'D')
self.assertEquals(ciphers.outCipType, 'A')
self.assertEquals(ciphers.inCipType, 'B')
self.assertEquals(ciphers.outMACType, 'C')
self.assertEquals(ciphers.inMACType, 'D')
def test_getCipher(self):
"""
Test that the _getCipher method returns the correct cipher.
"""
ciphers = transport.SSHCiphers('A', 'B', 'C', 'D')
iv = key = '\x00' * 16
for cipName, (modName, keySize, counter) in ciphers.cipherMap.items():
cip = ciphers._getCipher(cipName, iv, key)
if cipName == 'none':
self.assertIsInstance(cip, transport._DummyCipher)
else:
self.assertTrue(str(cip).startswith('<' + modName))
def test_getMAC(self):
"""
Test that the _getMAC method returns the correct MAC.
"""
ciphers = transport.SSHCiphers('A', 'B', 'C', 'D')
key = '\x00' * 64
for macName, mac in ciphers.macMap.items():
mod = ciphers._getMAC(macName, key)
if macName == 'none':
self.assertIdentical(mac, None)
else:
self.assertEquals(mod[0], mac)
self.assertEquals(mod[1],
Crypto.Cipher.XOR.new('\x36').encrypt(key))
self.assertEquals(mod[2],
Crypto.Cipher.XOR.new('\x5c').encrypt(key))
self.assertEquals(mod[3], len(mod[0]().digest()))
def test_setKeysCiphers(self):
"""
Test that setKeys sets up the ciphers.
"""
key = '\x00' * 64
cipherItems = transport.SSHCiphers.cipherMap.items()
for cipName, (modName, keySize, counter) in cipherItems:
encCipher = transport.SSHCiphers(cipName, 'none', 'none', 'none')
decCipher = transport.SSHCiphers('none', cipName, 'none', 'none')
cip = encCipher._getCipher(cipName, key, key)
bs = cip.block_size
encCipher.setKeys(key, key, '', '', '', '')
decCipher.setKeys('', '', key, key, '', '')
self.assertEquals(encCipher.encBlockSize, bs)
self.assertEquals(decCipher.decBlockSize, bs)
enc = cip.encrypt(key[:bs])
enc2 = cip.encrypt(key[:bs])
if counter:
self.failIfEquals(enc, enc2)
self.assertEquals(encCipher.encrypt(key[:bs]), enc)
self.assertEquals(encCipher.encrypt(key[:bs]), enc2)
self.assertEquals(decCipher.decrypt(enc), key[:bs])
self.assertEquals(decCipher.decrypt(enc2), key[:bs])
def test_setKeysMACs(self):
"""
Test that setKeys sets up the MACs.
"""
key = '\x00' * 64
for macName, mod in transport.SSHCiphers.macMap.items():
outMac = transport.SSHCiphers('none', 'none', macName, 'none')
inMac = transport.SSHCiphers('none', 'none', 'none', macName)
outMac.setKeys('', '', '', '', key, '')
inMac.setKeys('', '', '', '', '', key)
if mod:
ds = mod().digest_size
else:
ds = 0
self.assertEquals(inMac.verifyDigestSize, ds)
if mod:
mod, i, o, ds = outMac._getMAC(macName, key)
seqid = 0
data = key
packet = '\x00' * 4 + key
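# Recompute the MAC by hand with the textbook HMAC construction: an inner
# digest over (key XOR ipad) + packet, then an outer digest over
# (key XOR opad) + inner digest, where i and o are the padded keys returned
# by _getMAC above.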
if mod:
mac = mod(o + mod(i + packet).digest()).digest()
else:
mac = ''
self.assertEquals(outMac.makeMAC(seqid, data), mac)
self.assertTrue(inMac.verify(seqid, data, mac))
class CounterTestCase(unittest.TestCase):
"""
Tests for the _Counter helper class.
"""
if Crypto is None:
skip = "cannot run w/o PyCrypto"
if pyasn1 is None:
skip = "cannot run w/o PyASN1"
def test_init(self):
"""
Test that the counter is initialized correctly.
"""
counter = transport._Counter('\x00' * 8 + '\xff' * 8, 8)
self.assertEquals(counter.blockSize, 8)
self.assertEquals(counter.count.tostring(), '\x00' * 8)
def test_count(self):
"""
Test that the counter counts incrementally and wraps at the top.
"""
counter = transport._Counter('\x00', 1)
self.assertEquals(counter(), '\x01')
self.assertEquals(counter(), '\x02')
[counter() for i in range(252)]
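# Two explicit calls plus 252 discarded ones leave the counter at 0xfe, so the
# next call yields 0xff and the one after wraps around to 0x00.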
self.assertEquals(counter(), '\xff')
self.assertEquals(counter(), '\x00')
class TransportLoopbackTestCase(unittest.TestCase):
"""
Test the server transport and client transport against each other.
"""
if Crypto is None:
skip = "cannot run w/o PyCrypto"
if pyasn1 is None:
skip = "cannot run w/o PyASN1"
def _runClientServer(self, mod):
"""
Run an async client and server, modifying each using the mod function
provided. Returns a Deferred called back when both Protocols have
disconnected.
@type mod: C{func}
@rtype: C{defer.Deferred}
"""
factory = MockFactory()
server = transport.SSHServerTransport()
server.factory = factory
factory.startFactory()
server.errors = []
server.receiveError = lambda code, desc: server.errors.append((
code, desc))
client = transport.SSHClientTransport()
client.verifyHostKey = lambda x, y: defer.succeed(None)
client.errors = []
client.receiveError = lambda code, desc: client.errors.append((
code, desc))
client.connectionSecure = lambda: client.loseConnection()
server = mod(server)
client = mod(client)
def check(ignored, server, client):
name = repr([server.supportedCiphers[0],
server.supportedMACs[0],
server.supportedKeyExchanges[0],
server.supportedCompressions[0]])
self.assertEquals(client.errors, [])
self.assertEquals(server.errors, [(
transport.DISCONNECT_CONNECTION_LOST,
"user closed connection")])
if server.supportedCiphers[0] == 'none':
self.assertFalse(server.isEncrypted(), name)
self.assertFalse(client.isEncrypted(), name)
else:
self.assertTrue(server.isEncrypted(), name)
self.assertTrue(client.isEncrypted(), name)
if server.supportedMACs[0] == 'none':
self.assertFalse(server.isVerified(), name)
self.assertFalse(client.isVerified(), name)
else:
self.assertTrue(server.isVerified(), name)
self.assertTrue(client.isVerified(), name)
d = loopback.loopbackAsync(server, client)
d.addCallback(check, server, client)
return d
def test_ciphers(self):
"""
Test that the client and server play nicely together, in all
the various combinations of ciphers.
"""
deferreds = []
for cipher in transport.SSHTransportBase.supportedCiphers + ['none']:
def setCipher(proto):
proto.supportedCiphers = [cipher]
return proto
deferreds.append(self._runClientServer(setCipher))
return defer.DeferredList(deferreds, fireOnOneErrback=True)
def test_macs(self):
"""
Like test_ciphers, but for the various MACs.
"""
deferreds = []
for mac in transport.SSHTransportBase.supportedMACs + ['none']:
def setMAC(proto):
proto.supportedMACs = [mac]
return proto
deferreds.append(self._runClientServer(setMAC))
return defer.DeferredList(deferreds, fireOnOneErrback=True)
def test_keyexchanges(self):
"""
Like test_ciphers, but for the various key exchanges.
"""
deferreds = []
for kex in transport.SSHTransportBase.supportedKeyExchanges:
def setKeyExchange(proto):
proto.supportedKeyExchanges = [kex]
return proto
deferreds.append(self._runClientServer(setKeyExchange))
return defer.DeferredList(deferreds, fireOnOneErrback=True)
def test_compressions(self):
"""
Like test_ciphers, but for the various compressions.
"""
deferreds = []
for compression in transport.SSHTransportBase.supportedCompressions:
def setCompression(proto):
proto.supportedCompressions = [compression]
return proto
deferreds.append(self._runClientServer(setCompression))
return defer.DeferredList(deferreds, fireOnOneErrback=True)
class RandomNumberTestCase(unittest.TestCase):
"""
Tests for the random number generator L{_getRandomNumber} and private
key generator L{_generateX}.
"""
skip = dependencySkip
def test_usesSuppliedRandomFunction(self):
"""
L{_getRandomNumber} returns an integer constructed directly from the
bytes returned by the random byte generator passed to it.
"""
def random(bytes):
# The number of bytes requested will be the value of each byte
# we return.
return chr(bytes) * bytes
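# 32 bits means 4 bytes are requested, so random() returns chr(4) * 4 and the
# resulting integer is 0x04040404, i.e. 4 << 24 | 4 << 16 | 4 << 8 | 4.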
self.assertEquals(
transport._getRandomNumber(random, 32),
4 << 24 | 4 << 16 | 4 << 8 | 4)
def test_rejectsNonByteMultiples(self):
"""
L{_getRandomNumber} raises L{ValueError} if the number of bits
passed to L{_getRandomNumber} is not a multiple of 8.
"""
self.assertRaises(
ValueError,
transport._getRandomNumber, None, 9)
def test_excludesSmall(self):
"""
If the random byte generator passed to L{_generateX} produces bytes
which would result in 0 or 1 being returned, these bytes are
discarded and another attempt is made to produce a larger value.
"""
results = [chr(0), chr(1), chr(127)]
def random(bytes):
return results.pop(0) * bytes
self.assertEquals(
transport._generateX(random, 8),
127)
def test_excludesLarge(self):
"""
If the random byte generator passed to L{_generateX} produces bytes
which would result in C{(2 ** bits) - 1} being returned, these bytes
are discarded and another attempt is made to produce a smaller
value.
"""
results = [chr(255), chr(64)]
def random(bytes):
return results.pop(0) * bytes
self.assertEquals(
transport._generateX(random, 8),
64)
class OldFactoryTestCase(unittest.TestCase):
"""
The old C{SSHFactory.getPublicKeys}() returned mappings of key names to
strings of key blobs and mappings of key names to PyCrypto key objects from
C{SSHFactory.getPrivateKeys}() (they could also be specified with the
C{publicKeys} and C{privateKeys} attributes). This is no longer supported
by the C{SSHServerTransport}, so we warn the user if they create an old
factory.
"""
if Crypto is None:
skip = "cannot run w/o PyCrypto"
if pyasn1 is None:
skip = "cannot run w/o PyASN1"
def test_getPublicKeysWarning(self):
"""
If the return value of C{getPublicKeys}() isn't a mapping from key
names to C{Key} objects, then warn the user and convert the mapping.
"""
sshFactory = MockOldFactoryPublicKeys()
self.assertWarns(DeprecationWarning,
"Returning a mapping from strings to strings from"
" getPublicKeys()/publicKeys (in %s) is deprecated. Return "
"a mapping from strings to Key objects instead." %
(qual(MockOldFactoryPublicKeys),),
factory.__file__, sshFactory.startFactory)
self.assertEquals(sshFactory.publicKeys, MockFactory().getPublicKeys())
def test_getPrivateKeysWarning(self):
"""
If the return value of C{getPrivateKeys}() isn't a mapping from key
names to C{Key} objects, then warn the user and convert the mapping.
"""
sshFactory = MockOldFactoryPrivateKeys()
self.assertWarns(DeprecationWarning,
"Returning a mapping from strings to PyCrypto key objects from"
" getPrivateKeys()/privateKeys (in %s) is deprecated. Return"
" a mapping from strings to Key objects instead." %
(qual(MockOldFactoryPrivateKeys),),
factory.__file__, sshFactory.startFactory)
self.assertEquals(sshFactory.privateKeys,
MockFactory().getPrivateKeys())
def test_publicKeysWarning(self):
"""
If the value of the C{publicKeys} attribute isn't a mapping from key
names to C{Key} objects, then warn the user and convert the mapping.
"""
sshFactory = MockOldFactoryPublicKeys()
sshFactory.publicKeys = sshFactory.getPublicKeys()
self.assertWarns(DeprecationWarning,
"Returning a mapping from strings to strings from"
" getPublicKeys()/publicKeys (in %s) is deprecated. Return "
"a mapping from strings to Key objects instead." %
(qual(MockOldFactoryPublicKeys),),
factory.__file__, sshFactory.startFactory)
self.assertEquals(sshFactory.publicKeys, MockFactory().getPublicKeys())
def test_privateKeysWarning(self):
"""
If the return value of C{privateKeys} attribute isn't a mapping from
key names to C{Key} objects, then warn the user and convert the
mapping.
"""
sshFactory = MockOldFactoryPrivateKeys()
sshFactory.privateKeys = sshFactory.getPrivateKeys()
self.assertWarns(DeprecationWarning,
"Returning a mapping from strings to PyCrypto key objects from"
" getPrivateKeys()/privateKeys (in %s) is deprecated. Return"
" a mapping from strings to Key objects instead." %
(qual(MockOldFactoryPrivateKeys),),
factory.__file__, sshFactory.startFactory)
self.assertEquals(sshFactory.privateKeys,
MockFactory().getPrivateKeys())
|
apache-2.0
|
ThreeSixtyGiving/prototype-tools
|
scripts/genmodel.py
|
1
|
7875
|
import rdflib
from rdflib import URIRef, Literal
from rdflib import RDFS, OWL, XSD
import operator # Used in sorting
from sets import Set
# Defaults
defaultLanguage = 'en'
# Set up the graph
g=rdflib.Graph()
OPDS = rdflib.Namespace('http://joinedupdata.org/ontologies/philanthropy/')
g.namespace_manager.bind('opds', URIRef('http://joinedupdata.org/ontologies/philanthropy/'))
#Load the data
g.load('ontology/360Giving.v0.2.rdf')
def getName(uriRef):
return str(uriRef).rpartition("/")[2] # Get last part of the URL (assumes slash based URLs)
# Put together the basics of a field specification
def createFieldSpec(dataProperty):
fieldSpec = {}
fieldSpec['name'] = getName(dataProperty)
try:
fieldSpec['title'] = str(g.preferredLabel(dataProperty,defaultLanguage)[0][1])
except Exception: # fall back when the property has no preferred label
fieldSpec['title'] = "LABEL MISSING"
fieldSpec['description'] = str(g.value(dataProperty,RDFS.comment,default="-"))
fieldSpec['weight'] = float(g.value(dataProperty,OPDS.fieldWeight,default=5))
if ((dataProperty, RDFS.range, RDFS.Literal)) in g:
fieldSpec['values'] = 'Text'
elif ((dataProperty, RDFS.range, XSD.dateTime)) in g:
fieldSpec['values'] = 'DateTime'
elif ((dataProperty, RDFS.range, XSD.float)) in g:
fieldSpec['values'] = 'Number (float)'
elif ((dataProperty, RDFS.range, XSD.integer)) in g:
fieldSpec['values'] = 'Number (integer)'
elif ((dataProperty, RDFS.range, XSD.boolean)) in g:
fieldSpec['values'] = 'Yes/No'
else:
fieldSpec['entity'] = g.value(dataProperty, RDFS.range)
fieldSpec['values'] = 'Entity'
return fieldSpec
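# Illustrative shape of the dict createFieldSpec returns (the values here are
# made up for illustration, not taken from the ontology):
# {'name': 'amountAwarded', 'title': 'Amount Awarded', 'description': '...',
#  'weight': 2.0, 'values': 'Number (float)'}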
def getTopClass(entity):
for subjectOrParent in g.transitive_objects(entity,RDFS.subClassOf):
if(subjectOrParent == OWL.Thing): # Updated to OWL.Thing in updated model (was OPDS.Thing)
break #When we get to 'Thing' we want to go no further.
tableName = g.preferredLabel(subjectOrParent,defaultLanguage)
tableEntity = subjectOrParent
tableName = str(tableName[0][1]) # preferredLabel returns a list of (property, label) tuples; take the first label
return (tableEntity, tableName)
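# getTopClass walks up rdfs:subClassOf until it reaches owl:Thing and returns
# the last class below it (plus its preferred label), which is used as the
# flattened table name for everything in that branch of the hierarchy.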
#Loop through all the available defaultSubjects
#
def generateModel(subjectEntity, depth, output = {},rollUps = True):
subjectEntity = URIRef(subjectEntity)
topTable = getTopClass(subjectEntity)
tableName = topTable[1]
## 2. Set up a new table for us
if tableName in output.keys():
pass
else:
output[tableName] = {}
output[tableName]['_meta'] = {"description":g.value(topTable[0],RDFS.comment,default=""),"types":[],"relationships":[],"related":{}}
## Add the current subject entity to the types this table can take
if getName(subjectEntity) not in output[tableName]['_meta']['types']:
output[tableName]['_meta']['types'].append(getName(subjectEntity))
## Identify any relationships this table can stand in
for relationship in g.subjects(predicate=RDFS.range,object=subjectEntity):
if getName(relationship) not in output[tableName]['_meta']['relationships']:
output[tableName]['_meta']['relationships'].append(getName(relationship))
# 3. Work through all the classes up the tree to be sure we're able to express all the properties we need to
for subjectOrParent in g.transitive_objects(subjectEntity,RDFS.subClassOf):
if(depth > 1):
for relationship in g.subjects(predicate=RDFS.range,object=subjectOrParent):
for domain in g.objects(relationship,RDFS.domain):
topTable = getTopClass(domain)[1]
if(not(topTable == tableName)): # Handle for recursive relationships (e.g. Related Activity)
fieldSpec = {}
fieldSpec['name'] = topTable + ".id"
fieldSpec['title'] = topTable + " ID"
fieldSpec['values'] = "Reference"
fieldSpec['description'] = "The identifier of a related " + topTable + " (optional)"
fieldSpec['weight'] = 0.5
output[tableName][fieldSpec['name']] = fieldSpec
fieldSpec = {}
# fieldSpec['name'] = tableName.lower()+"Type"
# fieldSpec['title'] = tableName +" Type"
fieldSpec['name'] = "relationshipType"
fieldSpec['title'] = "Relationship Type"
fieldSpec['values'] = "Reference"
fieldSpec['description'] = "One of: " + ", ".join(output[tableName]['_meta']['relationships'])
fieldSpec['weight'] = float(0.55)
output[tableName][tableName.lower()+"Type"] = fieldSpec
for dataProperty in g.subjects(predicate=RDFS.domain,object=subjectOrParent):
#Set up the field specification
fieldSpec = createFieldSpec(dataProperty)
#If we're dealing with an entity process that here:
if(not(fieldSpec['values'] == 'Entity')):
output[tableName][fieldSpec['name']] = fieldSpec
else:
# Roll Ups
if(rollUps == True):
for rollUp in g.objects(subject=dataProperty,predicate=OPDS.rollUp):
fieldSpec = createFieldSpec(rollUp) # Set up a field specification, then overwrite what we need to
fieldSpec['name'] = getName(dataProperty) + "." + fieldSpec['name']
fieldSpec['title'] = str(g.preferredLabel(dataProperty,"en")[0][1]) + ":" + fieldSpec['title']
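# The weight below is built by string concatenation: the parent property's
# weight (e.g. "2.0") is joined with the digits of the rolled-up field's own
# weight, so rolled-up columns sort immediately after their parent field.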
fieldSpec['weight'] = float(str(float(g.value(dataProperty,OPDS.fieldWeight,default=0.0))) + str(fieldSpec['weight']).replace('.',''))
output[tableName][fieldSpec['name']] = fieldSpec
# Total Ups
for totalUp in g.objects(subject=dataProperty,predicate=OPDS.totalUp):
fieldSpec = createFieldSpec(dataProperty) # Set up a field specification, then overwrite what we need to
fieldSpec['name'] = "sum("+fieldSpec['name']+ ")"
fieldSpec['title'] = "Total " + fieldSpec['title'] #Needs I8LN
fieldSpec['weight'] = float(str(float(g.value(dataProperty,OPDS.fieldWeight,default=0.0))) + str(fieldSpec['weight']).replace('.',''))
output[tableName][fieldSpec['name']] = fieldSpec
# For related objects
for subObject in g.objects(subject=dataProperty,predicate=RDFS.range):
#Add some extra information for the documentation output
output[tableName]['_meta']['related'][getName(dataProperty)] = {"relationshipName":getName(dataProperty), "objectName":getName(subObject), "topObject":getTopClass(subObject)[1], "description":g.value(dataProperty,RDFS.comment,default="-"), "title":g.value(dataProperty,RDFS.label,default="-")}
if depth < 2:
subObjectModel = generateModel(subObject,depth+1,output,rollUps)
subObjectType = subObjectModel.keys()[0]
else:
pass
# Sort
#output[tableName].sort(key=lambda field: field['weight'])
return output
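# Minimal sketch of how generateModel might be driven (the entity URI and the
# consuming loop are hypothetical; the real script presumably iterates over the
# ontology's default subjects as noted above):
# model = generateModel(OPDS.Grant, 1)
# for tableName, fields in sorted(model.items()):
#     print(tableName, [f for f in fields if f != '_meta'])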
|
mit
|
360youlun/django-cms
|
cms/migrations/0016_author_copy.py
|
525
|
20033
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
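# These labels let the frozen ORM definitions below work with a custom
# AUTH_USER_MODEL: user_model_label keys the user model's entry in the models
# dict, while user_orm_label is interpolated into the "orm['...']" foreign key
# references.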
class Migration(SchemaMigration):
def forwards(self, orm):
# Dummy migration
pass
def backwards(self, orm):
# Dummy migration
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
bsd-3-clause
|
apark263/tensorflow
|
tensorflow/python/kernel_tests/parsing_ops_test.py
|
11
|
62795
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import numpy as np
from google.protobuf import json_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Helpers for creating Example objects
example = example_pb2.Example
feature = feature_pb2.Feature
features = lambda d: feature_pb2.Features(feature=d)
bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v))
int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v))
float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v))
# Helpers for creating SequenceExample objects
feature_list = lambda l: feature_pb2.FeatureList(feature=l)
feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d)
sequence_example = example_pb2.SequenceExample
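# Example of how the helpers above are combined in the tests below (feature
# names and values are arbitrary):
# example(features=features({"age": int64_feature([42]),
#                             "name": bytes_feature([b"alice"])}))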
def flatten(list_of_lists):
"""Flatten one level of nesting."""
return itertools.chain.from_iterable(list_of_lists)
def flatten_values_tensors_or_sparse(tensors_list):
"""Flatten each SparseTensor object into 3 Tensors for session.run()."""
return list(
flatten([[v.indices, v.values, v.dense_shape]
if isinstance(v, sparse_tensor.SparseTensor) else [v]
for v in tensors_list]))
def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
flat_output):
tester.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))
i = 0 # Index into the flattened output of session.run()
for k, v in dict_tensors.items():
expected_v = expected_tensors[k]
tf_logging.info("Comparing key: %s", k)
if isinstance(v, sparse_tensor.SparseTensor):
# Three outputs for SparseTensor : indices, values, shape.
tester.assertEqual([k, len(expected_v)], [k, 3])
tester.assertAllEqual(expected_v[0], flat_output[i])
tester.assertAllEqual(expected_v[1], flat_output[i + 1])
tester.assertAllEqual(expected_v[2], flat_output[i + 2])
i += 3
else:
# One output for standard Tensor.
tester.assertAllEqual(expected_v, flat_output[i])
i += 1
class ParseExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
out = parsing_ops.parse_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
return
else:
# Returns dict w/ Tensors and SparseTensors.
out = parsing_ops.parse_example(**kwargs)
result = flatten_values_tensors_or_sparse(out.values())
# Check values.
tf_result = self.evaluate(result)
_compare_output_to_expected(self, out, expected_values, tf_result)
# Check shapes; if serialized is a Tensor we need its size to
# properly check.
serialized = kwargs["serialized"]
batch_size = (
self.evaluate(serialized).size if isinstance(serialized, ops.Tensor)
else np.asarray(serialized).size)
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
tuple(out[k].get_shape().as_list()), (batch_size,) + f.shape)
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 2))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(out[k].dense_shape.get_shape().as_list()), (2,))
@test_util.run_deprecated_v1
def testEmptySerializedWithAllDefaults(self):
sparse_name = "st_a"
a_name = "a"
b_name = "b"
c_name = "c:has_a_tricky_name"
a_default = [0, 42, 0]
b_default = np.random.rand(3, 3).astype(bytes)
c_default = np.random.rand(2).astype(np.float32)
expected_st_a = ( # indices, values, shape
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # st_a is DT_INT64
np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_output = {
sparse_name: expected_st_a,
a_name: np.array(2 * [[a_default]]),
b_name: np.array(2 * [b_default]),
c_name: np.array(2 * [c_default]),
}
self._test({
"example_names": np.empty((0,), dtype=bytes),
"serialized": ops.convert_to_tensor(["", ""]),
"features": {
sparse_name:
parsing_ops.VarLenFeature(dtypes.int64),
a_name:
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
b_name:
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
c_name:
parsing_ops.FixedLenFeature(
(2,), dtypes.float32, default_value=c_default),
}
}, expected_output)
def testEmptySerializedWithoutDefaultsShouldFail(self):
input_features = {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=[0, 42, 0]),
"b":
parsing_ops.FixedLenFeature(
(3, 3),
dtypes.string,
default_value=np.random.rand(3, 3).astype(bytes)),
# Feature "c" is missing a default, this gap will cause failure.
"c":
parsing_ops.FixedLenFeature((2,), dtype=dtypes.float32),
}
# Edge case where the key is there but the feature value is empty
original = example(features=features({"c": feature()}))
self._test(
{
"example_names": ["in1"],
"serialized": [original.SerializeToString()],
"features": input_features,
},
expected_err=(
errors_impl.OpError,
"Name: in1, Feature: c \\(data type: float\\) is required"))
# Standard case of missing key and value.
self._test(
{
"example_names": ["in1", "in2"],
"serialized": ["", ""],
"features": input_features,
},
expected_err=(
errors_impl.OpError,
"Name: in1, Feature: c \\(data type: float\\) is required"))
def testDenseNotMatchingShapeShouldFail(self):
original = [
example(features=features({
"a": float_feature([1, 1, 3]),
})),
example(features=features({
"a": float_feature([-1, -1]),
}))
]
names = ["passing", "failing"]
serialized = [m.SerializeToString() for m in original]
self._test(
{
"example_names": names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature((1, 3), dtypes.float32)
}
},
expected_err=(errors_impl.OpError,
"Name: failing, Key: a, Index: 1. Number of float val"))
def testDenseDefaultNoShapeShouldFail(self):
original = [
example(features=features({
"a": float_feature([1, 1, 3]),
})),
]
serialized = [m.SerializeToString() for m in original]
self._test(
{
"example_names": ["failing"],
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature(None, dtypes.float32)
}
},
expected_err=(ValueError, "Missing shape for feature a"))
@test_util.run_deprecated_v1
def testSerializedContainingSparse(self):
original = [
example(features=features({
"st_c": float_feature([3, 4])
})),
example(
features=features({
"st_c": float_feature([]), # empty float list
})),
example(
features=features({
"st_d": feature(), # feature with nothing in it
})),
example(
features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
}))
]
serialized = [m.SerializeToString() for m in original]
expected_st_c = ( # indices, values, shape
np.array([[0, 0], [0, 1], [3, 0], [3, 1], [3, 2]], dtype=np.int64),
np.array([3.0, 4.0, 1.0, 2.0, -1.0], dtype=np.float32),
np.array([4, 3], dtype=np.int64)) # batch == 4, max_elems = 3
expected_st_d = ( # indices, values, shape
np.array([[3, 0]], dtype=np.int64), np.array(["hi"], dtype=bytes),
np.array([4, 1], dtype=np.int64)) # batch == 4, max_elems = 1
expected_output = {
"st_c": expected_st_c,
"st_d": expected_st_d,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"st_c": parsing_ops.VarLenFeature(dtypes.float32),
"st_d": parsing_ops.VarLenFeature(dtypes.string)
}
}, expected_output)
def testSerializedContainingSparseFeature(self):
original = [
example(
features=features({
"val": float_feature([3, 4]),
"idx": int64_feature([5, 10])
})),
example(
features=features({
"val": float_feature([]), # empty float list
"idx": int64_feature([])
})),
example(
features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(
features=features({
"val": float_feature([1, 2, -1]),
"idx":
int64_feature([0, 9, 3]) # unsorted
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = ( # indices, values, shape
np.array([[0, 5], [0, 10], [3, 0], [3, 3], [3, 9]], dtype=np.int64),
np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
np.array([4, 13], dtype=np.int64)) # batch == 4, max_elems = 13
expected_output = {
"sp": expected_sp,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.float32, [13])
}
}, expected_output)
def testSerializedContainingSparseFeatureReuse(self):
original = [
example(
features=features({
"val1": float_feature([3, 4]),
"val2": float_feature([5, 6]),
"idx": int64_feature([5, 10])
})),
example(
features=features({
"val1": float_feature([]), # empty float list
"idx": int64_feature([])
})),
]
serialized = [m.SerializeToString() for m in original]
expected_sp1 = ( # indices, values, shape
np.array([[0, 5], [0, 10]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32), np.array(
[2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
expected_sp2 = ( # indices, values, shape
np.array([[0, 5], [0, 10]], dtype=np.int64),
np.array([5.0, 6.0], dtype=np.float32), np.array(
[2, 7], dtype=np.int64)) # batch == 2, max_elems = 7
expected_output = {
"sp1": expected_sp1,
"sp2": expected_sp2,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"sp1":
parsing_ops.SparseFeature("idx", "val1", dtypes.float32, 13),
"sp2":
parsing_ops.SparseFeature(
"idx", "val2", dtypes.float32, size=7, already_sorted=True)
}
}, expected_output)
def testSerializedContaining3DSparseFeature(self):
original = [
example(
features=features({
"val": float_feature([3, 4]),
"idx0": int64_feature([5, 10]),
"idx1": int64_feature([0, 2]),
})),
example(
features=features({
"val": float_feature([]), # empty float list
"idx0": int64_feature([]),
"idx1": int64_feature([]),
})),
example(
features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(
features=features({
"val": float_feature([1, 2, -1]),
"idx0": int64_feature([0, 9, 3]), # unsorted
"idx1": int64_feature([1, 0, 2]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = (
# indices
np.array(
[[0, 5, 0], [0, 10, 2], [3, 0, 1], [3, 3, 2], [3, 9, 0]],
dtype=np.int64),
# values
np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
# shape batch == 4, max_elems = 13
np.array([4, 13, 3], dtype=np.int64))
expected_output = {
"sp": expected_sp,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"sp":
parsing_ops.SparseFeature(["idx0", "idx1"], "val",
dtypes.float32, [13, 3])
}
}, expected_output)
def testSerializedContainingDense(self):
aname = "a"
bname = "b*has+a:tricky_name"
original = [
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b""]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
aname:
np.array([[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(["b0_str", ""], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string),
}
}, expected_output)
# This test is identical to the previous one except
# for the creation of 'serialized'.
def testSerializedContainingDenseWithConcat(self):
aname = "a"
bname = "b*has+a:tricky_name"
# TODO(lew): Feature appearing twice should be an error in future.
original = [
(example(features=features({
aname: float_feature([10, 10]),
})),
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
}))),
(
example(features=features({
bname: bytes_feature([b"b100"]),
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"]),
})),
),
]
serialized = [
m.SerializeToString() + n.SerializeToString() for (m, n) in original
]
expected_output = {
aname:
np.array([[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(["b0_str", "b1"], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string),
}
}, expected_output)
def testSerializedContainingDenseScalar(self):
original = [
example(features=features({
"a": float_feature([1]),
})),
example(features=features({}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"a":
np.array([[1], [-1]], dtype=np.float32) # 2x1 (column vector)
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a":
parsing_ops.FixedLenFeature(
(1,), dtype=dtypes.float32, default_value=-1),
}
}, expected_output)
def testSerializedContainingDenseWithDefaults(self):
original = [
example(features=features({
"a": float_feature([1, 1]),
})),
example(features=features({
"b": bytes_feature([b"b1"]),
})),
example(features=features({
"b": feature()
})),
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"a":
np.array([[1, 1], [3, -3], [3, -3]], dtype=np.float32).reshape(
3, 1, 2, 1),
"b":
np.array(["tmp_str", "b1", "tmp_str"], dtype=bytes).reshape(
3, 1, 1, 1, 1),
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a":
parsing_ops.FixedLenFeature(
(1, 2, 1), dtype=dtypes.float32, default_value=[3.0, -3.0]),
"b":
parsing_ops.FixedLenFeature(
(1, 1, 1, 1), dtype=dtypes.string, default_value="tmp_str"),
}
}, expected_output)
@test_util.run_deprecated_v1
def testSerializedContainingSparseAndSparseFeatureAndDenseWithNoDefault(self):
expected_st_a = ( # indices, values, shape
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # st_a is DT_INT64
np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_sp = ( # indices, values, shape
np.array([[0, 0], [0, 3], [1, 7]], dtype=np.int64),
np.array(["a", "b", "c"], dtype="|S"), np.array(
[2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
original = [
example(
features=features({
"c": float_feature([3, 4]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})),
example(
features=features({
"c": float_feature([1, 2]),
"val": bytes_feature([b"c"]),
"idx": int64_feature([7])
}))
]
names = ["in1", "in2"]
serialized = [m.SerializeToString() for m in original]
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": np.array(2 * [[a_default]]),
"b": np.array(2 * [b_default]),
"c": np.array([[3, 4], [1, 2]], dtype=np.float32),
}
self._test(
{
"example_names": names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature("idx", "val", dtypes.string, 13),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature((2,), dtypes.float32),
}
},
expected_output)
@test_util.run_deprecated_v1
def testSerializedContainingSparseAndSparseFeatureWithReuse(self):
expected_idx = ( # indices, values, shape
np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.int64),
np.array([0, 3, 7, 1]),
np.array([2, 2], dtype=np.int64)) # batch == 2, max_elems = 2
expected_sp = ( # indices, values, shape
np.array([[0, 0], [0, 3], [1, 1], [1, 7]], dtype=np.int64),
np.array(["a", "b", "d", "c"], dtype="|S"),
np.array([2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
original = [
example(
features=features({
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})),
example(
features=features({
"val": bytes_feature([b"c", b"d"]),
"idx": int64_feature([7, 1])
}))
]
names = ["in1", "in2"]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"idx": expected_idx,
"sp": expected_sp,
}
self._test({
"example_names": names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
"idx":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string, [13]),
}
}, expected_output)
def _testSerializedContainingVarLenDenseLargerBatch(self, batch_size):
# During parsing, data read from the serialized proto is stored in buffers.
# For small batch sizes, a buffer will contain one minibatch entry.
# For larger batch sizes, a buffer may contain several minibatch
# entries. This test identified a bug where the code that copied
# data out of the buffers and into the output tensors assumed each
# buffer only contained one minibatch entry. The bug has since been fixed.
truth_int = [i for i in range(batch_size)]
truth_str = [[("foo%d" % i).encode(), ("bar%d" % i).encode()]
for i in range(batch_size)]
expected_str = copy.deepcopy(truth_str)
# Delete some intermediate entries
for i in range(batch_size):
col = 1
if np.random.rand() < 0.25:
# w.p. 25%, drop out the second entry
expected_str[i][col] = b"default"
col -= 1
truth_str[i].pop()
if np.random.rand() < 0.25:
# w.p. 25%, drop out the second entry (possibly again)
expected_str[i][col] = b"default"
truth_str[i].pop()
expected_output = {
# Batch size batch_size, 1 time step.
"a": np.array(truth_int, dtype=np.int64).reshape(batch_size, 1),
# Batch size batch_size, 2 time steps.
"b": np.array(expected_str, dtype="|S").reshape(batch_size, 2),
}
original = [
example(
features=features({
"a": int64_feature([truth_int[i]]),
"b": bytes_feature(truth_str[i])
})) for i in range(batch_size)
]
serialized = [m.SerializeToString() for m in original]
self._test({
"serialized": ops.convert_to_tensor(serialized, dtype=dtypes.string),
"features": {
"a":
parsing_ops.FixedLenSequenceFeature(
shape=(),
dtype=dtypes.int64,
allow_missing=True,
default_value=-1),
"b":
parsing_ops.FixedLenSequenceFeature(
shape=[],
dtype=dtypes.string,
allow_missing=True,
default_value="default"),
}
}, expected_output)
def testSerializedContainingVarLenDenseLargerBatch(self):
np.random.seed(3456)
for batch_size in (1, 10, 20, 100, 256):
self._testSerializedContainingVarLenDenseLargerBatch(batch_size)
@test_util.run_deprecated_v1
def testSerializedContainingVarLenDense(self):
aname = "a"
bname = "b"
cname = "c"
dname = "d"
example_names = ["in1", "in2", "in3", "in4"]
original = [
example(features=features({
cname: int64_feature([2]),
})),
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str", b"b1_str"]),
})),
example(
features=features({
aname: float_feature([-1, -1, 2, 2]),
bname: bytes_feature([b"b1"]),
})),
example(
features=features({
aname: float_feature([]),
cname: int64_feature([3]),
})),
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
aname:
np.array(
[
[0, 0, 0, 0],
[1, 1, 0, 0],
[-1, -1, 2, 2],
[0, 0, 0, 0],
],
dtype=np.float32).reshape(4, 2, 2, 1),
bname:
np.array(
[["", ""], ["b0_str", "b1_str"], ["b1", ""], ["", ""]],
dtype=bytes).reshape(4, 2, 1, 1, 1),
cname:
np.array([2, 0, 0, 3], dtype=np.int64).reshape(4, 1),
dname:
np.empty(shape=(4, 0), dtype=bytes),
}
self._test({
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
}, expected_output)
# Test with padding values.
expected_output_custom_padding = dict(expected_output)
expected_output_custom_padding[aname] = np.array(
[
[-2, -2, -2, -2],
[1, 1, -2, -2],
[-1, -1, 2, 2],
[-2, -2, -2, -2],
],
dtype=np.float32).reshape(4, 2, 2, 1)
self._test({
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=-2.0),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
}, expected_output_custom_padding)
# Change number of required values so the inputs are not a
# multiple of this size.
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(
errors_impl.OpError, "Name: in3, Key: b, Index: 2. "
"Number of bytes values is not a multiple of stride length."))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=[]),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"Cannot reshape a tensor with 0 elements to shape"))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenFeature(
(None, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"First dimension of shape for feature a unknown. "
"Consider using FixedLenSequenceFeature."))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
cname:
parsing_ops.FixedLenFeature(
(1, None), dtype=dtypes.int64, default_value=[[1]]),
}
},
expected_err=(ValueError,
"All dimensions of shape for feature c need to be known "
r"but received \(1, None\)."))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=False),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"Unsupported: FixedLenSequenceFeature requires "
"allow_missing to be True."))
class ParseSingleExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
out = parsing_ops.parse_single_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
else:
# Returns dict w/ Tensors and SparseTensors.
out = parsing_ops.parse_single_example(**kwargs)
# Check values.
tf_result = sess.run(flatten_values_tensors_or_sparse(out.values()))
_compare_output_to_expected(self, out, expected_values, tf_result)
# Check shapes.
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
tuple(out[k].get_shape()), tensor_shape.as_shape(f.shape))
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(out[k].dense_shape.get_shape().as_list()), (1,))
@test_util.run_deprecated_v1
def testSingleExampleWithSparseAndSparseFeatureAndDense(self):
original = example(
features=features({
"c": float_feature([3, 4]),
"d": float_feature([0.0, 1.0]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3]),
"st_a": float_feature([3.0, 4.0])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0], [1]], dtype=np.int64), # indices
np.array([3.0, 4.0], dtype=np.float32), # values
np.array([2], dtype=np.int64)) # shape: max_values = 2
expected_sp = ( # indices, values, shape
np.array([[0], [3]], dtype=np.int64), np.array(["a", "b"], dtype="|S"),
np.array([13], dtype=np.int64)) # max_values = 13
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": [a_default],
"b": b_default,
"c": np.array([3, 4], dtype=np.float32),
"d": np.array([0.0, 1.0], dtype=np.float32),
}
self._test(
{
"example_names": ops.convert_to_tensor("in1"),
"serialized": ops.convert_to_tensor(serialized),
"features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.float32),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string,
[13]),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature(2, dtypes.float32),
"d":
parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True)
}
},
expected_output)
class ParseSequenceExampleTest(test.TestCase):
def testCreateSequenceExample(self):
value = sequence_example(
context=features({
"global_feature": float_feature([1, 2, 3]),
}),
feature_lists=feature_lists({
"repeated_feature_2_frames":
feature_list([
bytes_feature([b"a", b"b", b"c"]),
bytes_feature([b"a", b"d", b"e"])
]),
"repeated_feature_3_frames":
feature_list([
int64_feature([3, 4, 5, 6, 7]),
int64_feature([-1, 0, 0, 0, 0]),
int64_feature([1, 2, 3, 4, 5])
])
}))
value.SerializeToString() # Smoke test
def _test(self,
kwargs,
expected_context_values=None,
expected_feat_list_values=None,
expected_length_values=None,
expected_err=None,
batch=False):
expected_context_values = expected_context_values or {}
expected_feat_list_values = expected_feat_list_values or {}
expected_length_values = expected_length_values or {}
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
if batch:
c_out, fl_out, _ = parsing_ops.parse_sequence_example(**kwargs)
else:
c_out, fl_out = parsing_ops.parse_single_sequence_example(**kwargs)
if c_out:
sess.run(flatten_values_tensors_or_sparse(c_out.values()))
if fl_out:
sess.run(flatten_values_tensors_or_sparse(fl_out.values()))
else:
# Returns dicts w/ Tensors and SparseTensors.
if batch:
(context_out, feat_list_out,
lengths_out) = parsing_ops.parse_sequence_example(**kwargs)
else:
(context_out,
feat_list_out) = parsing_ops.parse_single_sequence_example(**kwargs)
lengths_out = {}
context_result = sess.run(
flatten_values_tensors_or_sparse(
context_out.values())) if context_out else []
feat_list_result = sess.run(
flatten_values_tensors_or_sparse(
feat_list_out.values())) if feat_list_out else []
lengths_result = sess.run(
flatten_values_tensors_or_sparse(
lengths_out.values())) if lengths_out else []
# Check values.
_compare_output_to_expected(self, context_out, expected_context_values,
context_result)
_compare_output_to_expected(self, feat_list_out,
expected_feat_list_values, feat_list_result)
_compare_output_to_expected(self, lengths_out, expected_length_values,
lengths_result)
# Check shapes; if serialized is a Tensor we need its size to
# properly check.
if "context_features" in kwargs:
for k, f in kwargs["context_features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
if batch:
self.assertEqual(
tuple(context_out[k].get_shape().as_list()[1:]), f.shape)
else:
self.assertEqual(
tuple(context_out[k].get_shape().as_list()), f.shape)
elif isinstance(f, parsing_ops.VarLenFeature) and batch:
self.assertEqual(
tuple(context_out[k].indices.get_shape().as_list()), (None, 2))
self.assertEqual(
tuple(context_out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(context_out[k].dense_shape.get_shape().as_list()), (2,))
elif isinstance(f, parsing_ops.VarLenFeature) and not batch:
self.assertEqual(
tuple(context_out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(
tuple(context_out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(context_out[k].dense_shape.get_shape().as_list()), (1,))
def _testBoth(self,
kwargs,
expected_context_values=None,
expected_feat_list_values=None,
expected_err=None):
# Test using tf.parse_single_sequence_example
self._test(
kwargs,
expected_context_values=expected_context_values,
expected_feat_list_values=expected_feat_list_values,
expected_err=expected_err,
batch=False)
# Convert the input to a batch of size 1, and test using
# tf.parse_sequence_example.
# Some replacements are needed for the batch version.
kwargs["serialized"] = [kwargs.pop("serialized")]
kwargs["example_names"] = [kwargs.pop("example_name")
] if "example_name" in kwargs else None
# Disable error string matching; it's not consistent for batch mode.
if expected_err:
expected_err = (expected_err[0], "")
# Add a batch dimension to expected output
if expected_context_values:
new_values = {}
for k in expected_context_values:
v = expected_context_values[k]
if isinstance(kwargs["context_features"][k],
parsing_ops.FixedLenFeature):
new_values[k] = np.expand_dims(v, axis=0)
else:
# Sparse tensor.
new_values[k] = (np.insert(v[0], 0, 0, axis=1), v[1],
np.insert(v[2], 0, 1))
expected_context_values = new_values
expected_length_values = {}
if expected_feat_list_values:
new_values = {}
for k in expected_feat_list_values:
v = expected_feat_list_values[k]
if isinstance(kwargs["sequence_features"][k],
parsing_ops.FixedLenSequenceFeature):
expected_length_values[k] = [np.shape(v)[0]]
new_values[k] = np.expand_dims(v, axis=0)
else:
# Sparse tensor.
new_values[k] = (np.insert(v[0], 0, 0, axis=1), v[1],
np.insert(v[2], 0, 1))
expected_feat_list_values = new_values
self._test(
kwargs,
expected_context_values=expected_context_values,
expected_feat_list_values=expected_feat_list_values,
expected_length_values=expected_length_values,
expected_err=expected_err,
batch=True)
@test_util.run_deprecated_v1
def testSequenceExampleWithSparseAndDenseContext(self):
original = sequence_example(
context=features({
"c": float_feature([3, 4]),
"st_a": float_feature([3.0, 4.0])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0], [1]], dtype=np.int64), # indices
np.array([3.0, 4.0], dtype=np.float32), # values
np.array([2], dtype=np.int64)) # shape: num_features = 2
a_default = [[1, 2, 3]]
b_default = np.random.rand(3, 3).astype(bytes)
expected_context_output = {
"st_a": expected_st_a,
"a": a_default,
"b": b_default,
"c": np.array([3, 4], dtype=np.float32),
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"context_features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.float32),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature((2,), dtypes.float32),
}
},
expected_context_values=expected_context_output)
@test_util.run_deprecated_v1
def testSequenceExampleWithMultipleSizeFeatureLists(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([
int64_feature([-1, 0, 1]),
int64_feature([2, 3, 4]),
int64_feature([5, 6, 7]),
int64_feature([8, 9, 10]),
]),
"b":
feature_list([bytes_feature([b"r00", b"r01", b"r10", b"r11"])]),
"c":
feature_list([float_feature([3, 4]),
float_feature([-1, 2])]),
}))
serialized = original.SerializeToString()
expected_feature_list_output = {
"a":
np.array(
[ # outer dimension is time.
[[-1, 0, 1]], # inside are 1x3 matrices
[[2, 3, 4]],
[[5, 6, 7]],
[[8, 9, 10]]
],
dtype=np.int64),
"b":
np.array(
[ # outer dimension is time, inside are 2x2 matrices
[[b"r00", b"r01"], [b"r10", b"r11"]]
],
dtype=bytes),
"c":
np.array(
[ # outer dimension is time, inside are 2-vectors
[3, 4], [-1, 2]
],
dtype=np.float32),
"d":
np.empty(shape=(0, 5), dtype=np.float32), # empty_allowed_missing
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a":
parsing_ops.FixedLenSequenceFeature((1, 3), dtypes.int64),
"b":
parsing_ops.FixedLenSequenceFeature((2, 2), dtypes.string),
"c":
parsing_ops.FixedLenSequenceFeature(2, dtypes.float32),
"d":
parsing_ops.FixedLenSequenceFeature(
(5,), dtypes.float32, allow_missing=True),
}
},
expected_feat_list_values=expected_feature_list_output)
@test_util.run_deprecated_v1
def testSequenceExampleWithoutDebugName(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([int64_feature([3, 4]),
int64_feature([1, 0])]),
"st_a":
feature_list([
float_feature([3.0, 4.0]),
float_feature([5.0]),
float_feature([])
]),
"st_b":
feature_list([
bytes_feature([b"a"]),
bytes_feature([]),
bytes_feature([]),
bytes_feature([b"b", b"c"])
])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0, 0], [0, 1], [1, 0]], dtype=np.int64), # indices
np.array([3.0, 4.0, 5.0], dtype=np.float32), # values
np.array([3, 2], dtype=np.int64)) # shape: num_time = 3, max_feat = 2
expected_st_b = (
np.array([[0, 0], [3, 0], [3, 1]], dtype=np.int64), # indices
np.array(["a", "b", "c"], dtype="|S"), # values
np.array([4, 2], dtype=np.int64)) # shape: num_time = 4, max_feat = 2
expected_st_c = (
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # values
np.array([0, 0], dtype=np.int64)) # shape: num_time = 0, max_feat = 0
expected_feature_list_output = {
"a": np.array([[3, 4], [1, 0]], dtype=np.int64),
"st_a": expected_st_a,
"st_b": expected_st_b,
"st_c": expected_st_c,
}
self._testBoth(
{
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"st_a": parsing_ops.VarLenFeature(dtypes.float32),
"st_b": parsing_ops.VarLenFeature(dtypes.string),
"st_c": parsing_ops.VarLenFeature(dtypes.int64),
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64),
}
},
expected_feat_list_values=expected_feature_list_output)
@test_util.run_deprecated_v1
def testSequenceExampleWithSparseAndDenseFeatureLists(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([int64_feature([3, 4]),
int64_feature([1, 0])]),
"st_a":
feature_list([
float_feature([3.0, 4.0]),
float_feature([5.0]),
float_feature([])
]),
"st_b":
feature_list([
bytes_feature([b"a"]),
bytes_feature([]),
bytes_feature([]),
bytes_feature([b"b", b"c"])
])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0, 0], [0, 1], [1, 0]], dtype=np.int64), # indices
np.array([3.0, 4.0, 5.0], dtype=np.float32), # values
np.array([3, 2], dtype=np.int64)) # shape: num_time = 3, max_feat = 2
expected_st_b = (
np.array([[0, 0], [3, 0], [3, 1]], dtype=np.int64), # indices
np.array(["a", "b", "c"], dtype="|S"), # values
np.array([4, 2], dtype=np.int64)) # shape: num_time = 4, max_feat = 2
expected_st_c = (
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # values
np.array([0, 0], dtype=np.int64)) # shape: num_time = 0, max_feat = 0
expected_feature_list_output = {
"a": np.array([[3, 4], [1, 0]], dtype=np.int64),
"st_a": expected_st_a,
"st_b": expected_st_b,
"st_c": expected_st_c,
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"st_a": parsing_ops.VarLenFeature(dtypes.float32),
"st_b": parsing_ops.VarLenFeature(dtypes.string),
"st_c": parsing_ops.VarLenFeature(dtypes.int64),
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64),
}
},
expected_feat_list_values=expected_feature_list_output)
@test_util.run_deprecated_v1
def testSequenceExampleWithEmptyFeatureInFeatureLists(self):
original = sequence_example(
feature_lists=feature_lists({
"st_a":
feature_list([
float_feature([3.0, 4.0]),
feature(),
float_feature([5.0]),
]),
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64), # indices
np.array([3.0, 4.0, 5.0], dtype=np.float32), # values
np.array([3, 2], dtype=np.int64)) # shape: num_time = 3, max_feat = 2
expected_feature_list_output = {
"st_a": expected_st_a,
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"st_a": parsing_ops.VarLenFeature(dtypes.float32),
}
},
expected_feat_list_values=expected_feature_list_output)
def testSequenceExampleListWithInconsistentDataFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a": feature_list([int64_feature([-1, 0]),
float_feature([2, 3])])
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError, "Feature list: a, Index: 1."
" Data types don't match. Expected type: int64"))
def testSequenceExampleListWithWrongDataTypeFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a": feature_list([float_feature([2, 3])])
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError,
"Feature list: a, Index: 0. Data types don't match."
" Expected type: int64"))
def testSequenceExampleListWithWrongSparseDataTypeFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([
int64_feature([3, 4]),
int64_feature([1, 2]),
float_feature([2.0, 3.0])
])
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError,
"Name: in1, Feature list: a, Index: 2."
" Data types don't match. Expected type: int64"
" Feature is: float_list"))
def testSequenceExampleListWithWrongShapeFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([int64_feature([2, 3]),
int64_feature([2, 3, 4])]),
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError, r"Name: in1, Key: a, Index: 1."
r" Number of int64 values != expected."
r" values size: 3 but output shape: \[2\]"))
def testSequenceExampleWithMissingFeatureListFails(self):
original = sequence_example(feature_lists=feature_lists({}))
# Test fails because we didn't add:
# feature_list_dense_defaults = {"a": None}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(original.SerializeToString()),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(
errors_impl.OpError,
"Name: in1, Feature list 'a' is required but could not be found."
" Did you mean to include it in"
" feature_list_dense_missing_assumed_empty or"
" feature_list_dense_defaults?"))
@test_util.run_deprecated_v1
def testSequenceExampleBatch(self):
first = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([
int64_feature([-1, 0, 1]),
int64_feature([2, 3, 4]),
int64_feature([5, 6, 7]),
int64_feature([8, 9, 10]),
])
}))
second = sequence_example(
feature_lists=feature_lists({
"a": feature_list([
int64_feature([21, 2, 11]),
])
}))
serialized = [first.SerializeToString(), second.SerializeToString()]
expected_feature_list_output = {
"a":
np.array(
[ # outermost dimension is example id
[ # middle dimension is time.
[[-1, 0, 1]], # inside are 1x3 matrices
[[2, 3, 4]],
[[5, 6, 7]],
[[8, 9, 10]]
],
[ # middle dimension is time.
[[21, 2, 11]], # inside are 1x3 matrices
[[0, 0, 0]], # additional entries are padded with 0
[[0, 0, 0]],
[[0, 0, 0]]
]
],
dtype=np.int64),
"d":
np.empty(shape=(2, 0, 5), dtype=np.float32), # allowed_missing
}
self._test(
{
"example_names": ops.convert_to_tensor(["in1", "in2"]),
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a":
parsing_ops.FixedLenSequenceFeature((1, 3), dtypes.int64),
"d":
parsing_ops.FixedLenSequenceFeature(
(5,), dtypes.float32, allow_missing=True),
}
},
expected_feat_list_values=expected_feature_list_output,
expected_length_values={
"a": [4, 1],
"d": [0, 0]
},
batch=True)
class DecodeJSONExampleTest(test.TestCase):
def _testRoundTrip(self, examples):
with self.cached_session() as sess:
      examples = np.array(examples, dtype=object)  # np.object is a deprecated alias for object
json_tensor = constant_op.constant(
[json_format.MessageToJson(m) for m in examples.flatten()],
shape=examples.shape,
dtype=dtypes.string)
binary_tensor = parsing_ops.decode_json_example(json_tensor)
binary_val = self.evaluate(binary_tensor)
if examples.shape:
self.assertShapeEqual(binary_val, json_tensor)
for input_example, output_binary in zip(
np.array(examples).flatten(), binary_val.flatten()):
output_example = example_pb2.Example()
output_example.ParseFromString(output_binary)
self.assertProtoEquals(input_example, output_example)
else:
output_example = example_pb2.Example()
output_example.ParseFromString(binary_val)
self.assertProtoEquals(examples.item(), output_example)
def testEmptyTensor(self):
self._testRoundTrip([])
self._testRoundTrip([[], [], []])
def testEmptyExamples(self):
self._testRoundTrip([example(), example(), example()])
def testDenseFeaturesScalar(self):
self._testRoundTrip(
example(features=features({
"a": float_feature([1, 1, 3])
})))
def testDenseFeaturesVector(self):
self._testRoundTrip([
example(features=features({
"a": float_feature([1, 1, 3])
})),
example(features=features({
"a": float_feature([-1, -1, 2])
})),
])
def testDenseFeaturesMatrix(self):
self._testRoundTrip([
[example(features=features({
"a": float_feature([1, 1, 3])
}))],
[example(features=features({
"a": float_feature([-1, -1, 2])
}))],
])
def testSparseFeatures(self):
self._testRoundTrip([
example(features=features({
"st_c": float_feature([3, 4])
})),
example(features=features({
"st_c": float_feature([])
})),
example(features=features({
"st_d": feature()
})),
example(
features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
})),
])
def testSerializedContainingBytes(self):
aname = "a"
bname = "b*has+a:tricky_name"
self._testRoundTrip([
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"])
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"])
})),
])
@test_util.run_deprecated_v1
def testInvalidSyntax(self):
with self.cached_session() as sess:
json_tensor = constant_op.constant(["{]"])
binary_tensor = parsing_ops.decode_json_example(json_tensor)
with self.assertRaisesOpError("Error while parsing JSON"):
self.evaluate(binary_tensor)
class ParseTensorOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testToFloat32(self):
with self.cached_session():
expected = np.random.rand(3, 4, 5).astype(np.float32)
tensor_proto = tensor_util.make_tensor_proto(expected)
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.float32)
result = tensor.eval(
feed_dict={serialized: tensor_proto.SerializeToString()})
self.assertAllEqual(expected, result)
@test_util.run_deprecated_v1
def testToUint8(self):
with self.cached_session():
expected = np.random.rand(3, 4, 5).astype(np.uint8)
tensor_proto = tensor_util.make_tensor_proto(expected)
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.uint8)
result = tensor.eval(
feed_dict={serialized: tensor_proto.SerializeToString()})
self.assertAllEqual(expected, result)
@test_util.run_deprecated_v1
def testTypeMismatch(self):
with self.cached_session():
expected = np.random.rand(3, 4, 5).astype(np.uint8)
tensor_proto = tensor_util.make_tensor_proto(expected)
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.uint16)
with self.assertRaisesOpError(
r"Type mismatch between parsed tensor \(uint8\) and dtype "
r"\(uint16\)"):
tensor.eval(feed_dict={serialized: tensor_proto.SerializeToString()})
@test_util.run_deprecated_v1
def testInvalidInput(self):
with self.cached_session():
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.uint16)
with self.assertRaisesOpError(
"Could not parse `serialized` as TensorProto: 'bogus'"):
tensor.eval(feed_dict={serialized: "bogus"})
with self.assertRaisesOpError(
r"Expected `serialized` to be a scalar, got shape: \[1\]"):
tensor.eval(feed_dict={serialized: ["bogus"]})
if __name__ == "__main__":
test.main()
|
apache-2.0
|
EtienneCmb/brainpipe
|
brainpipe/io/read_json.py
|
1
|
2227
|
"""Load, save and update json files."""
import os
import io
import json
from datetime import datetime
def save_json(filename, config):
"""Save configuration file as JSON.
Parameters
----------
filename : string
Name of the configuration file to save.
config : dict
Dictionary of arguments to save.
"""
# Ensure Python version compatibility
try:
to_unicode = unicode
except NameError:
to_unicode = str
if filename:
with io.open(filename, 'w', encoding='utf8') as f:
str_ = json.dumps(config, indent=4, sort_keys=True,
separators=(',', ': '), # Pretty printing
ensure_ascii=False)
f.write(to_unicode(str_))
def load_json(filename):
"""Load configuration file as JSON.
Parameters
----------
filename : string
Name of the configuration file to load.
Returns
-------
config : dict
Dictionary of config.
"""
with open(filename) as f:
# Load the configuration file :
config = json.load(f)
return config
def update_json(filename, update, backup=None):
"""Update a json file.
Parameters
----------
filename : str
Full path to the json file.
update : dict
Dict for update.
backup : str | None
Backup folder if needed.
"""
assert isinstance(update, dict)
assert os.path.isfile(filename)
config = load_json(filename)
_backup_json(filename, backup)
config.update(update)
save_json(filename, config)
def _backup_json(filename, backup=None):
if isinstance(backup, str):
assert os.path.isfile(filename)
assert os.path.exists(backup)
# Load json file :
config = load_json(filename)
config_backup = config.copy()
# Datetime :
now = datetime.now()
now_lst = [now.year, now.month, now.day, now.hour, now.minute,
now.second]
now_lst = '_'.join([str(k) for k in now_lst])
file, ext = os.path.splitext(os.path.split(filename)[1])
file += now_lst + ext
save_json(os.path.join(backup, file), config_backup)
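# A minimal usage sketch (illustrative only; the config keys below are made up):
# save -> load -> update round trip on a throwaway temporary file.
if __name__ == '__main__':
    import tempfile
    cfg_path = os.path.join(tempfile.mkdtemp(), 'config.json')
    save_json(cfg_path, {'subject': 'S01', 'sfreq': 512})
    assert load_json(cfg_path)['sfreq'] == 512
    update_json(cfg_path, {'sfreq': 1024})  # no backup folder given, so nothing is copied
    assert load_json(cfg_path)['sfreq'] == 1024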
|
gpl-3.0
|
Kirill1243/-tg-station
|
tools/mapmerge/map_helpers.py
|
29
|
18534
|
import sys
try:
version = sys.version_info
if version.major < 3 or (version.major == 3 and version.minor < 5):
print("ERROR: You are running an incompatible version of Python. The current minimum version required is [3.5].\nYour version: {}".format(sys.version))
sys.exit()
except:
print("ERROR: Something went wrong, you might be running an incompatible version of Python. The current minimum version required is [3.5].\nYour version: {}".format(sys.version))
sys.exit()
import collections
error = {0:"OK", 1:"WARNING: Key lengths are different, all the lines change."}
maxx = 0
maxy = 0
key_length = 1
def reset_globals():
global key_length
global maxx
global maxy
key_length = 1
maxx = 0
maxy = 0
def merge_map(newfile, backupfile, tgm):
reset_globals()
shitmap = parse_map(newfile)
originalmap = parse_map(backupfile)
global key_length
if shitmap["key_length"] != originalmap["key_length"]:
if tgm:
write_dictionary_tgm(newfile, shitmap["dictionary"])
write_grid_coord_small(newfile, shitmap["grid"])
return 1
else:
key_length = originalmap["key_length"]
shitDict = shitmap["dictionary"] #key to tile data dictionary
shitGrid = shitmap["grid"] #x,y coords to tiles (keys) dictionary (the map's layout)
originalDict = sort_dictionary(originalmap["dictionary"])
originalGrid = originalmap["grid"]
mergeGrid = dict() #final map layout
known_keys = dict() #mapping known keys to original keys
tempGrid = dict() #saving tiles with newly generated keys for later processing
temp_keys = dict() #mapping known keys to newly generated keys
unused_keys = list(originalDict.keys()) #list with all existing keys that aren't being used
tempDict = collections.OrderedDict() #mapping new keys to new data
originalDict_size = len(originalDict)
for y in range(1,maxy+1):
for x in range(1,maxx+1):
shitKey = shitGrid[x,y]
#if this key was seen before, add it to the pile immediately
if shitKey in known_keys:
mergeGrid[x,y] = known_keys[shitKey]
continue
#if this key was seen before, add it to the pile immediately
if shitKey in temp_keys:
tempGrid[x,y] = temp_keys[shitKey]
continue
shitData = shitDict[shitKey]
originalKey = originalGrid[x,y]
originalData = originalDict[originalKey]
#if new tile data at x,y is the same as original tile data at x,y, add to the pile
if shitData == originalData:
mergeGrid[x,y] = originalKey
known_keys[shitKey] = originalKey
unused_keys.remove(originalKey)
else:
#search for the new tile data in the original dictionary, if a key is found add it to the pile, else generate a new key
newKey = search_key(originalDict, shitData)
if newKey != None:
try:
unused_keys.remove(newKey)
except ValueError: #caused by a duplicate entry
print("NOTICE: Correcting duplicate dictionary entry. ({})".format(shitKey))
mergeGrid[x,y] = newKey
known_keys[shitKey] = newKey
#if data at original x,y no longer exists we reuse the key immediately
elif search_key(shitDict, originalData) == None:
mergeGrid[x,y] = originalKey
originalDict[originalKey] = shitData
unused_keys.remove(originalKey)
known_keys[shitKey] = originalKey
else:
if len(tempDict) == 0:
newKey = generate_new_key(originalDict)
else:
newKey = generate_new_key(tempDict)
tempGrid[x,y] = newKey
temp_keys[shitKey] = newKey
tempDict[newKey] = shitData
sort = 0
#find gaps in the dictionary keys sequence and add the missing keys to be recycled
dict_list = list(originalDict.keys())
for index in range(0, len(dict_list)):
if index + 1 == len(dict_list):
break
key = dict_list[index]
next_key = dict_list[index+1]
difference = key_difference(key, next_key)
if difference > 1:
i = 1
nextnew = key
while i < difference:
nextnew = get_next_key(nextnew)
unused_keys.append(nextnew)
i += 1
sort = 1
#Recycle outdated keys with any new tile data, starting from the bottom of the dictionary
i = 0
for key, value in reversed(tempDict.items()):
recycled_key = key
if len(unused_keys) > 0:
recycled_key = unused_keys.pop()
for coord, gridkey in tempGrid.items():
if gridkey == None:
continue
if gridkey == key:
mergeGrid[coord] = recycled_key
tempGrid[coord] = None
originalDict[recycled_key] = value
#if gaps in the key sequence were found, sort the dictionary for cleanliness
if sort == 1:
originalDict = sort_dictionary(originalDict)
if tgm:
write_dictionary_tgm(newfile, originalDict)
write_grid_coord_small(newfile, mergeGrid)
else:
write_dictionary(newfile, originalDict)
write_grid(newfile, mergeGrid)
return 0
#write dictionary in tgm format
def write_dictionary_tgm(filename, dictionary):
with open(filename, "w") as output:
output.write("//MAP CONVERTED BY dmm2tgm.py THIS HEADER COMMENT PREVENTS RECONVERSION, DO NOT REMOVE \n")
for key, list_ in dictionary.items():
output.write("\"{}\" = (\n".format(key))
for thing in list_:
buffer = ""
in_quote_block = False
in_varedit_block = False
for char in thing:
if in_quote_block:
if char == "\"":
in_quote_block = False
buffer = buffer + char
continue
elif char == "\"":
in_quote_block = True
buffer = buffer + char
continue
if not in_varedit_block:
if char == "{":
in_varedit_block = True
buffer = buffer + "{\n\t"
continue
else:
if char == ";":
buffer = buffer + ";\n\t"
continue
elif char == "}":
buffer = buffer + "\n\t}"
in_varedit_block = False
continue
buffer = buffer + char
if list_.index(thing) != len(list_) - 1:
buffer = buffer + ",\n"
output.write(buffer)
output.write(")\n")
#thanks to YotaXP for finding out about this one
def write_grid_coord_small(filename, grid):
with open(filename, "a") as output:
output.write("\n")
for x in range(1, maxx+1):
output.write("({},{},1) = {{\"\n".format(x, 1, 1))
for y in range(1, maxy):
output.write("{}\n".format(grid[x,y]))
output.write("{}\n\"}}\n".format(grid[x,maxy]))
def search_key(dictionary, data):
for key, value in dictionary.items():
if value == data:
return key
return None
def generate_new_key(dictionary):
last_key = next(reversed(dictionary))
return get_next_key(last_key)
def get_next_key(key):
if key == "":
return "".join("a" for _ in range(key_length))
length = len(key)
new_key = ""
carry = 1
for char in key[::-1]:
if carry <= 0:
new_key = new_key + char
continue
if char == 'Z':
new_key = new_key + 'a'
carry += 1
length -= 1
if length <= 0:
return "OVERFLOW"
elif char == 'z':
new_key = new_key + 'A'
else:
new_key = new_key + chr(ord(char) + 1)
if carry > 0:
carry -= 1
return new_key[::-1]
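# Roughly: keys count in base 52 with the ordering a < ... < z < A < ... < Z and
# the least significant character last, e.g. get_next_key("aa") -> "ab",
# get_next_key("az") -> "aA", get_next_key("aZ") -> "ba", and
# get_next_key("ZZ") -> "OVERFLOW" once no room is left at this key length.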
def sort_dictionary(dictionary):
sorted_dict = collections.OrderedDict()
next_key = get_next_key("")
while len(sorted_dict) < len(dictionary):
try:
sorted_dict[next_key] = dictionary[next_key]
except KeyError:
pass
next_key = get_next_key(next_key)
return sorted_dict
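# A sketch of the .dmm layout parsed below (the type paths are made up for
# illustration): quoted keys map to tile data, then each z-level block lists the
# keys row by row as one concatenated string per line, e.g.
#   "aa" = (/turf/open/floor,/area/hallway)
#   "ab" = (/turf/closed/wall{dir = 4},/area/hallway)
#   (1,1,1) = {"
#   aaab
#   abaa
#   "}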
#still does not support more than one z level per file, but should parse any format
def parse_map(map_file):
with open(map_file, "r") as map_input:
characters = map_input.read()
in_quote_block = False
in_key_block = False
in_data_block = False
in_varedit_block = False
after_data_block = False
escaping = False
skip_whitespace = False
dictionary = collections.OrderedDict()
curr_key = ""
curr_datum = ""
curr_data = list()
in_map_block = False
in_coord_block = False
in_map_string = False
iter_x = 0
adjust_y = True
curr_num = ""
reading_coord = "x"
global maxx
global maxy
key_length_local = 0
curr_x = 0
curr_y = 0
curr_z = 1
grid = dict()
for char in characters:
if not in_map_block:
if char == "\n" or char == "\t":
continue
if in_data_block:
if in_varedit_block:
if in_quote_block:
if char == "\\":
curr_datum = curr_datum + char
escaping = True
continue
if escaping:
curr_datum = curr_datum + char
escaping = False
continue
if char == "\"":
curr_datum = curr_datum + char
in_quote_block = False
continue
curr_datum = curr_datum + char
continue
if skip_whitespace and char == " ":
skip_whitespace = False
continue
skip_whitespace = False
if char == "\"":
curr_datum = curr_datum + char
in_quote_block = True
continue
if char == ";":
skip_whitespace = True
curr_datum = curr_datum + char
continue
if char == "}":
curr_datum = curr_datum + char
in_varedit_block = False
continue
curr_datum = curr_datum + char
continue
if char == "{":
curr_datum = curr_datum + char
in_varedit_block = True
continue
if char == ",":
curr_data.append(curr_datum)
curr_datum = ""
continue
if char == ")":
curr_data.append(curr_datum)
dictionary[curr_key] = tuple(curr_data)
curr_data = list()
curr_datum = ""
curr_key = ""
in_data_block = False
after_data_block = True
continue
curr_datum = curr_datum + char
continue
if in_key_block:
if char == "\"":
in_key_block = False
key_length_local = len(curr_key)
else:
curr_key = curr_key + char
continue
#else we're looking for a key block, a data block or the map block
if char == "\"":
in_key_block = True
after_data_block = False
continue
if char == "(":
if after_data_block:
in_map_block = True
in_coord_block = True
after_data_block = False
curr_key = ""
continue
else:
in_data_block = True
after_data_block = False
continue
else:
if in_coord_block:
if char == ",":
if reading_coord == "x":
curr_x = string_to_num(curr_num)
if curr_x > maxx:
maxx = curr_x
iter_x = 0
curr_num = ""
reading_coord = "y"
elif reading_coord == "y":
curr_y = string_to_num(curr_num)
if curr_y > maxy:
maxy = curr_y
curr_num = ""
reading_coord = "z"
else:
pass
continue
if char == ")":
in_coord_block = False
reading_coord = "x"
curr_num = ""
#read z here if needed
continue
curr_num = curr_num + char
continue
if in_map_string:
if char == "\"":
in_map_string = False
adjust_y = True
curr_y -= 1
continue
if char == "\n":
if adjust_y:
adjust_y = False
else:
curr_y += 1
if curr_x > maxx:
maxx = curr_x
if iter_x > 1:
curr_x = 1
iter_x = 0
continue
curr_key = curr_key + char
if len(curr_key) == key_length_local:
iter_x += 1
if iter_x > 1:
curr_x += 1
grid[curr_x, curr_y] = curr_key
curr_key = ""
continue
#else look for coordinate block or a map string
if char == "(":
in_coord_block = True
continue
if char == "\"":
in_map_string = True
continue
if curr_y > maxy:
maxy = curr_y
data = dict()
data["dictionary"] = dictionary
data["grid"] = grid
data["key_length"] = key_length_local
return data
#subtract keyB from keyA
def key_difference(keyA, keyB):
if len(keyA) != len(keyB):
return "you fucked up"
Ayek = keyA[::-1]
Byek = keyB[::-1]
result = 0
for i in range(0, len(keyA)):
base = 52**i
A = 26 if Ayek[i].isupper() else 0
B = 26 if Byek[i].isupper() else 0
result += ( (ord(Byek[i].lower()) + B) - (ord(Ayek[i].lower()) + A) ) * base
return result
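# Roughly: the difference uses the same base-52 ordering as get_next_key, e.g.
# key_difference("aa", "ab") == 1, key_difference("az", "aA") == 1 and
# key_difference("aa", "ba") == 52.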
def string_to_num(s):
try:
return int(s)
except ValueError:
return -1
#writes a tile data dictionary the same way Dreammaker does
def write_dictionary(filename, dictionary):
with open(filename, "w") as output:
for key, value in dictionary.items():
output.write("\"{}\" = ({})\n".format(key, ",".join(value)))
#writes a map grid the same way Dreammaker does
def write_grid(filename, grid):
with open(filename, "a") as output:
output.write("\n")
output.write("(1,1,1) = {\"\n")
for y in range(1, maxy+1):
for x in range(1, maxx+1):
try:
output.write(grid[x,y])
except KeyError:
print("Key error: ({},{})".format(x,y))
output.write("\n")
output.write("\"}")
output.write("\n")
#inflated map grid; unused
def write_grid_coord(filename, grid):
with open(filename, "a") as output:
output.write("\n")
for y in range(1, maxy+1):
for x in range(1, maxx+1):
output.write("({},{},1) = {{\"{}\"}}\n".format(x, y, grid[x,y]))
def key_compare(keyA, keyB): #thanks byond for not respecting ascii
pos = 0
for a in keyA:
pos += 1
count = pos
for b in keyB:
if(count > 1):
count -= 1
continue
if a.islower() and b.islower():
if(a < b):
return -1
if(a > b):
return 1
break
if a.islower() and b.isupper():
return -1
if a.isupper() and b.islower():
return 1
if a.isupper() and b.isupper():
if(a < b):
return -1
if(a > b):
return 1
break
return 0
|
agpl-3.0
|
rizzatti/luigi
|
test/contrib/scalding_test.py
|
68
|
2092
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
from luigi.contrib import scalding
import mock
import os
import random
import shutil
import tempfile
import unittest
class MyScaldingTask(scalding.ScaldingJobTask):
scala_source = luigi.Parameter()
def source(self):
return self.scala_source
class ScaldingTest(unittest.TestCase):
def setUp(self):
self.scalding_home = os.path.join(tempfile.gettempdir(), 'scalding-%09d' % random.randint(0, 999999999))
os.mkdir(self.scalding_home)
self.lib_dir = os.path.join(self.scalding_home, 'lib')
os.mkdir(self.lib_dir)
os.mkdir(os.path.join(self.scalding_home, 'provided'))
os.mkdir(os.path.join(self.scalding_home, 'libjars'))
f = open(os.path.join(self.lib_dir, 'scalding-core-foo'), 'w')
f.close()
self.scala_source = os.path.join(self.scalding_home, 'my_source.scala')
f = open(self.scala_source, 'w')
f.write('class foo extends Job')
f.close()
os.environ['SCALDING_HOME'] = self.scalding_home
def tearDown(self):
shutil.rmtree(self.scalding_home)
@mock.patch('subprocess.check_call')
@mock.patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
def test_scalding(self, check_call, track_job):
success = luigi.run(['MyScaldingTask', '--scala-source', self.scala_source, '--local-scheduler', '--no-lock'])
self.assertTrue(success)
# TODO: check more stuff
if __name__ == '__main__':
luigi.run()
|
apache-2.0
|
pytroll/satpy
|
satpy/readers/maia.py
|
1
|
5259
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Reader for NWPSAF AAPP MAIA Cloud product.
https://nwpsaf.eu/site/software/aapp/
Documentation reference:
[NWPSAF-MF-UD-003] DATA Formats
[NWPSAF-MF-UD-009] MAIA version 4 Scientific User Manual
"""
import logging
import h5py
import numpy as np
from xarray import DataArray
import dask.array as da
from satpy.readers.file_handlers import BaseFileHandler
from satpy import CHUNK_SIZE
logger = logging.getLogger(__name__)
class MAIAFileHandler(BaseFileHandler):
"""File handler for Maia files."""
def __init__(self, filename, filename_info, filetype_info):
"""Init the file handler."""
super(MAIAFileHandler, self).__init__(
filename, filename_info, filetype_info)
self.finfo = filename_info
# set the day date part for end_time from the file name
self.finfo['end_time'] = self.finfo['end_time'].replace(
year=self.finfo['start_time'].year,
month=self.finfo['start_time'].month,
day=self.finfo['start_time'].day)
if self.finfo['end_time'] < self.finfo['start_time']:
myday = self.finfo['end_time'].day
self.finfo['end_time'] = self.finfo['end_time'].replace(
day=myday + 1)
self.selected = None
self.read(self.filename)
def read(self, filename):
"""Read the file."""
self.h5 = h5py.File(filename, 'r')
missing = -9999.
self.Lat = da.from_array(self.h5[u'DATA/Latitude'], chunks=CHUNK_SIZE) / 10000.
self.Lon = da.from_array(self.h5[u'DATA/Longitude'], chunks=CHUNK_SIZE) / 10000.
self.selected = (self.Lon > missing)
self.file_content = {}
for key in self.h5['DATA'].keys():
self.file_content[key] = da.from_array(self.h5[u'DATA/' + key], chunks=CHUNK_SIZE)
for key in self.h5[u'HEADER'].keys():
self.file_content[key] = self.h5[u'HEADER/' + key][:]
# Cloud Mask on pixel
mask = 2**0 + 2**1 + 2**2
lst = self.file_content[u'CloudMask'] & mask
lst = lst / 2**0
self.file_content[u"cma"] = lst
# Cloud Mask confidence
mask = 2**5 + 2**6
lst = self.file_content[u'CloudMask'] & mask
lst = lst / 2**5
self.file_content[u"cma_conf"] = lst
# Cloud Mask Quality
mask = 2**3 + 2**4
lst = self.file_content[u'CloudMask'] & mask
lst = lst / 2**3
self.file_content[u'cma_qual'] = lst
# Opaque Cloud
mask = 2**21
lst = self.file_content[u'CloudMask'] & mask
lst = lst / 2**21
self.file_content[u'opaq_cloud'] = lst
# land /water Background
mask = 2**15 + 2**16 + 2**17
lst = self.file_content[u'CloudMask'] & mask
lst = lst / 2**15
self.file_content[u'land_water_background'] = lst
# CT (Actual CloudType)
mask = 2**4 + 2**5 + 2**6 + 2**7 + 2**8
classif = self.file_content[u'CloudType'] & mask
classif = classif / 2**4
self.file_content['ct'] = classif.astype(np.uint8)
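        # Roughly, each derived field above is a bit field extracted from the
        # packed product mask: AND with the mask, then shift right (written
        # here as a division by the mask's lowest bit), e.g. cma is
        # (CloudMask & 0b111) >> 0 and cma_conf is (CloudMask & 0b1100000) >> 5.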
def get_platform(self, platform):
"""Get the platform."""
if self.file_content['sat_id'] in (14,):
return "viirs"
else:
return "avhrr"
@property
def start_time(self):
"""Get the start time."""
return self.finfo['start_time']
@property
def end_time(self):
"""Get the end time."""
return self.finfo['end_time']
def get_dataset(self, key, info, out=None):
"""Get a dataset from the file."""
logger.debug("Reading %s.", key['name'])
values = self.file_content[key['name']]
selected = np.array(self.selected)
if key['name'] in ("Latitude", "Longitude"):
values = values / 10000.
if key['name'] in ('Tsurf', 'CloudTopPres', 'CloudTopTemp'):
goods = values > -9998.
selected = np.array(selected & goods)
if key['name'] in ('Tsurf', "Alt_surface", "CloudTopTemp"):
values = values / 100.
if key['name'] in ("CloudTopPres"):
values = values / 10.
else:
selected = self.selected
info.update(self.finfo)
fill_value = np.nan
if key['name'] == 'ct':
fill_value = 0
info['_FillValue'] = 0
ds = DataArray(values, dims=['y', 'x'], attrs=info).where(selected, fill_value)
# update dataset info with file_info
return ds
|
gpl-3.0
|
karthik-suresh/horizon
|
openstack_dashboard/test/tests/templates.py
|
79
|
3317
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.template import loader
from openstack_dashboard.test import helpers as test
class FakeUser(object):
username = "cool user"
class TemplateRenderTest(test.TestCase):
"""Tests for templates render."""
def test_openrc_html_escape(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": "ENG Perf R&D"}
out = loader.render_to_string(
'project/access_and_security/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertFalse("&" in out)
self.assertTrue("ENG Perf R&D" in out)
def test_openrc_html_evil_shell_escape(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": 'o"; sudo rm -rf /'}
out = loader.render_to_string(
'project/access_and_security/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertFalse('o"' in out)
self.assertTrue('\"' in out)
def test_openrc_html_evil_shell_backslash_escape(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": 'o\"; sudo rm -rf /'}
out = loader.render_to_string(
'project/access_and_security/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertFalse('o\"' in out)
self.assertFalse('o"' in out)
self.assertTrue('\\"' in out)
def test_openrc_set_region(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": "Tenant",
"region": "Colorado"}
out = loader.render_to_string(
'project/access_and_security/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertTrue("OS_REGION_NAME=\"Colorado\"" in out)
def test_openrc_region_not_set(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": "Tenant"}
out = loader.render_to_string(
'project/access_and_security/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertTrue("OS_REGION_NAME=\"\"" in out)
|
apache-2.0
|
mozilla/fjord
|
vendor/packages/urllib3/urllib3/poolmanager.py
|
550
|
8977
|
# urllib3/poolmanager.py
# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .request import RequestMethods
from .util import parse_url
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version')
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example: ::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = pool_classes_by_scheme[scheme]
kwargs = self.connection_pool_kw
if scheme == 'http':
kwargs = self.connection_pool_kw.copy()
for kw in SSL_KEYWORDS:
kwargs.pop(kw, None)
return pool_cls(host, port, **kwargs)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
scheme = scheme or 'http'
port = port or port_by_scheme.get(scheme, 80)
pool_key = (scheme, host, port)
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(scheme, host, port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url` but
doesn't pass any additional parameters to the
:class:`urllib3.connectionpool.ConnectionPool` constructor.
Additional parameters are taken from the :class:`.PoolManager`
constructor.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
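    # Roughly: pools are cached per (scheme, host, port) and the least recently
    # used pool is closed once num_pools is exceeded, so two URLs such as
    # 'http://example.com/a' and 'http://example.com/b' share one
    # HTTPConnectionPool while 'https://example.com/' gets its own
    # HTTPSConnectionPool (the hostnames here are only illustrative).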
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 2616, Section 10.3.4
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
kw['retries'] = kw.get('retries', 3) - 1 # Persist retries countdown
kw['redirect'] = redirect
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(self, proxy_url, num_pools=10, headers=None,
proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
assert self.proxy.scheme in ("http", "https"), \
'Not supported proxy scheme %s' % self.proxy.scheme
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http'):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme)
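    # Roughly: HTTPS requests get a pool keyed on the target host (the pool
    # tunnels through the proxy with CONNECT), while plain HTTP requests all
    # share the single pool keyed on the proxy itself.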
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
netloc = parse_url(url).netloc
if netloc:
headers_['Host'] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
self.headers))
return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
|
bsd-3-clause
|
heke123/chromium-crosswalk
|
tools/android/loading/request_track.py
|
3
|
29765
|
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The request data track.
When executed, parses a JSON dump of DevTools messages.
"""
import bisect
import collections
import copy
import datetime
import email.utils
import hashlib
import json
import logging
import re
import urlparse
import devtools_monitor
class Timing(object):
"""Collects the timing data for a request."""
_TIMING_NAMES = (
('connectEnd', 'connect_end'), ('connectStart', 'connect_start'),
('dnsEnd', 'dns_end'), ('dnsStart', 'dns_start'),
('proxyEnd', 'proxy_end'), ('proxyStart', 'proxy_start'),
('receiveHeadersEnd', 'receive_headers_end'),
('requestTime', 'request_time'), ('sendEnd', 'send_end'),
('sendStart', 'send_start'), ('sslEnd', 'ssl_end'),
('sslStart', 'ssl_start'), ('workerReady', 'worker_ready'),
('workerStart', 'worker_start'),
('loadingFinished', 'loading_finished'), ('pushStart', 'push_start'),
('pushEnd', 'push_end'))
_TIMING_NAMES_MAPPING = dict(_TIMING_NAMES)
__slots__ = tuple(x[1] for x in _TIMING_NAMES)
def __init__(self, **kwargs):
"""Constructor.
Initialize with keywords arguments from __slots__.
"""
for slot in self.__slots__:
setattr(self, slot, -1)
for (attr, value) in kwargs.items():
setattr(self, attr, value)
def __eq__(self, o):
return all(getattr(self, attr) == getattr(o, attr)
for attr in self.__slots__)
def __str__(self):
return str(self.ToJsonDict())
def LargestOffset(self):
"""Returns the largest offset in the available timings."""
return max(0, max(
getattr(self, attr) for attr in self.__slots__
if attr != 'request_time'))
def ToJsonDict(self):
return {attr: getattr(self, attr)
for attr in self.__slots__ if getattr(self, attr) != -1}
@classmethod
def FromJsonDict(cls, json_dict):
return cls(**json_dict)
@classmethod
def FromDevToolsDict(cls, json_dict):
"""Returns an instance of Timing from a dict, as passed by DevTools."""
timing_dict = {
cls._TIMING_NAMES_MAPPING[k]: v for (k, v) in json_dict.items()}
return cls(**timing_dict)
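# Editor's sketch (not part of the original file): how a DevTools "timing"
# dict maps onto a Timing instance; the numbers are made up for illustration.
#
#   t = Timing.FromDevToolsDict({'requestTime': 12.5, 'dnsStart': 1.0,
#                                'dnsEnd': 3.0, 'receiveHeadersEnd': 40.0})
#   t.request_time           # 12.5 (seconds)
#   t.receive_headers_end    # 40.0 (ms relative to request_time)
#   t.LargestOffset()        # 40.0
#   t.connect_start          # -1, i.e. not reported by DevTools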
def ShortName(url):
"""Returns a shortened version of a URL."""
parsed = urlparse.urlparse(url)
path = parsed.path
hostname = parsed.hostname if parsed.hostname else '?.?.?'
if path != '' and path != '/':
last_path = parsed.path.split('/')[-1]
if len(last_path) < 10:
if len(path) < 10:
return hostname + '/' + path
else:
return hostname + '/..' + parsed.path[-10:]
else:
return hostname + '/..' + last_path[:5]
else:
return hostname
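# Editor's sketch (not part of the original file): values the code above
# produces for a few illustrative URLs.
#
#   ShortName('http://example.com/')                      # 'example.com'
#   ShortName('http://example.com/a/b.js')                # 'example.com//a/b.js'
#   ShortName('http://example.com/static/bundle.min.js')  # 'example.com/..bundl'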
def IntervalBetween(first, second, reason):
"""Returns the start and end of the inteval between two requests, in ms.
This is defined as:
- [first.headers, second.start] if reason is 'parser'. This is to account
for incremental parsing.
- [first.end, second.start] if reason is 'script', 'redirect' or 'other'.
Args:
first: (Request) First request.
second: (Request) Second request.
reason: (str) Link between the two requests, in Request.INITIATORS.
Returns:
(start_msec (float), end_msec (float)),
"""
assert reason in Request.INITIATORS
second_ms = second.timing.request_time * 1000
if reason == 'parser':
first_offset_ms = first.timing.receive_headers_end
else:
first_offset_ms = first.timing.LargestOffset()
return (first.timing.request_time * 1000 + first_offset_ms, second_ms)
def TimeBetween(first, second, reason):
"""(end_msec - start_msec), with the values as returned by IntervalBetween().
"""
(first_ms, second_ms) = IntervalBetween(first, second, reason)
return second_ms - first_ms
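# Editor's sketch (not part of the original file): with hypothetical timings
#   first.timing.request_time = 10.0 (s), first.timing.receive_headers_end = 30.0 (ms)
#   second.timing.request_time = 10.1 (s)
# IntervalBetween(first, second, 'parser') gives (10030.0, 10100.0), so
# TimeBetween(first, second, 'parser') is ~70.0 ms.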
def TimingAsList(timing):
"""Transform Timing to a list, eg as is used in JSON output.
Args:
timing: a Timing.
Returns:
A list identical to what the eventual JSON output will be (eg,
Request.ToJsonDict).
"""
return json.loads(json.dumps(timing))
class Request(object):
"""Represents a single request.
Generally speaking, fields here closely mirror those documented in
third_party/WebKit/Source/devtools/protocol.json.
Fields:
request_id: (str) unique request ID. Postfixed with _REDIRECT_SUFFIX for
redirects.
frame_id: (str) unique frame identifier.
    loader_id: (str) unique loader identifier.
document_url: (str) URL of the document this request is loaded for.
url: (str) Request URL.
protocol: (str) protocol used for the request.
method: (str) HTTP method, such as POST or GET.
request_headers: (dict) {'header': 'value'} Request headers.
response_headers: (dict) {'header': 'value'} Response headers.
initial_priority: (str) Initial request priority, in REQUEST_PRIORITIES.
timestamp: (float) Request timestamp, in s.
wall_time: (float) Request timestamp, UTC timestamp in s.
initiator: (dict) Request initiator, in INITIATORS.
resource_type: (str) Resource type, in RESOURCE_TYPES
served_from_cache: (bool) Whether the request was served from cache.
from_disk_cache: (bool) Whether the request was served from the disk cache.
from_service_worker: (bool) Whether the request was served by a Service
Worker.
timing: (Timing) Request timing, extended with loading_finished.
status: (int) Response status code.
status_text: (str) Response status text received in the status line.
encoded_data_length: (int) Total encoded data length.
data_chunks: (list) [(offset, encoded_data_length), ...] List of data
chunks received, with their offset in ms relative to
Timing.requestTime.
failed: (bool) Whether the request failed.
start_msec: (float) Request start time, in milliseconds from chrome start.
    end_msec: (float) Request end time, in milliseconds from the same time
      origin as start_msec.
"""
REQUEST_PRIORITIES = ('VeryLow', 'Low', 'Medium', 'High', 'VeryHigh')
RESOURCE_TYPES = ('Document', 'Stylesheet', 'Image', 'Media', 'Font',
'Script', 'TextTrack', 'XHR', 'Fetch', 'EventSource',
'WebSocket', 'Manifest', 'Other')
INITIATORS = ('parser', 'script', 'other', 'redirect')
INITIATING_REQUEST = 'initiating_request'
ORIGINAL_INITIATOR = 'original_initiator'
def __init__(self):
self.request_id = None
self.frame_id = None
self.loader_id = None
self.document_url = None
self.url = None
self.protocol = None
self.method = None
self.mime_type = None
self.request_headers = None
self.response_headers = None
self.initial_priority = None
self.timestamp = -1
self.wall_time = -1
self.initiator = None
self.resource_type = None
self.served_from_cache = False
self.from_disk_cache = False
self.from_service_worker = False
self.timing = None
self.status = None
self.status_text = None
self.encoded_data_length = 0
self.data_chunks = []
self.failed = False
@property
def start_msec(self):
return self.timing.request_time * 1000
@property
def end_msec(self):
if self.start_msec is None:
return None
return self.start_msec + self.timing.LargestOffset()
@property
def fingerprint(self):
h = hashlib.sha256()
h.update(self.url)
return h.hexdigest()[:10]
def _TimestampOffsetFromStartMs(self, timestamp):
assert self.timing.request_time != -1
request_time = self.timing.request_time
return (timestamp - request_time) * 1000
def ToJsonDict(self):
result = copy.deepcopy(self.__dict__)
result['timing'] = self.timing.ToJsonDict() if self.timing else {}
return result
@classmethod
def FromJsonDict(cls, data_dict):
result = Request()
for (k, v) in data_dict.items():
setattr(result, k, v)
if not result.response_headers:
result.response_headers = {}
if result.timing:
result.timing = Timing.FromJsonDict(result.timing)
else:
result.timing = Timing(request_time=result.timestamp)
return result
def GetHTTPResponseHeader(self, header_name):
"""Gets the value of a HTTP response header.
Does a case-insensitive search for the header name in the HTTP response
    headers, in order to support servers that use non-standard capitalization.
"""
lower_case_name = header_name.lower()
result = None
for name, value in self.response_headers.iteritems():
if name.lower() == lower_case_name:
result = value
break
return result
def GetResponseHeaderValue(self, header, value):
"""Returns True iff the response headers |header| contains |value|."""
header_values = self.GetHTTPResponseHeader(header)
if not header_values:
return None
values = header_values.split(',')
for header_value in values:
if header_value.lower() == value.lower():
return header_value
return None
def HasResponseHeaderValue(self, header, value):
"""Returns True iff the response headers |header| contains |value|."""
return self.GetResponseHeaderValue(header, value) is not None
def GetContentType(self):
"""Returns the content type, or None."""
# Check for redirects. Use the "Location" header, because the HTTP status is
# not reliable.
if self.GetHTTPResponseHeader('Location') is not None:
return 'redirect'
# Check if the response is empty.
if (self.GetHTTPResponseHeader('Content-Length') == '0' or
self.status == 204):
return 'ping'
if self.mime_type:
return self.mime_type
content_type = self.GetHTTPResponseHeader('Content-Type')
if not content_type or ';' not in content_type:
return content_type
else:
return content_type[:content_type.index(';')]
def IsDataRequest(self):
return self.protocol == 'data'
def HasReceivedResponse(self):
return self.status is not None
def GetCacheControlDirective(self, directive_name):
"""Returns the value of a Cache-Control directive, or None."""
cache_control_str = self.GetHTTPResponseHeader('Cache-Control')
if cache_control_str is None:
return None
directives = [s.strip() for s in cache_control_str.split(',')]
for directive in directives:
parts = directive.split('=')
if len(parts) == 1:
continue
(name, value) = parts
if name == directive_name:
return value
return None
def MaxAge(self):
"""Returns the max-age of a resource, or -1."""
# TODO(lizeb): Handle the "Expires" header as well.
cache_control = {}
if not self.response_headers:
return -1
cache_control_str = self.GetHTTPResponseHeader('Cache-Control')
if cache_control_str is not None:
directives = [s.strip() for s in cache_control_str.split(',')]
for directive in directives:
parts = [s.strip() for s in directive.split('=')]
if len(parts) == 1:
cache_control[parts[0]] = True
else:
cache_control[parts[0]] = parts[1]
if (u'no-store' in cache_control
or u'no-cache' in cache_control
or len(cache_control) == 0):
return -1
max_age = self.GetCacheControlDirective('max-age')
if max_age:
return int(max_age)
return -1
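  # Editor's sketch (not part of the original file): MaxAge() results for a
  # few hypothetical response headers.
  #
  #   'Cache-Control: public, max-age=3600'   -> 3600
  #   'Cache-Control: no-store'               -> -1
  #   no Cache-Control header at all          -> -1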
def Cost(self):
"""Returns the cost of this request in ms, defined as time between
request_time and the latest timing event.
"""
# All fields in timing are millis relative to request_time.
return self.timing.LargestOffset()
def GetRawResponseHeaders(self):
"""Gets the request's raw response headers compatible with
net::HttpResponseHeaders's constructor.
"""
assert not self.IsDataRequest()
headers = '{} {} {}\x00'.format(
self.protocol.upper(), self.status, self.status_text)
for key in sorted(self.response_headers.keys()):
headers += '{}: {}\x00'.format(key, self.response_headers[key])
return headers
def __eq__(self, o):
return self.__dict__ == o.__dict__
def __hash__(self):
return hash(self.request_id)
def __str__(self):
return json.dumps(self.ToJsonDict(), sort_keys=True, indent=2)
class CachingPolicy(object):
"""Represents the caching policy at an arbitrary time for a cached response.
"""
FETCH = 'FETCH'
VALIDATION_NONE = 'VALIDATION_NONE'
VALIDATION_SYNC = 'VALIDATION_SYNC'
VALIDATION_ASYNC = 'VALIDATION_ASYNC'
POLICIES = (FETCH, VALIDATION_NONE, VALIDATION_SYNC, VALIDATION_ASYNC)
def __init__(self, request):
"""Constructor.
Args:
request: (Request)
"""
assert request.response_headers is not None
self.request = request
# This is incorrect, as the timestamp corresponds to when devtools is made
# aware of the request, not when it was sent. However, this is good enough
# for computing cache expiration, which doesn't need sub-second precision.
self._request_time = self.request.wall_time
# Used when the date is not available.
self._response_time = (
self._request_time + self.request.timing.receive_headers_end)
def HasValidators(self):
"""Returns wether the request has a validator."""
# Assuming HTTP 1.1+.
return (self.request.GetHTTPResponseHeader('Last-Modified')
or self.request.GetHTTPResponseHeader('Etag'))
def IsCacheable(self):
"""Returns whether the request could be stored in the cache."""
return not self.request.HasResponseHeaderValue('Cache-Control', 'no-store')
def PolicyAtDate(self, timestamp):
"""Returns the caching policy at an aribitrary timestamp.
Args:
timestamp: (float) Seconds since Epoch.
Returns:
A policy in POLICIES.
"""
# Note: the implementation is largely transcribed from
# net/http/http_response_headers.cc, itself following RFC 2616.
if not self.IsCacheable():
return self.FETCH
freshness = self._GetFreshnessLifetimes()
if freshness[0] == 0 and freshness[1] == 0:
return self.VALIDATION_SYNC
age = self._GetCurrentAge(timestamp)
if freshness[0] > age:
return self.VALIDATION_NONE
if freshness[1] > age:
return self.VALIDATION_ASYNC
return self.VALIDATION_SYNC
def _GetFreshnessLifetimes(self):
"""Returns [freshness, stale-while-revalidate freshness] in seconds."""
# This is adapted from GetFreshnessLifetimes() in
# //net/http/http_response_headers.cc (which follows the RFC).
r = self.request
result = [0, 0]
if (r.HasResponseHeaderValue('Cache-Control', 'no-cache')
or r.HasResponseHeaderValue('Cache-Control', 'no-store')
or r.HasResponseHeaderValue('Vary', '*')): # RFC 2616, 13.6.
return result
must_revalidate = r.HasResponseHeaderValue(
'Cache-Control', 'must-revalidate')
swr_header = r.GetCacheControlDirective('stale-while-revalidate')
if not must_revalidate and swr_header:
result[1] = int(swr_header)
max_age_header = r.GetCacheControlDirective('max-age')
if max_age_header:
result[0] = int(max_age_header)
return result
date = self._GetDateValue('Date') or self._response_time
expires = self._GetDateValue('Expires')
if expires:
result[0] = expires - date
return result
if self.request.status in (200, 203, 206) and not must_revalidate:
last_modified = self._GetDateValue('Last-Modified')
if last_modified and last_modified < date:
result[0] = (date - last_modified) / 10
return result
if self.request.status in (300, 301, 308, 410):
return [2**48, 0] # ~forever.
# No header -> not fresh.
return result
def _GetDateValue(self, name):
date_str = self.request.GetHTTPResponseHeader(name)
if not date_str:
return None
parsed_date = email.utils.parsedate_tz(date_str)
if parsed_date is None:
return None
return email.utils.mktime_tz(parsed_date)
def _GetCurrentAge(self, current_time):
# See GetCurrentAge() in //net/http/http_response_headers.cc.
r = self.request
date_value = self._GetDateValue('Date') or self._response_time
age_value = int(r.GetHTTPResponseHeader('Age') or '0')
apparent_age = max(0, self._response_time - date_value)
corrected_received_age = max(apparent_age, age_value)
response_delay = self._response_time - self._request_time
corrected_initial_age = corrected_received_age + response_delay
resident_time = current_time - self._response_time
current_age = corrected_initial_age + resident_time
return current_age
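# Editor's sketch (not part of the original file): a hypothetical helper
# illustrating how CachingPolicy can be queried, e.g. whether a completed
# request (with response headers) would still be usable without revalidation
# one hour after it was observed.
def _ExampleCachePolicyInOneHour(request):
  """Editor-added usage example; not referenced anywhere in this module."""
  policy = CachingPolicy(request)
  return policy.PolicyAtDate(request.wall_time + 3600)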
class RequestTrack(devtools_monitor.Track):
"""Aggregates request data."""
_REDIRECT_SUFFIX = '.redirect'
# Request status
_STATUS_SENT = 0
_STATUS_RESPONSE = 1
_STATUS_DATA = 2
_STATUS_FINISHED = 3
_STATUS_FAILED = 4
# Serialization KEYS
_EVENTS_KEY = 'events'
_METADATA_KEY = 'metadata'
_DUPLICATES_KEY = 'duplicates_count'
_INCONSISTENT_INITIATORS_KEY = 'inconsistent_initiators'
def __init__(self, connection):
super(RequestTrack, self).__init__(connection)
self._connection = connection
self._requests = []
self._requests_in_flight = {} # requestId -> (request, status)
self._completed_requests_by_id = {}
self._redirects_count_by_id = collections.defaultdict(int)
self._indexed = False
self._request_start_timestamps = None
self._request_end_timestamps = None
self._requests_by_start = None
self._requests_by_end = None
if connection: # Optional for testing.
for method in RequestTrack._METHOD_TO_HANDLER:
self._connection.RegisterListener(method, self)
# Enable asynchronous callstacks to get full javascript callstacks in
# initiators
self._connection.SetScopedState('Debugger.setAsyncCallStackDepth',
{'maxDepth': 4}, {'maxDepth': 0}, True)
    # responseReceived messages are sometimes duplicated. Record the message to
# detect this.
self._request_id_to_response_received = {}
self.duplicates_count = 0
self.inconsistent_initiators_count = 0
def Handle(self, method, msg):
assert method in RequestTrack._METHOD_TO_HANDLER
self._indexed = False
params = msg['params']
request_id = params['requestId']
RequestTrack._METHOD_TO_HANDLER[method](self, request_id, params)
def GetEvents(self):
if self._requests_in_flight:
logging.warning('Number of requests still in flight: %d.'
% len(self._requests_in_flight))
return self._requests
def GetFirstResourceRequest(self):
return self.GetEvents()[0]
def GetFirstRequestMillis(self):
"""Find the canonical start time for this track.
Returns:
The millisecond timestamp of the first request.
"""
assert self._requests, "No requests to analyze."
self._IndexRequests()
return self._request_start_timestamps[0]
def GetLastRequestMillis(self):
"""Find the canonical start time for this track.
Returns:
The millisecond timestamp of the first request.
"""
assert self._requests, "No requests to analyze."
self._IndexRequests()
return self._request_end_timestamps[-1]
def GetEventsStartingBetween(self, start_ms, end_ms):
"""Return events that started in a range.
Args:
start_ms: the start time to query, in milliseconds from the first request.
end_ms: the end time to query, in milliseconds from the first request.
Returns:
A list of requests whose start time is in [start_ms, end_ms].
"""
self._IndexRequests()
low = bisect.bisect_left(self._request_start_timestamps, start_ms)
high = bisect.bisect_right(self._request_start_timestamps, end_ms)
return self._requests_by_start[low:high]
def GetEventsEndingBetween(self, start_ms, end_ms):
"""Return events that ended in a range.
Args:
start_ms: the start time to query, in milliseconds from the first request.
end_ms: the end time to query, in milliseconds from the first request.
Returns:
A list of requests whose end time is in [start_ms, end_ms].
"""
self._IndexRequests()
low = bisect.bisect_left(self._request_end_timestamps, start_ms)
high = bisect.bisect_right(self._request_end_timestamps, end_ms)
return self._requests_by_end[low:high]
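  # Editor's note (not part of the original file): both range queries above use
  # bisect over the timestamp lists that _IndexRequests() keeps sorted, so each
  # lookup is O(log n) plus the size of the returned slice, and the
  # [start_ms, end_ms] window is inclusive at both ends for exact matches.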
def ToJsonDict(self):
if self._requests_in_flight:
logging.warning('Requests in flight, will be ignored in the dump')
return {self._EVENTS_KEY: [
request.ToJsonDict() for request in self._requests],
self._METADATA_KEY: {
self._DUPLICATES_KEY: self.duplicates_count,
self._INCONSISTENT_INITIATORS_KEY:
self.inconsistent_initiators_count}}
@classmethod
def FromJsonDict(cls, json_dict):
assert cls._EVENTS_KEY in json_dict
assert cls._METADATA_KEY in json_dict
result = RequestTrack(None)
requests = [Request.FromJsonDict(request)
for request in json_dict[cls._EVENTS_KEY]]
result._requests = requests
metadata = json_dict[cls._METADATA_KEY]
result.duplicates_count = metadata.get(cls._DUPLICATES_KEY, 0)
result.inconsistent_initiators_count = metadata.get(
cls._INCONSISTENT_INITIATORS_KEY, 0)
return result
def _IndexRequests(self):
# TODO(mattcary): if we ever have requests without timing then we either
# need a default, or to make an index that only includes requests with
# timings.
if self._indexed:
return
valid_requests = [r for r in self._requests
if r.start_msec is not None]
self._requests_by_start = sorted(valid_requests,
key=lambda r: r.start_msec)
self._request_start_timestamps = [r.start_msec
for r in self._requests_by_start]
self._requests_by_end = sorted(valid_requests,
key=lambda r: r.end_msec)
self._request_end_timestamps = [r.end_msec
for r in self._requests_by_end]
self._indexed = True
def _RequestWillBeSent(self, request_id, params):
# Several "requestWillBeSent" events can be dispatched in a row in the case
# of redirects.
redirect_initiator = None
if request_id in self._requests_in_flight:
redirect_initiator = self._HandleRedirect(request_id, params)
assert (request_id not in self._requests_in_flight
and request_id not in self._completed_requests_by_id)
r = Request()
r.request_id = request_id
_CopyFromDictToObject(
params, r, (('frameId', 'frame_id'), ('loaderId', 'loader_id'),
('documentURL', 'document_url'),
('timestamp', 'timestamp'), ('wallTime', 'wall_time'),
('initiator', 'initiator')))
request = params['request']
_CopyFromDictToObject(
request, r, (('url', 'url'), ('method', 'method'),
('headers', 'headers'),
('initialPriority', 'initial_priority')))
r.resource_type = params.get('type', 'Other')
if redirect_initiator:
original_initiator = r.initiator
r.initiator = redirect_initiator
r.initiator[Request.ORIGINAL_INITIATOR] = original_initiator
initiating_request = self._completed_requests_by_id[
redirect_initiator[Request.INITIATING_REQUEST]]
initiating_initiator = initiating_request.initiator.get(
Request.ORIGINAL_INITIATOR, initiating_request.initiator)
if initiating_initiator != original_initiator:
self.inconsistent_initiators_count += 1
self._requests_in_flight[request_id] = (r, RequestTrack._STATUS_SENT)
def _HandleRedirect(self, request_id, params):
(r, status) = self._requests_in_flight[request_id]
assert status == RequestTrack._STATUS_SENT
# The second request contains timing information pertaining to the first
# one. Finalize the first request.
assert 'redirectResponse' in params
redirect_response = params['redirectResponse']
_CopyFromDictToObject(redirect_response, r,
(('headers', 'response_headers'),
('encodedDataLength', 'encoded_data_length'),
('fromDiskCache', 'from_disk_cache'),
('protocol', 'protocol'), ('status', 'status'),
('statusText', 'status_text')))
r.timing = Timing.FromDevToolsDict(redirect_response['timing'])
redirect_index = self._redirects_count_by_id[request_id]
self._redirects_count_by_id[request_id] += 1
r.request_id = '%s%s.%d' % (request_id, self._REDIRECT_SUFFIX,
redirect_index + 1)
initiator = {
'type': 'redirect', Request.INITIATING_REQUEST: r.request_id}
self._requests_in_flight[r.request_id] = (r, RequestTrack._STATUS_FINISHED)
del self._requests_in_flight[request_id]
self._FinalizeRequest(r.request_id)
return initiator
def _RequestServedFromCache(self, request_id, _):
assert request_id in self._requests_in_flight
(request, status) = self._requests_in_flight[request_id]
assert status == RequestTrack._STATUS_SENT
request.served_from_cache = True
def _ResponseReceived(self, request_id, params):
assert request_id in self._requests_in_flight
(r, status) = self._requests_in_flight[request_id]
if status == RequestTrack._STATUS_RESPONSE:
# Duplicated messages (apart from the timestamp) are OK.
old_params = self._request_id_to_response_received[request_id]
params_copy = copy.deepcopy(params)
params_copy['timestamp'] = None
old_params['timestamp'] = None
assert params_copy == old_params
self.duplicates_count += 1
return
assert status == RequestTrack._STATUS_SENT
assert r.frame_id == params['frameId']
assert r.timestamp <= params['timestamp']
if r.resource_type == 'Other':
r.resource_type = params.get('type', 'Other')
else:
assert r.resource_type == params.get('type', 'Other')
response = params['response']
_CopyFromDictToObject(
response, r, (('status', 'status'), ('mimeType', 'mime_type'),
('fromDiskCache', 'from_disk_cache'),
('fromServiceWorker', 'from_service_worker'),
('protocol', 'protocol'), ('statusText', 'status_text'),
# Actual request headers are not known before reaching the
# network stack.
('requestHeaders', 'request_headers'),
('headers', 'response_headers')))
timing_dict = {}
# data URLs don't have a timing dict, and timings for cached requests are
# stale.
    # TODO(droger): the timestamp is inaccurate, get the real timings instead.
if r.protocol in ('data', 'about') or r.served_from_cache:
timing_dict = {'requestTime': r.timestamp}
else:
timing_dict = response['timing']
r.timing = Timing.FromDevToolsDict(timing_dict)
self._requests_in_flight[request_id] = (r, RequestTrack._STATUS_RESPONSE)
self._request_id_to_response_received[request_id] = params
def _DataReceived(self, request_id, params):
(r, status) = self._requests_in_flight[request_id]
assert (status == RequestTrack._STATUS_RESPONSE
or status == RequestTrack._STATUS_DATA)
offset = r._TimestampOffsetFromStartMs(params['timestamp'])
r.data_chunks.append((offset, params['encodedDataLength']))
self._requests_in_flight[request_id] = (r, RequestTrack._STATUS_DATA)
def _LoadingFinished(self, request_id, params):
assert request_id in self._requests_in_flight
(r, status) = self._requests_in_flight[request_id]
assert (status == RequestTrack._STATUS_RESPONSE
or status == RequestTrack._STATUS_DATA)
r.encoded_data_length = params['encodedDataLength']
r.timing.loading_finished = r._TimestampOffsetFromStartMs(
params['timestamp'])
self._requests_in_flight[request_id] = (r, RequestTrack._STATUS_FINISHED)
self._FinalizeRequest(request_id)
def _LoadingFailed(self, request_id, _):
assert request_id in self._requests_in_flight
(r, _) = self._requests_in_flight[request_id]
r.failed = True
self._requests_in_flight[request_id] = (r, RequestTrack._STATUS_FINISHED)
self._FinalizeRequest(request_id)
def _FinalizeRequest(self, request_id):
assert request_id in self._requests_in_flight
(request, status) = self._requests_in_flight[request_id]
assert status == RequestTrack._STATUS_FINISHED
del self._requests_in_flight[request_id]
self._completed_requests_by_id[request_id] = request
self._requests.append(request)
def __eq__(self, o):
return self._requests == o._requests
RequestTrack._METHOD_TO_HANDLER = {
'Network.requestWillBeSent': RequestTrack._RequestWillBeSent,
'Network.requestServedFromCache': RequestTrack._RequestServedFromCache,
'Network.responseReceived': RequestTrack._ResponseReceived,
'Network.dataReceived': RequestTrack._DataReceived,
'Network.loadingFinished': RequestTrack._LoadingFinished,
'Network.loadingFailed': RequestTrack._LoadingFailed}
def _CopyFromDictToObject(d, o, key_attrs):
for (key, attr) in key_attrs:
if key in d:
setattr(o, attr, d[key])
if __name__ == '__main__':
import json
import sys
events = json.load(open(sys.argv[1], 'r'))
request_track = RequestTrack(None)
for event in events:
event_method = event['method']
request_track.Handle(event_method, event)
|
bsd-3-clause
|
munnerz/CouchPotatoServer
|
couchpotato/core/notifications/nmj.py
|
75
|
4379
|
import re
import telnetlib
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
log = CPLog(__name__)
autoload = 'NMJ'
class NMJ(Notification):
# noinspection PyMissingConstructor
def __init__(self):
addApiView(self.testNotifyName(), self.test)
addApiView('notify.nmj.auto_config', self.autoConfig)
addEvent('renamer.after', self.addToLibrary)
def autoConfig(self, host = 'localhost', **kwargs):
mount = ''
try:
terminal = telnetlib.Telnet(host)
except Exception:
log.error('Warning: unable to get a telnet session to %s', host)
return self.failed()
log.debug('Connected to %s via telnet', host)
terminal.read_until('sh-3.00# ')
terminal.write('cat /tmp/source\n')
terminal.write('cat /tmp/netshare\n')
terminal.write('exit\n')
tnoutput = terminal.read_all()
match = re.search(r'(.+\.db)\r\n?(.+)(?=sh-3.00# cat /tmp/netshare)', tnoutput)
if match:
database = match.group(1)
device = match.group(2)
log.info('Found NMJ database %s on device %s', (database, device))
else:
log.error('Could not get current NMJ database on %s, NMJ is probably not running!', host)
return self.failed()
if device.startswith('NETWORK_SHARE/'):
match = re.search('.*(?=\r\n?%s)' % (re.escape(device[14:])), tnoutput)
if match:
mount = match.group().replace('127.0.0.1', host)
log.info('Found mounting url on the Popcorn Hour in configuration: %s', mount)
else:
log.error('Detected a network share on the Popcorn Hour, but could not get the mounting url')
return self.failed()
return {
'success': True,
'database': database,
'mount': mount,
}
def addToLibrary(self, message = None, group = None):
if self.isDisabled(): return
if not group: group = {}
host = self.conf('host')
mount = self.conf('mount')
database = self.conf('database')
if mount:
log.debug('Try to mount network drive via url: %s', mount)
try:
self.urlopen(mount)
except:
return False
params = {
'arg0': 'scanner_start',
'arg1': database,
'arg2': 'background',
'arg3': '',
}
params = tryUrlencode(params)
update_url = 'http://%(host)s:8008/metadata_database?%(params)s' % {'host': host, 'params': params}
try:
response = self.urlopen(update_url)
except:
return False
try:
et = etree.fromstring(response)
result = et.findtext('returnValue')
except SyntaxError as e:
log.error('Unable to parse XML returned from the Popcorn Hour: %s', e)
return False
if int(result) > 0:
log.error('Popcorn Hour returned an errorcode: %s', result)
return False
else:
log.info('NMJ started background scan')
return True
def failed(self):
return {
'success': False
}
def test(self, **kwargs):
return {
'success': self.addToLibrary()
}
config = [{
'name': 'nmj',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'nmj',
'label': 'NMJ',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'host',
'default': 'localhost',
},
{
'name': 'database',
},
{
'name': 'mount',
},
],
}
],
}]
|
gpl-3.0
|
luiseduardohdbackup/odoo
|
addons/hr/res_config.py
|
377
|
3452
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class hr_config_settings(osv.osv_memory):
_name = 'hr.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_hr_timesheet_sheet': fields.boolean('Allow timesheets validation by managers',
help ="""This installs the module hr_timesheet_sheet."""),
'module_hr_attendance': fields.boolean('Install attendances feature',
help ="""This installs the module hr_attendance."""),
'module_hr_timesheet': fields.boolean('Manage timesheets',
help ="""This installs the module hr_timesheet."""),
'module_hr_holidays': fields.boolean('Manage holidays, leaves and allocation requests',
help ="""This installs the module hr_holidays."""),
'module_hr_expense': fields.boolean('Manage employees expenses',
help ="""This installs the module hr_expense."""),
'module_hr_recruitment': fields.boolean('Manage the recruitment process',
help ="""This installs the module hr_recruitment."""),
'module_hr_contract': fields.boolean('Record contracts per employee',
help ="""This installs the module hr_contract."""),
'module_hr_evaluation': fields.boolean('Organize employees periodic evaluation',
help ="""This installs the module hr_evaluation."""),
'module_hr_gamification': fields.boolean('Drive engagement with challenges and badges',
help ="""This installs the module hr_gamification."""),
'module_account_analytic_analysis': fields.boolean('Allow invoicing based on timesheets (the sale application will be installed)',
help ="""This installs the module account_analytic_analysis, which will install sales management too."""),
'module_hr_payroll': fields.boolean('Manage payroll',
help ="""This installs the module hr_payroll."""),
}
def onchange_hr_timesheet(self, cr, uid, ids, timesheet, context=None):
""" module_hr_timesheet implies module_hr_attendance """
if timesheet:
return {'value': {'module_hr_attendance': True}}
return {}
def onchange_hr_attendance(self, cr, uid, ids, attendance, context=None):
""" module_hr_timesheet implies module_hr_attendance """
if not attendance:
return {'value': {'module_hr_timesheet': False}}
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
cschenck/blender_sim
|
fluid_sim_deps/blender-2.69/2.69/scripts/freestyle/style_modules/Functions0D.py
|
1
|
3995
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Filename : Functions0D.py
# Authors : Fredo Durand, Stephane Grabli, Francois Sillion, Emmanuel Turquin
# Date : 30/06/2005
# Purpose : Functions (functors) to be used for 0D elements
from freestyle import Curvature2DAngleF0D, CurvePoint, ReadCompleteViewMapPixelF0D, \
ReadSteerableViewMapPixelF0D, UnaryFunction0DDouble, UnaryFunction0DMaterial, \
UnaryFunction0DVec2f
from freestyle import ContextFunctions as CF
import math
import mathutils
class CurveMaterialF0D(UnaryFunction0DMaterial):
# A replacement of the built-in MaterialF0D for stroke creation.
# MaterialF0D does not work with Curves and Strokes.
def __call__(self, inter):
cp = inter.object
assert(isinstance(cp, CurvePoint))
fe = cp.first_svertex.get_fedge(cp.second_svertex)
assert(fe is not None)
return fe.material if fe.is_smooth else fe.material_left
class pyInverseCurvature2DAngleF0D(UnaryFunction0DDouble):
def __call__(self, inter):
func = Curvature2DAngleF0D()
c = func(inter)
return (3.1415 - c)
class pyCurvilinearLengthF0D(UnaryFunction0DDouble):
def __call__(self, inter):
cp = inter.object
assert(isinstance(cp, CurvePoint))
return cp.t2d
## estimate anisotropy of density
class pyDensityAnisotropyF0D(UnaryFunction0DDouble):
def __init__(self,level):
UnaryFunction0DDouble.__init__(self)
self.IsoDensity = ReadCompleteViewMapPixelF0D(level)
self.d0Density = ReadSteerableViewMapPixelF0D(0, level)
self.d1Density = ReadSteerableViewMapPixelF0D(1, level)
self.d2Density = ReadSteerableViewMapPixelF0D(2, level)
self.d3Density = ReadSteerableViewMapPixelF0D(3, level)
def __call__(self, inter):
c_iso = self.IsoDensity(inter)
c_0 = self.d0Density(inter)
c_1 = self.d1Density(inter)
c_2 = self.d2Density(inter)
c_3 = self.d3Density(inter)
cMax = max(max(c_0,c_1), max(c_2,c_3))
cMin = min(min(c_0,c_1), min(c_2,c_3))
if c_iso == 0:
v = 0
else:
v = (cMax-cMin)/c_iso
return v
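## Editor's note (not part of the original file): the anisotropy returned
## above is (cMax - cMin) / c_iso, i.e. the spread of the four steerable
## density responses normalised by the isotropic density; 0 means the local
## density is the same in every sampled direction.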
## Returns the gradient vector for a pixel.
##   l: the level at which one wants to compute the gradient
class pyViewMapGradientVectorF0D(UnaryFunction0DVec2f):
def __init__(self, l):
UnaryFunction0DVec2f.__init__(self)
self._l = l
self._step = math.pow(2,self._l)
def __call__(self, iter):
p = iter.object.point_2d
gx = CF.read_complete_view_map_pixel(self._l, int(p.x+self._step), int(p.y)) - \
CF.read_complete_view_map_pixel(self._l, int(p.x), int(p.y))
gy = CF.read_complete_view_map_pixel(self._l, int(p.x), int(p.y+self._step)) - \
CF.read_complete_view_map_pixel(self._l, int(p.x), int(p.y))
return mathutils.Vector([gx, gy])
class pyViewMapGradientNormF0D(UnaryFunction0DDouble):
def __init__(self, l):
UnaryFunction0DDouble.__init__(self)
self._l = l
self._step = math.pow(2,self._l)
def __call__(self, iter):
p = iter.object.point_2d
gx = CF.read_complete_view_map_pixel(self._l, int(p.x+self._step), int(p.y)) - \
CF.read_complete_view_map_pixel(self._l, int(p.x), int(p.y))
gy = CF.read_complete_view_map_pixel(self._l, int(p.x), int(p.y+self._step)) - \
CF.read_complete_view_map_pixel(self._l, int(p.x), int(p.y))
grad = mathutils.Vector([gx, gy])
return grad.length
|
gpl-3.0
|
ademmers/ansible
|
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py
|
47
|
4042
|
#
# (c) 2018 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import json
from copy import deepcopy
from contextlib import contextmanager
try:
from lxml.etree import fromstring, tostring
except ImportError:
from xml.etree.ElementTree import fromstring, tostring
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.connection import Connection, ConnectionError
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.netconf import (
NetconfConnection,
)
IGNORE_XML_ATTRIBUTE = ()
def get_connection(module):
if hasattr(module, "_netconf_connection"):
return module._netconf_connection
capabilities = get_capabilities(module)
network_api = capabilities.get("network_api")
if network_api == "netconf":
module._netconf_connection = NetconfConnection(module._socket_path)
else:
module.fail_json(msg="Invalid connection type %s" % network_api)
return module._netconf_connection
def get_capabilities(module):
if hasattr(module, "_netconf_capabilities"):
return module._netconf_capabilities
capabilities = Connection(module._socket_path).get_capabilities()
module._netconf_capabilities = json.loads(capabilities)
return module._netconf_capabilities
def lock_configuration(module, target=None):
conn = get_connection(module)
return conn.lock(target=target)
def unlock_configuration(module, target=None):
conn = get_connection(module)
return conn.unlock(target=target)
@contextmanager
def locked_config(module, target=None):
try:
lock_configuration(module, target=target)
yield
finally:
unlock_configuration(module, target=target)
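# Editor's sketch (not part of the original file): typical use of the context
# manager above; `module` is assumed to be an AnsibleModule reachable over a
# netconf connection.
#
#   with locked_config(module, target="candidate"):
#       resp = get_config(module, source="candidate")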
def get_config(module, source, filter=None, lock=False):
conn = get_connection(module)
try:
locked = False
if lock:
conn.lock(target=source)
locked = True
response = conn.get_config(source=source, filter=filter)
except ConnectionError as e:
module.fail_json(
msg=to_text(e, errors="surrogate_then_replace").strip()
)
finally:
if locked:
conn.unlock(target=source)
return response
def get(module, filter, lock=False):
conn = get_connection(module)
try:
locked = False
if lock:
conn.lock(target="running")
locked = True
response = conn.get(filter=filter)
except ConnectionError as e:
module.fail_json(
msg=to_text(e, errors="surrogate_then_replace").strip()
)
finally:
if locked:
conn.unlock(target="running")
return response
def dispatch(module, request):
conn = get_connection(module)
try:
response = conn.dispatch(request)
except ConnectionError as e:
module.fail_json(
msg=to_text(e, errors="surrogate_then_replace").strip()
)
return response
def sanitize_xml(data):
tree = fromstring(
to_bytes(deepcopy(data), errors="surrogate_then_replace")
)
for element in tree.getiterator():
# remove attributes
attribute = element.attrib
if attribute:
for key in list(attribute):
if key not in IGNORE_XML_ATTRIBUTE:
attribute.pop(key)
return to_text(tostring(tree), errors="surrogate_then_replace").strip()
|
gpl-3.0
|
amondot/QGIS
|
python/plugins/processing/algs/lidar/lastools/lastilePro.py
|
9
|
3739
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
lastilePro.py
---------------------
Date : April 2014
Copyright : (C) 2014 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'April 2014'
__copyright__ = '(C) 2014, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterString
class lastilePro(LAStoolsAlgorithm):
TILE_SIZE = "TILE_SIZE"
BUFFER = "BUFFER"
EXTRA_PASS = "EXTRA_PASS"
BASE_NAME = "BASE_NAME"
def defineCharacteristics(self):
self.name = "lastilePro"
self.group = "LAStools Production"
self.addParametersPointInputFolderGUI()
self.addParametersFilesAreFlightlinesGUI()
self.addParametersApplyFileSourceIdGUI()
self.addParameter(ParameterNumber(lastilePro.TILE_SIZE,
self.tr("tile size (side length of square tile)"),
None, None, 1000.0))
self.addParameter(ParameterNumber(lastilePro.BUFFER,
self.tr("buffer around each tile (avoids edge artifacts)"),
None, None, 25.0))
self.addParameter(ParameterBoolean(lastilePro.EXTRA_PASS,
self.tr("more than 2000 tiles"), False))
self.addParametersOutputDirectoryGUI()
self.addParameter(ParameterString(lastilePro.BASE_NAME,
self.tr("tile base name (using sydney.laz creates sydney_274000_4714000.laz)")))
self.addParametersPointOutputFormatGUI()
self.addParametersAdditionalGUI()
self.addParametersVerboseGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lastile")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputFolderCommands(commands)
self.addParametersFilesAreFlightlinesCommands(commands)
self.addParametersApplyFileSourceIdCommands(commands)
tile_size = self.getParameterValue(lastilePro.TILE_SIZE)
commands.append("-tile_size")
commands.append(str(tile_size))
buffer = self.getParameterValue(lastilePro.BUFFER)
if buffer != 0.0:
commands.append("-buffer")
commands.append(str(buffer))
if self.getParameterValue(lastilePro.EXTRA_PASS):
commands.append("-extra_pass")
self.addParametersOutputDirectoryCommands(commands)
base_name = self.getParameterValue(lastilePro.BASE_NAME)
if base_name is not None:
commands.append("-o")
commands.append(base_name)
self.addParametersPointOutputFormatCommands(commands)
self.addParametersAdditionalCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
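# Editor's note (not part of the original file): processAlgorithm() assembles a
# lastile invocation; from the code above the explicit pieces are
#   -tile_size <TILE_SIZE> [-buffer <BUFFER>] [-extra_pass] [-o <BASE_NAME>]
# with the input folder, output directory, point output format and verbosity
# flags contributed by the inherited addParameters*Commands() helpers.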
|
gpl-2.0
|
jkleckner/ansible
|
lib/ansible/inventory/__init__.py
|
1
|
15950
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
import fnmatch
import os
import re
import subprocess
import ansible.constants as C
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
from ansible.inventory.dir import InventoryDirectory
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible import errors
from ansible import utils
class Inventory(object):
"""
Host inventory for ansible.
"""
__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
'_pattern_cache', '_vars_plugins', '_playbook_basedir']
def __init__(self, host_list=C.DEFAULT_HOST_LIST):
        # the host file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded
self.host_list = host_list
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
self._vars_per_host = {}
self._vars_per_group = {}
self._hosts_cache = {}
self._groups_list = {}
self._pattern_cache = {}
# to be set by calling set_playbook_basedir by ansible-playbook
self._playbook_basedir = None
# the inventory object holds a list of groups
self.groups = []
        # a list of host names to restrict current inquiries to
self._restriction = None
self._also_restriction = None
self._subset = None
if isinstance(host_list, basestring):
if "," in host_list:
host_list = host_list.split(",")
host_list = [ h for h in host_list if h and h.strip() ]
if host_list is None:
self.parser = None
elif isinstance(host_list, list):
self.parser = None
all = Group('all')
self.groups = [ all ]
ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?')
for x in host_list:
m = ipv6_re.match(x)
if m:
all.add_host(Host(m.groups()[0], m.groups()[1]))
else:
if ":" in x:
tokens = x.rsplit(":", 1)
                        # if there is ':' in the address, then this is an ipv6 address
if ':' in tokens[0]:
all.add_host(Host(x))
else:
all.add_host(Host(tokens[0], tokens[1]))
else:
all.add_host(Host(x))
elif os.path.exists(host_list):
if os.path.isdir(host_list):
# Ensure basedir is inside the directory
self.host_list = os.path.join(self.host_list, "")
self.parser = InventoryDirectory(filename=host_list)
self.groups = self.parser.groups.values()
elif utils.is_executable(host_list):
self.parser = InventoryScript(filename=host_list)
self.groups = self.parser.groups.values()
else:
self.parser = InventoryParser(filename=host_list)
self.groups = self.parser.groups.values()
utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True)
else:
raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?")
self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ]
def _match(self, str, pattern_str):
if pattern_str.startswith('~'):
return re.search(pattern_str[1:], str)
else:
return fnmatch.fnmatch(str, pattern_str)
def get_hosts(self, pattern="all"):
"""
find all host names matching a pattern string, taking into account any inventory restrictions or
applied subsets.
"""
# process patterns
if isinstance(pattern, list):
pattern = ';'.join(pattern)
patterns = pattern.replace(";",":").split(":")
hosts = self._get_hosts(patterns)
# exclude hosts not in a subset, if defined
if self._subset:
subset = self._get_hosts(self._subset)
hosts = [ h for h in hosts if h in subset ]
# exclude hosts mentioned in any restriction (ex: failed hosts)
if self._restriction is not None:
hosts = [ h for h in hosts if h.name in self._restriction ]
if self._also_restriction is not None:
hosts = [ h for h in hosts if h.name in self._also_restriction ]
return hosts
def _get_hosts(self, patterns):
"""
finds hosts that match a list of patterns. Handles negative
matches as well as intersection matches.
"""
# Host specifiers should be sorted to ensure consistent behavior
pattern_regular = []
pattern_intersection = []
pattern_exclude = []
for p in patterns:
if p.startswith("!"):
pattern_exclude.append(p)
elif p.startswith("&"):
pattern_intersection.append(p)
else:
pattern_regular.append(p)
        # if no regular pattern was given (only exclude and/or intersection
        # patterns were given), default to matching all hosts
if pattern_regular == []:
pattern_regular = ['all']
# when applying the host selectors, run those without the "&" or "!"
# first, then the &s, then the !s.
patterns = pattern_regular + pattern_intersection + pattern_exclude
hosts = []
for p in patterns:
that = self.__get_hosts(p)
if p.startswith("!"):
hosts = [ h for h in hosts if h not in that ]
elif p.startswith("&"):
hosts = [ h for h in hosts if h in that ]
else:
hosts.extend([ h for h in that if h not in hosts ])
return hosts
def __get_hosts(self, pattern):
"""
        finds hosts that positively match a particular pattern.  Does not
take into account negative matches.
"""
if pattern in self._pattern_cache:
return self._pattern_cache[pattern]
(name, enumeration_details) = self._enumeration_info(pattern)
hpat = self._hosts_in_unenumerated_pattern(name)
result = self._apply_ranges(pattern, hpat)
self._pattern_cache[pattern] = result
return result
def _enumeration_info(self, pattern):
"""
returns (pattern, limits) taking a regular pattern and finding out
which parts of it correspond to start/stop offsets. limits is
a tuple of (start, stop) or None
"""
if not "[" in pattern or pattern.startswith('~'):
return (pattern, None)
(first, rest) = pattern.split("[")
rest = rest.replace("]","")
try:
# support selectors like webservers[0]
x = int(rest)
return (first, (x,x))
except:
pass
if "-" in rest:
(left, right) = rest.split("-",1)
return (first, (left, right))
elif ":" in rest:
(left, right) = rest.split(":",1)
return (first, (left, right))
else:
return (first, (rest, rest))
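    # Editor's sketch (not part of the original file): illustrative parses,
    # pattern strings chosen by the editor.
    #
    #   self._enumeration_info('webservers')       -> ('webservers', None)
    #   self._enumeration_info('webservers[0]')    -> ('webservers', (0, 0))
    #   self._enumeration_info('webservers[0-3]')  -> ('webservers', ('0', '3'))
    #   self._enumeration_info('~web.*')           -> ('~web.*', None)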
def _apply_ranges(self, pat, hosts):
"""
given a pattern like foo, that matches hosts, return all of hosts
        given a pattern like foo[0:5], where foo matches hosts, return the first 5 hosts (hosts[0:5])
"""
(loose_pattern, limits) = self._enumeration_info(pat)
if not limits:
return hosts
(left, right) = limits
if left == '':
left = 0
if right == '':
right = 0
left=int(left)
right=int(right)
if left != right:
return hosts[left:right]
else:
return [ hosts[left] ]
def _hosts_in_unenumerated_pattern(self, pattern):
""" Get all host names matching the pattern """
hosts = []
hostnames = set()
# ignore any negative checks here, this is handled elsewhere
pattern = pattern.replace("!","").replace("&", "")
results = []
groups = self.get_groups()
for group in groups:
for host in group.get_hosts():
if pattern == 'all' or self._match(group.name, pattern) or self._match(host.name, pattern):
if host not in results and host.name not in hostnames:
results.append(host)
hostnames.add(host.name)
return results
def clear_pattern_cache(self):
''' called exclusively by the add_host plugin to allow patterns to be recalculated '''
self._pattern_cache = {}
def groups_for_host(self, host):
results = []
groups = self.get_groups()
for group in groups:
for hostn in group.get_hosts():
if host == hostn.name:
results.append(group)
continue
return results
def groups_list(self):
if not self._groups_list:
groups = {}
for g in self.groups:
groups[g.name] = [h.name for h in g.get_hosts()]
ancestors = g.get_ancestors()
for a in ancestors:
if a.name not in groups:
groups[a.name] = [h.name for h in a.get_hosts()]
self._groups_list = groups
return self._groups_list
def get_groups(self):
return self.groups
def get_host(self, hostname):
if hostname not in self._hosts_cache:
self._hosts_cache[hostname] = self._get_host(hostname)
return self._hosts_cache[hostname]
def _get_host(self, hostname):
if hostname in ['localhost','127.0.0.1']:
for host in self.get_group('all').get_hosts():
if host.name in ['localhost', '127.0.0.1']:
return host
else:
for group in self.groups:
for host in group.get_hosts():
if hostname == host.name:
return host
return None
def get_group(self, groupname):
for group in self.groups:
if group.name == groupname:
return group
return None
def get_group_variables(self, groupname):
if groupname not in self._vars_per_group:
self._vars_per_group[groupname] = self._get_group_variables(groupname)
return self._vars_per_group[groupname]
def _get_group_variables(self, groupname):
group = self.get_group(groupname)
if group is None:
raise Exception("group not found: %s" % groupname)
return group.get_variables()
def get_variables(self, hostname):
if hostname not in self._vars_per_host:
self._vars_per_host[hostname] = self._get_variables(hostname)
return self._vars_per_host[hostname]
def _get_variables(self, hostname):
host = self.get_host(hostname)
if host is None:
raise errors.AnsibleError("host not found: %s" % hostname)
vars = {}
vars_results = [ plugin.run(host) for plugin in self._vars_plugins ]
for updated in vars_results:
if updated is not None:
vars.update(updated)
vars.update(host.get_variables())
if self.parser is not None:
vars = utils.combine_vars(vars, self.parser.get_host_variables(host))
return vars
def add_group(self, group):
self.groups.append(group)
self._groups_list = None # invalidate internal cache
def list_hosts(self, pattern="all"):
return [ h.name for h in self.get_hosts(pattern) ]
def list_groups(self):
return sorted([ g.name for g in self.groups ], key=lambda x: x)
# TODO: remove this function
def get_restriction(self):
return self._restriction
def restrict_to(self, restriction):
"""
Restrict list operations to the hosts given in restriction. This is used
to exclude failed hosts in main playbook code, don't use this for other
reasons.
"""
if not isinstance(restriction, list):
restriction = [ restriction ]
self._restriction = restriction
def also_restrict_to(self, restriction):
"""
        Works like restrict_to but offers an additional restriction. Playbooks use this
to implement serial behavior.
"""
if not isinstance(restriction, list):
restriction = [ restriction ]
self._also_restriction = restriction
def subset(self, subset_pattern):
"""
Limits inventory results to a subset of inventory that matches a given
        pattern, such as selecting a given geographic or numeric slice from
        a previous 'hosts' selection that only selected roles, or vice versa.
Corresponds to --limit parameter to ansible-playbook
"""
if subset_pattern is None:
self._subset = None
else:
subset_pattern = subset_pattern.replace(',',':')
subset_pattern = subset_pattern.replace(";",":").split(":")
results = []
# allow Unix style @filename data
for x in subset_pattern:
if x.startswith("@"):
fd = open(x[1:])
results.extend(fd.read().split("\n"))
fd.close()
else:
results.append(x)
self._subset = results
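    # Editor's sketch (not part of the original file): --limit style patterns
    # accepted by subset(), values illustrative.
    #
    #   inventory.subset('webservers:dbservers')   # two groups
    #   inventory.subset('all:!staging')           # everything except staging
    #   inventory.subset('@retry_hosts.txt')       # host names read from a file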
def lift_restriction(self):
""" Do not restrict list operations """
self._restriction = None
def lift_also_restriction(self):
""" Clears the also restriction """
self._also_restriction = None
def is_file(self):
""" did inventory come from a file? """
if not isinstance(self.host_list, basestring):
return False
return os.path.exists(self.host_list)
def basedir(self):
""" if inventory came from a file, what's the directory? """
if not self.is_file():
return None
dname = os.path.dirname(self.host_list)
if dname is None or dname == '' or dname == '.':
cwd = os.getcwd()
return os.path.abspath(cwd)
return os.path.abspath(dname)
def src(self):
""" if inventory came from a file, what's the directory and file name? """
if not self.is_file():
return None
return self.host_list
def playbook_basedir(self):
""" returns the directory of the current playbook """
return self._playbook_basedir
def set_playbook_basedir(self, dir):
"""
sets the base directory of the playbook so inventory plugins can use it to find
variable files and other things.
"""
self._playbook_basedir = dir
|
gpl-3.0
|
longmen21/edx-platform
|
common/djangoapps/terrain/stubs/tests/test_xqueue_stub.py
|
81
|
6098
|
"""
Unit tests for stub XQueue implementation.
"""
import mock
import unittest
import json
import requests
from ..xqueue import StubXQueueService
class FakeTimer(object):
"""
Fake timer implementation that executes immediately.
"""
def __init__(self, delay, func):
self.func = func
def start(self):
self.func()
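# Editor's note (not part of the original file): the stub XQueue delivers its
# grade callback via a Timer; patching that Timer with FakeTimer below makes
# the callback run synchronously inside each test, so assertions on self.post
# can be made immediately.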
class StubXQueueServiceTest(unittest.TestCase):
def setUp(self):
super(StubXQueueServiceTest, self).setUp()
self.server = StubXQueueService()
self.url = "http://127.0.0.1:{0}/xqueue/submit".format(self.server.port)
self.addCleanup(self.server.shutdown)
        # Patch POST requests
patcher = mock.patch('terrain.stubs.xqueue.post')
self.post = patcher.start()
self.addCleanup(patcher.stop)
        # Patch the timer async calls so they execute immediately
patcher = mock.patch('terrain.stubs.xqueue.Timer')
timer = patcher.start()
timer.side_effect = FakeTimer
self.addCleanup(patcher.stop)
def test_grade_request(self):
# Post a submission to the stub XQueue
callback_url = 'http://127.0.0.1:8000/test_callback'
expected_header = self._post_submission(
callback_url, 'test_queuekey', 'test_queue',
json.dumps({
'student_info': 'test',
'grader_payload': 'test',
'student_response': 'test'
})
)
# Check the response we receive
# (Should be the default grading response)
expected_body = json.dumps({'correct': True, 'score': 1, 'msg': '<div></div>'})
self._check_grade_response(callback_url, expected_header, expected_body)
def test_configure_default_response(self):
# Configure the default response for submissions to any queue
response_content = {'test_response': 'test_content'}
self.server.config['default'] = response_content
# Post a submission to the stub XQueue
callback_url = 'http://127.0.0.1:8000/test_callback'
expected_header = self._post_submission(
callback_url, 'test_queuekey', 'test_queue',
json.dumps({
'student_info': 'test',
'grader_payload': 'test',
'student_response': 'test'
})
)
# Check the response we receive
# (Should be the default grading response)
self._check_grade_response(callback_url, expected_header, json.dumps(response_content))
def test_configure_specific_response(self):
        # Configure the XQueue stub response for any submission containing this text
response_content = {'test_response': 'test_content'}
self.server.config['This is only a test.'] = response_content
# Post a submission to the XQueue stub
callback_url = 'http://127.0.0.1:8000/test_callback'
expected_header = self._post_submission(
callback_url, 'test_queuekey', 'test_queue',
json.dumps({'submission': 'This is only a test.'})
)
# Check that we receive the response we configured
self._check_grade_response(callback_url, expected_header, json.dumps(response_content))
def test_multiple_response_matches(self):
# Configure the XQueue stub with two responses that
# match the same submission
self.server.config['test_1'] = {'response': True}
self.server.config['test_2'] = {'response': False}
with mock.patch('terrain.stubs.http.LOGGER') as logger:
# Post a submission to the XQueue stub
callback_url = 'http://127.0.0.1:8000/test_callback'
self._post_submission(
callback_url, 'test_queuekey', 'test_queue',
json.dumps({'submission': 'test_1 and test_2'})
)
# Expect that we do NOT receive a response
# and that an error message is logged
self.assertFalse(self.post.called)
self.assertTrue(logger.error.called)
def _post_submission(self, callback_url, lms_key, queue_name, xqueue_body):
"""
Post a submission to the stub XQueue implementation.
`callback_url` is the URL at which we expect to receive a grade response
`lms_key` is the authentication key sent in the header
`queue_name` is the name of the queue in which to put the submission
`xqueue_body` is the content of the submission
Returns the header (a string) we send with the submission, which can
be used to validate the response we receive from the stub.
"""
# Post a submission to the XQueue stub
grade_request = {
'xqueue_header': json.dumps({
'lms_callback_url': callback_url,
'lms_key': lms_key,
'queue_name': queue_name
}),
'xqueue_body': xqueue_body
}
resp = requests.post(self.url, data=grade_request)
# Expect that the response is success
self.assertEqual(resp.status_code, 200)
# Return the header, so we can authenticate the response we receive
return grade_request['xqueue_header']
def _check_grade_response(self, callback_url, expected_header, expected_body):
"""
Verify that the stub sent a POST request back to us
with the expected data.
`callback_url` is the URL we expect the stub to POST to
`expected_header` is the header (a string) we expect to receive with the grade.
`expected_body` is the content (a string) we expect to receive with the grade.
Raises an `AssertionError` if the check fails.
"""
# Check the response posted back to us
# This is the default response
expected_callback_dict = {
'xqueue_header': expected_header,
'xqueue_body': expected_body,
}
# Check that the POST request was made with the correct params
self.post.assert_called_with(callback_url, data=expected_callback_dict)
|
agpl-3.0
|
Rover-Yu/ali_kernel
|
tools/perf/scripts/python/syscall-counts-by-pid.py
|
34
|
1923
|
# system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by comm/pid.
# If a [comm] or [pid] arg is specified, only syscalls for that comm/pid are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
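# Illustrative output format (syscall names and counts depend on the traced workload):
#
#   comm [pid]/syscalls                          count
#   ----------------------------------------    ----------
#
#   bash [1234]
#     read                                           42
#     write                                          17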
|
gpl-2.0
|
SBTMLab/CyworldClubPictureCrawler
|
cyworldpicture.py
|
1
|
1268
|
#-*- coding: utf-8 -*-
import requests
import json
from collections import OrderedDict
club_id = 0  # TODO: set the target club ID (placeholder; the original leaves this unset)
mainurl = "http://club.cyworld.com/club/board/PhotoViewer/index.asp?club_id=%d"%club_id
header = {
"Content-Type" :"application/x-www-form-urlencoded; charset=utf-8",
"charset" : "utf-8",
"Referer" : "http://club.cyworld.com/club/board/PhotoViewer/index.asp?club_id=%d"%club_id
}
session = requests.Session()
session.get("http://club.cyworld.com/club/board/PhotoViewer/index.asp?club_id=%d"%club_id)
def download_file(url,local_filename):
r = session.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return local_filename
lastseq = ""
while 1:
r = session.get("http://club.cyworld.com/CLUB/Board/PhotoViewer/GetPhotoListByDateJson.asp?lastseq="+lastseq+"&imgcount=30", headers =header)
data = json.loads(r.text)
if "msg" in data:
break
od = OrderedDict(sorted(data.items(),reverse=True))
for d in od:
for img in od[d]["items"]:
print (download_file(img["photoUrl"],img["writeDate"] +u"_" + img["title"].replace("/","-") +u"_" +str(img["itemSeq"]) +".jpg"))
lastseq = str(img["itemSeq"])
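# Assumed response shape for GetPhotoListByDateJson.asp (inferred from the loop
# above, not from any official documentation): a JSON object keyed by date, each
# value holding an "items" list whose entries carry "photoUrl", "writeDate",
# "title" and "itemSeq"; paging continues by sending the last itemSeq back as
# lastseq until the response contains a "msg" key.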
|
mit
|
mancoast/CPythonPyc_test
|
crash/270_test_mhlib.py
|
3
|
11147
|
"""
Tests for the mhlib module
Nick Mathewson
"""
### BUG: This suite doesn't currently test the mime functionality of
### mhlib. It should.
import unittest
from test.test_support import run_unittest, TESTFN, import_module
import os, StringIO
import sys
mhlib = import_module('mhlib', deprecated=True)
if (sys.platform.startswith("win") or sys.platform=="riscos" or
sys.platform.startswith("atheos")):
# mhlib.updateline() renames a file to the name of a file that already
# exists. That causes a reasonable OS <wink> to complain in test_sequence
# here, like the "OSError: [Errno 17] File exists" raised on Windows.
# mhlib's listsubfolders() and listallfolders() do something with
# link counts, and that causes test_listfolders() here to get back
# an empty list from its call of listallfolders().
# The other tests here pass on Windows.
raise unittest.SkipTest("skipped on %s -- " % sys.platform +
"too many Unix assumptions")
_mhroot = TESTFN+"_MH"
_mhpath = os.path.join(_mhroot, "MH")
_mhprofile = os.path.join(_mhroot, ".mh_profile")
def normF(f):
return os.path.join(*f.split('/'))
def writeFile(fname, contents):
dir = os.path.split(fname)[0]
if dir and not os.path.exists(dir):
mkdirs(dir)
f = open(fname, 'w')
f.write(contents)
f.close()
def readFile(fname):
f = open(fname)
r = f.read()
f.close()
return r
def writeProfile(dict):
contents = [ "%s: %s\n" % (k, v) for k, v in dict.iteritems() ]
writeFile(_mhprofile, "".join(contents))
def writeContext(folder):
folder = normF(folder)
writeFile(os.path.join(_mhpath, "context"),
"Current-Folder: %s\n" % folder)
def writeCurMessage(folder, cur):
folder = normF(folder)
writeFile(os.path.join(_mhpath, folder, ".mh_sequences"),
"cur: %s\n"%cur)
def writeMessage(folder, n, headers, body):
folder = normF(folder)
headers = "".join([ "%s: %s\n" % (k, v) for k, v in headers.iteritems() ])
contents = "%s\n%s\n" % (headers,body)
mkdirs(os.path.join(_mhpath, folder))
writeFile(os.path.join(_mhpath, folder, str(n)), contents)
def getMH():
return mhlib.MH(os.path.abspath(_mhpath), _mhprofile)
def sortLines(s):
lines = s.split("\n")
lines = [ line.strip() for line in lines if len(line) >= 2 ]
lines.sort()
return lines
# These next 2 functions are copied from test_glob.py.
def mkdirs(fname):
if os.path.exists(fname) or fname == '':
return
base, file = os.path.split(fname)
mkdirs(base)
os.mkdir(fname)
def deltree(fname):
if not os.path.exists(fname):
return
for f in os.listdir(fname):
fullname = os.path.join(fname, f)
if os.path.isdir(fullname):
deltree(fullname)
else:
try:
os.unlink(fullname)
except:
pass
try:
os.rmdir(fname)
except:
pass
class MhlibTests(unittest.TestCase):
def setUp(self):
deltree(_mhroot)
mkdirs(_mhpath)
writeProfile({'Path' : os.path.abspath(_mhpath),
'Editor': 'emacs',
'ignored-attribute': 'camping holiday'})
# Note: These headers aren't really conformant to RFC822, but
# mhlib shouldn't care about that.
# An inbox with a couple of messages.
writeMessage('inbox', 1,
{'From': 'Mrs. Premise',
'To': 'Mrs. Conclusion',
'Date': '18 July 2001'}, "Hullo, Mrs. Conclusion!\n")
writeMessage('inbox', 2,
{'From': 'Mrs. Conclusion',
'To': 'Mrs. Premise',
'Date': '29 July 2001'}, "Hullo, Mrs. Premise!\n")
# A folder with many messages
for i in range(5, 101)+range(101, 201, 2):
writeMessage('wide', i,
{'From': 'nowhere', 'Subject': 'message #%s' % i},
"This is message number %s\n" % i)
# A deeply nested folder
def deep(folder, n):
writeMessage(folder, n,
{'Subject': 'Message %s/%s' % (folder, n) },
"This is message number %s in %s\n" % (n, folder) )
deep('deep/f1', 1)
deep('deep/f1', 2)
deep('deep/f1', 3)
deep('deep/f2', 4)
deep('deep/f2', 6)
deep('deep', 3)
deep('deep/f2/f3', 1)
deep('deep/f2/f3', 2)
def tearDown(self):
deltree(_mhroot)
def test_basic(self):
writeContext('inbox')
writeCurMessage('inbox', 2)
mh = getMH()
eq = self.assertEquals
eq(mh.getprofile('Editor'), 'emacs')
eq(mh.getprofile('not-set'), None)
eq(mh.getpath(), os.path.abspath(_mhpath))
eq(mh.getcontext(), 'inbox')
mh.setcontext('wide')
eq(mh.getcontext(), 'wide')
eq(readFile(os.path.join(_mhpath, 'context')),
"Current-Folder: wide\n")
mh.setcontext('inbox')
inbox = mh.openfolder('inbox')
eq(inbox.getfullname(),
os.path.join(os.path.abspath(_mhpath), 'inbox'))
eq(inbox.getsequencesfilename(),
os.path.join(os.path.abspath(_mhpath), 'inbox', '.mh_sequences'))
eq(inbox.getmessagefilename(1),
os.path.join(os.path.abspath(_mhpath), 'inbox', '1'))
def test_listfolders(self):
mh = getMH()
eq = self.assertEquals
folders = mh.listfolders()
folders.sort()
eq(folders, ['deep', 'inbox', 'wide'])
folders = mh.listallfolders()
folders.sort()
tfolders = map(normF, ['deep', 'deep/f1', 'deep/f2', 'deep/f2/f3',
'inbox', 'wide'])
tfolders.sort()
eq(folders, tfolders)
folders = mh.listsubfolders('deep')
folders.sort()
eq(folders, map(normF, ['deep/f1', 'deep/f2']))
folders = mh.listallsubfolders('deep')
folders.sort()
eq(folders, map(normF, ['deep/f1', 'deep/f2', 'deep/f2/f3']))
eq(mh.listsubfolders(normF('deep/f2')), [normF('deep/f2/f3')])
eq(mh.listsubfolders('inbox'), [])
eq(mh.listallsubfolders('inbox'), [])
def test_sequence(self):
mh = getMH()
eq = self.assertEquals
writeCurMessage('wide', 55)
f = mh.openfolder('wide')
all = f.listmessages()
eq(all, range(5, 101)+range(101, 201, 2))
eq(f.getcurrent(), 55)
f.setcurrent(99)
eq(readFile(os.path.join(_mhpath, 'wide', '.mh_sequences')),
'cur: 99\n')
def seqeq(seq, val):
eq(f.parsesequence(seq), val)
seqeq('5-55', range(5, 56))
seqeq('90-108', range(90, 101)+range(101, 109, 2))
seqeq('90-108', range(90, 101)+range(101, 109, 2))
seqeq('10:10', range(10, 20))
seqeq('10:+10', range(10, 20))
seqeq('101:10', range(101, 121, 2))
seqeq('cur', [99])
seqeq('.', [99])
seqeq('prev', [98])
seqeq('next', [100])
seqeq('cur:-3', [97, 98, 99])
seqeq('first-cur', range(5, 100))
seqeq('150-last', range(151, 201, 2))
seqeq('prev-next', [98, 99, 100])
lowprimes = [5, 7, 11, 13, 17, 19, 23, 29]
lowcompos = [x for x in range(5, 31) if not x in lowprimes ]
f.putsequences({'cur': [5],
'lowprime': lowprimes,
'lowcompos': lowcompos})
seqs = readFile(os.path.join(_mhpath, 'wide', '.mh_sequences'))
seqs = sortLines(seqs)
eq(seqs, ["cur: 5",
"lowcompos: 6 8-10 12 14-16 18 20-22 24-28 30",
"lowprime: 5 7 11 13 17 19 23 29"])
seqeq('lowprime', lowprimes)
seqeq('lowprime:1', [5])
seqeq('lowprime:2', [5, 7])
seqeq('lowprime:-2', [23, 29])
## Not supported
#seqeq('lowprime:first', [5])
#seqeq('lowprime:last', [29])
#seqeq('lowprime:prev', [29])
#seqeq('lowprime:next', [29])
def test_modify(self):
mh = getMH()
eq = self.assertEquals
mh.makefolder("dummy1")
self.assertIn("dummy1", mh.listfolders())
path = os.path.join(_mhpath, "dummy1")
self.assertTrue(os.path.exists(path))
f = mh.openfolder('dummy1')
def create(n):
msg = "From: foo\nSubject: %s\n\nDummy Message %s\n" % (n,n)
f.createmessage(n, StringIO.StringIO(msg))
create(7)
create(8)
create(9)
eq(readFile(f.getmessagefilename(9)),
"From: foo\nSubject: 9\n\nDummy Message 9\n")
eq(f.listmessages(), [7, 8, 9])
files = os.listdir(path)
files.sort()
eq(files, ['7', '8', '9'])
f.removemessages(['7', '8'])
files = os.listdir(path)
files.sort()
eq(files, [',7', ',8', '9'])
eq(f.listmessages(), [9])
create(10)
create(11)
create(12)
mh.makefolder("dummy2")
f2 = mh.openfolder("dummy2")
eq(f2.listmessages(), [])
f.movemessage(10, f2, 3)
f.movemessage(11, f2, 5)
eq(f.listmessages(), [9, 12])
eq(f2.listmessages(), [3, 5])
eq(readFile(f2.getmessagefilename(3)),
"From: foo\nSubject: 10\n\nDummy Message 10\n")
f.copymessage(9, f2, 4)
eq(f.listmessages(), [9, 12])
eq(readFile(f2.getmessagefilename(4)),
"From: foo\nSubject: 9\n\nDummy Message 9\n")
f.refilemessages([9, 12], f2)
eq(f.listmessages(), [])
eq(f2.listmessages(), [3, 4, 5, 6, 7])
eq(readFile(f2.getmessagefilename(7)),
"From: foo\nSubject: 12\n\nDummy Message 12\n")
# XXX This should check that _copysequences does the right thing.
mh.deletefolder('dummy1')
mh.deletefolder('dummy2')
self.assertNotIn('dummy1', mh.listfolders())
self.assertTrue(not os.path.exists(path))
def test_read(self):
mh = getMH()
eq = self.assertEquals
f = mh.openfolder('inbox')
msg = f.openmessage(1)
# Check some basic stuff from rfc822
eq(msg.getheader('From'), "Mrs. Premise")
eq(msg.getheader('To'), "Mrs. Conclusion")
# Okay, we have the right message. Let's check the stuff from
# mhlib.
lines = sortLines(msg.getheadertext())
eq(lines, ["Date: 18 July 2001",
"From: Mrs. Premise",
"To: Mrs. Conclusion"])
lines = sortLines(msg.getheadertext(lambda h: len(h)==4))
eq(lines, ["Date: 18 July 2001",
"From: Mrs. Premise"])
eq(msg.getbodytext(), "Hullo, Mrs. Conclusion!\n\n")
eq(msg.getbodytext(0), "Hullo, Mrs. Conclusion!\n\n")
# XXXX there should be a better way to reclaim the file handle
msg.fp.close()
del msg
def test_main():
run_unittest(MhlibTests)
if __name__ == "__main__":
test_main()
|
gpl-3.0
|
quheng/scikit-learn
|
sklearn/neighbors/base.py
|
71
|
31147
|
"""Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
----------
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
-------
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
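# Illustration of the 'distance' branch below (this is a private helper, not part
# of the public API): each row of distances is inverted, and any exact match
# (zero distance) dominates its row, e.g. for float inputs
#   _get_weights(np.array([[1.0, 2.0]]), 'distance') -> array([[1. , 0.5]])
#   _get_weights(np.array([[0.0, 2.0]]), 'distance') -> array([[1. , 0. ]])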
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
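# Every query row contributes exactly n_neighbors entries, so the CSR row
# pointer is the arithmetic sequence 0, n_neighbors, 2*n_neighbors, ..., n_nonzero.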
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
|
bsd-3-clause
|
Gateworks/platform-external-chromium_org
|
tools/telemetry/telemetry/page/actions/play.py
|
24
|
1937
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A Telemetry page_action that performs the "play" action on media elements.
Media elements can be specified by a selector attribute. If no selector is
defined, the action attempts to play the first video or audio element on the
page. A selector can also be 'all' to play all media elements.
Other attributes are wait_for_playing and wait_for_ended, which force the
action to wait until the 'playing' and 'ended' events fire, respectively.
"""
from telemetry.core import exceptions
from telemetry.page.actions import media_action
from telemetry.page.actions import page_action
class PlayAction(media_action.MediaAction):
def __init__(self, attributes=None):
super(PlayAction, self).__init__(attributes)
def WillRunAction(self, page, tab):
"""Load the media metrics JS code prior to running the action."""
super(PlayAction, self).WillRunAction(page, tab)
self.LoadJS(tab, 'play.js')
def RunAction(self, page, tab, previous_action):
try:
selector = self.selector if hasattr(self, 'selector') else ''
tab.ExecuteJavaScript('window.__playMedia("%s");' % selector)
timeout = self.wait_timeout if hasattr(self, 'wait_timeout') else 60
# Check if we need to wait for 'playing' event to fire.
if hasattr(self, 'wait_for_playing') and self.wait_for_playing:
self.WaitForEvent(tab, selector, 'playing', timeout)
# Check if we need to wait for 'ended' event to fire.
if hasattr(self, 'wait_for_ended') and self.wait_for_ended:
self.WaitForEvent(tab, selector, 'ended', timeout)
except exceptions.EvaluateException:
raise page_action.PageActionFailed('Cannot play media element(s) with '
'selector = %s.' % selector)
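# Illustrative attribute usage (attribute names come from the module docstring and
# the hasattr checks above; this assumes the MediaAction base class copies the
# attributes dict onto the instance, which the checks suggest):
#
#     action = PlayAction({'selector': '#video_1',
#                          'wait_for_playing': True,
#                          'wait_timeout': 30})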
|
bsd-3-clause
|
sernst/cauldron
|
cauldron/cli/commands/listing/__init__.py
|
1
|
1967
|
import typing
from argparse import ArgumentParser
from cauldron import cli
from cauldron import environ
from cauldron.cli.commands.listing import _lister
from cauldron.cli.commands.listing import _remover
from cauldron.cli.commands.listing import discovery
from cauldron.cli.interaction import autocompletion
NAME = 'list'
DESCRIPTION = (
"""
Displays known Cauldron projects on the local system.
"""
)
def populate(
parser: ArgumentParser,
raw_args: typing.List[str],
assigned_args: dict
):
"""
Populates the command execution argument parser with the arguments
for the command.
:param parser:
ArgumentParser created for the invocation of this command.
:param raw_args:
Raw arguments list parsed from the command line input.
:param assigned_args:
A dictionary of arguments that can be assigned separately from
the ArgumentParser. These can be useful for complex command
situations that a standard ArgumentParser is not adept at
handling.
"""
subs = parser.add_subparsers(dest='action')
subs.add_parser('all')
subs.add_parser('recent')
remover = subs.add_parser('erase')
remover.add_argument('identifier', nargs='?')
remover.add_argument('-y', '--yes', action='store_true')
def execute(
context: cli.CommandContext,
action: str = 'list',
**kwargs
) -> environ.Response:
"""..."""
environ.configs.load()
if action in ['erase']:
return _remover.execute_removal(context, kwargs)
elif action in ['all']:
return discovery.echo_known_projects(context.response)
return _lister.execute_list(context)
def autocomplete(segment: str, line: str, parts: typing.List[str]):
"""..."""
if len(parts) < 2:
return autocompletion.matches(
segment,
parts[-1] if parts else '',
['erase', 'all', 'recent']
)
return []
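# Illustrative shell usage (hypothetical project name; the exact prompt depends on
# the Cauldron interactive environment):
#
#     list                      # default listing of known projects
#     list all                  # every known project
#     list recent               # recent projects
#     list erase my-project -y  # remove a project entry (-y presumably skips confirmation)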
|
mit
|
Manojkumar91/odoo_inresto
|
addons/account/report/account_invoice_report.py
|
7
|
12408
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
import openerp.addons.decimal_precision as dp
from openerp.osv import fields,osv
class account_invoice_report(osv.osv):
_name = "account.invoice.report"
_description = "Invoices Statistics"
_auto = False
_rec_name = 'date'
def _compute_amounts_in_user_currency(self, cr, uid, ids, field_names, args, context=None):
"""Compute the amounts in the currency of the user
"""
if context is None:
context={}
currency_obj = self.pool.get('res.currency')
currency_rate_obj = self.pool.get('res.currency.rate')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
user_currency_id = user.company_id.currency_id.id
currency_rate_id = currency_rate_obj.search(
cr, uid, [
('rate', '=', 1),
'|',
('currency_id.company_id', '=', user.company_id.id),
('currency_id.company_id', '=', False)
], limit=1, context=context)[0]
base_currency_id = currency_rate_obj.browse(cr, uid, currency_rate_id, context=context).currency_id.id
res = {}
ctx = context.copy()
for item in self.browse(cr, uid, ids, context=context):
ctx['date'] = item.date
price_total = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_total, context=ctx)
price_average = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_average, context=ctx)
residual = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.residual, context=ctx)
res[item.id] = {
'user_currency_price_total': price_total,
'user_currency_price_average': price_average,
'user_currency_residual': residual,
}
return res
_columns = {
'date': fields.date('Date', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'product_qty':fields.float('Product Quantity', readonly=True),
'uom_name': fields.char('Reference Unit of Measure', size=128, readonly=True),
'payment_term': fields.many2one('account.payment.term', 'Payment Term', readonly=True),
'period_id': fields.many2one('account.period', 'Force Period', domain=[('state','<>','done')], readonly=True),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'categ_id': fields.many2one('product.category','Product Category', readonly=True),
'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'commercial_partner_id': fields.many2one('res.partner', 'Partner Company', help="Commercial Entity"),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'price_total': fields.float('Total Without Tax', readonly=True),
'user_currency_price_total': fields.function(_compute_amounts_in_user_currency, string="Total Without Tax", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
'user_currency_price_average': fields.function(_compute_amounts_in_user_currency, string="Average Price", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'currency_rate': fields.float('Currency Rate', readonly=True),
'nbr': fields.integer('# of Invoices', readonly=True), # TDE FIXME master: rename into nbr_lines
'type': fields.selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
],'Type', readonly=True),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Done'),
('cancel','Cancelled')
], 'Invoice Status', readonly=True),
'date_due': fields.date('Due Date', readonly=True),
'account_id': fields.many2one('account.account', 'Account',readonly=True),
'account_line_id': fields.many2one('account.account', 'Account Line',readonly=True),
'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',readonly=True),
'residual': fields.float('Total Residual', readonly=True),
'user_currency_residual': fields.function(_compute_amounts_in_user_currency, string="Total Residual", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'country_id': fields.many2one('res.country', 'Country of the Partner Company'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
}
_order = 'date desc'
_depends = {
'account.invoice': [
'account_id', 'amount_total', 'commercial_partner_id', 'company_id',
'currency_id', 'date_due', 'date_invoice', 'fiscal_position',
'journal_id', 'partner_bank_id', 'partner_id', 'payment_term',
'period_id', 'residual', 'state', 'type', 'user_id',
],
'account.invoice.line': [
'account_id', 'invoice_id', 'price_subtotal', 'product_id',
'quantity', 'uos_id', 'account_analytic_id',
],
'product.product': ['product_tmpl_id'],
'product.template': ['categ_id'],
'product.uom': ['category_id', 'factor', 'name', 'uom_type'],
'res.currency.rate': ['currency_id', 'name'],
'res.partner': ['country_id'],
}
def _select(self):
select_str = """
SELECT sub.id, sub.date, sub.product_id, sub.partner_id, sub.country_id, sub.account_analytic_id,
sub.payment_term, sub.period_id, sub.uom_name, sub.currency_id, sub.journal_id,
sub.fiscal_position, sub.user_id, sub.company_id, sub.nbr, sub.type, sub.state,
sub.categ_id, sub.date_due, sub.account_id, sub.account_line_id, sub.partner_bank_id,
sub.product_qty, sub.price_total / cr.rate as price_total, sub.price_average /cr.rate as price_average,
cr.rate as currency_rate, sub.residual / cr.rate as residual, sub.commercial_partner_id as commercial_partner_id
"""
return select_str
def _sub_select(self):
select_str = """
SELECT min(ail.id) AS id,
ai.date_invoice AS date,
ail.product_id, ai.partner_id, ai.payment_term, ai.period_id, ail.account_analytic_id,
u2.name AS uom_name,
ai.currency_id, ai.journal_id, ai.fiscal_position, ai.user_id, ai.company_id,
count(ail.*) AS nbr,
ai.type, ai.state, pt.categ_id, ai.date_due, ai.account_id, ail.account_id AS account_line_id,
ai.partner_bank_id,
SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN (- ail.quantity) / u.factor * u2.factor
ELSE ail.quantity / u.factor * u2.factor
END) AS product_qty,
SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ail.price_subtotal
ELSE ail.price_subtotal
END) AS price_total,
CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN SUM(- ail.price_subtotal)
ELSE SUM(ail.price_subtotal)
END / CASE
WHEN SUM(ail.quantity / u.factor * u2.factor) <> 0::numeric
THEN CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN SUM((- ail.quantity) / u.factor * u2.factor)
ELSE SUM(ail.quantity / u.factor * u2.factor)
END
ELSE 1::numeric
END AS price_average,
CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ai.residual
ELSE ai.residual
END / (SELECT count(*) FROM account_invoice_line l where invoice_id = ai.id) *
count(*) AS residual,
ai.commercial_partner_id as commercial_partner_id,
partner.country_id
"""
return select_str
def _from(self):
from_str = """
FROM account_invoice_line ail
JOIN account_invoice ai ON ai.id = ail.invoice_id
JOIN res_partner partner ON ai.commercial_partner_id = partner.id
LEFT JOIN product_product pr ON pr.id = ail.product_id
left JOIN product_template pt ON pt.id = pr.product_tmpl_id
LEFT JOIN product_uom u ON u.id = ail.uos_id
LEFT JOIN product_uom u2 ON u2.id = pt.uom_id
"""
return from_str
def _group_by(self):
group_by_str = """
GROUP BY ail.product_id, ail.account_analytic_id, ai.date_invoice, ai.id,
ai.partner_id, ai.payment_term, ai.period_id, u2.name, u2.id, ai.currency_id, ai.journal_id,
ai.fiscal_position, ai.user_id, ai.company_id, ai.type, ai.state, pt.categ_id,
ai.date_due, ai.account_id, ail.account_id, ai.partner_bank_id, ai.residual,
ai.amount_total, ai.commercial_partner_id, partner.country_id
"""
return group_by_str
def init(self, cr):
# self._table = account_invoice_report
tools.drop_view_if_exists(cr, self._table)
cr.execute("""CREATE or REPLACE VIEW %s as (
%s
FROM (
%s %s %s
) AS sub
JOIN res_currency_rate cr ON (cr.currency_id = sub.currency_id)
WHERE
cr.id IN (SELECT id
FROM res_currency_rate cr2
WHERE (cr2.currency_id = sub.currency_id)
AND ((sub.date IS NOT NULL AND cr2.name <= sub.date)
OR (sub.date IS NULL AND cr2.name <= NOW()))
ORDER BY name DESC LIMIT 1)
)""" % (
self._table,
self._select(), self._sub_select(), self._from(), self._group_by()))
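# Note on the view definition above: each aggregated line from the sub-select is
# joined to res_currency_rate, and the WHERE clause keeps only the most recent rate
# (ORDER BY name DESC LIMIT 1) dated on or before the invoice date, falling back to
# NOW() when the invoice has no date; the monetary sums are then divided by that rate.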
|
agpl-3.0
|
carlgao/lenga
|
images/lenny64-peon/usr/share/python-support/python-django/django/contrib/auth/management/commands/changepassword.py
|
320
|
1527
|
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
import getpass
class Command(BaseCommand):
help = "Change a user's password for django.contrib.auth."
requires_model_validation = False
def _get_pass(self, prompt="Password: "):
p = getpass.getpass(prompt=prompt)
if not p:
raise CommandError("aborted")
return p
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("need exactly one or zero arguments for username")
if args:
username, = args
else:
username = getpass.getuser()
try:
u = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("user '%s' does not exist" % username)
print "Changing password for user '%s'" % u.username
MAX_TRIES = 3
count = 0
p1, p2 = 1, 2 # To make them initially mismatch.
while p1 != p2 and count < MAX_TRIES:
p1 = self._get_pass()
p2 = self._get_pass("Password (again): ")
if p1 != p2:
print "Passwords do not match. Please try again."
count = count + 1
if count == MAX_TRIES:
raise CommandError("Aborting password change for user '%s' after %s attempts" % (username, count))
u.set_password(p1)
u.save()
return "Password changed successfully for user '%s'" % u.username
|
mit
|
mariansoban/ardupilot
|
libraries/AP_HAL_ChibiOS/hwdef/scripts/STM32F767xx.py
|
2
|
31744
|
#!/usr/bin/env python
'''
these tables are generated from the STM32 datasheet DM00273119.pdf for the
STM32F765xx, STM32F767xx, STM32F768aX and STM32F769xx
'''
# additional build information for ChibiOS
build = {
"CHIBIOS_STARTUP_MK" : "os/common/startup/ARMCMx/compilers/GCC/mk/startup_stm32f7xx.mk",
"CHIBIOS_PLATFORM_MK" : "os/hal/ports/STM32/STM32F7xx/platform.mk"
}
pincount = {
'A': 16,
'B': 16,
'C': 16,
'D': 16,
'E': 16,
'F': 16,
'G': 16,
'H': 16,
'I': 16,
'J': 0,
'K': 0
}
# MCU parameters
mcu = {
# location of MCU serial number
'UDID_START' : 0x1FF0F420,
# ram map, as list of (address, size-kb, flags)
# flags of 1 means DMA-capable
# flags of 2 means faster memory for CPU intensive work
'RAM_MAP' : [
(0x20020000, 384, 0), # SRAM1/SRAM2
# split DTCM in two to allow for fast checking of IS_DMA_SAFE in bouncebuffer code
(0x20000000, 64, 1), # DTCM, DMA safe
(0x20010000, 64, 2), # DTCM, 2nd half, used as fast memory. This lowers memory contention in the EKF code
]
}
DMA_Map = {
# format is (DMA_TABLE, StreamNum, Channel)
"ADC1" : [(2,0,0),(2,4,0)],
"ADC2" : [(2,2,1),(2,3,1)],
"ADC3" : [(2,0,2),(2,1,2)],
"CRYP_IN" : [(2,6,2)],
"CRYP_OUT" : [(2,5,2)],
"DAC1" : [(1,5,7)],
"DAC2" : [(1,6,7)],
"DCMI" : [(2,1,1),(2,7,1)],
"DFSDM1_FLT0" : [(2,0,8),(2,4,8)],
"DFSDM1_FLT1" : [(2,1,8),(2,5,8)],
"DFSDM1_FLT2" : [(2,2,8),(2,6,8)],
"DFSDM1_FLT3" : [(2,3,8),(2,7,8)],
"HASH_IN" : [(2,7,2)],
"I2C1_RX" : [(1,0,1),(1,5,1)],
"I2C1_TX" : [(1,6,1),(1,7,1)],
"I2C2_RX" : [(1,2,7),(1,3,7)],
"I2C2_TX" : [(1,7,7),(1,4,8)],
"I2C3_RX" : [(1,2,3),(1,1,1)],
"I2C3_TX" : [(1,4,3),(1,0,8)],
"I2C4_RX" : [(1,2,2),(1,1,8)],
"I2C4_TX" : [(1,6,8),(1,5,2)],
"JPEG_IN" : [(2,0,9),(2,3,9)],
"JPEG_OUT" : [(2,1,9),(2,4,9)],
"QUADSPI" : [(2,2,11),(2,7,3)],
"SAI1_A" : [(2,1,0),(2,3,0),(2,6,10)],
"SAI1_B" : [(2,0,10),(2,4,1),(2,5,0)],
"SAI2_A" : [(2,2,10),(2,4,3)],
"SAI2_B" : [(2,1,10),(2,6,3),(2,7,0)],
"SDMMC1" : [(2,3,4),(2,6,4)],
"SDMMC2" : [(2,0,11),(2,5,11)],
"SPDIFRX_CS" : [(1,6,0)],
"SPDIFRX_DT" : [(1,1,0)],
"SPI1_RX" : [(2,0,3),(2,2,3)],
"SPI1_TX" : [(2,3,3),(2,5,3)],
"SPI2_RX" : [(1,1,9),(1,3,0)],
"SPI2_TX" : [(1,4,0),(1,6,9)],
"SPI3_RX" : [(1,0,0),(1,2,0)],
"SPI3_TX" : [(1,5,0),(1,7,0)],
"SPI4_RX" : [(2,0,4),(2,3,5)],
"SPI4_TX" : [(2,1,4),(2,2,9),(2,4,5)],
"SPI5_RX" : [(2,3,2),(2,5,7),(2,5,9)],
"SPI5_TX" : [(2,4,2),(2,6,7)],
"SPI6_RX" : [(2,6,1)],
"SPI6_TX" : [(2,5,1)],
"TIM1_CH1" : [(2,1,6),(2,3,6),(2,6,0)],
"TIM1_CH2" : [(2,2,6),(2,6,0)],
"TIM1_CH3" : [(2,6,0),(2,6,6)],
"TIM1_CH4" : [(2,4,6)],
"TIM1_COM" : [(2,4,6)],
"TIM1_TRIG" : [(2,0,6),(2,4,6)],
"TIM1_UP" : [(2,5,6)],
"TIM2_CH1" : [(1,5,3)],
"TIM2_CH2" : [(1,6,3)],
"TIM2_CH3" : [(1,1,3)],
"TIM2_CH4" : [(1,6,3),(1,7,3)],
"TIM2_UP" : [(1,1,3),(1,7,3)],
"TIM3_CH1" : [(1,4,5)],
"TIM3_CH2" : [(1,5,5)],
"TIM3_CH3" : [(1,7,5)],
"TIM3_CH4" : [(1,2,5)],
"TIM3_TRIG" : [(1,4,5)],
"TIM3_UP" : [(1,2,5)],
"TIM4_CH1" : [(1,0,2)],
"TIM4_CH2" : [(1,3,2)],
"TIM4_CH3" : [(1,7,2)],
"TIM4_UP" : [(1,6,2)],
"TIM5_CH1" : [(1,2,6)],
"TIM5_CH2" : [(1,4,6)],
"TIM5_CH3" : [(1,0,6)],
"TIM5_CH4" : [(1,1,6),(1,3,6)],
"TIM5_TRIG" : [(1,1,6),(1,3,6)],
"TIM5_UP" : [(1,0,6),(1,6,6)],
"TIM6_UP" : [(1,1,7)],
"TIM7_UP" : [(1,2,1),(1,4,1)],
"TIM8_CH1" : [(2,2,0),(2,2,7)],
"TIM8_CH2" : [(2,2,0),(2,3,7)],
"TIM8_CH3" : [(2,2,0),(2,4,7)],
"TIM8_CH4" : [(2,7,7)],
"TIM8_COM" : [(2,7,7)],
"TIM8_TRIG" : [(2,7,7)],
"TIM8_UP" : [(2,1,7)],
"UART4_RX" : [(1,2,4)],
"UART4_TX" : [(1,4,4)],
"UART5_RX" : [(1,0,4)],
"UART5_TX" : [(1,7,4)],
"UART7_RX" : [(1,3,5)],
"UART7_TX" : [(1,1,5)],
"UART8_RX" : [(1,6,5)],
"UART8_TX" : [(1,0,5)],
"USART1_RX" : [(2,2,4),(2,5,4)],
"USART1_TX" : [(2,7,4)],
"USART2_RX" : [(1,5,4)],
"USART2_TX" : [(1,6,4)],
"USART3_RX" : [(1,1,4)],
"USART3_TX" : [(1,3,4),(1,4,7)],
"USART6_RX" : [(2,1,5),(2,2,5)],
"USART6_TX" : [(2,6,5),(2,7,5)],
}
AltFunction_map = {
# format is PIN:FUNCTION : AFNUM
"PA0:ETH_MII_CRS" : 11,
"PA0:EVENTOUT" : 15,
"PA0:TIM2_CH1" : 1,
"PA0:TIM2_ETR" : 1,
"PA0:TIM5_CH1" : 2,
"PA0:TIM8_ETR" : 3,
"PA0:UART4_TX" : 8,
"PA0:USART2_CTS" : 7,
"PA0:SAI2_SD_B" : 10,
"PA10:DCMI_D1" : 13,
"PA10:EVENTOUT" : 15,
"PA10:OTG_FS_ID" : 10,
"PA10:TIM1_CH3" : 1,
"PA10:USART1_RX" : 7,
"PA10:LCD_B4" : 9,
"PA10:LCD_B1" : 14,
"PA10:MDIOS_MDIO" : 12,
"PA11:CAN1_RX" : 9,
"PA11:EVENTOUT" : 15,
"PA11:LCD_R4" : 14,
"PA11:OTG_FS_DM" : 10,
"PA11:TIM1_CH4" : 1,
"PA11:USART1_CTS" : 7,
"PA11:SPI2_NSS" : 5,
"PA11:I2S2_WS" : 5,
"PA11:UART4_RX" : 6,
"PA12:CAN1_TX" : 9,
"PA12:EVENTOUT" : 15,
"PA12:LCD_R5" : 14,
"PA12:OTG_FS_DP" : 10,
"PA12:TIM1_ETR" : 1,
"PA12:USART1_RTS" : 7,
"PA12:SPI2_SCK" : 5,
"PA12:I2S2_CK" : 5,
"PA12:UART4_TX" : 6,
"PA12:SAI2_FS_B" : 8,
"PA13:EVENTOUT" : 15,
"PA13:JTMS-SWDIO" : 0,
"PA14:EVENTOUT" : 15,
"PA14:JTCK-SWCLK" : 0,
"PA15:EVENTOUT" : 15,
"PA15:I2S3_WS" : 6,
"PA15:JTDI" : 0,
"PA15:SPI1_NSS" : 5,
"PA15:SPI3_NSS" : 6,
"PA15:TIM2_CH1" : 1,
"PA15:TIM2_ETR" : 1,
"PA15:HDMI-CEC" : 4,
"PA15:SPI6_NSS" : 7,
"PA15:UART4_RTS" : 8,
"PA15:CAN3_TX" : 11,
"PA15:UART7_TX" : 12,
"PA1:ETH_MII_RX_CLK" : 11,
"PA1:ETH_RMII_REF_CLK" : 11,
"PA1:EVENTOUT" : 15,
"PA1:TIM2_CH2" : 1,
"PA1:TIM5_CH2" : 2,
"PA1:UART4_RX" : 8,
"PA1:USART2_RTS" : 7,
"PA1:QUADSPI_BK1_IO3" : 9,
"PA1:SAI2_MCK_B" : 10,
"PA1:LCD_R2" : 14,
"PA2:ETH_MDIO" : 11,
"PA2:EVENTOUT" : 15,
"PA2:TIM2_CH3" : 1,
"PA2:TIM5_CH3" : 2,
"PA2:TIM9_CH1" : 3,
"PA2:USART2_TX" : 7,
"PA2:SAI2_SCK_B" : 8,
"PA2:MDIOS_MDIO" : 12,
"PA2:LCD_R1" : 14,
"PA3:ETH_MII_COL" : 11,
"PA3:EVENTOUT" : 15,
"PA3:LCD_B5" : 14,
"PA3:OTG_HS_ULPI_D0" : 10,
"PA3:TIM2_CH4" : 1,
"PA3:TIM5_CH4" : 2,
"PA3:TIM9_CH2" : 3,
"PA3:USART2_RX" : 7,
"PA3:LCD_B2" : 9,
"PA4:DCMI_HSYNC" : 13,
"PA4:EVENTOUT" : 15,
"PA4:I2S3_WS" : 6,
"PA4:LCD_VSYNC" : 14,
"PA4:OTG_HS_SOF" : 12,
"PA4:SPI1_NSS" : 5,
"PA4:SPI3_NSS" : 6,
"PA4:USART2_CK" : 7,
"PA4:I2S1_WS" : 5,
"PA4:SPI6_NSS" : 8,
"PA5:EVENTOUT" : 15,
"PA5:OTG_HS_ULPI_CK" : 10,
"PA5:SPI1_SCK" : 5,
"PA5:TIM2_CH1" : 1,
"PA5:TIM2_ETR" : 1,
"PA5:TIM8_CH1N" : 3,
"PA5:I2S1_CK" : 5,
"PA5:SPI6_SCK" : 8,
"PA5:LCD_R4" : 14,
"PA6:DCMI_PIXCLK" : 13,
"PA6:EVENTOUT" : 15,
"PA6:LCD_G2" : 14,
"PA6:SPI1_MISO" : 5,
"PA6:TIM13_CH1" : 9,
"PA6:TIM1_BKIN" : 1,
"PA6:TIM3_CH1" : 2,
"PA6:TIM8_BKIN" : 3,
"PA6:SPI6_MISO" : 8,
"PA6:MDIOS_MDC" : 12,
"PA7:ETH_MII_RX_DV" : 11,
"PA7:ETH_RMII_CRS_DV" : 11,
"PA7:EVENTOUT" : 15,
"PA7:SPI1_MOSI" : 5,
"PA7:TIM14_CH1" : 9,
"PA7:TIM1_CH1N" : 1,
"PA7:TIM3_CH2" : 2,
"PA7:TIM8_CH1N" : 3,
"PA7:I2S1_SD" : 5,
"PA7:SPI6_MOSI" : 8,
"PA7:FMC_SDNWE" : 12,
"PA8:EVENTOUT" : 15,
"PA8:I2C3_SCL" : 4,
"PA8:LCD_R6" : 14,
"PA8:MCO1" : 0,
"PA8:OTG_FS_SOF" : 10,
"PA8:TIM1_CH1" : 1,
"PA8:USART1_CK" : 7,
"PA8:TIM8_BKIN2" : 3,
"PA8:CAN3_RX" : 11,
"PA8:UART7_RX" : 12,
"PA8:LCD_B3" : 13,
"PA9:DCMI_D0" : 13,
"PA9:EVENTOUT" : 15,
"PA9:I2C3_SMBA" : 4,
"PA9:TIM1_CH2" : 1,
"PA9:USART1_TX" : 7,
"PA9:SPI2_SCK" : 5,
"PA9:I2S2_CK" : 5,
"PA9:LCD_R5" : 14,
"PB0:ETH_MII_RXD2" : 11,
"PB0:EVENTOUT" : 15,
"PB0:LCD_R3" : 9,
"PB0:OTG_HS_ULPI_D1" : 10,
"PB0:TIM3_CH3" : 2,
"PB0:TIM8_CH2N" : 3,
"PB0:DFSDM1_CKOUT" : 6,
"PB0:UART4_CTS" : 8,
"PB0:LCD_G1" : 14,
"PB10:ETH_MII_RX_ER" : 11,
"PB10:EVENTOUT" : 15,
"PB10:I2C2_SCL" : 4,
"PB10:I2S2_CK" : 5,
"PB10:LCD_G4" : 14,
"PB10:OTG_HS_ULPI_D3" : 10,
"PB10:SPI2_SCK" : 5,
"PB10:TIM2_CH3" : 1,
"PB10:USART3_TX" : 7,
"PB10:DFSDM1_DATIN7" : 6,
"PB10:QUADSPI_BK1_NCS" : 9,
"PB11:ETH_MII_TX_EN" : 11,
"PB11:ETH_RMII_TX_EN" : 11,
"PB11:EVENTOUT" : 15,
"PB11:I2C2_SDA" : 4,
"PB11:LCD_G5" : 14,
"PB11:OTG_HS_ULPI_D4" : 10,
"PB11:TIM2_CH4" : 1,
"PB11:USART3_RX" : 7,
"PB11:DFSDM1_CKIN7" : 6,
"PB11:DSI_TE" : 13,
"PB12:CAN2_RX" : 9,
"PB12:ETH_MII_TXD0" : 11,
"PB12:ETH_RMII_TXD0" : 11,
"PB12:EVENTOUT" : 15,
"PB12:I2C2_SMBA" : 4,
"PB12:I2S2_WS" : 5,
"PB12:OTG_HS_ID" : 12,
"PB12:OTG_HS_ULPI_D5" : 10,
"PB12:SPI2_NSS" : 5,
"PB12:TIM1_BKIN" : 1,
"PB12:USART3_CK" : 7,
"PB12:DFSDM1_DATIN1" : 6,
"PB12:UART5_RX" : 8,
"PB13:CAN2_TX" : 9,
"PB13:ETH_MII_TXD1" : 11,
"PB13:ETH_RMII_TXD1" : 11,
"PB13:EVENTOUT" : 15,
"PB13:I2S2_CK" : 5,
"PB13:OTG_HS_ULPI_D6" : 10,
"PB13:SPI2_SCK" : 5,
"PB13:TIM1_CH1N" : 1,
"PB13:USART3_CTS" : 7,
"PB13:DFSDM1_CKIN1" : 6,
"PB13:UART5_TX" : 8,
"PB14:EVENTOUT" : 15,
"PB14:DFSDM1_DATIN2" : 6,
"PB14:OTG_HS_DM" : 12,
"PB14:SPI2_MISO" : 5,
"PB14:TIM12_CH1" : 9,
"PB14:TIM1_CH2N" : 1,
"PB14:TIM8_CH2N" : 3,
"PB14:USART3_RTS" : 7,
"PB14:USART1_TX" : 4,
"PB14:UART4_RTS" : 8,
"PB14:SDMMC2_D0" : 10,
"PB15:EVENTOUT" : 15,
"PB15:I2S2_SD" : 5,
"PB15:OTG_HS_DP" : 12,
"PB15:RTC_REFIN" : 0,
"PB15:SPI2_MOSI" : 5,
"PB15:TIM12_CH2" : 9,
"PB15:TIM1_CH3N" : 1,
"PB15:TIM8_CH3N" : 3,
"PB15:USART1_RX" : 4,
"PB15:DFSDM1_CKIN2" : 6,
"PB15:UART4_CTS" : 8,
"PB15:SDMMC2_D1" : 10,
"PB1:ETH_MII_RXD3" : 11,
"PB1:EVENTOUT" : 15,
"PB1:LCD_R6" : 9,
"PB1:OTG_HS_ULPI_D2" : 10,
"PB1:TIM3_CH4" : 2,
"PB1:TIM8_CH3N" : 3,
"PB1:DFSDM1_DATIN1" : 6,
"PB1:LCD_G0" : 14,
"PB2:EVENTOUT" : 15,
"PB2:SAI1_SD_A" : 6,
"PB2:SPI3_MOSI" : 7,
"PB2:I2S3_SD" : 7,
"PB2:QUADSPI_CLK" : 9,
"PB2:DFSDM1_CKIN1" : 10,
"PB3:EVENTOUT" : 15,
"PB3:I2S3_CK" : 6,
"PB3:JTDO" : 0,
"PB3:SPI1_SCK" : 5,
"PB3:SPI3_SCK" : 6,
"PB3:TIM2_CH2" : 1,
"PB3:TRACESWO" : 0,
"PB3:I2S1_CK" : 5,
"PB3:SPI6_SCK" : 8,
"PB3:SDMMC2_D2" : 10,
"PB3:CAN3_RX" : 11,
"PB3:UART7_RX" : 12,
"PB4:EVENTOUT" : 15,
"PB4:I2S2_WS" : 7,
"PB4:SPI2_NSS" : 7,
"PB4:NJTRST" : 0,
"PB4:SPI1_MISO" : 5,
"PB4:SPI3_MISO" : 6,
"PB4:TIM3_CH1" : 2,
"PB4:SPI6_MISO" : 8,
"PB4:SDMMC2_D3" : 10,
"PB4:CAN3_TX" : 11,
"PB4:UART7_TX" : 12,
"PB5:CAN2_RX" : 9,
"PB5:DCMI_D10" : 13,
"PB5:ETH_PPS_OUT" : 11,
"PB5:EVENTOUT" : 15,
"PB5:FMC_SDCKE1" : 12,
"PB5:I2C1_SMBA" : 4,
"PB5:I2S3_SD" : 6,
"PB5:OTG_HS_ULPI_D7" : 10,
"PB5:SPI1_MOSI" : 5,
"PB5:SPI3_MOSI" : 6,
"PB5:TIM3_CH2" : 2,
"PB5:UART5_RX" : 1,
"PB5:I2S1_SD" : 5,
"PB5:SPI6_MOSI" : 8,
"PB5:LCD_G7" : 14,
"PB6:CAN2_TX" : 9,
"PB6:DCMI_D5" : 13,
"PB6:EVENTOUT" : 15,
"PB6:FMC_SDNE1" : 12,
"PB6:I2C1_SCL" : 4,
"PB6:TIM4_CH1" : 2,
"PB6:USART1_TX" : 7,
"PB6:UART5_TX" : 1,
"PB6:HDMI-CEC" : 3,
"PB6:DFSDM1_DATIN5" : 6,
"PB6:QUADSPI_BK1_NCS" : 10,
"PB6:I2C4_SCL" : 11,
"PB7:DCMI_VSYNC" : 13,
"PB7:EVENTOUT" : 15,
"PB7:FMC_NL" : 12,
"PB7:I2C1_SDA" : 4,
"PB7:TIM4_CH2" : 2,
"PB7:USART1_RX" : 7,
"PB7:DFSDM1_CKIN5" : 6,
"PB7:I2S4_SDA" : 11,
"PB8:CAN1_RX" : 9,
"PB8:DCMI_D6" : 13,
"PB8:ETH_MII_TXD3" : 11,
"PB8:EVENTOUT" : 15,
"PB8:I2C1_SCL" : 4,
"PB8:LCD_B6" : 14,
"PB8:SDMMC_D4" : 12,
"PB8:TIM10_CH1" : 3,
"PB8:TIM4_CH3" : 2,
"PB8:I2C4_SCL" : 1,
"PB8:DFSDM1_CKIN7" : 6,
"PB8:UART5_RX" : 7,
"PB8:SDMMC2_D4" : 10,
"PB9:CAN1_TX" : 9,
"PB9:DCMI_D7" : 13,
"PB9:EVENTOUT" : 15,
"PB9:I2C1_SDA" : 4,
"PB9:I2S2_WS" : 5,
"PB9:LCD_B7" : 14,
"PB9:SDMMC_D5" : 12,
"PB9:SPI2_NSS" : 5,
"PB9:TIM11_CH1" : 3,
"PB9:TIM4_CH4" : 2,
"PB9:I2C4_SDA" : 1,
"PB9:DFSDM1_DATIN7" : 6,
"PB9:UART5_TX" : 7,
"PB9:SDMMC2_D5" : 10,
"PB9:I2C4_SMBA" : 11,
"PC0:EVENTOUT" : 15,
"PC0:FMC_SDNWE" : 12,
"PC0:OTG_HS_ULPI_STP" : 10,
"PC0:DFSDM1_DATAIN0" : 3,
"PC0:DFSDM1_DATAIN4" : 6,
"PC0:SAI2_FS_B" : 8,
"PC0:LCD_R5" : 14,
"PC10:DCMI_D8" : 13,
"PC10:EVENTOUT" : 15,
"PC10:I2S3_CK" : 6,
"PC10:LCD_R2" : 14,
"PC10:SDMMC_D2" : 12,
"PC10:SPI3_SCK" : 6,
"PC10:UART4_TX" : 8,
"PC10:USART3_TX" : 7,
"PC10:DFSDM1_CKIN5" : 3,
"PC10:QUADSPI_BK1_IO1" : 9,
"PC11:DCMI_D4" : 13,
"PC11:EVENTOUT" : 15,
"PC11:DFSDM1_DATAIN5" : 3,
"PC11:SDMMC_D3" : 12,
"PC11:SPI3_MISO" : 6,
"PC11:UART4_RX" : 8,
"PC11:USART3_RX" : 7,
"PC11:QUADSPI_BK2_NCS" : 9,
"PC12:DCMI_D9" : 13,
"PC12:EVENTOUT" : 15,
"PC12:I2S3_SD" : 6,
"PC12:SDMMC_CK" : 12,
"PC12:SPI3_MOSI" : 6,
"PC12:UART5_TX" : 8,
"PC12:USART3_CK" : 7,
"PC12:TRACED3" : 0,
"PC13:EVENTOUT" : 15,
"PC14:EVENTOUT" : 15,
"PC15:EVENTOUT" : 15,
"PC1:ETH_MDC" : 11,
"PC1:EVENTOUT" : 15,
"PC1:TRACED0" : 0,
"PC1:DFSDM1_DATAIN0" : 3,
"PC1:SPI2_MOSI" : 5,
"PC1:I2S2_SD" : 5,
"PC1:SAI1_SD_A" : 6,
"PC1:DFSDM1_CKIN4" : 10,
"PC1:MDIOS_MDC" : 12,
"PC2:ETH_MII_TXD2" : 11,
"PC2:EVENTOUT" : 15,
"PC2:FMC_SDNE0" : 12,
"PC2:DFSDM1_CKOUT" : 6,
"PC2:OTG_HS_ULPI_DIR" : 10,
"PC2:SPI2_MISO" : 5,
"PC2:DFSDM1_CKIN1" : 3,
"PC3:ETH_MII_TX_CLK" : 11,
"PC3:EVENTOUT" : 15,
"PC3:FMC_SDCKE0" : 12,
"PC3:I2S2_SD" : 5,
"PC3:OTG_HS_ULPI_NXT" : 10,
"PC3:SPI2_MOSI" : 5,
"PC3:DFSDM1_DATAIN1" : 3,
"PC4:ETH_MII_RXD0" : 11,
"PC4:ETH_RMII_RXD0" : 11,
"PC4:EVENTOUT" : 15,
"PC4:DFSDM1_CKIN2" : 3,
"PC4:I2S1_MCK" : 5,
"PC4:SPDIF_RX2" : 8,
"PC4:FMC_SDNE0" : 12,
"PC5:ETH_MII_RXD1" : 11,
"PC5:ETH_RMII_RXD1" : 11,
"PC5:EVENTOUT" : 15,
"PC5:DFSDM1_DATAIN2" : 3,
"PC5:SPDIF_RX3" : 8,
"PC5:FMC_SDCKE0" : 12,
"PC6:DCMI_D0" : 13,
"PC6:EVENTOUT" : 15,
"PC6:I2S2_MCK" : 5,
"PC6:LCD_HSYNC" : 14,
"PC6:SDMMC_D6" : 12,
"PC6:TIM3_CH1" : 2,
"PC6:TIM8_CH1" : 3,
"PC6:USART6_TX" : 8,
"PC6:DFSDM1_CKIN3" : 7,
"PC6:FMC_NWAIT" : 9,
"PC6:SDMMC2_D6" : 10,
"PC7:DCMI_D1" : 13,
"PC7:EVENTOUT" : 15,
"PC7:I2S3_MCK" : 6,
"PC7:LCD_G6" : 14,
"PC7:SDMMC_D7" : 12,
"PC7:TIM3_CH2" : 2,
"PC7:TIM8_CH2" : 3,
"PC7:USART6_RX" : 8,
"PC7:DFSDM1_DATAIN3" : 7,
"PC7:FMC_NE1" : 9,
"PC7:SDMMC2_D7" : 10,
"PC8:DCMI_D2" : 13,
"PC8:EVENTOUT" : 15,
"PC8:SDMMC_D0" : 12,
"PC8:TIM3_CH3" : 2,
"PC8:TIM8_CH3" : 3,
"PC8:USART6_CK" : 8,
"PC8:TRACED1" : 0,
"PC8:UART5_RTS" : 7,
"PC8:FMC_NE2" : 9,
"PC8:FMC_NCE" : 9,
"PC9:DCMI_D3" : 13,
"PC9:EVENTOUT" : 15,
"PC9:I2C3_SDA" : 4,
"PC9:I2S_CKIN" : 5,
"PC9:MCO2" : 0,
"PC9:SDMMC_D1" : 12,
"PC9:TIM3_CH4" : 2,
"PC9:TIM8_CH4" : 3,
"PC9:UART5_CTS" : 7,
"PC9:QUADSPI_BK1_IO0" : 9,
"PC9:LCD_G3" : 10,
"PC9:LCD_B2" : 14,
"PD0:CAN1_RX" : 9,
"PD0:EVENTOUT" : 15,
"PD0:FMC_D2" : 12,
"PD0:DFSDM1_DATAIN6" : 3,
"PD0:DFSDM1_DATAIN7" : 6,
"PD0:UART4_RX" : 8,
"PD10:EVENTOUT" : 15,
"PD10:FMC_D15" : 12,
"PD10:LCD_B3" : 14,
"PD10:USART3_CK" : 7,
"PD10:DFSDM1_CKOUT" : 3,
"PD11:EVENTOUT" : 15,
"PD11:FMC_A16" : 12,
"PD11:USART3_CTS" : 7,
"PD11:I2C4_SMBA" : 4,
"PD11:QUADSPI_BK1_IO0" : 9,
"PD11:SAI2_SD_A" : 10,
"PD11:FMC_CLE" : 12,
"PD12:EVENTOUT" : 15,
"PD12:FMC_A17" : 12,
"PD12:TIM4_CH1" : 2,
"PD12:USART3_RTS" : 7,
"PD12:LPTIM1_IN1" : 3,
"PD12:I2C4_SCL" : 4,
"PD12:QUADSPI_BK1_IO1" : 9,
"PD12:SAI2_FS_A" : 10,
"PD12:FMC_ALE" : 12,
"PD13:EVENTOUT" : 15,
"PD13:FMC_A18" : 12,
"PD13:TIM4_CH2" : 2,
"PD13:LPTIM1_OUT" : 3,
"PD13:I2C4_SDA" : 4,
"PD13:QUADSPI_BK1_IO3" : 9,
"PD13:SAI2_SCK_A" : 10,
"PD14:EVENTOUT" : 15,
"PD14:FMC_D0" : 12,
"PD14:TIM4_CH3" : 2,
"PD14:UART8_CTS" : 8,
"PD15:EVENTOUT" : 15,
"PD15:FMC_D1" : 12,
"PD15:TIM4_CH4" : 2,
"PD15:UART8_RTS" : 8,
"PD1:CAN1_TX" : 9,
"PD1:EVENTOUT" : 15,
"PD1:FMC_D3" : 12,
"PD1:DFSDM1_DATAIN6" : 3,
"PD1:DFSDM1_CKIN7" : 6,
"PD1:UART4_TX" : 8,
"PD2:DCMI_D11" : 13,
"PD2:EVENTOUT" : 15,
"PD2:SDMMC_CMD" : 12,
"PD2:TIM3_ETR" : 2,
"PD2:UART5_RX" : 8,
"PD2:TRACED2" : 0,
"PD3:DCMI_D5" : 13,
"PD3:EVENTOUT" : 15,
"PD3:FMC_CLK" : 12,
"PD3:I2S2_CK" : 5,
"PD3:LCD_G7" : 14,
"PD3:SPI2_SCK" : 5,
"PD3:USART2_CTS" : 7,
"PD3:DFSDM1_CKOUT" : 3,
"PD3:DFSDM1_DATAIN0" : 6,
"PD4:EVENTOUT" : 15,
"PD4:FMC_NOE" : 12,
"PD4:USART2_RTS" : 7,
"PD4:DFSDM1_CKIN0" : 6,
"PD5:EVENTOUT" : 15,
"PD5:FMC_NWE" : 12,
"PD5:USART2_TX" : 7,
"PD6:DCMI_D10" : 13,
"PD6:EVENTOUT" : 15,
"PD6:FMC_NWAIT" : 12,
"PD6:I2S3_SD" : 5,
"PD6:LCD_B2" : 14,
"PD6:SAI1_SD_A" : 6,
"PD6:SPI3_MOSI" : 5,
"PD6:USART2_RX" : 7,
"PD6:DFSDM1_CKIN4" : 3,
"PD6:DFSDM1_DATAIN1" : 10,
"PD6:SDMMC2_CK" : 11,
"PD7:EVENTOUT" : 15,
"PD7:SDMMC2_CMD" : 11,
"PD7:FMC_NE1" : 12,
"PD7:USART2_CK" : 7,
"PD7:DFSDM1_DATAIN4" : 3,
"PD7:SPI1_MOSI" : 5,
"PD7:I2S1_SD" : 5,
"PD7:DFSDM1_CKIN1" : 6,
"PD7:SPDIF_RX0" : 8,
"PD8:EVENTOUT" : 15,
"PD8:FMC_D13" : 12,
"PD8:USART3_TX" : 7,
"PD8:DFSDM1_CKIN3" : 3,
"PD8:SPDIF_RX1" : 8,
"PD9:EVENTOUT" : 15,
"PD9:FMC_D14" : 12,
"PD9:USART3_RX" : 7,
"PD9:DFSDM1_DATAIN3" : 3,
"PE0:DCMI_D2" : 13,
"PE0:EVENTOUT" : 15,
"PE0:FMC_NBL0" : 12,
"PE0:TIM4_ETR" : 2,
"PE0:UART8_RX" : 8,
"PE0:LPTIM1_ETR" : 3,
"PE0:SAI2_MCK_A" : 10,
"PE10:EVENTOUT" : 15,
"PE10:FMC_D7" : 12,
"PE10:TIM1_CH2N" : 1,
"PE10:DFSDM1_DATAIN4" : 6,
"PE10:UART7_CTS" : 8,
"PE10:QUADSPI_BK2_IO3" : 10,
"PE11:EVENTOUT" : 15,
"PE11:FMC_D8" : 12,
"PE11:LCD_G3" : 14,
"PE11:SPI4_NSS" : 5,
"PE11:TIM1_CH2" : 1,
"PE11:DFSDM1_CKIN4" : 6,
"PE11:SAI2_SD_B" : 10,
"PE12:EVENTOUT" : 15,
"PE12:FMC_D9" : 12,
"PE12:LCD_B4" : 14,
"PE12:SPI4_SCK" : 5,
"PE12:TIM1_CH3N" : 1,
"PE12:DFSDM1_DATAIN5" : 6,
"PE12:SAI2_SCK_B" : 10,
"PE13:EVENTOUT" : 15,
"PE13:FMC_D10" : 12,
"PE13:LCD_DE" : 14,
"PE13:SPI4_MISO" : 5,
"PE13:TIM1_CH3" : 1,
"PE13:DFSDM1_CKIN5" : 6,
"PE13:SAI2_FS_B" : 10,
"PE14:EVENTOUT" : 15,
"PE14:FMC_D11" : 12,
"PE14:LCD_CLK" : 14,
"PE14:SPI4_MOSI" : 5,
"PE14:TIM1_CH4" : 1,
"PE14:SAI2_MCK_B" : 10,
"PE15:EVENTOUT" : 15,
"PE15:FMC_D12" : 12,
"PE15:LCD_R7" : 14,
"PE15:TIM1_BKIN" : 1,
"PE1:DCMI_D3" : 13,
"PE1:EVENTOUT" : 15,
"PE1:FMC_NBL1" : 12,
"PE1:UART8_TX" : 8,
"PE1:LPTIM1_IN2" : 3,
"PE2:ETH_MII_TXD3" : 11,
"PE2:EVENTOUT" : 15,
"PE2:FMC_A23" : 12,
"PE2:SAI1_MCLK_A" : 6,
"PE2:SPI4_SCK" : 5,
"PE2:TRACECLK" : 0,
"PE2:QUADSPI_BK1_IO2" : 9,
"PE3:EVENTOUT" : 15,
"PE3:FMC_A19" : 12,
"PE3:SAI1_SD_B" : 6,
"PE3:TRACED0" : 0,
"PE4:DCMI_D4" : 13,
"PE4:EVENTOUT" : 15,
"PE4:FMC_A20" : 12,
"PE4:LCD_B0" : 14,
"PE4:SAI1_FS_A" : 6,
"PE4:SPI4_NSS" : 5,
"PE4:TRACED1" : 0,
"PE4:DFSDM1_DATAIN3" : 10,
"PE5:DCMI_D6" : 13,
"PE5:EVENTOUT" : 15,
"PE5:FMC_A21" : 12,
"PE5:LCD_G0" : 14,
"PE5:SAI1_SCK_A" : 6,
"PE5:SPI4_MISO" : 5,
"PE5:TIM9_CH1" : 3,
"PE5:TRACED2" : 0,
"PE5:DFSDM1_CKIN3" : 10,
"PE6:DCMI_D7" : 13,
"PE6:EVENTOUT" : 15,
"PE6:FMC_A22" : 12,
"PE6:LCD_G1" : 14,
"PE6:SAI1_SD_A" : 6,
"PE6:SPI4_MOSI" : 5,
"PE6:TIM9_CH2" : 3,
"PE6:TRACED3" : 0,
"PE6:TIM1_BKIN2" : 1,
"PE6:SAI2_MCK_B" : 10,
"PE7:EVENTOUT" : 15,
"PE7:FMC_D4" : 12,
"PE7:TIM1_ETR" : 1,
"PE7:UART7_RX" : 8,
"PE7:DFSDM1_DATAIN2" : 6,
"PE7:QUADSPI_BK2_IO0" : 10,
"PE8:EVENTOUT" : 15,
"PE8:FMC_D5" : 12,
"PE8:TIM1_CH1N" : 1,
"PE8:UART7_TX" : 8,
"PE8:DFSDM1_CKIN2" : 6,
"PE8:QUADSPI_BK2_IO1" : 10,
"PE9:EVENTOUT" : 15,
"PE9:FMC_D6" : 12,
"PE9:TIM1_CH1" : 1,
"PE9:DFSDM1_CKOUT" : 6,
"PE9:UART7_RTS" : 8,
"PE9:QUADSPI_BK2_IO2" : 10,
"PF0:EVENTOUT" : 15,
"PF0:FMC_A0" : 12,
"PF0:I2C2_SDA" : 4,
"PF10:DCMI_D11" : 13,
"PF10:EVENTOUT" : 15,
"PF10:QUADSPI_CLK" : 9,
"PF10:LCD_DE" : 14,
"PF11:DCMI_D12" : 13,
"PF11:EVENTOUT" : 15,
"PF11:FMC_SDNRAS" : 12,
"PF11:SPI5_MOSI" : 5,
"PF11:SAI2_SD_B" : 10,
"PF12:EVENTOUT" : 15,
"PF12:FMC_A6" : 12,
"PF13:EVENTOUT" : 15,
"PF13:FMC_A7" : 12,
"PF13:I2C4_SMBA" : 4,
"PF13:DFSDM1_DATAIN6" : 6,
"PF14:EVENTOUT" : 15,
"PF14:FMC_A8" : 12,
"PF14:I2C4_SCL" : 4,
"PF14:DFSDM1_CKIN6" : 6,
"PF15:EVENTOUT" : 15,
"PF15:FMC_A9" : 12,
"PF15:I2C4_SDA" : 4,
"PF1:EVENTOUT" : 15,
"PF1:FMC_A1" : 12,
"PF1:I2C2_SCL" : 4,
"PF2:EVENTOUT" : 15,
"PF2:FMC_A2" : 12,
"PF2:I2C2_SMBA" : 4,
"PF3:EVENTOUT" : 15,
"PF3:FMC_A3" : 12,
"PF4:EVENTOUT" : 15,
"PF4:FMC_A4" : 12,
"PF5:EVENTOUT" : 15,
"PF5:FMC_A5" : 12,
"PF6:EVENTOUT" : 15,
"PF6:QUADSPI_BK1_IO3" : 9,
"PF6:SAI1_SD_B" : 6,
"PF6:SPI5_NSS" : 5,
"PF6:TIM10_CH1" : 3,
"PF6:UART7_RX" : 8,
"PF7:EVENTOUT" : 15,
"PF7:QUADSPI_BK1_IO2" : 9,
"PF7:SAI1_MCLK_B" : 6,
"PF7:SPI5_SCK" : 5,
"PF7:TIM11_CH1" : 3,
"PF7:UART7_TX" : 8,
"PF8:EVENTOUT" : 15,
"PF8:QUADSPI_BK1_IO0" : 10,
"PF8:SAI1_SCK_B" : 6,
"PF8:SPI5_MISO" : 5,
"PF8:TIM13_CH1" : 9,
"PF8:UART7_RTS" : 8,
"PF9:EVENTOUT" : 15,
"PF9:QUADSPI_BK1_IO1" : 10,
"PF9:SAI1_FS_B" : 6,
"PF9:SPI5_MOSI" : 5,
"PF9:TIM14_CH1" : 9,
"PF9:UART7_CTS" : 8,
"PG0:EVENTOUT" : 15,
"PG0:FMC_A10" : 12,
"PG10:DCMI_D2" : 13,
"PG10:EVENTOUT" : 15,
"PG10:SPI1_NSS" : 5,
"PG10:FMC_NE3" : 12,
"PG10:LCD_B2" : 14,
"PG10:LCD_G3" : 9,
"PG10:I2S1_WS" : 5,
"PG10:SAI2_SD_B" : 10,
"PG10:SDMMC2_D1" : 11,
"PG11:DCMI_D3" : 13,
"PG11:ETH_MII_TX_EN" : 11,
"PG11:ETH_RMII_TX_EN" : 11,
"PG11:EVENTOUT" : 15,
"PG11:LCD_B3" : 14,
"PG11:SPI1_SCK" : 5,
"PG11:I2S1_CK" : 5,
"PG11:SPDIF_RX0" : 7,
"PG11:SDMMC2_D2" : 10,
"PG12:EVENTOUT" : 15,
"PG12:FMC_NE4" : 12,
"PG12:LCD_B1" : 14,
"PG12:LCD_B4" : 9,
"PG12:SPI6_MISO" : 5,
"PG12:USART6_RTS" : 8,
"PG12:LPTIM1_IN1" : 3,
"PG12:SPDIF_RX1" : 7,
"PG12:SDMMC2_D3" : 11,
"PG13:ETH_MII_TXD0" : 11,
"PG13:ETH_RMII_TXD0" : 11,
"PG13:EVENTOUT" : 15,
"PG13:FMC_A24" : 12,
"PG13:SPI6_SCK" : 5,
"PG13:USART6_CTS" : 8,
"PG13:TRACED0" : 0,
"PG13:LPTIM1_OUT" : 3,
"PG13:LCD_R0" : 14,
"PG14:ETH_MII_TXD1" : 11,
"PG14:ETH_RMII_TXD1" : 11,
"PG14:EVENTOUT" : 15,
"PG14:FMC_A25" : 12,
"PG14:SPI6_MOSI" : 5,
"PG14:USART6_TX" : 8,
"PG14:QUADSPI_BK2_IO3" : 9,
"PG14:LCD_B0" : 14,
"PG14:TRACED1" : 0,
"PG14:LPTIM1_ETR" : 3,
"PG15:DCMI_D13" : 13,
"PG15:EVENTOUT" : 15,
"PG15:FMC_SDNCAS" : 12,
"PG15:USART6_CTS" : 8,
"PG1:EVENTOUT" : 15,
"PG1:FMC_A11" : 12,
"PG2:EVENTOUT" : 15,
"PG2:FMC_A12" : 12,
"PG3:EVENTOUT" : 15,
"PG3:FMC_A13" : 12,
"PG4:EVENTOUT" : 15,
"PG4:FMC_A14" : 12,
"PG4:FMC_BA0" : 12,
"PG5:EVENTOUT" : 15,
"PG5:FMC_A15" : 12,
"PG5:FMC_BA1" : 12,
"PG6:DCMI_D12" : 13,
"PG6:EVENTOUT" : 15,
"PG6:FMC_NE3" : 12,
"PG6:LCD_R7" : 14,
"PG7:DCMI_D13" : 13,
"PG7:EVENTOUT" : 15,
"PG7:FMC_INT" : 12,
"PG7:LCD_CLK" : 14,
"PG7:USART6_CK" : 8,
"PG7:SAI1_MCLK_A" : 6,
"PG8:ETH_PPS_OUT" : 11,
"PG8:EVENTOUT" : 15,
"PG8:FMC_SDCLK" : 12,
"PG8:SPI6_NSS" : 5,
"PG8:USART6_RTS" : 8,
"PG8:SPDIF_RX2" : 7,
"PG8:LCD_G7" : 14,
"PG9:DCMI_VSYNC" : 13,
"PG9:EVENTOUT" : 15,
"PG9:FMC_NCE3" : 12,
"PG9:FMC_NE2" : 12,
"PG9:USART6_RX" : 8,
"PG9:SPI1_MISO" : 5,
"PG9:SPDIF_RX3" : 7,
"PG9:QUADSPI_BK2_IO2" : 9,
"PG9:SAI2_FS_B" : 10,
"PG9:SDMMC2_D0" : 11,
"PH0:EVENTOUT" : 15,
"PH10:DCMI_D1" : 13,
"PH10:EVENTOUT" : 15,
"PH10:FMC_D18" : 12,
"PH10:LCD_R4" : 14,
"PH10:TIM5_CH1" : 2,
"PH10:I2C4_SMBA" : 4,
"PH11:DCMI_D2" : 13,
"PH11:EVENTOUT" : 15,
"PH11:FMC_D19" : 12,
"PH11:LCD_R5" : 14,
"PH11:TIM5_CH2" : 2,
"PH11:I2C4_SCL" : 4,
"PH12:DCMI_D3" : 13,
"PH12:EVENTOUT" : 15,
"PH12:FMC_D20" : 12,
"PH12:LCD_R6" : 14,
"PH12:TIM5_CH3" : 2,
"PH12:I2C4_SDA" : 4,
"PH13:CAN1_TX" : 9,
"PH13:EVENTOUT" : 15,
"PH13:FMC_D21" : 12,
"PH13:LCD_G2" : 14,
"PH13:TIM8_CH1N" : 3,
"PH13:UART4_TX" : 8,
"PH14:DCMI_D4" : 13,
"PH14:EVENTOUT" : 15,
"PH14:FMC_D22" : 12,
"PH14:LCD_G3" : 14,
"PH14:TIM8_CH2N" : 3,
"PH14:UART4_RX" : 8,
"PH14:CAN1_RX" : 9,
"PH15:DCMI_D11" : 13,
"PH15:EVENTOUT" : 15,
"PH15:FMC_D23" : 12,
"PH15:LCD_G4" : 14,
"PH15:TIM8_CH3N" : 3,
"PH1:EVENTOUT" : 15,
"PH2:ETH_MII_CRS" : 11,
"PH2:EVENTOUT" : 15,
"PH2:FMC_SDCKE0" : 12,
"PH2:LCD_R0" : 14,
"PH2:LPTIM1_IN2" : 3,
"PH2:QUADSPI_BK2_IO1" : 9,
"PH2:SAI2_SCK_B" : 10,
"PH3:ETH_MII_COL" : 11,
"PH3:EVENTOUT" : 15,
"PH3:FMC_SDNE0" : 12,
"PH3:LCD_R1" : 14,
"PH3:QUADSPI_BK2_IO1" : 9,
"PH3:SAI2_MCK_B" : 10,
"PH4:EVENTOUT" : 15,
"PH4:I2C2_SCL" : 4,
"PH4:OTG_HS_ULPI_NXT" : 10,
"PH4:LCD_G5" : 9,
"PH4:LCD_G4" : 14,
"PH5:EVENTOUT" : 15,
"PH5:FMC_SDNWE" : 12,
"PH5:I2C2_SDA" : 4,
"PH5:SPI5_NSS" : 5,
"PH6:DCMI_D8" : 13,
"PH6:FMC_SDNE1" : 12,
"PH6:I2C2_SMBA" : 4,
"PH6:SPI5_SCK" : 5,
"PH6:TIM12_CH1" : 9,
"PH6:ETH_MII_RXD2" : 11,
"PH6:EVENTOUT" : 15,
"PH7:DCMI_D9" : 13,
"PH7:ETH_MII_RXD3" : 11,
"PH7:FMC_SDCKE1" : 12,
"PH7:I2C3_SCL" : 4,
"PH7:SPI5_MISO" : 5,
"PH8:DCMI_HSYNC" : 13,
"PH8:EVENTOUT" : 15,
"PH8:FMC_D16" : 12,
"PH8:I2C3_SDA" : 4,
"PH8:LCD_R2" : 14,
"PH9:DCMI_D0" : 13,
"PH9:EVENTOUT" : 15,
"PH9:FMC_D17" : 12,
"PH9:I2C3_SMBA" : 4,
"PH9:LCD_R3" : 14,
"PH9:TIM12_CH2" : 9,
"PI0:TIM5_CH4" : 2,
"PI0:SPI2_NSS" : 5,
"PI0:I2S2_WS" : 5,
"PI0:FMC_D24" : 12,
"PI0:DCMI_D13" : 13,
"PI0:LCD_G5" : 14,
"PI0:EVENTOUT" : 15,
"PI1:TIM8_BKIN2" : 3,
"PI1:SPI2_SCK" : 5,
"PI1:I2S2_CK" : 5,
"PI1:FMC_D25" : 12,
"PI1:DCMI_D8" : 13,
"PI1:LCD_G6" : 14,
"PI1:EVENTOUT" : 15,
"PI2:TIM8_CH4" : 3,
"PI2:SPI2_MISO" : 5,
"PI2:FMC_D26" : 12,
"PI2:DCMI_D9" : 13,
"PI2:LCD_G7" : 14,
"PI2:EVENTOUT" : 15,
"PI3:TIM8_ETR" : 3,
"PI3:SPI2_MOSI" : 5,
"PI3:I2S2_SD" : 5,
"PI3:FMC_D27" : 12,
"PI3:DCMI_D10" : 13,
"PI3:EVENTOUT" : 15,
"PI4:TIM8_BKIN" : 3,
"PI4:SAI2_MCK_A" : 10,
"PI4:FMC_NBL2" : 12,
"PI4:DCMI_D5" : 13,
"PI4:LCD_B4" : 14,
"PI4:EVENTOUT" : 15,
"PI5:TIM8_CH1" : 3,
"PI5:SAI2_SCA_A" : 10,
"PI5:FMC_NBL3" : 12,
"PI5:DCMI_VSYNC" : 13,
"PI5:LCD_B5" : 14,
"PI5:EVENTOUT" : 15,
"PI6:TIM8_CH2" : 3,
"PI6:SAI2_SD_A" : 10,
"PI6:FMC_D28" : 12,
"PI6:DCMI_D6" : 13,
"PI6:LCD_B6" : 14,
"PI6:EVENTOUT" : 15,
"PI7:TIM8_CH3" : 3,
"PI7:SAI2_FS_A" : 10,
"PI7:FMC_D29" : 12,
"PI7:DCMI_D7" : 13,
"PI7:LCD_B7" : 14,
"PI7:EVENTOUT" : 15,
"PI8:EVENTOUT" : 15,
"PI9:UART4_RX" : 8,
"PI9:CAN1_RX" : 9,
"PI9:FMC_D30" : 12,
"PI9:LCD_VSYNC" : 14,
"PI9:EVENTOUT" : 15,
"PI10:ETH_MII_RX_ER" : 11,
"PI10:FMC_D31" : 12,
"PI10:LCD_HSYNC" : 14,
"PI11:LCD_G6" : 9,
"PI11:OTG_HS_ULPI_DIR" : 10,
"PI11:EVENTOUT" : 15,
"PI12:LCD_HSYNC" : 14,
"PI12:EVENTOUT" : 15,
"PI13:LCD_VSYNC" : 14,
"PI13:EVENTOUT" : 15,
"PI14:LCD_CLK" : 14,
"PI14:EVENTOUT" : 15,
"PI15:LCD_G2" : 9,
"PI15:LCD_R0" : 14,
"PI15:EVENTOUT" : 15,
"PJ0:LCD_R7" : 9,
"PJ0:LCD_R1" : 14,
"PJ0:EVENTOUT" : 15,
"PJ1:LCD_R2" : 14,
"PJ1:EVENTOUT" : 15,
"PJ2:DSI_TE" : 13,
"PJ2:LCD_R3" : 14,
"PJ2:EVENTOUT" : 15,
"PJ3:LCD_R4" : 14,
"PJ3:EVENTOUT" : 15,
"PJ4:LCD_R5" : 14,
"PJ4:EVENTOUT" : 15,
"PJ5:LCD_R6" : 14,
"PJ5:EVENTOUT" : 15,
"PJ6:LCD_R7" : 14,
"PJ6:EVENTOUT" : 15,
"PJ7:LCD_G0" : 14,
"PJ7:EVENTOUT" : 15,
"PJ8:LCD_G1" : 14,
"PJ8:EVENTOUT" : 15,
"PJ9:LCD_G2" : 14,
"PJ9:EVENTOUT" : 15,
"PJ10:LCD_G3" : 14,
"PJ10:EVENTOUT" : 15,
"PJ11:LCD_G4" : 14,
"PJ11:EVENTOUT" : 15,
"PJ12:LCD_G3" : 9,
"PJ12:LCD_B0" : 14,
"PJ12:EVENTOUT" : 15,
"PJ12:LCD_G4" : 9,
"PJ13:LCD_B1" : 14,
"PJ13:EVENTOUT" : 15,
"PJ14:LCD_B2" : 14,
"PJ14:EVENTOUT" : 15,
"PJ15:LCD_B3" : 14,
"PJ15:EVENTOUT" : 15,
"PK0:LCD_G5" : 14,
"PK0:EVENTOUT" : 15,
"PK1:LCD_G6" : 14,
"PK1:EVENTOUT" : 15,
"PK2:LCD_G7" : 14,
"PK2:EVENTOUT" : 15,
"PK3:LCD_B4" : 14,
"PK3:EVENTOUT" : 15,
"PK4:LCD_B5" : 14,
"PK4:EVENTOUT" : 15,
"PK5:LCD_B6" : 14,
"PK5:EVENTOUT" : 15,
"PK6:LCD_B7" : 14,
"PK6:EVENTOUT" : 15,
"PK7:LCD_DE" : 14,
"PK7:EVENTOUT" : 15,
}
ADC1_map = {
# format is PIN : ADC1_CHAN
"PA0" : 0,
"PA1" : 1,
"PA2" : 2,
"PA3" : 3,
"PA4" : 4,
"PA5" : 5,
"PA6" : 6,
"PA7" : 7,
"PB0" : 8,
"PB1" : 9,
"PC0" : 10,
"PC1" : 11,
"PC2" : 12,
"PC3" : 13,
"PC4" : 14,
"PC5" : 15,
}
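# Illustrative helpers (added for clarity; not part of the original tables). They
# only read the AltFunction_map and ADC1_map dicts defined above.
def af_number(pin, function):
    """Return the alternate-function number for a pin/function pairing,
    or None if the pin cannot be routed to that function."""
    return AltFunction_map.get("%s:%s" % (pin, function))
def adc1_channel(pin):
    """Return the ADC1 channel for an analog-capable pin, or None."""
    return ADC1_map.get(pin)
# Examples: af_number("PA9", "USART1_TX") -> 7, adc1_channel("PC4") -> 14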
|
gpl-3.0
|
40223246/2015cd_midterm2
|
static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_suite.py
|
791
|
12066
|
import unittest
import sys
from .support import LoggingResult, TestEquality
### Support code for Test_TestSuite
################################################################
class Test(object):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def test_3(self): pass
def runTest(self): pass
def _mk_TestSuite(*names):
return unittest.TestSuite(Test.Foo(n) for n in names)
################################################################
class Test_TestSuite(unittest.TestCase, TestEquality):
### Set up attributes needed by inherited tests
################################################################
# Used by TestEquality.test_eq
eq_pairs = [(unittest.TestSuite(), unittest.TestSuite())
,(unittest.TestSuite(), unittest.TestSuite([]))
,(_mk_TestSuite('test_1'), _mk_TestSuite('test_1'))]
# Used by TestEquality.test_ne
ne_pairs = [(unittest.TestSuite(), _mk_TestSuite('test_1'))
,(unittest.TestSuite([]), _mk_TestSuite('test_1'))
,(_mk_TestSuite('test_1', 'test_2'), _mk_TestSuite('test_1', 'test_3'))
,(_mk_TestSuite('test_1'), _mk_TestSuite('test_2'))]
################################################################
### /Set up attributes needed by inherited tests
### Tests for TestSuite.__init__
################################################################
# "class TestSuite([tests])"
#
# The tests iterable should be optional
def test_init__tests_optional(self):
suite = unittest.TestSuite()
self.assertEqual(suite.countTestCases(), 0)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# TestSuite should deal with empty tests iterables by allowing the
# creation of an empty suite
def test_init__empty_tests(self):
suite = unittest.TestSuite([])
self.assertEqual(suite.countTestCases(), 0)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# TestSuite should allow any iterable to provide tests
def test_init__tests_from_any_iterable(self):
def tests():
yield unittest.FunctionTestCase(lambda: None)
yield unittest.FunctionTestCase(lambda: None)
suite_1 = unittest.TestSuite(tests())
self.assertEqual(suite_1.countTestCases(), 2)
suite_2 = unittest.TestSuite(suite_1)
self.assertEqual(suite_2.countTestCases(), 2)
suite_3 = unittest.TestSuite(set(suite_1))
self.assertEqual(suite_3.countTestCases(), 2)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# Does TestSuite() also allow other TestSuite() instances to be present
# in the tests iterable?
def test_init__TestSuite_instances_in_tests(self):
def tests():
ftc = unittest.FunctionTestCase(lambda: None)
yield unittest.TestSuite([ftc])
yield unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite(tests())
self.assertEqual(suite.countTestCases(), 2)
################################################################
### /Tests for TestSuite.__init__
# Container types should support the iter protocol
def test_iter(self):
test1 = unittest.FunctionTestCase(lambda: None)
test2 = unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite((test1, test2))
self.assertEqual(list(suite), [test1, test2])
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Presumably an empty TestSuite returns 0?
def test_countTestCases_zero_simple(self):
suite = unittest.TestSuite()
self.assertEqual(suite.countTestCases(), 0)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Presumably an empty TestSuite (even if it contains other empty
# TestSuite instances) returns 0?
def test_countTestCases_zero_nested(self):
class Test1(unittest.TestCase):
def test(self):
pass
suite = unittest.TestSuite([unittest.TestSuite()])
self.assertEqual(suite.countTestCases(), 0)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
def test_countTestCases_simple(self):
test1 = unittest.FunctionTestCase(lambda: None)
test2 = unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite((test1, test2))
self.assertEqual(suite.countTestCases(), 2)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Make sure this holds for nested TestSuite instances, too
def test_countTestCases_nested(self):
class Test1(unittest.TestCase):
def test1(self): pass
def test2(self): pass
test2 = unittest.FunctionTestCase(lambda: None)
test3 = unittest.FunctionTestCase(lambda: None)
child = unittest.TestSuite((Test1('test2'), test2))
parent = unittest.TestSuite((test3, child, Test1('test1')))
self.assertEqual(parent.countTestCases(), 4)
# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
#
# And if there are no tests? What then?
def test_run__empty_suite(self):
events = []
result = LoggingResult(events)
suite = unittest.TestSuite()
suite.run(result)
self.assertEqual(events, [])
# "Note that unlike TestCase.run(), TestSuite.run() requires the
# "result object to be passed in."
def test_run__requires_result(self):
suite = unittest.TestSuite()
try:
suite.run()
except TypeError:
pass
else:
self.fail("Failed to raise TypeError")
# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
def test_run(self):
events = []
result = LoggingResult(events)
class LoggingCase(unittest.TestCase):
def run(self, result):
events.append('run %s' % self._testMethodName)
def test1(self): pass
def test2(self): pass
tests = [LoggingCase('test1'), LoggingCase('test2')]
unittest.TestSuite(tests).run(result)
self.assertEqual(events, ['run test1', 'run test2'])
# "Add a TestCase ... to the suite"
def test_addTest__TestCase(self):
class Foo(unittest.TestCase):
def test(self): pass
test = Foo('test')
suite = unittest.TestSuite()
suite.addTest(test)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(list(suite), [test])
# "Add a ... TestSuite to the suite"
def test_addTest__TestSuite(self):
class Foo(unittest.TestCase):
def test(self): pass
suite_2 = unittest.TestSuite([Foo('test')])
suite = unittest.TestSuite()
suite.addTest(suite_2)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(list(suite), [suite_2])
# "Add all the tests from an iterable of TestCase and TestSuite
# instances to this test suite."
#
# "This is equivalent to iterating over tests, calling addTest() for
# each element"
def test_addTests(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
test_1 = Foo('test_1')
test_2 = Foo('test_2')
inner_suite = unittest.TestSuite([test_2])
def gen():
yield test_1
yield test_2
yield inner_suite
suite_1 = unittest.TestSuite()
suite_1.addTests(gen())
self.assertEqual(list(suite_1), list(gen()))
# "This is equivalent to iterating over tests, calling addTest() for
# each element"
suite_2 = unittest.TestSuite()
for t in gen():
suite_2.addTest(t)
self.assertEqual(suite_1, suite_2)
# "Add all the tests from an iterable of TestCase and TestSuite
# instances to this test suite."
#
# What happens if it doesn't get an iterable?
def test_addTest__noniterable(self):
suite = unittest.TestSuite()
try:
suite.addTests(5)
except TypeError:
pass
else:
self.fail("Failed to raise TypeError")
def test_addTest__noncallable(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTest, 5)
def test_addTest__casesuiteclass(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTest, Test_TestSuite)
self.assertRaises(TypeError, suite.addTest, unittest.TestSuite)
def test_addTests__string(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTests, "foo")
def test_function_in_suite(self):
def f(_):
pass
suite = unittest.TestSuite()
suite.addTest(f)
# when the bug is fixed this line will not crash
suite.run(unittest.TestResult())
def test_basetestsuite(self):
class Test(unittest.TestCase):
wasSetUp = False
wasTornDown = False
@classmethod
def setUpClass(cls):
cls.wasSetUp = True
@classmethod
def tearDownClass(cls):
cls.wasTornDown = True
def testPass(self):
pass
def testFail(self):
fail
class Module(object):
wasSetUp = False
wasTornDown = False
@staticmethod
def setUpModule():
Module.wasSetUp = True
@staticmethod
def tearDownModule():
Module.wasTornDown = True
Test.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.BaseTestSuite()
suite.addTests([Test('testPass'), Test('testFail')])
self.assertEqual(suite.countTestCases(), 2)
result = unittest.TestResult()
suite.run(result)
self.assertFalse(Module.wasSetUp)
self.assertFalse(Module.wasTornDown)
self.assertFalse(Test.wasSetUp)
self.assertFalse(Test.wasTornDown)
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 2)
def test_overriding_call(self):
class MySuite(unittest.TestSuite):
called = False
def __call__(self, *args, **kw):
self.called = True
unittest.TestSuite.__call__(self, *args, **kw)
suite = MySuite()
result = unittest.TestResult()
wrapper = unittest.TestSuite()
wrapper.addTest(suite)
wrapper(result)
self.assertTrue(suite.called)
# reusing results should be permitted even if abominable
self.assertFalse(result._testRunEntered)
if __name__ == '__main__':
unittest.main()
|
agpl-3.0
|
sharkykh/SickRage
|
lib/urllib3/packages/backports/makefile.py
|
339
|
1461
|
# -*- coding: utf-8 -*-
"""
backports.makefile
~~~~~~~~~~~~~~~~~~
Backports the Python 3 ``socket.makefile`` method for use with anything that
wants to create a "fake" socket object.
"""
import io
from socket import SocketIO
def backport_makefile(self, mode="r", buffering=None, encoding=None,
errors=None, newline=None):
"""
Backport of ``socket.makefile`` from Python 3.5.
"""
if not set(mode) <= set(["r", "w", "b"]):
raise ValueError(
"invalid mode %r (only r, w, b allowed)" % (mode,)
)
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._makefile_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
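# Usage sketch (an assumption, not code from this module): any object that
# behaves like a socket and keeps a `_makefile_refs` counter can adopt this
# function as its makefile() method, for example a wrapped TLS socket:
#
#     class WrappedSocket(object):          # hypothetical
#         def __init__(self, sock):
#             self._sock = sock
#             self._makefile_refs = 0
#         # ... recv/recv_into/send/etc. delegating to self._sock ...
#         makefile = backport_makefile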
|
gpl-3.0
|
julian-seward1/servo
|
tests/wpt/web-platform-tests/css/tools/w3ctestlib/Sources.py
|
37
|
57487
|
#!/usr/bin/python
# CSS Test Source Manipulation Library
# Initial code by fantasai, joint copyright 2010 W3C and Microsoft
# Licensed under BSD 3-Clause: <http://www.w3.org/Consortium/Legal/2008/03-bsd-license>
from os.path import basename, exists, join
import os
import filecmp
import shutil
import re
import codecs
import collections
from xml import dom
import html5lib
from html5lib import treebuilders, inputstream
from lxml import etree
from lxml.etree import ParseError
from Utils import getMimeFromExt, escapeToNamedASCII, basepath, isPathInsideBase, relativeURL, assetName
import HTMLSerializer
import warnings
import hashlib
class SourceTree(object):
"""Class that manages structure of test repository source.
Path and filename rules are temporarily hard-coded; this should be configurable.
"""
def __init__(self, repository = None):
self.mTestExtensions = ['.xht', '.html', '.xhtml', '.htm', '.xml', '.svg']
self.mReferenceExtensions = ['.xht', '.html', '.xhtml', '.htm', '.xml', '.png', '.svg']
self.mRepository = repository
def _splitDirs(self, dir):
if ('' == dir):
pathList = []
elif ('/' in dir):
pathList = dir.split('/')
else:
pathList = dir.split(os.path.sep)
return pathList
def _splitPath(self, filePath):
"""split a path into a list of directory names and the file name
paths may come from the os or mercurial, which always uses '/' as the
directory separator
"""
dir, fileName = os.path.split(filePath.lower())
return (self._splitDirs(dir), fileName)
def isTracked(self, filePath):
pathList, fileName = self._splitPath(filePath)
return (not self._isIgnored(pathList, fileName))
def _isApprovedPath(self, pathList):
return ((1 < len(pathList)) and ('approved' == pathList[0]) and (('support' == pathList[1]) or ('src' in pathList)))
def isApprovedPath(self, filePath):
pathList, fileName = self._splitPath(filePath)
return (not self._isIgnored(pathList, fileName)) and self._isApprovedPath(pathList)
def _isIgnoredPath(self, pathList):
return (('.hg' in pathList) or ('.git' in pathList) or
('.svn' in pathList) or ('cvs' in pathList) or
('incoming' in pathList) or ('work-in-progress' in pathList) or
('data' in pathList) or ('archive' in pathList) or
('reports' in pathList) or ('tools' == pathList[0]) or
('test-plan' in pathList) or ('test-plans' in pathList))
def _isIgnored(self, pathList, fileName):
if (pathList): # ignore files in root
return (self._isIgnoredPath(pathList) or
fileName.startswith('.directory') or ('lock' == fileName) or
('.ds_store' == fileName) or
fileName.startswith('.hg') or fileName.startswith('.git') or
('sections.dat' == fileName) or ('get-spec-sections.pl' == fileName))
return True
def isIgnored(self, filePath):
pathList, fileName = self._splitPath(filePath)
return self._isIgnored(pathList, fileName)
def isIgnoredDir(self, dir):
pathList = self._splitDirs(dir)
return self._isIgnoredPath(pathList)
def _isToolPath(self, pathList):
return ('tools' in pathList)
def _isTool(self, pathList, fileName):
return self._isToolPath(pathList)
def isTool(self, filePath):
pathList, fileName = self._splitPath(filePath)
return (not self._isIgnored(pathList, fileName)) and self._isTool(pathList, fileName)
def _isSupportPath(self, pathList):
return ('support' in pathList)
def _isSupport(self, pathList, fileName):
return (self._isSupportPath(pathList) or
((not self._isTool(pathList, fileName)) and
(not self._isReference(pathList, fileName)) and
(not self._isTestCase(pathList, fileName))))
def isSupport(self, filePath):
pathList, fileName = self._splitPath(filePath)
return (not self._isIgnored(pathList, fileName)) and self._isSupport(pathList, fileName)
def _isReferencePath(self, pathList):
return (('reftest' in pathList) or ('reference' in pathList))
def _isReference(self, pathList, fileName):
if ((not self._isSupportPath(pathList)) and (not self._isToolPath(pathList))):
baseName, fileExt = os.path.splitext(fileName)[:2]
if (bool(re.search('(^ref-|^notref-).+', baseName)) or
bool(re.search('.+(-ref[0-9]*$|-notref[0-9]*$)', baseName)) or
('-ref-' in baseName) or ('-notref-' in baseName)):
return (fileExt in self.mReferenceExtensions)
if (self._isReferencePath(pathList)):
return (fileExt in self.mReferenceExtensions)
return False
def isReference(self, filePath):
pathList, fileName = self._splitPath(filePath)
return (not self._isIgnored(pathList, fileName)) and self._isReference(pathList, fileName)
def isReferenceAnywhere(self, filePath):
pathList, fileName = self._splitPath(filePath)
return self._isReference(pathList, fileName)
def _isTestCase(self, pathList, fileName):
if ((not self._isToolPath(pathList)) and (not self._isSupportPath(pathList)) and (not self._isReference(pathList, fileName))):
fileExt = os.path.splitext(fileName)[1]
return (fileExt in self.mTestExtensions)
return False
def isTestCase(self, filePath):
pathList, fileName = self._splitPath(filePath)
return (not self._isIgnored(pathList, fileName)) and self._isTestCase(pathList, fileName)
def getAssetName(self, filePath):
pathList, fileName = self._splitPath(filePath)
if (self._isReference(pathList, fileName) or self._isTestCase(pathList, fileName)):
return assetName(fileName)
return fileName.lower() # support files keep full name
def getAssetType(self, filePath):
pathList, fileName = self._splitPath(filePath)
if (self._isReference(pathList, fileName)):
return intern('reference')
if (self._isTestCase(pathList, fileName)):
return intern('testcase')
if (self._isTool(pathList, fileName)):
return intern('tool')
return intern('support')
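# Illustrative sketch (not part of the original class): how the classification
# helpers above behave for a few hypothetical repository paths.
#
#     tree = SourceTree()
#     tree.isTestCase('approved/css21/src/margin-001.xht')       # True
#     tree.isReference('approved/css21/src/margin-001-ref.xht')  # True
#     tree.isSupport('approved/css21/support/blue.png')          # True
#     tree.isTool('approved/css21/tools/gen.py')                 # True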
class SourceCache:
"""Cache for FileSource objects. Supports one FileSource object
per sourcepath.
"""
def __init__(self, sourceTree):
self.__cache = {}
self.sourceTree = sourceTree
def generateSource(self, sourcepath, relpath, data = None):
"""Return a FileSource or derivative based on the extensionMap.
Uses a cache to avoid creating more than one of the same object:
does not support creating two FileSources with the same sourcepath;
asserts if this is tried. (.htaccess files are not cached.)
Cache is bypassed if loading from a change context
"""
if ((None == data) and self.__cache.has_key(sourcepath)):
source = self.__cache[sourcepath]
assert relpath == source.relpath
return source
if basename(sourcepath) == '.htaccess':
return ConfigSource(self.sourceTree, sourcepath, relpath, data)
mime = getMimeFromExt(sourcepath)
if (mime == 'application/xhtml+xml'):
source = XHTMLSource(self.sourceTree, sourcepath, relpath, data)
elif (mime == 'text/html'):
source = HTMLSource(self.sourceTree, sourcepath, relpath, data)
elif (mime == 'image/svg+xml'):
source = SVGSource(self.sourceTree, sourcepath, relpath, data)
elif (mime == 'application/xml'):
source = XMLSource(self.sourceTree, sourcepath, relpath, data)
else:
source = FileSource(self.sourceTree, sourcepath, relpath, mime, data)
if (None == data):
self.__cache[sourcepath] = source
return source
class SourceSet:
"""Set of FileSource objects. No two FileSources of the same type in the set may
have the same name (except .htaccess files, which are merged).
"""
def __init__(self, sourceCache):
self.sourceCache = sourceCache
self.pathMap = {} # type/name -> source
def __len__(self):
return len(self.pathMap)
def _keyOf(self, source):
return source.type() + '/' + source.keyName()
def __contains__(self, source):
return self._keyOf(source) in self.pathMap
def iter(self):
"""Iterate over FileSource objects in SourceSet.
"""
return self.pathMap.itervalues()
def addSource(self, source, ui):
"""Add FileSource `source`. Throws exception if we already have
a FileSource with the same path relpath but different contents.
(ConfigSources are exempt from this requirement.)
"""
cachedSource = self.pathMap.get(self._keyOf(source))
if not cachedSource:
self.pathMap[self._keyOf(source)] = source
else:
if source != cachedSource:
if isinstance(source, ConfigSource):
cachedSource.append(source)
else:
ui.warn("File merge mismatch %s vs %s for %s\n" % \
(cachedSource.sourcepath, source.sourcepath, source.name()))
def add(self, sourcepath, relpath, ui):
"""Generate and add FileSource from sourceCache. Return the resulting
FileSource.
Throws exception if we already have a FileSource with the same path
relpath but different contents.
"""
source = self.sourceCache.generateSource(sourcepath, relpath)
self.addSource(source, ui)
return source
@staticmethod
def combine(a, b, ui):
"""Merges a and b, and returns whichever one contains the merger (which
one is chosen based on merge efficiency). Can accept None as an argument.
"""
if not (a and b):
return a or b
if len(a) < len(b):
return b.merge(a, ui)
return a.merge(b, ui)
def merge(self, other, ui):
"""Merge sourceSet's contents into this SourceSet.
Throws a RuntimeError if there's a sourceCache mismatch.
Throws an Exception if two files with the same relpath mismatch.
Returns merge result (i.e. self)
"""
if self.sourceCache is not other.sourceCache:
raise RuntimeError
for source in other.pathMap.itervalues():
self.addSource(source, ui)
return self
def adjustContentPaths(self, format):
for source in self.pathMap.itervalues():
source.adjustContentPaths(format)
def write(self, format):
"""Write files out through OutputFormat `format`.
"""
for source in self.pathMap.itervalues():
format.write(source)
class StringReader(object):
"""Wrapper around a string to give it a file-like api
"""
def __init__(self, string):
self.mString = string
self.mIndex = 0
def read(self, maxSize = None):
if (self.mIndex < len(self.mString)):
if (maxSize and (0 < maxSize)):
slice = self.mString[self.mIndex:self.mIndex + maxSize]
self.mIndex += len(slice)
return slice
else:
self.mIndex = len(self.mString)
return self.mString
return ''
class NamedDict(object):
def get(self, key):
if (key in self):
return self[key]
return None
def __eq__(self, other):
for key in self.__slots__:
if (self[key] != other[key]):
return False
return True
def __ne__(self, other):
for key in self.__slots__:
if (self[key] != other[key]):
return True
return False
def __len__(self):
return len(self.__slots__)
def __iter__(self):
return iter(self.__slots__)
def __contains__(self, key):
return (key in self.__slots__)
def copy(self):
clone = self.__class__()
for key in self.__slots__:
clone[key] = self[key]
return clone
def keys(self):
return self.__slots__
def has_key(self, key):
return (key in self)
def items(self):
return [(key, self[key]) for key in self.__slots__]
def iteritems(self):
return iter(self.items())
def iterkeys(self):
return self.__iter__()
def itervalues(self):
return iter([self[key] for key in self.__slots__])
def __str__(self):
return '{ ' + ', '.join([key + ': ' + str(self[key]) for key in self.__slots__]) + ' }'
class Metadata(NamedDict):
__slots__ = ('name', 'title', 'asserts', 'credits', 'reviewers', 'flags', 'links', 'references', 'revision', 'selftest', 'scripttest')
def __init__(self, name = None, title = None, asserts = [], credits = [], reviewers = [], flags = [], links = [],
references = [], revision = None, selftest = True, scripttest = False):
self.name = name
self.title = title
self.asserts = asserts
self.credits = credits
self.reviewers = reviewers
self.flags = flags
self.links = links
self.references = references
self.revision = revision
self.selftest = selftest
self.scripttest = scripttest
def __getitem__(self, key):
if ('name' == key):
return self.name
if ('title' == key):
return self.title
if ('asserts' == key):
return self.asserts
if ('credits' == key):
return self.credits
if ('reviewers' == key):
return self.reviewers
if ('flags' == key):
return self.flags
if ('links' == key):
return self.links
if ('references' == key):
return self.references
if ('revision' == key):
return self.revision
if ('selftest' == key):
return self.selftest
if ('scripttest' == key):
return self.scripttest
return None
def __setitem__(self, key, value):
if ('name' == key):
self.name = value
elif ('title' == key):
self.title = value
elif ('asserts' == key):
self.asserts = value
elif ('credits' == key):
self.credits = value
elif ('reviewers' == key):
self.reviewers = value
elif ('flags' == key):
self.flags = value
elif ('links' == key):
self.links = value
elif ('references' == key):
self.references = value
elif ('revision' == key):
self.revision = value
elif ('selftest' == key):
self.selftest = value
elif ('scripttest' == key):
self.scripttest = value
else:
raise KeyError()
class ReferenceData(NamedDict):
__slots__ = ('name', 'type', 'relpath', 'repopath')
def __init__(self, name = None, type = None, relpath = None, repopath = None):
self.name = name
self.type = type
self.relpath = relpath
self.repopath = repopath
def __getitem__(self, key):
if ('name' == key):
return self.name
if ('type' == key):
return self.type
if ('relpath' == key):
return self.relpath
if ('repopath' == key):
return self.repopath
return None
def __setitem__(self, key, value):
if ('name' == key):
self.name = value
elif ('type' == key):
self.type = value
elif ('relpath' == key):
self.relpath = value
elif ('repopath' == key):
self.repopath = value
else:
raise KeyError()
UserData = collections.namedtuple('UserData', ('name', 'link'))
class LineString(str):
def __new__(cls, value, line):
self = str.__new__(cls, value)
self.line = line
return self
def lineValue(self):
return 'Line ' + str(self.line) + ': ' + str.__str__(self) if (self.line) else str.__str__(self)
class FileSource:
"""Object representing a file. Two FileSources are equal if they represent
the same file contents. It is recommended to use a SourceCache to generate
FileSources.
"""
def __init__(self, sourceTree, sourcepath, relpath, mimetype = None, data = None):
"""Init FileSource from source path. Give it relative path relpath.
`mimetype` should be the canonical MIME type for the file, if known.
If `mimetype` is None, guess type from file extension, defaulting to
the None key's value in extensionMap.
`data`, if provided, is the contents of the file. Otherwise the file is read
from disk.
"""
self.sourceTree = sourceTree
self.sourcepath = sourcepath
self.relpath = relpath
self.mimetype = mimetype or getMimeFromExt(sourcepath)
self._data = data
self.errors = None
self.encoding = 'utf-8'
self.refs = {}
self.scripts = {}
self.metadata = None
self.metaSource = None
def __eq__(self, other):
if not isinstance(other, FileSource):
return False
return self.sourcepath == other.sourcepath or \
filecmp.cmp(self.sourcepath, other.sourcepath)
def __ne__(self, other):
return not self == other
def __cmp__(self, other):
return cmp(self.name(), other.name())
def name(self):
return self.sourceTree.getAssetName(self.sourcepath)
def keyName(self):
if ('support' == self.type()):
return os.path.relpath(self.relpath, 'support')
return self.name()
def type(self):
return self.sourceTree.getAssetType(self.sourcepath)
def relativeURL(self, other):
return relativeURL(self.relpath, other.relpath)
def data(self):
"""Return file contents as a byte string."""
if (self._data is None):
self._data = open(self.sourcepath, 'r').read()
if (self._data.startswith(codecs.BOM_UTF8)):
self.encoding = 'utf-8-sig' # XXX look for other unicode BOMs
return self._data
def unicode(self):
try:
return self.data().decode(self.encoding)
except UnicodeDecodeError, e:
return None
def parse(self):
"""Parses and validates FileSource data from sourcepath."""
self.loadMetadata()
def validate(self):
"""Ensure data is loaded from sourcepath."""
self.parse()
def adjustContentPaths(self, format):
"""Adjust any paths in file content for output format
XXX need to account for group paths"""
if (self.refs):
seenRefs = {}
seenRefs[self.sourcepath] = '=='
def adjustReferences(source):
newRefs = {}
for refName in source.refs:
refType, refPath, refNode, refSource = source.refs[refName]
if refSource:
refPath = relativeURL(format.dest(self.relpath), format.dest(refSource.relpath))
if (refSource.sourcepath not in seenRefs):
seenRefs[refSource.sourcepath] = refType
adjustReferences(refSource)
else:
refPath = relativeURL(format.dest(self.relpath), format.dest(refPath))
if (refPath != refNode.get('href')):
refNode.set('href', refPath)
newRefs[refName] = (refType, refPath, refNode, refSource) # update path in metadata
source.refs = newRefs
adjustReferences(self)
if (self.scripts): # force testharness.js scripts to absolute path
for src in self.scripts:
if (src.endswith('/resources/testharness.js')): # accept relative paths to testharness.js
scriptNode = self.scripts[src]
scriptNode.set('src', '/resources/testharness.js')
elif (src.endswith('/resources/testharnessreport.js')):
scriptNode = self.scripts[src]
scriptNode.set('src', '/resources/testharnessreport.js')
def write(self, format):
"""Writes FileSource.data() out to `self.relpath` through Format `format`."""
data = self.data()
f = open(format.dest(self.relpath), 'w')
f.write(data)
if (self.metaSource):
self.metaSource.write(format) # XXX need to get output path from format, but not let it choose actual format
def compact(self):
"""Clears all cached data, preserves computed data."""
pass
def revision(self):
"""Returns hash of the contents of this file and any related file, references, support files, etc.
XXX also needs to account for .meta file
"""
sha = hashlib.sha1()
sha.update(self.data())
seenRefs = set(self.sourcepath)
def hashReference(source):
for refName in source.refs:
refSource = source.refs[refName][3]
if (refSource and (refSource.sourcepath not in seenRefs)):
sha.update(refSource.data())
seenRefs.add(refSource.sourcepath)
hashReference(refSource)
hashReference(self)
return sha.hexdigest()
def loadMetadata(self):
"""Look for .meta file and load any metadata from it if present
"""
pass
def augmentMetadata(self, next=None, prev=None, reference=None, notReference=None):
if (self.metaSource):
return self.metaSource.augmentMetadata(next, prev, reference, notReference)
return None
# See http://wiki.csswg.org/test/css2.1/format for more info on metadata
def getMetadata(self, asUnicode = False):
"""Return dictionary of test metadata. Stores list of errors
in self.errors if there are parse or metadata errors.
Data fields include:
- asserts [list of strings]
- credits [list of (name string, url string) tuples]
- reviewers [ list of (name string, url string) tuples]
- flags [list of token strings]
- links [list of url strings]
- name [string]
- title [string]
- references [list of ReferenceData per reference; None if not reftest]
- revision [revision id of last commit]
- selftest [bool]
- scripttest [bool]
Strings are given in ascii unless asUnicode==True.
"""
self.validate()
def encode(str):
return str if (hasattr(str, 'line')) else intern(str.encode('utf-8'))
def escape(str, andIntern = True):
return str.encode('utf-8') if asUnicode else intern(escapeToNamedASCII(str)) if andIntern else escapeToNamedASCII(str)
def listReferences(source, seen):
refGroups = []
for refType, refRelPath, refNode, refSource in source.refs.values():
if ('==' == refType):
if (refSource):
refSourcePath = refSource.sourcepath
else:
refSourcePath = os.path.normpath(join(basepath(source.sourcepath), refRelPath))
if (refSourcePath in seen):
continue
seen.add(refSourcePath)
if (refSource):
sourceData = ReferenceData(name = self.sourceTree.getAssetName(refSourcePath), type = refType,
relpath = refRelPath, repopath = refSourcePath)
if (refSource.refs):
subRefLists = listReferences(refSource, seen.copy())
if (subRefLists):
for subRefList in subRefLists:
refGroups.append([sourceData] + subRefList)
else:
refGroups.append([sourceData])
else:
refGroups.append([sourceData])
else:
sourceData = ReferenceData(name = self.sourceTree.getAssetName(refSourcePath), type = refType,
relpath = relativeURL(self.sourcepath, refSourcePath),
repopath = refSourcePath)
refGroups.append([sourceData])
notRefs = {}
for refType, refRelPath, refNode, refSource in source.refs.values():
if ('!=' == refType):
if (refSource):
refSourcePath = refSource.sourcepath
else:
refSourcePath = os.path.normpath(join(basepath(source.sourcepath), refRelPath))
if (refSourcePath in seen):
continue
seen.add(refSourcePath)
if (refSource):
sourceData = ReferenceData(name = self.sourceTree.getAssetName(refSourcePath), type = refType,
relpath = refRelPath, repopath = refSourcePath)
notRefs[sourceData.name] = sourceData
if (refSource.refs):
for subRefList in listReferences(refSource, seen):
for subRefData in subRefList:
notRefs[subRefData.name] = subRefData
else:
sourceData = ReferenceData(name = self.sourceTree.getAssetName(refSourcePath), type = refType,
relpath = relativeURL(self.sourcepath, refSourcePath),
repopath = refSourcePath)
notRefs[sourceData.name] = sourceData
if (notRefs):
for refData in notRefs.values():
refData.type = '!='
if (refGroups):
for refGroup in refGroups:
for notRef in notRefs.values():
for ref in refGroup:
if (ref.name == notRef.name):
break
else:
refGroup.append(notRef)
else:
refGroups.append(notRefs.values())
return refGroups
references = listReferences(self, set([self.sourcepath])) if (self.refs) else None
if (self.metadata):
data = Metadata(
name = encode(self.name()),
title = escape(self.metadata['title'], False),
asserts = [escape(assertion, False) for assertion in self.metadata['asserts']],
credits = [UserData(escape(name), encode(link)) for name, link in self.metadata['credits']],
reviewers = [UserData(escape(name), encode(link)) for name, link in self.metadata['reviewers']],
flags = [encode(flag) for flag in self.metadata['flags']],
links = [encode(link) for link in self.metadata['links']],
references = references,
revision = self.revision(),
selftest = self.isSelftest(),
scripttest = self.isScripttest()
)
return data
return None
def addReference(self, referenceSource, match = None):
"""Add reference source."""
self.validate()
refName = referenceSource.name()
refPath = self.relativeURL(referenceSource)
if refName not in self.refs:
node = None
if match == '==':
node = self.augmentMetadata(reference=referenceSource).reference
elif match == '!=':
node = self.augmentMetadata(notReference=referenceSource).notReference
self.refs[refName] = (match, refPath, node, referenceSource)
else:
node = self.refs[refName][2]
node.set('href', refPath)
if (match):
node.set('rel', 'mismatch' if ('!=' == match) else 'match')
else:
match = self.refs[refName][0]
self.refs[refName] = (match, refPath, node, referenceSource)
def getReferencePaths(self):
"""Get list of paths to references as tuple(path, relPath, refType)."""
self.validate()
return [(os.path.join(os.path.dirname(self.sourcepath), ref[1]),
os.path.join(os.path.dirname(self.relpath), ref[1]),
ref[0])
for ref in self.refs.values()]
def isTest(self):
self.validate()
return bool(self.metadata) and bool(self.metadata.get('links'))
def isReftest(self):
return self.isTest() and bool(self.refs)
def isSelftest(self):
return self.isTest() and (not bool(self.refs))
def isScripttest(self):
if (self.isTest() and self.scripts):
for src in self.scripts:
if (src.endswith('/resources/testharness.js')): # accept relative paths to testharness.js
return True
return False
def hasFlag(self, flag):
data = self.getMetadata()
if data:
return flag in data['flags']
return False
class ConfigSource(FileSource):
"""Object representing a text-based configuration file.
Capable of merging multiple config-file contents.
"""
def __init__(self, sourceTree, sourcepath, relpath, mimetype = None, data = None):
"""Init ConfigSource from source path. Give it relative path relpath.
"""
FileSource.__init__(self, sourceTree, sourcepath, relpath, mimetype, data)
self.sourcepath = [sourcepath]
def __eq__(self, other):
if not isinstance(other, ConfigSource):
return False
if self is other or self.sourcepath == other.sourcepath:
return True
if len(self.sourcepath) != len(other.sourcepath):
return False
for this, that in zip(self.sourcepath, other.sourcepath):
if not filecmp.cmp(this, that):
return False
return True
def __ne__(self, other):
return not self == other
def name(self):
return '.htaccess'
def type(self):
return intern('support')
def data(self):
"""Merge contents of all config files represented by this source."""
data = ''
for src in self.sourcepath:
data += open(src).read()
data += '\n'
return data
def getMetadata(self, asUnicode = False):
return None
def append(self, other):
"""Appends contents of ConfigSource `other` to this source.
Asserts if self.relpath != other.relpath.
"""
assert isinstance(other, ConfigSource)
assert self != other and self.relpath == other.relpath
self.sourcepath.extend(other.sourcepath)
class ReftestFilepathError(Exception):
pass
class ReftestManifest(ConfigSource):
"""Object representing a reftest manifest file.
Iterating the ReftestManifest returns (testpath, refpath) tuples
with paths relative to the manifest.
"""
def __init__(self, sourceTree, sourcepath, relpath, data = None):
"""Init ReftestManifest from source path. Give it relative path `relpath`
and load its .htaccess file.
"""
ConfigSource.__init__(self, sourceTree, sourcepath, relpath, mimetype = 'config/reftest', data = data)
def basepath(self):
"""Returns the base relpath of this reftest manifest path, i.e.
the parent of the manifest file.
"""
return basepath(self.relpath)
baseRE = re.compile(r'^#\s*relstrip\s+(\S+)\s*')
stripRE = re.compile(r'#.*')
parseRE = re.compile(r'^\s*([=!]=)\s*(\S+)\s+(\S+)')
def __iter__(self):
"""Parse the reftest manifest files represented by this ReftestManifest
and return path information about each reftest pair as
((test-sourcepath, ref-sourcepath), (test-relpath, ref-relpath), reftype)
Raises a ReftestFilepathError if any source files do not exist or
if any relpaths point higher than the relpath root.
"""
striplist = []
for src in self.sourcepath:
relbase = basepath(self.relpath)
srcbase = basepath(src)
for line in open(src):
strip = self.baseRE.search(line)
if strip:
striplist.append(strip.group(1))
line = self.stripRE.sub('', line)
m = self.parseRE.search(line)
if m:
record = ((join(srcbase, m.group(2)), join(srcbase, m.group(3))), \
(join(relbase, m.group(2)), join(relbase, m.group(3))), \
m.group(1))
# for strip in striplist:
# strip relrecord
if not exists(record[0][0]):
raise ReftestFilepathError("Manifest Error in %s: "
"Reftest test file %s does not exist." \
% (src, record[0][0]))
elif not exists(record[0][1]):
raise ReftestFilepathError("Manifest Error in %s: "
"Reftest reference file %s does not exist." \
% (src, record[0][1]))
elif not isPathInsideBase(record[1][0]):
raise ReftestFilepathError("Manifest Error in %s: "
"Reftest test replath %s not within relpath root." \
% (src, record[1][0]))
elif not isPathInsideBase(record[1][1]):
raise ReftestFilepathError("Manifest Error in %s: "
"Reftest test replath %s not within relpath root." \
% (src, record[1][1]))
yield record
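# Illustrative sketch only, not part of the original class: given a manifest
# line such as
#     == green-square.xht green-square-ref.xht
# in a reftest.list whose sourcepath is 'approved/suite/reftest.list' and
# relpath is 'suite/reftest.list', iteration would yield roughly
#     (('approved/suite/green-square.xht', 'approved/suite/green-square-ref.xht'),
#      ('suite/green-square.xht', 'suite/green-square-ref.xht'),
#      '==')
# i.e. (source paths, relpaths, comparison type).  The file names here are
# assumptions for illustration.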
import Utils # set up XML catalog
xhtmlns = '{http://www.w3.org/1999/xhtml}'
svgns = '{http://www.w3.org/2000/svg}'
xmlns = '{http://www.w3.org/XML/1998/namespace}'
xlinkns = '{http://www.w3.org/1999/xlink}'
class XMLSource(FileSource):
"""FileSource object with support reading XML trees."""
NodeTuple = collections.namedtuple('NodeTuple', ['next', 'prev', 'reference', 'notReference'])
# Public Data
syntaxErrorDoc = \
u"""
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Syntax Error</title></head>
<body>
<p>The XML file <![CDATA[%s]]> contains a syntax error and could not be parsed.
Please correct it and try again.</p>
<p>The parser's error report was:</p>
<pre><![CDATA[%s]]></pre>
</body>
</html>
"""
# Private Data and Methods
__parser = etree.XMLParser(no_network=True,
# perf nightmare dtd_validation=True,
remove_comments=False,
strip_cdata=False,
resolve_entities=False)
# Public Methods
def __init__(self, sourceTree, sourcepath, relpath, data = None):
"""Initialize XMLSource by loading from XML file `sourcepath`.
Parse errors are reported in `self.errors`,
and the source is replaced with an XHTML error message.
"""
FileSource.__init__(self, sourceTree, sourcepath, relpath, data = data)
self.tree = None
self.injectedTags = {}
def cacheAsParseError(self, filename, e):
"""Replace document with an error message."""
errorDoc = self.syntaxErrorDoc % (filename, e)
from StringIO import StringIO
self.tree = etree.parse(StringIO(errorDoc), parser=self.__parser)
def parse(self):
"""Parse file and store any parse errors in self.errors"""
self.errors = None
try:
data = self.data()
if (data):
self.tree = etree.parse(StringReader(data), parser=self.__parser)
self.encoding = self.tree.docinfo.encoding or 'utf-8'
self.injectedTags = {}
else:
self.tree = None
self.errors = ['Empty source file']
self.encoding = 'utf-8'
FileSource.loadMetadata(self)
if ((not self.metadata) and self.tree and (not self.errors)):
self.extractMetadata(self.tree)
except etree.ParseError as e:
print "PARSE ERROR: " + self.sourcepath
self.cacheAsParseError(self.sourcepath, e)
e.W3CTestLibErrorLocation = self.sourcepath
self.errors = [str(e)]
self.encoding = 'utf-8'
def validate(self):
"""Parse file if not parsed, and store any parse errors in self.errors"""
if self.tree is None:
self.parse()
def getMeatdataContainer(self):
return self.tree.getroot().find(xhtmlns+'head')
def injectMetadataLink(self, rel, href, tagCode = None):
"""Inject (prepend) <link> with data given inside metadata container.
Injected element is tagged with `tagCode`, which can be
used to clear it with clearInjectedTags later.
"""
self.validate()
container = self.getMeatdataContainer()
if (None != container):   # explicit None test; an empty lxml element evaluates as False
node = etree.Element(xhtmlns+'link', {'rel': rel, 'href': href})
node.tail = container.text
container.insert(0, node)
self.injectedTags[node] = tagCode or True
return node
return None
def clearInjectedTags(self, tagCode = None):
"""Clears all injected elements from the tree, or clears injected
elements tagged with `tagCode` if `tagCode` is given.
"""
if not self.injectedTags or not self.tree: return
for node in list(self.injectedTags.keys()):   # copy keys; the dict is mutated below
    if ((None == tagCode) or (tagCode == self.injectedTags[node])):
        node.getparent().remove(node)
        del self.injectedTags[node]
def serializeXML(self):
self.validate()
return etree.tounicode(self.tree)
def data(self):
if ((not self.tree) or (self.metaSource)):
return FileSource.data(self)
return self.serializeXML().encode(self.encoding, 'xmlcharrefreplace')
def unicode(self):
if ((not self.tree) or (self.metaSource)):
return FileSource.unicode(self)
return self.serializeXML()
def write(self, format, output=None):
"""Write Source through OutputFormat `format`.
Write contents as string `output` instead if specified.
"""
if not output:
output = self.unicode()
# write
f = open(format.dest(self.relpath), 'w')
f.write(output.encode(self.encoding, 'xmlcharrefreplace'))
f.close()
def compact(self):
self.tree = None
def getMetadataElements(self, tree):
container = self.getMeatdataContainer()
if (None != container):
return [node for node in container]
return None
def extractMetadata(self, tree):
"""Extract metadata from tree."""
links = []; credits = []; reviewers = []; flags = []; asserts = []; title = ''
def tokenMatch(token, string):
return bool(re.search('(^|\s+)%s($|\s+)' % token, string)) if (string) else False
errors = []
readFlags = False
metaElements = self.getMetadataElements(tree)
if (not metaElements):
errors.append("Missing <head> element")
else:
# Scan and cache metadata
for node in metaElements:
if (node.tag == xhtmlns+'link'):
# help links
if tokenMatch('help', node.get('rel')):
link = node.get('href').strip() if node.get('href') else None
if (not link):
errors.append(LineString("Help link missing href value.", node.sourceline))
elif (not (link.startswith('http://') or link.startswith('https://'))):
errors.append(LineString("Help link " + link.encode('utf-8') + " must be absolute URL.", node.sourceline))
elif (link in links):
errors.append(LineString("Duplicate help link " + link.encode('utf-8') + ".", node.sourceline))
else:
links.append(LineString(link, node.sourceline))
# == references
elif tokenMatch('match', node.get('rel')) or tokenMatch('reference', node.get('rel')):
refPath = node.get('href').strip() if node.get('href') else None
if (not refPath):
errors.append(LineString("Reference link missing href value.", node.sourceline))
else:
refName = self.sourceTree.getAssetName(join(self.sourcepath, refPath))
if (refName in self.refs):
errors.append(LineString("Reference " + refName.encode('utf-8') + " already specified.", node.sourceline))
else:
self.refs[refName] = ('==', refPath, node, None)
# != references
elif tokenMatch('mismatch', node.get('rel')) or tokenMatch('not-reference', node.get('rel')):
refPath = node.get('href').strip() if node.get('href') else None
if (not refPath):
errors.append(LineString("Reference link missing href value.", node.sourceline))
else:
refName = self.sourceTree.getAssetName(join(self.sourcepath, refPath))
if (refName in self.refs):
errors.append(LineString("Reference " + refName.encode('utf-8') + " already specified.", node.sourceline))
else:
self.refs[refName] = ('!=', refPath, node, None)
else: # may have both author and reviewer in the same link
# credits
if tokenMatch('author', node.get('rel')):
name = node.get('title')
name = name.strip() if name else name
if (not name):
errors.append(LineString("Author link missing name (title attribute).", node.sourceline))
else:
link = node.get('href').strip() if node.get('href') else None
if (not link):
errors.append(LineString("Author link for \"" + name.encode('utf-8') + "\" missing contact URL (http or mailto).", node.sourceline))
else:
credits.append((name, link))
# reviewers
if tokenMatch('reviewer', node.get('rel')):
name = node.get('title')
name = name.strip() if name else name
if (not name):
errors.append(LineString("Reviewer link missing name (title attribute).", node.sourceline))
else:
link = node.get('href').strip() if node.get('href') else None
if (not link):
errors.append(LineString("Reviewer link for \"" + name.encode('utf-8') + "\" missing contact URL (http or mailto).", node.sourceline))
else:
reviewers.append((name, link))
elif (node.tag == xhtmlns+'meta'):
metatype = node.get('name')
metatype = metatype.strip() if metatype else metatype
# requirement flags
if ('flags' == metatype):
if (readFlags):
errors.append(LineString("Flags must only be specified once.", node.sourceline))
else:
readFlags = True
if (None == node.get('content')):
errors.append(LineString("Flags meta missing content attribute.", node.sourceline))
else:
for flag in sorted(node.get('content').split()):
flags.append(flag)
# test assertions
elif ('assert' == metatype):
if (None == node.get('content')):
errors.append(LineString("Assert meta missing content attribute.", node.sourceline))
else:
asserts.append(node.get('content').strip().replace('\t', ' '))
# title
elif (node.tag == xhtmlns+'title'):
title = node.text.strip() if node.text else ''
match = re.match('(?:[^:]*)[tT]est(?:[^:]*):(.*)', title, re.DOTALL)
if (match):
title = match.group(1)
title = title.strip()
# script
elif (node.tag == xhtmlns+'script'):
src = node.get('src').strip() if node.get('src') else None
if (src):
self.scripts[src] = node
if (asserts or credits or reviewers or flags or links or title):
self.metadata = {'asserts' : asserts,
'credits' : credits,
'reviewers' : reviewers,
'flags' : flags,
'links' : links,
'title' : title
}
if (errors):
if (self.errors):
self.errors += errors
else:
self.errors = errors
def augmentMetadata(self, next=None, prev=None, reference=None, notReference=None):
"""Add extra useful metadata to the head. All arguments are optional.
* Adds next/prev links to next/prev Sources given
* Adds reference link to reference Source given
"""
self.validate()
if next:
next = self.injectMetadataLink('next', self.relativeURL(next), 'next')
if prev:
prev = self.injectMetadataLink('prev', self.relativeURL(prev), 'prev')
if reference:
reference = self.injectMetadataLink('match', self.relativeURL(reference), 'ref')
if notReference:
notReference = self.injectMetadataLink('mismatch', self.relativeURL(notReference), 'not-ref')
return self.NodeTuple(next, prev, reference, notReference)
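# Illustrative sketch only, not part of the original class: a suite build
# might chain tests and wire reference links like this (variable names are
# assumptions):
#     nodes = test.augmentMetadata(next=nextTest, prev=prevTest, reference=refSource)
#     # nodes.next / nodes.prev / nodes.reference are the injected <link> elements
#     test.clearInjectedTags('next')   # later, drop only the link tagged 'next'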
class XHTMLSource(XMLSource):
"""FileSource object with support for XHTML->HTML conversions."""
# Public Methods
def __init__(self, sourceTree, sourcepath, relpath, data = None):
"""Initialize XHTMLSource by loading from XHTML file `sourcepath`.
Parse errors are stored in `self.errors`,
and the source is replaced with an XHTML error message.
"""
XMLSource.__init__(self, sourceTree, sourcepath, relpath, data = data)
def serializeXHTML(self, doctype = None):
return self.serializeXML()
def serializeHTML(self, doctype = None):
self.validate()
# Serialize
# print self.relpath
serializer = HTMLSerializer.HTMLSerializer()
output = serializer.serializeHTML(self.tree, doctype)
return output
class SVGSource(XMLSource):
"""FileSource object with support for extracting metadata from SVG."""
def __init__(self, sourceTree, sourcepath, relpath, data = None):
"""Initialize SVGSource by loading from SVG file `sourcepath`.
Parse errors are stored in `self.errors`,
and the source is replaced with an XHTML error message.
"""
XMLSource.__init__(self, sourceTree, sourcepath, relpath, data = data)
def getMeatdataContainer(self):
groups = self.tree.getroot().findall(svgns+'g')
for group in groups:
if ('testmeta' == group.get('id')):
return group
return None
def extractMetadata(self, tree):
"""Extract metadata from tree."""
links = []; credits = []; reviewers = []; flags = []; asserts = []; title = ''
def tokenMatch(token, string):
return bool(re.search('(^|\s+)%s($|\s+)' % token, string)) if (string) else False
errors = []
readFlags = False
metaElements = self.getMetadataElements(tree)
if (not metaElements):
errors.append("Missing <g id='testmeta'> element")
else:
# Scan and cache metadata
for node in metaElements:
if (node.tag == xhtmlns+'link'):
# help links
if tokenMatch('help', node.get('rel')):
link = node.get('href').strip() if node.get('href') else None
if (not link):
errors.append(LineString("Help link missing href value.", node.sourceline))
elif (not (link.startswith('http://') or link.startswith('https://'))):
errors.append(LineString("Help link " + link.encode('utf-8') + " must be absolute URL.", node.sourceline))
elif (link in links):
errors.append(LineString("Duplicate help link " + link.encode('utf-8') + ".", node.sourceline))
else:
links.append(LineString(link, node.sourceline))
# == references
elif tokenMatch('match', node.get('rel')) or tokenMatch('reference', node.get('rel')):
refPath = node.get('href').strip() if node.get('href') else None
if (not refPath):
errors.append(LineString("Reference link missing href value.", node.sourceline))
else:
refName = self.sourceTree.getAssetName(join(self.sourcepath, refPath))
if (refName in self.refs):
errors.append(LineString("Reference " + refName.encode('utf-8') + " already specified.", node.sourceline))
else:
self.refs[refName] = ('==', refPath, node, None)
# != references
elif tokenMatch('mismatch', node.get('rel')) or tokenMatch('not-reference', node.get('rel')):
refPath = node.get('href').strip() if node.get('href') else None
if (not refPath):
errors.append(LineString("Reference link missing href value.", node.sourceline))
else:
refName = self.sourceTree.getAssetName(join(self.sourcepath, refPath))
if (refName in self.refs):
errors.append(LineString("Reference " + refName.encode('utf-8') + " already specified.", node.sourceline))
else:
self.refs[refName] = ('!=', refPath, node, None)
else: # may have both author and reviewer in the same link
# credits
if tokenMatch('author', node.get('rel')):
name = node.get('title')
name = name.strip() if name else name
if (not name):
errors.append(LineString("Author link missing name (title attribute).", node.sourceline))
else:
link = node.get('href').strip() if node.get('href') else None
if (not link):
errors.append(LineString("Author link for \"" + name.encode('utf-8') + "\" missing contact URL (http or mailto).", node.sourceline))
else:
credits.append((name, link))
# reviewers
if tokenMatch('reviewer', node.get('rel')):
name = node.get('title')
name = name.strip() if name else name
if (not name):
errors.append(LineString("Reviewer link missing name (title attribute).", node.sourceline))
else:
link = node.get('href').strip() if node.get('href') else None
if (not link):
errors.append(LineString("Reviewer link for \"" + name.encode('utf-8') + "\" missing contact URL (http or mailto).", node.sourceline))
else:
reviewers.append((name, link))
elif (node.tag == svgns+'metadata'):
metatype = node.get('class')
metatype = metatype.strip() if metatype else metatype
# requirement flags
if ('flags' == metatype):
if (readFlags):
errors.append(LineString("Flags must only be specified once.", node.sourceline))
else:
readFlags = True
text = node.find(svgns+'text')
flagString = text.text if (None != text) else node.text   # explicit None test; childless lxml elements evaluate as False
if (flagString):
for flag in sorted(flagString.split()):
flags.append(flag)
elif (node.tag == svgns+'desc'):
metatype = node.get('class')
metatype = metatype.strip() if metatype else metatype
# test assertions
if ('assert' == metatype):
asserts.append(node.text.strip().replace('\t', ' '))
# test title
elif node.tag == svgns+'title':
title = node.text.strip() if node.text else ''
match = re.match('(?:[^:]*)[tT]est(?:[^:]*):(.*)', title, re.DOTALL)
if (match):
title = match.group(1)
title = title.strip()
# script tag (XXX restricted to metadata container?)
elif (node.tag == svgns+'script'):
src = node.get('src').strip() if node.get('src') else None
if (src):
self.scripts[src] = node
if (asserts or credits or reviewers or flags or links or title):
self.metadata = {'asserts' : asserts,
'credits' : credits,
'reviewers' : reviewers,
'flags' : flags,
'links' : links,
'title' : title
}
if (errors):
if (self.errors):
self.errors += errors
else:
self.errors = errors
class HTMLSource(XMLSource):
"""FileSource object with support for HTML metadata and HTML->XHTML conversions (untested)."""
# Private Data and Methods
__parser = html5lib.HTMLParser(tree = treebuilders.getTreeBuilder('lxml'))
# Public Methods
def __init__(self, sourceTree, sourcepath, relpath, data = None):
"""Initialize HTMLSource by loading from HTML file `sourcepath`.
"""
XMLSource.__init__(self, sourceTree, sourcepath, relpath, data = data)
def parse(self):
"""Parse file and store any parse errors in self.errors"""
self.errors = None
try:
data = self.data()
if data:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
htmlStream = html5lib.inputstream.HTMLInputStream(data)
if ('utf-8-sig' != self.encoding): # if we found a BOM, respect it
self.encoding = htmlStream.detectEncoding()[0]
self.tree = self.__parser.parse(data, encoding = self.encoding)
self.injectedTags = {}
else:
self.tree = None
self.errors = ['Empty source file']
self.encoding = 'utf-8'
FileSource.loadMetadata(self)
if ((not self.metadata) and self.tree and (not self.errors)):
self.extractMetadata(self.tree)
except Exception as e:
print "PARSE ERROR: " + self.sourcepath
e.W3CTestLibErrorLocation = self.sourcepath
self.errors = [str(e)]
self.encoding = 'utf-8'
def _injectXLinks(self, element, nodeList):
injected = False
xlinkAttrs = ['href', 'type', 'role', 'arcrole', 'title', 'show', 'actuate']
if (element.get('href') or element.get(xlinkns + 'href')):
for attr in xlinkAttrs:
if (element.get(xlinkns + attr)):
injected = True
if (element.get(attr)):
injected = True
value = element.get(attr)
del element.attrib[attr]
element.set(xlinkns + attr, value)
nodeList.append((element, xlinkns + attr, attr))
for child in element:
if (type(child.tag) == type('')): # element node
qName = etree.QName(child.tag)
if ('foreignobject' != qName.localname.lower()):
injected |= self._injectXLinks(child, nodeList)
return injected
def _findElements(self, namespace, elementName):
elements = self.tree.findall('.//{' + namespace + '}' + elementName)
if (self.tree.getroot().tag == '{' + namespace + '}' + elementName):
elements.insert(0, self.tree.getroot())
return elements
def _injectNamespace(self, elementName, prefix, namespace, doXLinks, nodeList):
attr = xmlns + prefix if (prefix) else 'xmlns'
elements = self._findElements(namespace, elementName)
for element in elements:
if not element.get(attr):
element.set(attr, namespace)
nodeList.append((element, attr, None))
if (doXLinks):
if (self._injectXLinks(element, nodeList)):
element.set(xmlns + 'xlink', 'http://www.w3.org/1999/xlink')
nodeList.append((element, xmlns + 'xlink', None))
def injectNamespaces(self):
nodeList = []
self._injectNamespace('html', None, 'http://www.w3.org/1999/xhtml', False, nodeList)
self._injectNamespace('svg', None, 'http://www.w3.org/2000/svg', True, nodeList)
self._injectNamespace('math', None, 'http://www.w3.org/1998/Math/MathML', True, nodeList)
return nodeList
def removeNamespaces(self, nodeList):
if nodeList:
for element, attr, oldAttr in nodeList:
if (oldAttr):
value = element.get(attr)
del element.attrib[attr]
element.set(oldAttr, value)
else:
del element.attrib[attr]
def serializeXHTML(self, doctype = None):
self.validate()
# Serialize
nodeList = self.injectNamespaces()
# print self.relpath
serializer = HTMLSerializer.HTMLSerializer()
o = serializer.serializeXHTML(self.tree, doctype)
self.removeNamespaces(nodeList)
return o
def serializeHTML(self, doctype = None):
self.validate()
# Serialize
# print self.relpath
serializer = HTMLSerializer.HTMLSerializer()
o = serializer.serializeHTML(self.tree, doctype)
return o
def data(self):
if ((not self.tree) or (self.metaSource)):
return FileSource.data(self)
return self.serializeHTML().encode(self.encoding, 'xmlcharrefreplace')
def unicode(self):
if ((not self.tree) or (self.metaSource)):
return FileSource.unicode(self)
return self.serializeHTML()
|
mpl-2.0
|
crosswalk-project/blink-crosswalk
|
Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/memorizingfile.py
|
680
|
3709
|
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Memorizing file.
A memorizing file wraps a file and memorizes lines read by readline.
"""
import sys
class MemorizingFile(object):
"""MemorizingFile wraps a file and memorizes lines read by readline.
Note that data read by other methods are not memorized. This behavior
is good enough for memorizing lines SimpleHTTPServer reads before
the control reaches WebSocketRequestHandler.
"""
def __init__(self, file_, max_memorized_lines=sys.maxint):
"""Construct an instance.
Args:
file_: the file object to wrap.
max_memorized_lines: the maximum number of lines to memorize.
Only the first max_memorized_lines are memorized.
Default: sys.maxint.
"""
self._file = file_
self._memorized_lines = []
self._max_memorized_lines = max_memorized_lines
self._buffered = False
self._buffered_line = None
def __getattribute__(self, name):
if name in ('_file', '_memorized_lines', '_max_memorized_lines',
'_buffered', '_buffered_line', 'readline',
'get_memorized_lines'):
return object.__getattribute__(self, name)
return self._file.__getattribute__(name)
def readline(self, size=-1):
"""Override file.readline and memorize the line read.
Note that even if size is specified and smaller than actual size,
the whole line will be read out from underlying file object by
subsequent readline calls.
"""
if self._buffered:
line = self._buffered_line
self._buffered = False
else:
line = self._file.readline()
if line and len(self._memorized_lines) < self._max_memorized_lines:
self._memorized_lines.append(line)
if size >= 0 and size < len(line):
self._buffered = True
self._buffered_line = line[size:]
return line[:size]
return line
def get_memorized_lines(self):
"""Get lines memorized so far."""
return self._memorized_lines
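# Illustrative usage sketch, not part of the original module: wrap an
# in-memory stream and observe that lines read via readline() are memorized.
# The StringIO import and the sample request text are assumptions.
def _example_memorizing_file():
    import StringIO
    wrapped = MemorizingFile(StringIO.StringIO('GET / HTTP/1.1\r\nHost: example.com\r\n'),
                             max_memorized_lines=10)
    wrapped.readline()                    # 'GET / HTTP/1.1\r\n'
    wrapped.readline()                    # 'Host: example.com\r\n'
    return wrapped.get_memorized_lines()  # both lines, in the order they were read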
# vi:sts=4 sw=4 et
|
bsd-3-clause
|
atzengin/OCC
|
oc-utils/python/modtool/oc-newmod/docs/doxygen/doxyxml/generated/compound.py
|
344
|
20296
|
#!/usr/bin/env python
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
from string import lower as str_lower
from xml.dom import minidom
from xml.dom import Node
import sys
import compoundsuper as supermod
from compoundsuper import MixedContainer
class DoxygenTypeSub(supermod.DoxygenType):
def __init__(self, version=None, compounddef=None):
supermod.DoxygenType.__init__(self, version, compounddef)
def find(self, details):
return self.compounddef.find(details)
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class compounddefTypeSub(supermod.compounddefType):
def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers)
def find(self, details):
if self.id == details.refid:
return self
for sectiondef in self.sectiondef:
result = sectiondef.find(details)
if result:
return result
supermod.compounddefType.subclass = compounddefTypeSub
# end class compounddefTypeSub
class listofallmembersTypeSub(supermod.listofallmembersType):
def __init__(self, member=None):
supermod.listofallmembersType.__init__(self, member)
supermod.listofallmembersType.subclass = listofallmembersTypeSub
# end class listofallmembersTypeSub
class memberRefTypeSub(supermod.memberRefType):
def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''):
supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name)
supermod.memberRefType.subclass = memberRefTypeSub
# end class memberRefTypeSub
class compoundRefTypeSub(supermod.compoundRefType):
def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.compoundRefType.__init__(self, mixedclass_, content_)
supermod.compoundRefType.subclass = compoundRefTypeSub
# end class compoundRefTypeSub
class reimplementTypeSub(supermod.reimplementType):
def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.reimplementType.__init__(self, mixedclass_, content_)
supermod.reimplementType.subclass = reimplementTypeSub
# end class reimplementTypeSub
class incTypeSub(supermod.incType):
def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.incType.__init__(self, mixedclass_, content_)
supermod.incType.subclass = incTypeSub
# end class incTypeSub
class refTypeSub(supermod.refType):
def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.refType.__init__(self, mixedclass_, content_)
supermod.refType.subclass = refTypeSub
# end class refTypeSub
class refTextTypeSub(supermod.refTextType):
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
supermod.refTextType.__init__(self, mixedclass_, content_)
supermod.refTextType.subclass = refTextTypeSub
# end class refTextTypeSub
class sectiondefTypeSub(supermod.sectiondefType):
def __init__(self, kind=None, header='', description=None, memberdef=None):
supermod.sectiondefType.__init__(self, kind, header, description, memberdef)
def find(self, details):
for memberdef in self.memberdef:
if memberdef.id == details.refid:
return memberdef
return None
supermod.sectiondefType.subclass = sectiondefTypeSub
# end class sectiondefTypeSub
class memberdefTypeSub(supermod.memberdefType):
def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby)
supermod.memberdefType.subclass = memberdefTypeSub
# end class memberdefTypeSub
class descriptionTypeSub(supermod.descriptionType):
def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
supermod.descriptionType.__init__(self, mixedclass_, content_)
supermod.descriptionType.subclass = descriptionTypeSub
# end class descriptionTypeSub
class enumvalueTypeSub(supermod.enumvalueType):
def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
supermod.enumvalueType.__init__(self, mixedclass_, content_)
supermod.enumvalueType.subclass = enumvalueTypeSub
# end class enumvalueTypeSub
class templateparamlistTypeSub(supermod.templateparamlistType):
def __init__(self, param=None):
supermod.templateparamlistType.__init__(self, param)
supermod.templateparamlistType.subclass = templateparamlistTypeSub
# end class templateparamlistTypeSub
class paramTypeSub(supermod.paramType):
def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None):
supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription)
supermod.paramType.subclass = paramTypeSub
# end class paramTypeSub
class linkedTextTypeSub(supermod.linkedTextType):
def __init__(self, ref=None, mixedclass_=None, content_=None):
supermod.linkedTextType.__init__(self, mixedclass_, content_)
supermod.linkedTextType.subclass = linkedTextTypeSub
# end class linkedTextTypeSub
class graphTypeSub(supermod.graphType):
def __init__(self, node=None):
supermod.graphType.__init__(self, node)
supermod.graphType.subclass = graphTypeSub
# end class graphTypeSub
class nodeTypeSub(supermod.nodeType):
def __init__(self, id=None, label='', link=None, childnode=None):
supermod.nodeType.__init__(self, id, label, link, childnode)
supermod.nodeType.subclass = nodeTypeSub
# end class nodeTypeSub
class childnodeTypeSub(supermod.childnodeType):
def __init__(self, relation=None, refid=None, edgelabel=None):
supermod.childnodeType.__init__(self, relation, refid, edgelabel)
supermod.childnodeType.subclass = childnodeTypeSub
# end class childnodeTypeSub
class linkTypeSub(supermod.linkType):
def __init__(self, refid=None, external=None, valueOf_=''):
supermod.linkType.__init__(self, refid, external)
supermod.linkType.subclass = linkTypeSub
# end class linkTypeSub
class listingTypeSub(supermod.listingType):
def __init__(self, codeline=None):
supermod.listingType.__init__(self, codeline)
supermod.listingType.subclass = listingTypeSub
# end class listingTypeSub
class codelineTypeSub(supermod.codelineType):
def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight)
supermod.codelineType.subclass = codelineTypeSub
# end class codelineTypeSub
class highlightTypeSub(supermod.highlightType):
def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None):
supermod.highlightType.__init__(self, mixedclass_, content_)
supermod.highlightType.subclass = highlightTypeSub
# end class highlightTypeSub
class referenceTypeSub(supermod.referenceType):
def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
supermod.referenceType.__init__(self, mixedclass_, content_)
supermod.referenceType.subclass = referenceTypeSub
# end class referenceTypeSub
class locationTypeSub(supermod.locationType):
def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file)
supermod.locationType.subclass = locationTypeSub
# end class locationTypeSub
class docSect1TypeSub(supermod.docSect1Type):
def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect1Type.__init__(self, mixedclass_, content_)
supermod.docSect1Type.subclass = docSect1TypeSub
# end class docSect1TypeSub
class docSect2TypeSub(supermod.docSect2Type):
def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect2Type.__init__(self, mixedclass_, content_)
supermod.docSect2Type.subclass = docSect2TypeSub
# end class docSect2TypeSub
class docSect3TypeSub(supermod.docSect3Type):
def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect3Type.__init__(self, mixedclass_, content_)
supermod.docSect3Type.subclass = docSect3TypeSub
# end class docSect3TypeSub
class docSect4TypeSub(supermod.docSect4Type):
def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect4Type.__init__(self, mixedclass_, content_)
supermod.docSect4Type.subclass = docSect4TypeSub
# end class docSect4TypeSub
class docInternalTypeSub(supermod.docInternalType):
def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
supermod.docInternalType.__init__(self, mixedclass_, content_)
supermod.docInternalType.subclass = docInternalTypeSub
# end class docInternalTypeSub
class docInternalS1TypeSub(supermod.docInternalS1Type):
def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
supermod.docInternalS1Type.__init__(self, mixedclass_, content_)
supermod.docInternalS1Type.subclass = docInternalS1TypeSub
# end class docInternalS1TypeSub
class docInternalS2TypeSub(supermod.docInternalS2Type):
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS2Type.__init__(self, mixedclass_, content_)
supermod.docInternalS2Type.subclass = docInternalS2TypeSub
# end class docInternalS2TypeSub
class docInternalS3TypeSub(supermod.docInternalS3Type):
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS3Type.__init__(self, mixedclass_, content_)
supermod.docInternalS3Type.subclass = docInternalS3TypeSub
# end class docInternalS3TypeSub
class docInternalS4TypeSub(supermod.docInternalS4Type):
def __init__(self, para=None, mixedclass_=None, content_=None):
supermod.docInternalS4Type.__init__(self, mixedclass_, content_)
supermod.docInternalS4Type.subclass = docInternalS4TypeSub
# end class docInternalS4TypeSub
class docURLLinkSub(supermod.docURLLink):
def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docURLLink.__init__(self, mixedclass_, content_)
supermod.docURLLink.subclass = docURLLinkSub
# end class docURLLinkSub
class docAnchorTypeSub(supermod.docAnchorType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docAnchorType.__init__(self, mixedclass_, content_)
supermod.docAnchorType.subclass = docAnchorTypeSub
# end class docAnchorTypeSub
class docFormulaTypeSub(supermod.docFormulaType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docFormulaType.__init__(self, mixedclass_, content_)
supermod.docFormulaType.subclass = docFormulaTypeSub
# end class docFormulaTypeSub
class docIndexEntryTypeSub(supermod.docIndexEntryType):
def __init__(self, primaryie='', secondaryie=''):
supermod.docIndexEntryType.__init__(self, primaryie, secondaryie)
supermod.docIndexEntryType.subclass = docIndexEntryTypeSub
# end class docIndexEntryTypeSub
class docListTypeSub(supermod.docListType):
def __init__(self, listitem=None):
supermod.docListType.__init__(self, listitem)
supermod.docListType.subclass = docListTypeSub
# end class docListTypeSub
class docListItemTypeSub(supermod.docListItemType):
def __init__(self, para=None):
supermod.docListItemType.__init__(self, para)
supermod.docListItemType.subclass = docListItemTypeSub
# end class docListItemTypeSub
class docSimpleSectTypeSub(supermod.docSimpleSectType):
def __init__(self, kind=None, title=None, para=None):
supermod.docSimpleSectType.__init__(self, kind, title, para)
supermod.docSimpleSectType.subclass = docSimpleSectTypeSub
# end class docSimpleSectTypeSub
class docVarListEntryTypeSub(supermod.docVarListEntryType):
def __init__(self, term=None):
supermod.docVarListEntryType.__init__(self, term)
supermod.docVarListEntryType.subclass = docVarListEntryTypeSub
# end class docVarListEntryTypeSub
class docRefTextTypeSub(supermod.docRefTextType):
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docRefTextType.__init__(self, mixedclass_, content_)
supermod.docRefTextType.subclass = docRefTextTypeSub
# end class docRefTextTypeSub
class docTableTypeSub(supermod.docTableType):
def __init__(self, rows=None, cols=None, row=None, caption=None):
supermod.docTableType.__init__(self, rows, cols, row, caption)
supermod.docTableType.subclass = docTableTypeSub
# end class docTableTypeSub
class docRowTypeSub(supermod.docRowType):
def __init__(self, entry=None):
supermod.docRowType.__init__(self, entry)
supermod.docRowType.subclass = docRowTypeSub
# end class docRowTypeSub
class docEntryTypeSub(supermod.docEntryType):
def __init__(self, thead=None, para=None):
supermod.docEntryType.__init__(self, thead, para)
supermod.docEntryType.subclass = docEntryTypeSub
# end class docEntryTypeSub
class docHeadingTypeSub(supermod.docHeadingType):
def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docHeadingType.__init__(self, mixedclass_, content_)
supermod.docHeadingType.subclass = docHeadingTypeSub
# end class docHeadingTypeSub
class docImageTypeSub(supermod.docImageType):
def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docImageType.__init__(self, mixedclass_, content_)
supermod.docImageType.subclass = docImageTypeSub
# end class docImageTypeSub
class docDotFileTypeSub(supermod.docDotFileType):
def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docDotFileType.__init__(self, mixedclass_, content_)
supermod.docDotFileType.subclass = docDotFileTypeSub
# end class docDotFileTypeSub
class docTocItemTypeSub(supermod.docTocItemType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docTocItemType.__init__(self, mixedclass_, content_)
supermod.docTocItemType.subclass = docTocItemTypeSub
# end class docTocItemTypeSub
class docTocListTypeSub(supermod.docTocListType):
def __init__(self, tocitem=None):
supermod.docTocListType.__init__(self, tocitem)
supermod.docTocListType.subclass = docTocListTypeSub
# end class docTocListTypeSub
class docLanguageTypeSub(supermod.docLanguageType):
def __init__(self, langid=None, para=None):
supermod.docLanguageType.__init__(self, langid, para)
supermod.docLanguageType.subclass = docLanguageTypeSub
# end class docLanguageTypeSub
class docParamListTypeSub(supermod.docParamListType):
def __init__(self, kind=None, parameteritem=None):
supermod.docParamListType.__init__(self, kind, parameteritem)
supermod.docParamListType.subclass = docParamListTypeSub
# end class docParamListTypeSub
class docParamListItemSub(supermod.docParamListItem):
def __init__(self, parameternamelist=None, parameterdescription=None):
supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription)
supermod.docParamListItem.subclass = docParamListItemSub
# end class docParamListItemSub
class docParamNameListSub(supermod.docParamNameList):
def __init__(self, parametername=None):
supermod.docParamNameList.__init__(self, parametername)
supermod.docParamNameList.subclass = docParamNameListSub
# end class docParamNameListSub
class docParamNameSub(supermod.docParamName):
def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
supermod.docParamName.__init__(self, mixedclass_, content_)
supermod.docParamName.subclass = docParamNameSub
# end class docParamNameSub
class docXRefSectTypeSub(supermod.docXRefSectType):
def __init__(self, id=None, xreftitle=None, xrefdescription=None):
supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription)
supermod.docXRefSectType.subclass = docXRefSectTypeSub
# end class docXRefSectTypeSub
class docCopyTypeSub(supermod.docCopyType):
def __init__(self, link=None, para=None, sect1=None, internal=None):
supermod.docCopyType.__init__(self, link, para, sect1, internal)
supermod.docCopyType.subclass = docCopyTypeSub
# end class docCopyTypeSub
class docCharTypeSub(supermod.docCharType):
def __init__(self, char=None, valueOf_=''):
supermod.docCharType.__init__(self, char)
supermod.docCharType.subclass = docCharTypeSub
# end class docCharTypeSub
class docParaTypeSub(supermod.docParaType):
def __init__(self, char=None, valueOf_=''):
supermod.docParaType.__init__(self, char)
self.parameterlist = []
self.simplesects = []
self.content = []
def buildChildren(self, child_, nodeName_):
supermod.docParaType.buildChildren(self, child_, nodeName_)
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == "ref":
obj_ = supermod.docRefTextType.factory()
obj_.build(child_)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'parameterlist':
obj_ = supermod.docParamListType.factory()
obj_.build(child_)
self.parameterlist.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'simplesect':
obj_ = supermod.docSimpleSectType.factory()
obj_.build(child_)
self.simplesects.append(obj_)
supermod.docParaType.subclass = docParaTypeSub
# end class docParaTypeSub
def parse(inFilename):
doc = minidom.parse(inFilename)
rootNode = doc.documentElement
rootObj = supermod.DoxygenType.factory()
rootObj.build(rootNode)
return rootObj
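# Illustrative sketch only, not part of the generated module: typical use is
# to load a Doxygen compound XML file and navigate the resulting objects;
# the file name below is an assumption.
#     root = parse('xml/classsome__block.xml')
#     compounddef = root.compounddef      # compounddefTypeSub instance
#     # root.find(details) walks sectiondefs/memberdefs matching details.refid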
|
gpl-3.0
|
dparks1134/RefineM
|
refinem/__init__.py
|
1
|
1495
|
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import os
def version():
"""Read program version from file."""
import refinem
version_file = open(os.path.join(__path__[0], 'VERSION'))
return version_file.readline().strip()
|
gpl-3.0
|
kittiu/odoo
|
addons/mail/tests/__init__.py
|
261
|
1173
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_mail_group, test_mail_message, test_mail_features, test_mail_gateway, test_message_read, test_invite
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
alajara/servo
|
tests/wpt/web-platform-tests/tools/py/py/_apipkg.py
|
210
|
5855
|
"""
apipkg: control the exported namespace of a python package.
see http://pypi.python.org/pypi/apipkg
(c) holger krekel, 2009 - MIT license
"""
import os
import sys
from types import ModuleType
__version__ = '1.3.dev'
def _py_abspath(path):
"""
special version of abspath
that will leave paths from jython jars alone
"""
if path.startswith('__pyclasspath__'):
return path
else:
return os.path.abspath(path)
def initpkg(pkgname, exportdefs, attr=dict()):
""" initialize given package from the export definitions. """
oldmod = sys.modules.get(pkgname)
d = {}
f = getattr(oldmod, '__file__', None)
if f:
f = _py_abspath(f)
d['__file__'] = f
if hasattr(oldmod, '__version__'):
d['__version__'] = oldmod.__version__
if hasattr(oldmod, '__loader__'):
d['__loader__'] = oldmod.__loader__
if hasattr(oldmod, '__path__'):
d['__path__'] = [_py_abspath(p) for p in oldmod.__path__]
if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None):
d['__doc__'] = oldmod.__doc__
d.update(attr)
if hasattr(oldmod, "__dict__"):
oldmod.__dict__.update(d)
mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d)
sys.modules[pkgname] = mod
def importobj(modpath, attrname):
module = __import__(modpath, None, None, ['__doc__'])
if not attrname:
return module
retval = module
names = attrname.split(".")
for x in names:
retval = getattr(retval, x)
return retval
class ApiModule(ModuleType):
def __docget(self):
try:
return self.__doc
except AttributeError:
if '__doc__' in self.__map__:
return self.__makeattr('__doc__')
def __docset(self, value):
self.__doc = value
__doc__ = property(__docget, __docset)
def __init__(self, name, importspec, implprefix=None, attr=None):
self.__name__ = name
self.__all__ = [x for x in importspec if x != '__onfirstaccess__']
self.__map__ = {}
self.__implprefix__ = implprefix or name
if attr:
for name, val in attr.items():
# print "setting", self.__name__, name, val
setattr(self, name, val)
for name, importspec in importspec.items():
if isinstance(importspec, dict):
subname = '%s.%s' % (self.__name__, name)
apimod = ApiModule(subname, importspec, implprefix)
sys.modules[subname] = apimod
setattr(self, name, apimod)
else:
parts = importspec.split(':')
modpath = parts.pop(0)
attrname = parts and parts[0] or ""
if modpath[0] == '.':
modpath = implprefix + modpath
if not attrname:
subname = '%s.%s' % (self.__name__, name)
apimod = AliasModule(subname, modpath)
sys.modules[subname] = apimod
if '.' not in name:
setattr(self, name, apimod)
else:
self.__map__[name] = (modpath, attrname)
def __repr__(self):
l = []
if hasattr(self, '__version__'):
l.append("version=" + repr(self.__version__))
if hasattr(self, '__file__'):
l.append('from ' + repr(self.__file__))
if l:
return '<ApiModule %r %s>' % (self.__name__, " ".join(l))
return '<ApiModule %r>' % (self.__name__,)
def __makeattr(self, name):
"""lazily compute value for name or raise AttributeError if unknown."""
# print "makeattr", self.__name__, name
target = None
if '__onfirstaccess__' in self.__map__:
target = self.__map__.pop('__onfirstaccess__')
importobj(*target)()
try:
modpath, attrname = self.__map__[name]
except KeyError:
if target is not None and name != '__onfirstaccess__':
# retry, onfirstaccess might have set attrs
return getattr(self, name)
raise AttributeError(name)
else:
result = importobj(modpath, attrname)
setattr(self, name, result)
try:
del self.__map__[name]
except KeyError:
pass # in a recursive-import situation a double-del can happen
return result
__getattr__ = __makeattr
def __dict__(self):
# force all the content of the module to be loaded when __dict__ is read
dictdescr = ModuleType.__dict__['__dict__']
dict = dictdescr.__get__(self)
if dict is not None:
hasattr(self, 'some')
for name in self.__all__:
try:
self.__makeattr(name)
except AttributeError:
pass
return dict
__dict__ = property(__dict__)
def AliasModule(modname, modpath, attrname=None):
mod = []
def getmod():
if not mod:
x = importobj(modpath, None)
if attrname is not None:
x = getattr(x, attrname)
mod.append(x)
return mod[0]
class AliasModule(ModuleType):
def __repr__(self):
x = modpath
if attrname:
x += "." + attrname
return '<AliasModule %r for %r>' % (modname, x)
def __getattribute__(self, name):
try:
return getattr(getmod(), name)
except ImportError:
return None
def __setattr__(self, name, value):
setattr(getmod(), name, value)
def __delattr__(self, name):
delattr(getmod(), name)
return AliasModule(str(modname))
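# Illustrative sketch only, not part of apipkg: a package __init__ typically
# hands its namespace to initpkg().  The package name 'mypkg' and the export
# definitions below are assumptions for this example.
def _example_initpkg():
    initpkg('mypkg', {
        'compute': '.impl:compute',     # mypkg.compute lazily imports mypkg.impl.compute
        'util': {
            'helper': '.impl:helper',   # nested namespace: mypkg.util.helper
        },
    })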
|
mpl-2.0
|
keto/askbot-devel
|
askbot/search/haystack/searchquery.py
|
9
|
2024
|
from askbot.models import Thread, User
from haystack.query import SearchQuerySet
class AskbotSearchQuerySet(SearchQuerySet):
def _determine_backend(self):
'''This is a hack: somehow connection_router got wrong values
from settings and did not load the LanguageRouter'''
from haystack import connections, connection_router
# A backend has been manually selected. Use it instead.
if self._using is not None:
self.query = connections[self._using].get_query()
return
# No backend, so rely on the routers to figure out what's right.
hints = {}
if self.query:
hints['models'] = self.query.models
backend_alias = connection_router.for_read(**hints)
if isinstance(backend_alias, (list, tuple)) and len(backend_alias):
# We can only effectively read from one engine.
backend_alias = backend_alias[0]
# The ``SearchQuery`` might swap itself out for a different variant
# here.
if self.query:
self.query = self.query.using(backend_alias)
else:
self.query = connections[backend_alias].get_query()
def get_django_queryset(self, model_klass=Thread):
'''dirty hack because the models() method of
SearchQuerySet does not work </3'''
id_list = []
for r in self:
if r.model_name in ['thread','post'] \
and model_klass._meta.object_name.lower() == 'thread':
if getattr(r, 'thread_id'):
id_list.append(r.thread_id)
else:
id_list.append(r.pk)
elif r.model_name == model_klass._meta.object_name.lower():
#FIXME: add a highlight here?
id_list.append(r.pk)
if model_klass == User:
return model_klass.objects.filter(id__in=set(id_list))
else:
return model_klass.objects.filter(id__in=set(id_list), deleted=False)
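# Illustrative sketch only, not part of the original module: run a search and
# map the hits back onto Django models.  The query string 'units' is an
# assumption for this example.
def _example_get_django_queryset():
    results = AskbotSearchQuerySet().filter(content='units')
    threads = results.get_django_queryset(Thread)  # Thread queryset, deleted threads excluded
    users = results.get_django_queryset(User)      # User queryset, no deleted filter
    return threads, users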
|
gpl-3.0
|
gannetson/django
|
django/contrib/admin/widgets.py
|
345
|
14769
|
"""
Form Widget classes specific to the Django admin site.
"""
from __future__ import unicode_literals
import copy
from django import forms
from django.contrib.admin.templatetags.admin_static import static
from django.core.urlresolvers import reverse
from django.db.models.deletion import CASCADE
from django.forms.utils import flatatt
from django.forms.widgets import RadioFieldRenderer
from django.template.loader import render_to_string
from django.utils import six
from django.utils.encoding import force_text
from django.utils.html import (
escape, escapejs, format_html, format_html_join, smart_urlquote,
)
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
from django.utils.translation import ugettext as _
class FilteredSelectMultiple(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
@property
def media(self):
js = ["core.js", "SelectBox.js", "SelectFilter2.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super(FilteredSelectMultiple, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
if attrs is None:
attrs = {}
attrs['class'] = 'selectfilter'
if self.is_stacked:
attrs['class'] += 'stacked'
output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
output.append('<script type="text/javascript">addEvent(window, "load", function(e) {')
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append('SelectFilter.init("id_%s", "%s", %s); });</script>\n'
% (name, escapejs(self.verbose_name), int(self.is_stacked)))
return mark_safe(''.join(output))
class AdminDateWidget(forms.DateInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'vDateField', 'size': '10'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminDateWidget, self).__init__(attrs=final_attrs, format=format)
class AdminTimeWidget(forms.TimeInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'vTimeField', 'size': '8'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTimeWidget, self).__init__(attrs=final_attrs, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
return format_html('<p class="datetime">{} {}<br />{} {}</p>',
_('Date:'), rendered_widgets[0],
_('Time:'), rendered_widgets[1])
class AdminRadioFieldRenderer(RadioFieldRenderer):
def render(self):
"""Outputs a <ul> for this set of radio fields."""
return format_html('<ul{}>\n{}\n</ul>',
flatatt(self.attrs),
format_html_join('\n', '<li>{}</li>',
((force_text(w),) for w in self)))
class AdminRadioSelect(forms.RadioSelect):
renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
template_with_initial = ('<p class="file-upload">%s</p>'
% forms.ClearableFileInput.template_with_initial)
template_with_clear = ('<span class="clearable-file-input">%s</span>'
% forms.ClearableFileInput.template_with_clear)
def url_params_from_lookup_dict(lookups):
"""
Converts the type of lookups specified in a ForeignKey limit_choices_to
attribute to a dictionary of query parameters
"""
params = {}
if lookups and hasattr(lookups, 'items'):
items = []
for k, v in lookups.items():
if callable(v):
v = v()
if isinstance(v, (tuple, list)):
v = ','.join(str(x) for x in v)
elif isinstance(v, bool):
# See django.db.fields.BooleanField.get_prep_lookup
v = ('0', '1')[v]
else:
v = six.text_type(v)
items.append((k, v))
params.update(dict(items))
return params
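# Illustrative sketch only, not part of Django: how a limit_choices_to dict is
# flattened into query parameters for the raw-id popup.  The field names used
# here are assumptions.
def _example_url_params_from_lookup_dict():
    params = url_params_from_lookup_dict({'is_active': True, 'groups__in': [1, 2]})
    # params == {'is_active': '1', 'groups__in': '1,2'}
    return params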
class ForeignKeyRawIdWidget(forms.TextInput):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface rather than
in a <select> box.
"""
def __init__(self, rel, admin_site, attrs=None, using=None):
self.rel = rel
self.admin_site = admin_site
self.db = using
super(ForeignKeyRawIdWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
rel_to = self.rel.model
if attrs is None:
attrs = {}
extra = []
if rel_to in self.admin_site._registry:
# The related object is registered with the same AdminSite
related_url = reverse(
'admin:%s_%s_changelist' % (
rel_to._meta.app_label,
rel_to._meta.model_name,
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
url = '?' + '&'.join('%s=%s' % (k, v) for k, v in params.items())
else:
url = ''
if "class" not in attrs:
attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript code looks for this hook.
# TODO: "lookup_id_" is hard-coded here. This should instead use
# the correct API to determine the ID dynamically.
extra.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" title="%s"></a>' %
(related_url, url, name, _('Lookup')))
output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)] + extra
if value:
output.append(self.label_for_value(value))
return mark_safe(''.join(output))
def base_url_parameters(self):
limit_choices_to = self.rel.limit_choices_to
if callable(limit_choices_to):
limit_choices_to = limit_choices_to()
return url_params_from_lookup_dict(limit_choices_to)
def url_parameters(self):
from django.contrib.admin.views.main import TO_FIELD_VAR
params = self.base_url_parameters()
params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
return params
def label_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.model._default_manager.using(self.db).get(**{key: value})
return ' <strong>%s</strong>' % escape(Truncator(obj).words(14, truncate='...'))
except (ValueError, self.rel.model.DoesNotExist):
return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
in a <select multiple> box.
"""
def render(self, name, value, attrs=None):
if attrs is None:
attrs = {}
if self.rel.model in self.admin_site._registry:
# The related object is registered with the same AdminSite
attrs['class'] = 'vManyToManyRawIdAdminField'
if value:
value = ','.join(force_text(v) for v in value)
else:
value = ''
return super(ManyToManyRawIdWidget, self).render(name, value, attrs)
def url_parameters(self):
return self.base_url_parameters()
def label_for_value(self, value):
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name)
if value:
return value.split(',')
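# Illustrative round trip (hedged, not part of the original module): render()
# joins the selected primary keys into "1,2,3" for the raw-id text input, and
# value_from_datadict() splits that string back into ['1', '2', '3'].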
class RelatedFieldWidgetWrapper(forms.Widget):
"""
This class is a wrapper to a given widget to add the add icon for the
admin interface.
"""
template = 'admin/related_widget_wrapper.html'
def __init__(self, widget, rel, admin_site, can_add_related=None,
can_change_related=False, can_delete_related=False):
self.needs_multipart_form = widget.needs_multipart_form
self.attrs = widget.attrs
self.choices = widget.choices
self.widget = widget
self.rel = rel
# Backwards compatible check for whether a user can add related
# objects.
if can_add_related is None:
can_add_related = rel.model in admin_site._registry
self.can_add_related = can_add_related
# XXX: The UX does not support multiple selected values.
multiple = getattr(widget, 'allow_multiple_selected', False)
self.can_change_related = not multiple and can_change_related
# XXX: The deletion UX can be confusing when dealing with cascading deletion.
cascade = getattr(rel, 'on_delete', None) is CASCADE
self.can_delete_related = not multiple and not cascade and can_delete_related
# so we can check if the related object is registered with this AdminSite
self.admin_site = admin_site
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.widget = copy.deepcopy(self.widget, memo)
obj.attrs = self.widget.attrs
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.widget.is_hidden
@property
def media(self):
return self.widget.media
def get_related_url(self, info, action, *args):
return reverse("admin:%s_%s_%s" % (info + (action,)),
current_app=self.admin_site.name, args=args)
def render(self, name, value, *args, **kwargs):
from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR
rel_opts = self.rel.model._meta
info = (rel_opts.app_label, rel_opts.model_name)
self.widget.choices = self.choices
url_params = '&'.join("%s=%s" % param for param in [
(TO_FIELD_VAR, self.rel.get_related_field().name),
(IS_POPUP_VAR, 1),
])
context = {
'widget': self.widget.render(name, value, *args, **kwargs),
'name': name,
'url_params': url_params,
'model': rel_opts.verbose_name,
}
if self.can_change_related:
change_related_template_url = self.get_related_url(info, 'change', '__fk__')
context.update(
can_change_related=True,
change_related_template_url=change_related_template_url,
)
if self.can_add_related:
add_related_url = self.get_related_url(info, 'add')
context.update(
can_add_related=True,
add_related_url=add_related_url,
)
if self.can_delete_related:
delete_related_template_url = self.get_related_url(info, 'delete', '__fk__')
context.update(
can_delete_related=True,
delete_related_template_url=delete_related_template_url,
)
return mark_safe(render_to_string(self.template, context))
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
self.attrs = self.widget.build_attrs(extra_attrs=None, **kwargs)
return self.attrs
def value_from_datadict(self, data, files, name):
return self.widget.value_from_datadict(data, files, name)
def id_for_label(self, id_):
return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
def __init__(self, attrs=None):
final_attrs = {'class': 'vLargeTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextareaWidget, self).__init__(attrs=final_attrs)
class AdminTextInputWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextInputWidget, self).__init__(attrs=final_attrs)
class AdminEmailInputWidget(forms.EmailInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminEmailInputWidget, self).__init__(attrs=final_attrs)
class AdminURLFieldWidget(forms.URLInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vURLField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)
def render(self, name, value, attrs=None):
html = super(AdminURLFieldWidget, self).render(name, value, attrs)
if value:
value = force_text(self._format_value(value))
final_attrs = {'href': smart_urlquote(value)}
html = format_html(
'<p class="url">{} <a{}>{}</a><br />{} {}</p>',
_('Currently:'), flatatt(final_attrs), value,
_('Change:'), html
)
return html
class AdminIntegerFieldWidget(forms.TextInput):
class_name = 'vIntegerField'
def __init__(self, attrs=None):
final_attrs = {'class': self.class_name}
if attrs is not None:
final_attrs.update(attrs)
super(AdminIntegerFieldWidget, self).__init__(attrs=final_attrs)
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
class_name = 'vBigIntegerField'
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vCommaSeparatedIntegerField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=final_attrs)
|
bsd-3-clause
|
hynnet/hiwifi-openwrt-HC5661-HC5761
|
staging_dir/host/lib64/scons-2.1.0/SCons/Tool/packaging/targz.py
|
21
|
1806
|
"""SCons.Tool.Packaging.targz
The targz SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/targz.py 5357 2011/09/09 21:31:03 bdeegan"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
bld.set_suffix('.tar.gz')
target, source = stripinstallbuilder(target, source, env)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
return bld(env, target, source, TARFLAGS='-zc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gpl-2.0
|
TeamTwisted/external_chromium_org
|
tools/site_compare/scrapers/chrome/chrome011010.py
|
189
|
1183
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Does scraping for versions of Chrome from 0.1.101.0 up."""
from drivers import windowing
import chromebase
# Default version
version = "0.1.101.0"
def GetChromeRenderPane(wnd):
return windowing.FindChildWindow(wnd, "Chrome_TabContents")
def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
"""Invoke a browser, send it to a series of URLs, and save its output.
Args:
urls: list of URLs to scrape
outdir: directory to place output
size: size of browser window to use
pos: position of browser window
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
Returns:
None if succeeded, else an error code
"""
chromebase.GetChromeRenderPane = GetChromeRenderPane
return chromebase.Scrape(urls, outdir, size, pos, timeout, kwargs)
def Time(urls, size, timeout, **kwargs):
"""Forwards the Time command to chromebase."""
chromebase.GetChromeRenderPane = GetChromeRenderPane
return chromebase.Time(urls, size, timeout, kwargs)
|
bsd-3-clause
|
gdimitris/ChessPuzzlerBackend
|
Virtual_Environment/lib/python2.7/site-packages/pip/operations/freeze.py
|
284
|
3984
|
from __future__ import absolute_import
import logging
import re
import pip
from pip.compat import stdlib_pkgs
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions
from pip._vendor import pkg_resources
logger = logging.getLogger(__name__)
# packages to exclude from freeze output
freeze_excludes = stdlib_pkgs + ['setuptools', 'pip', 'distribute']
def freeze(
requirement=None,
find_links=None, local_only=None, user_only=None, skip_regex=None,
find_tags=False,
default_vcs=None,
isolated=False,
wheel_cache=None):
find_links = find_links or []
skip_match = None
if skip_regex:
skip_match = re.compile(skip_regex)
dependency_links = []
for dist in pkg_resources.working_set:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt')
)
for link in find_links:
if '#egg=' in link:
dependency_links.append(link)
for link in find_links:
yield '-f %s' % link
installations = {}
for dist in get_installed_distributions(local_only=local_only,
skip=freeze_excludes,
user_only=user_only):
req = pip.FrozenRequirement.from_dist(
dist,
dependency_links,
find_tags=find_tags,
)
installations[req.name] = req
if requirement:
with open(requirement) as req_file:
for line in req_file:
if (not line.strip() or
line.strip().startswith('#') or
(skip_match and skip_match.search(line)) or
line.startswith((
'-r', '--requirement',
'-Z', '--always-unzip',
'-f', '--find-links',
'-i', '--index-url',
'--extra-index-url'))):
yield line.rstrip()
continue
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
line_req = InstallRequirement.from_editable(
line,
default_vcs=default_vcs,
isolated=isolated,
wheel_cache=wheel_cache,
)
else:
line_req = InstallRequirement.from_line(
line,
isolated=isolated,
wheel_cache=wheel_cache,
)
if not line_req.name:
logger.info(
"Skipping line because it's not clear what it "
"would install: %s",
line.strip(),
)
logger.info(
" (add #egg=PackageName to the URL to avoid"
" this warning)"
)
elif line_req.name not in installations:
logger.warning(
"Requirement file contains %s, but that package is"
" not installed",
line.strip(),
)
else:
yield str(installations[line_req.name]).rstrip()
del installations[line_req.name]
yield(
'## The following requirements were added by '
'pip freeze:'
)
for installation in sorted(
installations.values(), key=lambda x: x.name.lower()):
yield str(installation).rstrip()
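# Illustrative usage (hedged, not part of the original module): the generator
# yields plain requirement lines, e.g.
#     for line in freeze(local_only=True):
#         print(line)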
|
mit
|
AdamStelmaszczyk/pyechonest
|
pyechonest/util.py
|
1
|
12016
|
#!/usr/bin/env python
# encoding: utf-8
"""
Copyright (c) 2010 The Echo Nest. All rights reserved.
Created by Tyler Williams on 2010-04-25.
Utility functions to support the Echo Nest web API interface.
"""
import urllib
import urllib2
import httplib
import config
import logging
import socket
import re
import time
import os
import subprocess
import traceback
from types import StringType, UnicodeType
try:
import json
except ImportError:
import simplejson as json
logger = logging.getLogger(__name__)
TYPENAMES = (
('AR', 'artist'),
('SO', 'song'),
('RE', 'release'),
('TR', 'track'),
('PE', 'person'),
('DE', 'device'),
('LI', 'listener'),
('ED', 'editor'),
('TW', 'tweditor'),
('CA', 'catalog'),
)
foreign_regex = re.compile(r'^.+?:(%s):([^^]+)\^?([0-9\.]+)?' % r'|'.join(n[1] for n in TYPENAMES))
short_regex = re.compile(r'^((%s)[0-9A-Z]{16})\^?([0-9\.]+)?' % r'|'.join(n[0] for n in TYPENAMES))
long_regex = re.compile(r'music://id.echonest.com/.+?/(%s)/(%s)[0-9A-Z]{16}\^?([0-9\.]+)?' % (r'|'.join(n[0] for n in TYPENAMES), r'|'.join(n[0] for n in TYPENAMES)))
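# For illustration (hedged example, not from the original source): a short ID is a
# two-letter type prefix plus 16 alphanumeric characters, so short_regex matches
# 'ARH6W4X1187B99274F' (an artist), optionally followed by a score such as '^0.75'.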
headers = [('User-Agent', 'Pyechonest %s' % (config.__version__,))]
class MyBaseHandler(urllib2.BaseHandler):
def default_open(self, request):
if config.TRACE_API_CALLS:
logger.info("%s" % (request.get_full_url(),))
request.start_time = time.time()
return None
class MyErrorProcessor(urllib2.HTTPErrorProcessor):
def http_response(self, request, response):
code = response.code
if config.TRACE_API_CALLS:
logger.info("took %2.2fs: (%i)" % (time.time()-request.start_time,code))
if code/100 in (2, 4, 5):
return response
else:
urllib2.HTTPErrorProcessor.http_response(self, request, response)
opener = urllib2.build_opener(MyBaseHandler(), MyErrorProcessor())
opener.addheaders = headers
class EchoNestException(Exception):
"""
Parent exception class. Catches API and URL/HTTP errors.
"""
def __init__(self, code, message, headers):
if code is None:
code = -1
message = 'Echo Nest Unknown Error'
if message is None:
super(EchoNestException, self).__init__('Echo Nest Error %d' % code,)
else:
super(EchoNestException, self).__init__(message,)
self.headers = headers
self.code = code
class EchoNestAPIError(EchoNestException):
"""
API Specific Errors.
"""
def __init__(self, code, message, headers, http_status):
if http_status:
http_status_message_part = ' [HTTP %d]' % http_status
else:
http_status_message_part = ''
self.http_status = http_status
formatted_message = ('Echo Nest API Error %d: %s%s' %
(code, message, http_status_message_part),)
super(EchoNestAPIError, self).__init__(code, formatted_message, headers)
class EchoNestIOError(EchoNestException):
"""
URL and HTTP errors.
"""
def __init__(self, code=None, error=None, headers=headers):
formatted_message = ('Echo Nest IOError: %s' % headers,)
super(EchoNestIOError, self).__init__(code, formatted_message, headers)
def get_successful_response(raw_json):
if hasattr(raw_json, 'headers'):
headers = raw_json.headers
else:
headers = {'Headers':'No Headers'}
if hasattr(raw_json, 'getcode'):
http_status = raw_json.getcode()
else:
http_status = None
raw_json = raw_json.read()
try:
response_dict = json.loads(raw_json)
status_dict = response_dict['response']['status']
code = int(status_dict['code'])
message = status_dict['message']
if (code != 0):
# do some cute exception handling
raise EchoNestAPIError(code, message, headers, http_status)
del response_dict['response']['status']
return response_dict
except ValueError:
logger.debug(traceback.format_exc())
raise EchoNestAPIError(-1, "Unknown error.", headers, http_status)
# These two functions are to deal with the unknown encoded output of codegen (varies by platform and ID3 tag)
def reallyunicode(s, encoding="utf-8"):
if type(s) is StringType:
for args in ((encoding,), ('utf-8',), ('latin-1',), ('ascii', 'replace')):
try:
s = s.decode(*args)
break
except UnicodeDecodeError:
continue
if type(s) is not UnicodeType:
raise ValueError, "%s is not a string at all." % s
return s
def reallyUTF8(s):
return reallyunicode(s).encode("utf-8")
def codegen(filename, start=0, duration=30):
# Run codegen on the file and return the json. If start or duration is -1 ignore them.
cmd = config.CODEGEN_BINARY_OVERRIDE
if not cmd:
# Is this a posix platform, or is it windows?
if hasattr(os, 'uname'):
if(os.uname()[0] == "Darwin"):
cmd = "codegen.Darwin"
else:
cmd = 'codegen.'+os.uname()[0]+'-'+os.uname()[4]
else:
cmd = "codegen.windows.exe"
if not os.path.exists(cmd):
raise Exception("Codegen binary not found.")
command = cmd + " \"" + filename + "\" "
if start >= 0:
command = command + str(start) + " "
if duration >= 0:
command = command + str(duration)
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(json_block, errs) = p.communicate()
json_block = reallyUTF8(json_block)
try:
return json.loads(json_block)
except ValueError:
logger.debug("No JSON object came out of codegen: error was %s" % (errs))
return None
def callm(method, param_dict, POST=False, socket_timeout=None, data=None):
"""
Call the api!
Param_dict is a *regular* *python* *dictionary* so if you want to have multi-valued params
put them in a list.
** note, if we require 2.6, we can get rid of this timeout munging.
"""
try:
param_dict['api_key'] = config.ECHO_NEST_API_KEY
param_list = []
if not socket_timeout:
socket_timeout = config.CALL_TIMEOUT
for key,val in param_dict.iteritems():
if isinstance(val, list):
param_list.extend( [(key,subval) for subval in val] )
elif val is not None:
if isinstance(val, unicode):
val = val.encode('utf-8')
param_list.append( (key,val) )
params = urllib.urlencode(param_list)
orig_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(socket_timeout)
if(POST):
if (not method == 'track/upload') or ((method == 'track/upload') and 'url' in param_dict):
"""
this is a normal POST call
"""
url = 'http://%s/%s/%s/%s' % (config.API_HOST, config.API_SELECTOR,
config.API_VERSION, method)
if data is None:
data = ''
data = urllib.urlencode(data)
data = "&".join([data, params])
f = opener.open(url, data=data)
else:
"""
upload with a local file is special, as the body of the request is the content of the file,
and the other parameters stay on the URL
"""
url = '/%s/%s/%s?%s' % (config.API_SELECTOR, config.API_VERSION,
method, params)
if ':' in config.API_HOST:
host, port = config.API_HOST.split(':')
else:
host = config.API_HOST
port = 80
if config.TRACE_API_CALLS:
logger.info("%s/%s" % (host+':'+str(port), url,))
conn = httplib.HTTPConnection(host, port = port)
conn.request('POST', url, body = data, headers = dict([('Content-Type', 'application/octet-stream')]+headers))
f = conn.getresponse()
else:
"""
just a normal GET call
"""
url = 'http://%s/%s/%s/%s?%s' % (config.API_HOST, config.API_SELECTOR, config.API_VERSION,
method, params)
f = opener.open(url)
socket.setdefaulttimeout(orig_timeout)
# try/except
response_dict = get_successful_response(f)
return response_dict
except IOError, e:
if hasattr(e, 'reason'):
raise EchoNestIOError(error=e.reason)
elif hasattr(e, 'code'):
raise EchoNestIOError(code=e.code)
else:
raise
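# Illustrative call (hedged, not part of the original source): something like
#     result = callm('artist/profile', {'id': 'ARH6W4X1187B99274F'})
# returns the decoded JSON response as a dict, or raises EchoNestAPIError /
# EchoNestIOError on failure.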
def oauthgetm(method, param_dict, socket_timeout=None):
try:
import oauth2 # lazy import this so oauth2 is not a hard dep
except ImportError:
raise Exception("You must install the python-oauth2 library to use this method.")
"""
Call the api! With Oauth!
Param_dict is a *regular* *python* *dictionary* so if you want to have multi-valued params
put them in a list.
** note, if we require 2.6, we can get rid of this timeout munging.
"""
def build_request(url):
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth2.generate_nonce(),
'oauth_timestamp': int(time.time())
}
consumer = oauth2.Consumer(key=config.ECHO_NEST_CONSUMER_KEY, secret=config.ECHO_NEST_SHARED_SECRET)
params['oauth_consumer_key'] = config.ECHO_NEST_CONSUMER_KEY
req = oauth2.Request(method='GET', url=url, parameters=params)
signature_method = oauth2.SignatureMethod_HMAC_SHA1()
req.sign_request(signature_method, consumer, None)
return req
param_dict['api_key'] = config.ECHO_NEST_API_KEY
param_list = []
if not socket_timeout:
socket_timeout = config.CALL_TIMEOUT
for key,val in param_dict.iteritems():
if isinstance(val, list):
param_list.extend( [(key,subval) for subval in val] )
elif val is not None:
if isinstance(val, unicode):
val = val.encode('utf-8')
param_list.append( (key,val) )
params = urllib.urlencode(param_list)
orig_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(socket_timeout)
"""
just a normal GET call
"""
url = 'http://%s/%s/%s/%s?%s' % (config.API_HOST, config.API_SELECTOR, config.API_VERSION,
method, params)
req = build_request(url)
f = opener.open(req.to_url())
socket.setdefaulttimeout(orig_timeout)
# try/except
response_dict = get_successful_response(f)
return response_dict
def postChunked(host, selector, fields, files):
"""
Attempt to replace postMultipart() with nearly-identical interface.
(The files tuple no longer requires the filename, and we only return
the response body.)
Uses the urllib2_file.py originally from
http://fabien.seisen.org which was also drawn heavily from
http://code.activestate.com/recipes/146306/ .
This urllib2_file.py is more desirable because of the chunked
uploading from a file pointer (no need to read entire file into
memory) and the ability to work from behind a proxy (due to its
basis on urllib2).
"""
params = urllib.urlencode(fields)
url = 'http://%s%s?%s' % (host, selector, params)
u = urllib2.urlopen(url, files)
result = u.read()
[fp.close() for (key, fp) in files]
return result
def fix(x):
# we need this to fix up all the dict keys to be strings, not unicode objects
assert(isinstance(x,dict))
return dict((str(k), v) for (k,v) in x.iteritems())
|
bsd-3-clause
|
schalkneethling/snippets-service
|
snippets/base/middleware.py
|
2
|
1348
|
from django.conf import settings
from django.core.urlresolvers import Resolver404, resolve
from snippets.base.views import fetch_json_snippets, fetch_snippets
class FetchSnippetsMiddleware(object):
"""
If the incoming request is for the fetch_snippets view, execute the view
and return it before other middleware can run.
fetch_snippets is a very very basic view that doesn't need any of the
middleware that the rest of the site needs, such as the session or csrf
middlewares. To avoid unintended issues (such as headers we don't want
being added to the response) this middleware detects requests to that view
and executes the view early, bypassing the rest of the middleware.
"""
def process_request(self, request):
try:
result = resolve(request.path)
except Resolver404:
return
if result.func in (fetch_snippets, fetch_json_snippets):
return result.func(request, *result.args, **result.kwargs)
class HostnameMiddleware(object):
def __init__(self):
values = [getattr(settings, x) for x in ['HOSTNAME', 'DEIS_APP', 'DEIS_DOMAIN']]
self.backend_server = '.'.join(x for x in values if x)
def process_response(self, request, response):
response['X-Backend-Server'] = self.backend_server
return response
|
mpl-2.0
|
tboyce021/home-assistant
|
homeassistant/components/goalzero/config_flow.py
|
9
|
2661
|
"""Config flow for Goal Zero Yeti integration."""
import logging
from goalzero import Yeti, exceptions
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DEFAULT_NAME, DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema({"host": str, "name": str})
class GoalZeroFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Goal Zero Yeti."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
errors = {}
if user_input is not None:
host = user_input[CONF_HOST]
name = user_input[CONF_NAME]
if await self._async_endpoint_existed(host):
return self.async_abort(reason="already_configured")
try:
await self._async_try_connect(host)
except exceptions.ConnectError:
errors["base"] = "cannot_connect"
_LOGGER.error("Error connecting to device at %s", host)
except exceptions.InvalidHost:
errors["base"] = "invalid_host"
_LOGGER.error("Invalid host at %s", host)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
return self.async_create_entry(
title=name,
data={CONF_HOST: host, CONF_NAME: name},
)
user_input = user_input or {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_HOST, default=user_input.get(CONF_HOST) or ""
): str,
vol.Optional(
CONF_NAME, default=user_input.get(CONF_NAME) or DEFAULT_NAME
): str,
}
),
errors=errors,
)
async def _async_endpoint_existed(self, endpoint):
for entry in self._async_current_entries():
if endpoint == entry.data.get(CONF_HOST):
return True
return False
async def _async_try_connect(self, host):
session = async_get_clientsession(self.hass)
api = Yeti(host, self.hass.loop, session)
await api.get_state()
|
apache-2.0
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-dns/setup.py
|
1
|
2805
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
# Change only PACKAGE_NAME to use a different folder and package name
PACKAGE_NAME = "azure-mgmt-dns"
PACKAGE_PPRINT_NAME = "DNS Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
history = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
license='MIT License',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=["tests"]),
install_requires=[
'msrestazure~=0.4.11',
'azure-common~=1.1',
],
cmdclass=cmdclass
)
|
mit
|
daliwangi/bitcoin
|
test/functional/proxy_test.py
|
19
|
8415
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind with different proxy configuration.
Test plan:
- Start bitcoinds with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side:
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args=args)
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
|
mit
|
swannapa/erpnext
|
erpnext/hr/doctype/expense_claim_type/expense_claim_type.py
|
41
|
1031
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class ExpenseClaimType(Document):
def validate(self):
self.validate_accounts()
self.validate_repeating_companies()
def validate_repeating_companies(self):
"""Error when Same Company is entered multiple times in accounts"""
accounts_list = []
for entry in self.accounts:
accounts_list.append(entry.company)
if len(accounts_list) != len(set(accounts_list)):
frappe.throw(_("Same Company is entered more than once"))
def validate_accounts(self):
for entry in self.accounts:
"""Error when Company of Ledger account doesn't match with Company Selected"""
if frappe.db.get_value("Account", entry.default_account, "company") != entry.company:
frappe.throw(_("Account {0} does not match with Company {1}"
).format(entry.default_account, entry.company))
|
gpl-3.0
|
pombredanne/commons
|
tests/python/twitter/common/http/test_building.py
|
13
|
3734
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import functools
import wsgiref.util
from twitter.common.http import HttpServer
import pytest
skipifpy3k = pytest.mark.skipif('sys.version_info >= (3,0)')
def make_request(path):
test_req = {'REQUEST_METHOD': 'GET', 'PATH_INFO': path}
wsgiref.util.setup_testing_defaults(test_req)
return test_req
def response_asserter(intended, status, headers):
assert int(status.split()[0]) == intended
# TODO(wickman) Fix bind method delegation in py3x. It's currently brittle
# and the new module might actually allow for binding in this fashion now.
@skipifpy3k
def test_basic_server_method_binding():
class MyServer(HttpServer):
def __init__(self):
HttpServer.__init__(self)
@HttpServer.route("/hello")
@HttpServer.route("/hello/:first")
@HttpServer.route("/hello/:first/:last")
def hello(self, first = 'Zaphod', last = 'Beeblebrox'):
return 'Hello, %s %s!' % (first, last)
server = MyServer()
assert server.app.handle('/hello') == 'Hello, Zaphod Beeblebrox!'
assert server.app.handle('/hello/Brian') == 'Hello, Brian Beeblebrox!'
assert server.app.handle('/hello/Brian/Horfgorf') == 'Hello, Brian Horfgorf!'
@skipifpy3k
def test_basic_server_error_binding():
BREAKAGE = '*****breakage*****'
class MyServer(object):
@HttpServer.route('/broken')
def broken_handler(self):
raise Exception('unhandled exception!')
@HttpServer.error(404)
@HttpServer.error(500)
def error_handler(self, error):
return BREAKAGE
server = HttpServer()
mserver = MyServer()
server.mount_routes(mserver)
# Test 404 error handling.
resp = server.app(make_request('/nonexistent_page'), functools.partial(response_asserter, 404))
assert resp[0] == BREAKAGE
# Test 500 error handling.
resp = server.app(make_request('/broken'), functools.partial(response_asserter, 500))
assert resp[0] == BREAKAGE
@skipifpy3k
def test_bind_method():
class BaseServer(HttpServer):
NAME = "heavens to murgatroyd!"
def __init__(self):
self._name = BaseServer.NAME
HttpServer.__init__(self)
class BaseServerNotSubclass(object):
def method_one(self):
return 'method_one'
class BaseServerIsSubclass(BaseServer):
def method_two(self):
return 'method_two'
bs = BaseServer()
# make sure we properly raise on nonexistent methods
with pytest.raises(ValueError):
bs._bind_method(BaseServerIsSubclass, 'undefined_method_name')
# properly raise on classes w/ divergent parents
with pytest.raises(TypeError):
bs._bind_method(BaseServerNotSubclass, 'method_one')
# should be able to bind method to base class self
bs._bind_method(BaseServerNotSubclass(), 'method_one')
bs._bind_method(BaseServerIsSubclass(), 'method_two')
assert bs.method_one() == 'method_one'
assert bs.method_two() == 'method_two'
|
apache-2.0
|
lyudmildrx/lymph
|
lymph/tests/test_mock_helpers.py
|
11
|
8021
|
import re
import unittest
import mock
import lymph
from lymph.testing import RPCServiceTestCase
from lymph.testing.mock_helpers import MockMixins, RpcMockTestCase, EventMockTestCase
class DummyTestCase(unittest.TestCase, MockMixins):
def runTest(self):
pass
class RPCMockHelperTests(unittest.TestCase):
def setUp(self):
self.dummy_case = DummyTestCase()
def test_single_call_match(self):
self.dummy_case._assert_equal_calls(
[mock.call('func', 1, foo='bar')],
[mock.call('func', 1, foo='bar')]
)
def test_single_call_different_name(self):
with self.assertRaisesRegexp(AssertionError, "function #0 name doesn't match, expected 'func2' actual 'func1'"):
self.dummy_case._assert_equal_calls(
[mock.call('func1', 1)],
[mock.call('func2', 1)]
)
def test_single_call_args_mismatch(self):
with self.assertRaisesRegexp(AssertionError, re.compile("function #0 argument #0 doesn't match.*", re.DOTALL)):
self.dummy_case._assert_equal_calls(
[mock.call('func', 1, foo='bar')],
[mock.call('func', 102, foo='bar')]
)
def test_single_call_keyword_value_mismatch(self):
with self.assertRaisesRegexp(AssertionError, re.compile("function #0 keyword argument 'foo' doesn't match.*", re.DOTALL)):
self.dummy_case._assert_equal_calls(
[mock.call('func', 1, foo='foobar')],
[mock.call('func', 1, foo='bar')]
)
def test_single_call_keyword_name_mismatch(self):
with self.assertRaisesRegexp(AssertionError, "function #0 keyword arguments doesn't match, expected \['something'\] actual \['foo'\]"):
self.dummy_case._assert_equal_calls(
[mock.call('func', 1, foo='bar')],
[mock.call('func', 1, something='bar')]
)
def test_single_call_keyword_different_count(self):
with self.assertRaisesRegexp(AssertionError, "function #0 keyword arguments doesn't match, expected \['bar', 'foo'\] actual \['foo'\]"):
self.dummy_case._assert_equal_calls(
[mock.call('func', 1, foo='bar')],
[mock.call('func', 1, foo='bar', bar='taz')]
)
def test_single_call_argument_different_count(self):
with self.assertRaisesRegexp(AssertionError, "function #0 arguments count doesn't match, expected 1 actual 2"):
self.dummy_case._assert_equal_calls(
[mock.call('func', 1, 2)],
[mock.call('func', 1)]
)
def test_multiple_call_match(self):
self.dummy_case._assert_equal_calls(
[
mock.call('func', 1, foo='foo'),
mock.call('func', 2, foo='bar'),
mock.call('func', 3, foo='foobar')
],
[
mock.call('func', 1, foo='foo'),
mock.call('func', 2, foo='bar'),
mock.call('func', 3, foo='foobar')
],
)
def test_assert_equal_any_call_success(self):
self.dummy_case._assert_equal_any_calls(
[
mock.call('func1', 1, foo='foo'),
mock.call('func2', 2, foo='bar'),
mock.call('func3', 3, foo='foobar')
],
[
mock.call('func2', 2, foo='bar'),
mock.call('func3', 3, foo='foobar')
],
)
def test_assert_equal_any_call_success_with_no_expect(self):
self.dummy_case._assert_equal_any_calls(
[
mock.call('func1', 1, foo='foo'),
mock.call('func2', 2, foo='bar'),
mock.call('func3', 3, foo='foobar')
],
[],
)
def test_assert_equal_any_call_fail_with_possible_matches(self):
with self.assertRaisesRegexp(AssertionError, "Call 'call\('func10', 3, foo='foobar'\)' wasn't found."):
self.dummy_case._assert_equal_any_calls(
[
mock.call('func1', 1, foo='foo'),
mock.call('func2', 2, foo='bar'),
mock.call('func3', 3, foo='foobar')
],
[
mock.call('func10', 3, foo='foobar')
],
)
def test_assert_equal_any_call_fail_with_no_match(self):
with self.assertRaisesRegexp(AssertionError, re.compile("Call 'call\('func2', 3, foo='foo'\)' wasn't found. Maybe you want:.*?", re.DOTALL)):
self.dummy_case._assert_equal_any_calls(
[
mock.call('func2', 1, foo='foo'),
mock.call('func2', 2, foo='bar'),
mock.call('func2', 3, foo='foobar')
],
[
mock.call('func2', 3, foo='foo')
],
)
def test_assert_equal_any_call_fail_with_no_match_better_message(self):
with self.assertRaisesRegexp(AssertionError, re.compile("function #0 keyword argument 'foo' doesn't match*?", re.DOTALL)):
self.dummy_case._assert_equal_any_calls(
[
mock.call('func1', 2, foo='bar'),
mock.call('func2', 3, foo='foobar')
],
[
mock.call('func2', 3, foo='foo')
],
)
def test_assert_equal_any_call_fail_with_wrong_order(self):
with self.assertRaisesRegexp(AssertionError, re.compile("Call 'call\('func2', 2, foo='bar'\)' wasn't found.", re.DOTALL)):
self.dummy_case._assert_equal_any_calls(
[
mock.call('func1', 1, foo='foo'),
mock.call('func2', 2, foo='bar'),
mock.call('func3', 3, foo='foobar')
],
[
mock.call('func3', 3, foo='foobar'),
mock.call('func2', 2, foo='bar')
],
)
class StringService(lymph.Interface):
@lymph.rpc()
def upper(self, text):
self.emit('str.uppered', {'text': text})
return text.upper()
@lymph.rpc()
def lower(self, text):
self.emit('str.lowered', {'text': text})
return text.lower()
class MetaRPCUpperTestCase(RPCServiceTestCase, RpcMockTestCase, EventMockTestCase):
service_class = StringService
service_name = 'str'
def setUp(self):
super(MetaRPCUpperTestCase, self).setUp()
self.setup_rpc_mocks({
'str.upper': 'HELLO WORLD',
'str.lower': 'hello world'
})
def test_meta_rpc(self):
response = self.client.upper(text='hello world')
self.assertEqual(response, 'HELLO WORLD')
self.assert_rpc_calls(
mock.call('str.upper', text='hello world')
)
def test_meta_events(self):
self.delete_rpc_mock('str.upper')
response = self.client.upper(text='hello world')
self.assertEqual(response, 'HELLO WORLD')
self.assert_rpc_calls(
mock.call('str.upper', text='hello world')
)
self.assert_events_emitted(
mock.call('str.uppered', {'text': 'hello world'})
)
def test_meta_update_rpc(self):
self.update_rpc_mock('str.upper', 'FOOBAR')
response = self.client.upper(text='hello world')
self.assertEqual(response, 'FOOBAR')
self.assert_rpc_calls(
mock.call('str.upper', text='hello world')
)
def test_meta_multiple_rpc(self):
response = self.client.upper(text='hello world')
self.assertEqual(response, 'HELLO WORLD')
response = self.client.lower(text='HELLO WORLD')
self.assertEqual(response, 'hello world')
self.assert_rpc_calls(
mock.call('str.upper', text='hello world'),
mock.call('str.lower', text='HELLO WORLD'),
)
|
apache-2.0
|
Borkata/android-tegra-nv-3.1.10-rel-15r7
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
11180
|
2058
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
gpl-2.0
|
tellesnobrega/sahara
|
sahara/utils/openstack/swift.py
|
3
|
3632
|
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
import swiftclient
from sahara import context
from sahara.swift import swift_helper as sh
from sahara.swift import utils as su
from sahara.utils.openstack import base
from sahara.utils.openstack import keystone as k
opts = [
cfg.BoolOpt('api_insecure',
default=False,
help='Allow to perform insecure SSL requests to swift.'),
cfg.StrOpt('ca_file',
help='Location of ca certificates file to use for swift '
'client requests.'),
cfg.StrOpt("endpoint_type",
default="internalURL",
help="Endpoint type for swift client requests")
]
swift_group = cfg.OptGroup(name='swift',
title='Swift client options')
CONF = cfg.CONF
CONF.register_group(swift_group)
CONF.register_opts(opts, group=swift_group)
def client(username, password, trust_id=None):
'''return a Swift client
This will return a Swift client for the specified username scoped to the
current context project, unless a trust identifier is specified.
If a trust identifier is present then the Swift client will be created
based on a preauthorized token generated by the username scoped to the
trust identifier.
:param username: The username for the Swift client
:param password: The password associated with the username
:param trust_id: A trust identifier for scoping the username (optional)
:returns: A Swift client object
'''
if trust_id:
proxyauth = k.auth_for_proxy(username, password, trust_id)
return client_from_token(k.token_from_auth(proxyauth))
else:
return swiftclient.Connection(
auth_version='2.0',
cacert=CONF.swift.ca_file,
insecure=CONF.swift.api_insecure,
authurl=su.retrieve_auth_url(CONF.keystone.endpoint_type),
user=username,
key=password,
tenant_name=sh.retrieve_tenant(),
retries=CONF.retries.retries_number,
retry_on_ratelimit=True,
starting_backoff=CONF.retries.retry_after,
max_backoff=CONF.retries.retry_after)
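# Illustrative usage (assumed call sites, not part of the original module):
#     conn = client('hadoop', 's3cret')                    # direct password auth
#     conn = client('hadoop', 's3cret', trust_id='abc123') # preauthorized via trust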
def client_from_token(token=None):
'''return a Swift client authenticated from a token.'''
if not token:
token = context.get_auth_token()
return swiftclient.Connection(auth_version='2.0',
cacert=CONF.swift.ca_file,
insecure=CONF.swift.api_insecure,
preauthurl=base.url_for(
service_type="object-store",
endpoint_type=CONF.swift.endpoint_type),
preauthtoken=token,
retries=CONF.retries.retries_number,
retry_on_ratelimit=True,
starting_backoff=CONF.retries.retry_after,
max_backoff=CONF.retries.retry_after)
|
apache-2.0
|
ZhangXinNan/tensorflow
|
tensorflow/python/grappler/graph_placer_test.py
|
49
|
5594
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph placer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.grappler import cluster
from tensorflow.python.grappler import graph_placer
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class GraphPlacerTest(test.TestCase):
@staticmethod
def _buildMnist(batch_size=128,
input_size=256,
num_classes=1024,
num_layers=10,
hidden_size=256,
name='mnist'):
g = tf_ops.get_default_graph()
with g.as_default():
ops = {}
x = random_ops.random_uniform(
[batch_size, input_size], -0.1, 0.1, dtype=dtypes.float32)
for layer_id in range(num_layers):
with variable_scope.variable_scope('layer_{}'.format(layer_id)):
a = input_size if layer_id == 0 else hidden_size
b = hidden_size if layer_id < num_layers - 1 else num_classes
w = variable_scope.get_variable('w', [a, b])
x = math_ops.matmul(x, w)
x = nn_ops.relu(x)
ops['y_preds'] = math_ops.argmax(x, axis=1)
train_op = g.get_collection_ref(tf_ops.GraphKeys.TRAIN_OP)
train_op.append(ops['y_preds'])
return g
@staticmethod
def _buildCluster(num_cpus=1, num_gpus=1):
devices = []
if num_gpus > 0:
device_properties = device_properties_pb2.DeviceProperties(
type='GPU',
vendor='NVidia',
model='GeForce GTX TITAN X',
frequency=1076,
num_cores=24,
environment={'architecture': '5.2',
'cuda': '8000',
'cudnn': '6021'},
num_registers=65536,
l1_cache_size=24576,
l2_cache_size=3145728,
shared_memory_size_per_multiprocessor=98304,
memory_size=12783648768,
bandwidth=336480000)
for i in range(num_gpus):
devices.append(
device_properties_pb2.NamedDevice(
properties=device_properties, name='/GPU:' + str(i)))
assert num_cpus > 0
device_properties = device_properties_pb2.DeviceProperties(
type='CPU',
frequency=2000,
num_cores=4,
l1_cache_size=32768,
l2_cache_size=262144,
l3_cache_size=12582912)
for i in range(num_cpus):
devices.append(
device_properties_pb2.NamedDevice(
properties=device_properties, name='/CPU:' + str(i)))
return cluster.Cluster(devices=devices)
def testBasic(self):
"""Place a trivial graph."""
a = constant_op.constant(10, name='a')
b = constant_op.constant(20, name='b')
c = math_ops.add_n([a, b], name='c')
d = math_ops.add_n([b, c], name='d')
train_op = tf_ops.get_collection_ref(tf_ops.GraphKeys.TRAIN_OP)
train_op.append(d)
mg = meta_graph.create_meta_graph_def(graph=tf_ops.get_default_graph())
gcluster = cluster.Cluster()
placed_mg = graph_placer.PlaceGraph(mg, allotted_time=15, cluster=gcluster)
self.assertEqual(4, len(placed_mg.graph_def.node))
self.assertItemsEqual([node.name for node in placed_mg.graph_def.node],
[node.name for node in mg.graph_def.node])
available_devices = [device.name for device in gcluster.ListDevices()]
for node in placed_mg.graph_def.node:
# The constant nodes are optimized away before the placer is run, and
# therefore won't be placed.
self.assertTrue(not node.device or node.device in available_devices)
def testMNIST(self):
graph = GraphPlacerTest._buildMnist()
mg = meta_graph.create_meta_graph_def(graph=graph)
gcluster = GraphPlacerTest._buildCluster(num_gpus=1)
# Spend 15 seconds trying to optimize the placement of the model. This
# should give us enough time to exercise the code, but not enough to find
# a good placement, so we'll just check for legality.
placed_mg = graph_placer.PlaceGraph(mg, allotted_time=15, cluster=gcluster)
self.assertEqual(len(placed_mg.graph_def.node), len(mg.graph_def.node))
self.assertItemsEqual([node.name for node in placed_mg.graph_def.node],
[node.name for node in mg.graph_def.node])
available_devices = [device.name for device in gcluster.ListDevices()]
for node in placed_mg.graph_def.node:
self.assertTrue(not node.device or node.device in available_devices)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
Stanford-Online/edx-platform
|
common/lib/xmodule/xmodule/tests/xml/factories.py
|
14
|
5168
|
"""
Factories for generating edXML for testing XModule import
"""
import inspect
from tempfile import mkdtemp
from fs.osfs import OSFS
from factory import Factory, lazy_attribute, post_generation, Sequence
from lxml import etree
from xblock.mixins import HierarchyMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.x_module import XModuleMixin
from xmodule.modulestore import only_xmodules
class XmlImportData(object):
"""
Class to capture all of the data needed to actually run an XML import,
so that the Factories have something to generate
"""
def __init__(self, xml_node, xml=None, course_id=None,
default_class=None, policy=None,
filesystem=None, parent=None,
xblock_mixins=(), xblock_select=None):
self._xml_node = xml_node
self._xml_string = xml
self.course_id = course_id
self.default_class = default_class
self.filesystem = filesystem
self.xblock_mixins = xblock_mixins
self.xblock_select = xblock_select
self.parent = parent
if policy is None:
self.policy = {}
else:
self.policy = policy
@property
def xml_string(self):
"""Return the stringified version of the generated xml"""
if self._xml_string is not None:
return self._xml_string
return etree.tostring(self._xml_node)
def __repr__(self):
return u"XmlImportData{!r}".format((
self._xml_node, self._xml_string, self.course_id,
self.default_class, self.policy,
self.filesystem, self.parent, self.xblock_mixins,
self.xblock_select,
))
# Extract all argument names used to construct XmlImportData objects,
# so that the factory doesn't treat them as XML attributes
XML_IMPORT_ARGS = inspect.getargspec(XmlImportData.__init__).args
class XmlImportFactory(Factory):
"""
Factory for generating XmlImportData's, which can hold all the data needed
to run an XModule XML import
"""
class Meta(object):
model = XmlImportData
filesystem = OSFS(mkdtemp())
xblock_mixins = (InheritanceMixin, XModuleMixin, HierarchyMixin)
xblock_select = only_xmodules
url_name = Sequence(str)
attribs = {}
policy = {}
inline_xml = True
tag = 'unknown'
course_id = 'edX/xml_test_course/101'
@classmethod
def _adjust_kwargs(cls, **kwargs):
"""
Adjust the kwargs to be passed to the generated class.
        Any kwargs that match :func:`XmlImportData.__init__` will be passed
        through. Any other unknown `kwargs` will be treated as XML attributes.
:param tag: xml tag for the generated :class:`Element` node
:param text: (Optional) specifies the text of the generated :class:`Element`.
:param policy: (Optional) specifies data for the policy json file for this node
:type policy: dict
:param attribs: (Optional) specify attributes for the XML node
:type attribs: dict
"""
tag = kwargs.pop('tag', 'unknown')
kwargs['policy'] = {'{tag}/{url_name}'.format(tag=tag, url_name=kwargs['url_name']): kwargs['policy']}
kwargs['xml_node'].text = kwargs.pop('text', None)
kwargs['xml_node'].attrib.update(kwargs.pop('attribs', {}))
# Make sure that the xml_module doesn't try and open a file to find the contents
# of this node.
inline_xml = kwargs.pop('inline_xml')
if inline_xml:
kwargs['xml_node'].set('not_a_pointer', 'true')
for key in kwargs.keys():
if key not in XML_IMPORT_ARGS:
kwargs['xml_node'].set(key, kwargs.pop(key))
if not inline_xml:
kwargs['xml_node'].write(
kwargs['filesystem'].open(
'{}/{}.xml'.format(kwargs['tag'], kwargs['url_name'])
),
encoding='utf-8'
)
return kwargs
@lazy_attribute
def xml_node(self):
"""An :class:`xml.etree.Element`"""
return etree.Element(self.tag)
@post_generation
def parent(self, _create, extracted, **_):
"""Hook to merge this xml into a parent xml node"""
if extracted is None:
return
extracted._xml_node.append(self._xml_node) # pylint: disable=no-member, protected-access
extracted.policy.update(self.policy)
class CourseFactory(XmlImportFactory):
"""Factory for <course> nodes"""
tag = 'course'
name = '101'
static_asset_path = 'xml_test_course'
class ChapterFactory(XmlImportFactory):
"""Factory for <chapter> nodes"""
tag = 'chapter'
class SequenceFactory(XmlImportFactory):
"""Factory for <sequential> nodes"""
tag = 'sequential'
class VerticalFactory(XmlImportFactory):
"""Factory for <vertical> nodes"""
tag = 'vertical'
class ProblemFactory(XmlImportFactory):
"""Factory for <problem> nodes"""
tag = 'problem'
text = '<h1>Empty Problem!</h1>'
class HtmlFactory(XmlImportFactory):
"""Factory for <html> nodes"""
tag = 'html'
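# Illustrative usage sketch (attribute names/values below are hypothetical, and it
# assumes the standard factory_boy build() strategy): the factories compose via the
# `parent` post_generation hook, so a nested course structure can be described directly.
#
#   course = CourseFactory.build()
#   chapter = ChapterFactory.build(parent=course)
#   sequential = SequenceFactory.build(parent=chapter, attribs={'format': 'Homework'})
#   print course.xml_string   # the parent XML now contains the chapter/sequential nodes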
|
agpl-3.0
|
ampamo/smart-build
|
SmartBuild/SmartBuild/settings/_base.py
|
1
|
2793
|
"""
Django settings for SmartBuild project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.core.exceptions import ImproperlyConfigured
#***************Get environment variable****************************
def get_env_variable(var_name):
    try:
        return os.environ[var_name]
    except KeyError:
        error_msg = "Set the %s environment variable" % var_name
        raise ImproperlyConfigured(error_msg)
#******************************************************************
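# Example (hypothetical variable name): settings that should not be hard-coded can
# be pulled from the environment, e.g.
#   SECRET_KEY = get_env_variable('DJANGO_SECRET_KEY')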
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w9$1cpjr7!n@5(p*#zsy3^42g=u87x8et5b!!d@x(=eu(v)n!3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'places',
'builds',
'modules',
'indications',
'widget_tweaks',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'SmartBuild.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
'templates/'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'SmartBuild.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
|
cc0-1.0
|
souzainf3/namebench
|
nb_third_party/graphy/bar_chart.py
|
233
|
5769
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code related to bar charts."""
import copy
import warnings
from graphy import common
from graphy import util
class BarsStyle(object):
"""Style of a series of bars in a BarChart
Object Attributes:
color: Hex string, like '00ff00' for green
"""
def __init__(self, color):
self.color = color
class BarChartStyle(object):
"""Represents the style for bars on a BarChart.
Any of the object attributes may be set to None, in which case the
value will be auto-calculated.
Object Attributes:
bar_thickness: The thickness of a bar, in pixels.
bar_gap: The gap between bars, in pixels, or as a fraction of bar thickness
if use_fractional_gap_spacing is True.
group_gap: The gap between groups of bars, in pixels, or as a fraction of
bar thickness if use_fractional_gap_spacing is True.
use_fractional_gap_spacing: if True, bar_gap and group_gap specify gap
sizes as a fraction of bar width. Default is False.
"""
_DEFAULT_GROUP_GAP = 8
_DEFAULT_BAR_GAP = 4
def __init__(self, bar_thickness=None,
bar_gap=_DEFAULT_BAR_GAP, group_gap=_DEFAULT_GROUP_GAP,
use_fractional_gap_spacing=False):
"""Create a new BarChartStyle.
Args:
bar_thickness: The thickness of a bar, in pixels. Set this to None if
you want the bar thickness to be auto-calculated (this is the default
behaviour).
bar_gap: The gap between bars, in pixels. Default is 4.
group_gap: The gap between groups of bars, in pixels. Default is 8.
"""
self.bar_thickness = bar_thickness
self.bar_gap = bar_gap
self.group_gap = group_gap
self.use_fractional_gap_spacing = use_fractional_gap_spacing
class BarStyle(BarChartStyle):
def __init__(self, *args, **kwargs):
warnings.warn('BarStyle is deprecated. Use BarChartStyle.',
DeprecationWarning, stacklevel=2)
super(BarStyle, self).__init__(*args, **kwargs)
class BarChart(common.BaseChart):
"""Represents a bar chart.
Object attributes:
vertical: if True, the bars will be vertical. Default is True.
stacked: if True, the bars will be stacked. Default is False.
style: The BarChartStyle for all bars on this chart, specifying bar
thickness and gaps between bars.
"""
def __init__(self, points=None):
"""Constructor for BarChart objects."""
super(BarChart, self).__init__()
if points is not None:
self.AddBars(points)
self.vertical = True
self.stacked = False
self.style = BarChartStyle(None, None, None) # full auto
def AddBars(self, points, label=None, color=None):
"""Add a series of bars to the chart.
points: List of y-values for the bars in this series
label: Name of the series (used in the legend)
color: Hex string, like '00ff00' for green
This is a convenience method which constructs & appends the DataSeries for
you.
"""
if label is not None and util._IsColor(label):
warnings.warn('Your code may be broken! '
'Label is a hex triplet. Maybe it is a color? The '
'old argument order (color before label) is deprecated.',
DeprecationWarning, stacklevel=2)
style = BarsStyle(color)
series = common.DataSeries(points, label=label, style=style)
self.data.append(series)
return series
def GetDependentAxes(self):
"""Get the dependendant axes, which depend on orientation."""
if self.vertical:
return (self._axes[common.AxisPosition.LEFT] +
self._axes[common.AxisPosition.RIGHT])
else:
return (self._axes[common.AxisPosition.TOP] +
self._axes[common.AxisPosition.BOTTOM])
def GetIndependentAxes(self):
"""Get the independendant axes, which depend on orientation."""
if self.vertical:
return (self._axes[common.AxisPosition.TOP] +
self._axes[common.AxisPosition.BOTTOM])
else:
return (self._axes[common.AxisPosition.LEFT] +
self._axes[common.AxisPosition.RIGHT])
def GetDependentAxis(self):
"""Get the main dependendant axis, which depends on orientation."""
if self.vertical:
return self.left
else:
return self.bottom
def GetIndependentAxis(self):
"""Get the main independendant axis, which depends on orientation."""
if self.vertical:
return self.bottom
else:
return self.left
def GetMinMaxValues(self):
"""Get the largest & smallest bar values as (min_value, max_value)."""
if not self.stacked:
return super(BarChart, self).GetMinMaxValues()
if not self.data:
return None, None # No data, nothing to do.
num_bars = max(len(series.data) for series in self.data)
positives = [0 for i in xrange(0, num_bars)]
negatives = list(positives)
for series in self.data:
for i, point in enumerate(series.data):
if point:
if point > 0:
positives[i] += point
else:
negatives[i] += point
min_value = min(min(positives), min(negatives))
max_value = max(max(positives), max(negatives))
return min_value, max_value
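# Minimal usage sketch (data, label and color are illustrative):
#   chart = BarChart()
#   chart.AddBars([1, 4, 2], label='series A', color='0000ff')
#   chart.stacked = True
#   low, high = chart.GetMinMaxValues()   # per-bar stacked totals when stacked=True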
|
apache-2.0
|
endlessm/chromium-browser
|
third_party/catapult/dashboard/dashboard/pinpoint/models/compare/kolmogorov_smirnov_test.py
|
1
|
1146
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from dashboard.pinpoint.models.compare import kolmogorov_smirnov
class KolmogorovSmirnovTest(unittest.TestCase):
def testBasic(self):
self.assertAlmostEqual(
kolmogorov_smirnov.KolmogorovSmirnov(range(10), range(20, 30)),
1.8879793657162556e-05)
self.assertAlmostEqual(
kolmogorov_smirnov.KolmogorovSmirnov(range(5), range(10)),
0.26680230985258474)
def testDuplicateValues(self):
self.assertAlmostEqual(
kolmogorov_smirnov.KolmogorovSmirnov([0] * 5, [1] * 5),
0.0037813540593701006)
def testSmallSamples(self):
self.assertEqual(
kolmogorov_smirnov.KolmogorovSmirnov([0], [1]), 0.2890414283708268)
def testAllValuesIdentical(self):
self.assertEqual(kolmogorov_smirnov.KolmogorovSmirnov(
[0] * 5, [0] * 5), 1.0)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
sgnes/PracticeOfIntroductionToAlgorithms
|
sorting.py
|
1
|
2042
|
#!C:\Python27\python.exe
# _*_ coding=utf-8 _*_
import random
def main():
max_num = 1000
cur_num = 100
arr = random.sample(range(1,max_num),cur_num)
print arr
#insert_sort(arr, cur_num)
print merge_sort(arr)
#bubble_sort(arr)
pass
def insert_sort(arr, length):
"""This is the basic insert sort alg.
:param arr: The unsorted sequence(list)
:param length: The length of the unsorted sequence
:return:the sorted sequence
"""
for i in range(1,length):
j = i - 1
key = arr[i]
while(j >= 0 and arr[j] > key):
arr[j + 1] = arr[j]
arr[j] = key
j -= 1
return arr
def merge_sort(arr):
"""
This is the basic merge sort alg.
:param arr:The unsorted sequence(list)
:return:The sorted sequence
"""
if len(arr) > 1:
m = int(len(arr)/2)
larr = arr[:m]
rarr = arr[m:]
l = merge_sort(larr)
r = merge_sort(rarr)
return merge(l, r)
else:
return arr
def merge(larr, rarr):
"""
merge the two sub-arr to one arr
:param larr:The left arr to be merged
:param rarr:The right arr to be merged
:return:The merged arr
"""
marr = list()
while larr and rarr:
if larr[0] < rarr[0]:
marr.append(larr.pop(0))
else:
marr.append(rarr.pop(0))
if larr:
marr += larr
if rarr:
marr += rarr
return marr
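# Quick illustration (doctest-style, values chosen by hand): merging two already
# sorted halves preserves order, which is what merge_sort relies on.
#   >>> merge([1, 3, 5], [2, 4, 6])
#   [1, 2, 3, 4, 5, 6]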
def bubble_sort(arr):
"""
This is the basic bubble sort.
:param arr:The unsorted sequence(list)
:return:The sorted sequence
"""
for i in range(0, len(arr)):
for j in range(0, len(arr) - i -1):
if arr[j] > arr[j + 1]:
key = arr[j]
arr[j] = arr[j + 1]
arr[j + 1] = key
return arr
def quick_sort(arr):
    """
    This is the basic quick sort alg.
    :param arr:The unsorted sequence
    :return:The sorted sequence
    """
    if len(arr) <= 1:
        return arr
    pivot = arr[0]
    less = [x for x in arr[1:] if x < pivot]
    greater_equal = [x for x in arr[1:] if x >= pivot]
    return quick_sort(less) + [pivot] + quick_sort(greater_equal)
if __name__ == '__main__':
main()
|
gpl-2.0
|
materialsproject/pymatgen
|
pymatgen/core/spectrum.py
|
1
|
7661
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes to represent any type of spectrum, essentially any
x y value pairs.
"""
from typing import List, Union, Callable
import numpy as np
from monty.json import MSONable
from scipy import stats
from scipy.ndimage.filters import convolve1d
from pymatgen.util.coord import get_linear_interpolated_value
from pymatgen.util.typing import ArrayLike
def lorentzian(x, x_0: float = 0, sigma: float = 1.0):
"""
:param x: x values
:param x_0: Center
:param sigma: FWHM
:return: Value of lorentzian at x.
"""
return 1 / np.pi * 0.5 * sigma / ((x - x_0) ** 2 + (0.5 * sigma) ** 2)
class Spectrum(MSONable):
"""
    Base class for any type of spectrum, essentially just x, y values. Examples
include XRD patterns, XANES, EXAFS, NMR, DOS, etc.
Implements basic tools like application of smearing, normalization, addition
multiplication, etc.
Subclasses should extend this object and ensure that super is called with
ALL args and kwargs. That ensures subsequent things like add and mult work
properly.
"""
XLABEL = "x"
YLABEL = "y"
def __init__(self, x: ArrayLike, y: ArrayLike, *args, **kwargs):
r"""
Args:
x (ndarray): A ndarray of N values.
y (ndarray): A ndarray of N x k values. The first dimension must be
the same as that of x. Each of the k values are interpreted as separate.
*args: All subclasses should provide args other than x and y
when calling super, e.g., super().__init__(
x, y, arg1, arg2, kwarg1=val1, ..). This guarantees the +, -, *,
etc. operators work properly.
**kwargs: Same as that for *args.
"""
self.x = np.array(x)
self.y = np.array(y)
self.ydim = self.y.shape
if self.x.shape[0] != self.ydim[0]:
raise ValueError("x and y values have different first dimension!")
self._args = args
self._kwargs = kwargs
def __getattr__(self, item):
if item == self.XLABEL.lower():
return self.x
if item == self.YLABEL.lower():
return self.y
raise AttributeError("Invalid attribute name %s" % str(item))
def __len__(self):
return self.ydim[0]
def normalize(self, mode: str = "max", value: float = 1.0):
"""
        Normalize the spectrum to a maximum intensity or total sum, depending on mode.
Args:
mode (str): Normalization mode. Supported modes are "max" (set the
max y value to value, e.g., in XRD patterns), "sum" (set the
sum of y to a value, i.e., like a probability density).
value (float): Value to normalize to. Defaults to 1.
"""
if mode.lower() == "sum":
factor = np.sum(self.y, axis=0)
elif mode.lower() == "max":
factor = np.max(self.y, axis=0)
else:
raise ValueError("Unsupported normalization mode %s!" % mode)
self.y /= factor / value
def smear(self, sigma: float = 0.0, func: Union[str, Callable] = "gaussian"):
"""
Apply Gaussian/Lorentzian smearing to spectrum y value.
Args:
sigma: Std dev for Gaussian smear function
func: "gaussian" or "lorentzian" or a callable. If this is a callable, the sigma value is ignored. The
callable should only take a single argument (a numpy array) and return a set of weights.
"""
points = np.linspace(np.min(self.x) - np.mean(self.x), np.max(self.x) - np.mean(self.x), len(self.x))
if callable(func):
weights = func(points)
elif func.lower() == "gaussian":
weights = stats.norm.pdf(points, scale=sigma)
elif func.lower() == "lorentzian":
weights = lorentzian(points, sigma=sigma)
else:
raise ValueError(f"Invalid func {func}")
weights /= np.sum(weights)
if len(self.ydim) == 1:
total = np.sum(self.y)
self.y = convolve1d(self.y, weights)
self.y *= total / np.sum(self.y) # renormalize to maintain the same integrated sum as before.
else:
total = np.sum(self.y, axis=0)
self.y = np.array([convolve1d(self.y[:, k], weights) for k in range(self.ydim[1])]).T
self.y *= total / np.sum(self.y, axis=0) # renormalize to maintain the same integrated sum as before.
def get_interpolated_value(self, x: float) -> List[float]:
"""
Returns an interpolated y value for a particular x value.
Args:
x: x value to return the y value for
Returns:
Value of y at x
"""
if len(self.ydim) == 1:
return get_linear_interpolated_value(self.x, self.y, x)
return [get_linear_interpolated_value(self.x, self.y[:, k], x) for k in range(self.ydim[1])]
def copy(self):
"""
Returns:
Copy of Spectrum object.
"""
return self.__class__(self.x, self.y, *self._args, **self._kwargs)
def __add__(self, other):
"""
Add two Spectrum object together. Checks that x scales are the same.
Otherwise, a ValueError is thrown.
Args:
other: Another Spectrum object
Returns:
Sum of the two Spectrum objects
"""
if not all(np.equal(self.x, other.x)):
raise ValueError("X axis values are not compatible!")
return self.__class__(self.x, self.y + other.y, *self._args, **self._kwargs)
def __sub__(self, other):
"""
        Subtract one Spectrum object from another. Checks that x scales are
the same.
Otherwise, a ValueError is thrown
Args:
other: Another Spectrum object
Returns:
            Subtraction of the two Spectrum objects
"""
if not all(np.equal(self.x, other.x)):
raise ValueError("X axis values are not compatible!")
return self.__class__(self.x, self.y - other.y, *self._args, **self._kwargs)
def __mul__(self, other):
"""
Scale the Spectrum's y values
Args:
other: scalar, The scale amount
Returns:
Spectrum object with y values scaled
"""
return self.__class__(self.x, other * self.y, *self._args, **self._kwargs)
__rmul__ = __mul__
def __truediv__(self, other):
"""
True division of y
Args:
other: The divisor
Returns:
Spectrum object with y values divided
"""
return self.__class__(self.x, self.y.__truediv__(other), *self._args, **self._kwargs)
def __floordiv__(self, other):
"""
        Floor division of y
Args:
other: The divisor
Returns:
Spectrum object with y values divided
"""
return self.__class__(self.x, self.y.__floordiv__(other), *self._args, **self._kwargs)
__div__ = __truediv__
def __str__(self):
"""
Returns a string containing values and labels of spectrum object for
plotting.
"""
return "\n".join(
[
self.__class__.__name__,
"%s: %s" % (self.XLABEL, self.x),
"%s: %s" % (self.YLABEL, self.y),
]
)
def __repr__(self):
"""
Returns a printable representation of the class
"""
return self.__str__()
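# Minimal usage sketch (values are illustrative only):
#   import numpy as np
#   s = Spectrum(np.linspace(0, 10, 101), np.random.rand(101))
#   s.normalize(mode="max")            # scale so that max(y) == 1
#   s.smear(sigma=0.5)                 # Gaussian broadening, preserves the integrated sum
#   y_at_2 = s.get_interpolated_value(2.0)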
|
mit
|
eklitzke/spitfire
|
spitfire/compiler/macros/i18n.py
|
1
|
2929
|
import sys
import cStringIO as StringIO
from spitfire.compiler import analyzer
from spitfire.compiler.ast import *
from spitfire.compiler.visitor import print_tree
import spitfire.compiler.util
import spitfire.util
# generate a reasonable substitute name from a raw placeholder node
def make_placeholder_name(placeholder_node):
node_type = type(placeholder_node.expression)
placeholder_name = ''
if node_type is PlaceholderNode:
placeholder_name = placeholder_node.expression.name
elif node_type is CallFunctionNode:
placeholder = placeholder_node.expression.expression
if type(placeholder) is PlaceholderNode:
placeholder_name = placeholder.name
placeholder_name = placeholder_name.upper()
return placeholder_name
# create a translated version of the raw_msg while allowing placeholder
# expressions to pass through correctly
def make_i18n_message(raw_msg, macro_ast):
# top should be a fragment and due to the limited syntax, we can more or
# less scan this one level of nodes -- there are no nested i18n sections yet
output = StringIO.StringIO()
for i, n in enumerate(macro_ast.child_nodes):
#print i, type(n), "start", n.start, "end", n.end
#print "raw:", "'%s'" % raw_msg[n.start:n.end]
if isinstance(n, PlaceholderSubstitutionNode):
raw_placeholder_expression = raw_msg[n.start:n.end]
#output.write(make_placeholder_name(n))
output.write(raw_placeholder_expression)
else:
output.write(spitfire.util.i18n_mangled_message(n.value))
return output.getvalue()
def macro_i18n(macro_node, arg_map, compiler):
# fixme: parse the parameter list into something usable
# macro_node.parameter_list
# generate a fake translation for now to verify this is working
# most apps will have to stub this part out somehow i think
macro_content_ast = spitfire.compiler.util.parse(macro_node.value,
'i18n_goal')
i18n_msg = make_i18n_message(macro_node.value, macro_content_ast)
i18n_msg_utf8 = i18n_msg.encode('utf-8')
#print "macro_content_ast"
#print "orginal:", macro_node.value
#print "i18n:", i18n_msg_utf8
#print_tree(macro_content_ast)
return i18n_msg_utf8
def macro_function_i18n(call_node, arg_map, compiler):
# generate a fake translation for now to verify this is working
# most apps will have to stub this part out somehow i think
# in the context of a function, the goal is to replace a function with a
# translated literal string. we have to do some shenanigans since Spitfire
# doesn't parse repr(unicode)
msg_arg_node = call_node.arg_list.parg_list[0]
if not isinstance(msg_arg_node, LiteralNode):
raise analyzer.SemanticAnalyzerError(
'$i18n argument "%s" must be a string literal' % msg_arg_node)
i18n_msg = spitfire.util.i18n_mangled_message(msg_arg_node.value)
i18n_msg_utf8 = i18n_msg.encode('utf-8')
return u"'%s'" % i18n_msg.replace("'", "\\'")
|
bsd-3-clause
|
rotocoin/rotocoin
|
contrib/testgen/base58.py
|
2
|
2822
|
'''
Rotocoin base58 encoding and decoding.
Based on https://rotocointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Rotocoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
    expected_checksum = checksum(result[:-4])
    if result[-4:] == expected_checksum:
return result[:-4]
else:
return None
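# Illustrative round trip (Python 2 semantics; the payload is a made-up byte
# string): the *_chk helpers append, then verify and strip, a 4-byte
# double-SHA256 checksum, and b58decode_chk returns None on a checksum mismatch.
#   payload = '\x00hello'
#   assert b58decode_chk(b58encode_chk(payload)) == payload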
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21: return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/rotocoin/python-base58.git)
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
|
mit
|
carltongibson/django-allauth
|
allauth/socialaccount/providers/twitter/south_migrations/0004_auto__del_twitteraccount__del_twitterapp.py
|
83
|
1925
|
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'TwitterAccount'
db.delete_table('twitter_twitteraccount')
# Deleting model 'TwitterApp'
db.delete_table('twitter_twitterapp')
def backwards(self, orm):
# Adding model 'TwitterAccount'
db.create_table('twitter_twitteraccount', (
('username', self.gf('django.db.models.fields.CharField')(max_length=15)),
('social_id', self.gf('django.db.models.fields.BigIntegerField')(unique=True)),
('socialaccount_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['socialaccount.SocialAccount'], unique=True, primary_key=True)),
('profile_image_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
))
db.send_create_signal('twitter', ['TwitterAccount'])
# Adding model 'TwitterApp'
db.create_table('twitter_twitterapp', (
('consumer_secret', self.gf('django.db.models.fields.CharField')(max_length=80)),
('request_token_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('name', self.gf('django.db.models.fields.CharField')(max_length=40)),
('authorize_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('consumer_key', self.gf('django.db.models.fields.CharField')(max_length=80)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('access_token_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
))
db.send_create_signal('twitter', ['TwitterApp'])
models = {
}
complete_apps = ['twitter']
|
mit
|
suninsky/ReceiptOCR
|
Python/server/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py
|
1730
|
2746
|
from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def __init__(self, source, encoding):
_base.Filter.__init__(self, source)
self.encoding = encoding
def __iter__(self):
state = "pre_head"
meta_found = (self.encoding is None)
pending = []
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag":
if token["name"].lower() == "head":
state = "in_head"
elif type == "EmptyTag":
if token["name"].lower() == "meta":
# replace charset with actual encoding
has_http_equiv_content_type = False
for (namespace, name), value in token["data"].items():
if namespace is not None:
continue
elif name.lower() == 'charset':
token["data"][(namespace, name)] = self.encoding
meta_found = True
break
elif name == 'http-equiv' and value.lower() == 'content-type':
has_http_equiv_content_type = True
else:
if has_http_equiv_content_type and (None, "content") in token["data"]:
token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding
meta_found = True
elif token["name"].lower() == "head" and not meta_found:
# insert meta into empty head
yield {"type": "StartTag", "name": "head",
"data": token["data"]}
yield {"type": "EmptyTag", "name": "meta",
"data": {(None, "charset"): self.encoding}}
yield {"type": "EndTag", "name": "head"}
meta_found = True
continue
elif type == "EndTag":
if token["name"].lower() == "head" and pending:
# insert meta into head (if necessary) and flush pending queue
yield pending.pop(0)
if not meta_found:
yield {"type": "EmptyTag", "name": "meta",
"data": {(None, "charset"): self.encoding}}
while pending:
yield pending.pop(0)
meta_found = True
state = "post_head"
if state == "in_head":
pending.append(token)
else:
yield token
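# Rough usage sketch (tree construction is illustrative; the filter itself only
# needs an iterable of html5lib tokens plus the target encoding):
#   walker = html5lib.getTreeWalker("etree")
#   for token in Filter(walker(document_tree), "utf-8"):
#       ...  # a <meta charset="utf-8"> token is injected or rewritten inside <head>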
|
mit
|
Spoken-tutorial/spoken-website
|
cron/spoken_search/MySQLdb/MySQLdb/connections.py
|
1
|
11091
|
"""
This module implements connections for MySQLdb. Presently there is
only one class: Connection. Others are unlikely. However, you might
want to make your own subclasses. In most cases, you will probably
override Connection.default_cursor with a non-standard Cursor class.
"""
from MySQLdb import cursors
from _mysql_exceptions import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError
import types, _mysql
import re
def defaulterrorhandler(connection, cursor, errorclass, errorvalue):
"""
If cursor is not None, (errorclass, errorvalue) is appended to
cursor.messages; otherwise it is appended to
connection.messages. Then errorclass is raised with errorvalue as
the value.
You can override this with your own error handler by assigning it
to the instance.
"""
error = errorclass, errorvalue
if cursor:
cursor.messages.append(error)
else:
connection.messages.append(error)
del cursor
del connection
raise errorclass(errorvalue)
re_numeric_part = re.compile(r"^(\d+)")
def numeric_part(s):
"""Returns the leading numeric part of a string.
>>> numeric_part("20-alpha")
20
>>> numeric_part("foo")
>>> numeric_part("16b")
16
"""
m = re_numeric_part.match(s)
if m:
return int(m.group(1))
return None
class Connection(_mysql.connection):
"""MySQL Database Connection Object"""
default_cursor = cursors.Cursor
def __init__(self, *args, **kwargs):
"""
Create a connection to the database. It is strongly recommended
that you only use keyword parameters. Consult the MySQL C API
documentation for more information.
host
string, host to connect
user
string, user to connect as
passwd
string, password to use
db
string, database to use
port
integer, TCP/IP port to connect to
unix_socket
string, location of unix_socket to use
conv
conversion dictionary, see MySQLdb.converters
connect_timeout
number of seconds to wait before the connection attempt
fails.
compress
if set, compression is enabled
named_pipe
if set, a named pipe is used to connect (Windows only)
init_command
command which is run once the connection is created
read_default_file
file from which default client values are read
read_default_group
configuration group to use from the default file
cursorclass
class object, used to create cursors (keyword only)
use_unicode
If True, text-like columns are returned as unicode objects
            using the connection's character set. Otherwise, text-like
            columns are returned as normal strings. Unicode objects will
            always be encoded to the connection's character set
            regardless of this setting.
charset
If supplied, the connection character set will be changed
to this character set (MySQL-4.1 and newer). This implies
use_unicode=True.
sql_mode
If supplied, the session SQL mode will be changed to this
setting (MySQL-4.1 and newer). For more details and legal
values, see the MySQL documentation.
client_flag
integer, flags to use or 0
(see MySQL docs or constants/CLIENTS.py)
ssl
dictionary or mapping, contains SSL connection parameters;
see the MySQL documentation for more details
(mysql_ssl_set()). If this is set, and the client does not
support SSL, NotSupportedError will be raised.
local_infile
integer, non-zero enables LOAD LOCAL INFILE; zero disables
There are a number of undocumented, non-standard methods. See the
documentation for the MySQL C API for some hints on what they do.
"""
from MySQLdb.constants import CLIENT, FIELD_TYPE
from MySQLdb.converters import conversions
from weakref import proxy, WeakValueDictionary
import types
kwargs2 = kwargs.copy()
if 'conv' in kwargs:
conv = kwargs['conv']
else:
conv = conversions
conv2 = {}
for k, v in list(conv.items()):
if isinstance(k, int) and isinstance(v, list):
conv2[k] = v[:]
else:
conv2[k] = v
kwargs2['conv'] = conv2
cursorclass = kwargs2.pop('cursorclass', self.default_cursor)
charset = kwargs2.pop('charset', '')
if charset:
use_unicode = True
else:
use_unicode = False
use_unicode = kwargs2.pop('use_unicode', use_unicode)
sql_mode = kwargs2.pop('sql_mode', '')
client_flag = kwargs.get('client_flag', 0)
client_version = tuple([ numeric_part(n) for n in _mysql.get_client_info().split('.')[:2] ])
if client_version >= (4, 1):
client_flag |= CLIENT.MULTI_STATEMENTS
if client_version >= (5, 0):
client_flag |= CLIENT.MULTI_RESULTS
kwargs2['client_flag'] = client_flag
super(Connection, self).__init__(*args, **kwargs2)
self.cursorclass = cursorclass
self.encoders = dict([ (k, v) for k, v in list(conv.items())
if type(k) is not int ])
self._server_version = tuple([ numeric_part(n) for n in self.get_server_info().split('.')[:2] ])
db = proxy(self)
def _get_string_literal():
def string_literal(obj, dummy=None):
return db.string_literal(obj)
return string_literal
def _get_unicode_literal():
def unicode_literal(u, dummy=None):
return db.literal(u.encode(unicode_literal.charset))
return unicode_literal
def _get_string_decoder():
def string_decoder(s):
return s.decode(string_decoder.charset)
return string_decoder
string_literal = _get_string_literal()
self.unicode_literal = unicode_literal = _get_unicode_literal()
self.string_decoder = string_decoder = _get_string_decoder()
if not charset:
charset = self.character_set_name()
self.set_character_set(charset)
if sql_mode:
self.set_sql_mode(sql_mode)
if use_unicode:
self.converter[FIELD_TYPE.STRING].append((None, string_decoder))
self.converter[FIELD_TYPE.VAR_STRING].append((None, string_decoder))
self.converter[FIELD_TYPE.VARCHAR].append((None, string_decoder))
self.converter[FIELD_TYPE.BLOB].append((None, string_decoder))
self.encoders[bytes] = string_literal
self.encoders[str] = unicode_literal
self._transactional = self.server_capabilities & CLIENT.TRANSACTIONS
if self._transactional:
# PEP-249 requires autocommit to be initially off
self.autocommit(False)
self.messages = []
def cursor(self, cursorclass=None):
"""
Create a cursor on which queries may be performed. The
optional cursorclass parameter is used to create the
Cursor. By default, self.cursorclass=cursors.Cursor is
used.
"""
return (cursorclass or self.cursorclass)(self)
def __enter__(self): return self.cursor()
def __exit__(self, exc, value, tb):
if exc:
self.rollback()
else:
self.commit()
def literal(self, o):
"""
If o is a single object, returns an SQL literal as a string.
If o is a non-string sequence, the items of the sequence are
converted and returned as a sequence.
Non-standard. For internal use; do not use this in your
applications.
"""
return self.escape(o, self.encoders)
def begin(self):
"""Explicitly begin a connection. Non-standard.
DEPRECATED: Will be removed in 1.3.
Use an SQL BEGIN statement instead."""
from warnings import warn
warn("begin() is non-standard and will be removed in 1.3",
DeprecationWarning, 2)
self.query("BEGIN")
if not hasattr(_mysql.connection, 'warning_count'):
def warning_count(self):
"""Return the number of warnings generated from the
last query. This is derived from the info() method."""
from string import atoi
info = self.info()
if info:
return atoi(info.split()[-1])
else:
return 0
def set_character_set(self, charset):
"""Set the connection character set to charset. The character
set can only be changed in MySQL-4.1 and newer. If you try
to change the character set from the current value in an
older version, NotSupportedError will be raised."""
if charset == "utf8mb4":
py_charset = "utf8"
else:
py_charset = charset
if self.character_set_name() != charset:
try:
super(Connection, self).set_character_set(charset)
except AttributeError:
if self._server_version < (4, 1):
raise NotSupportedError("server is too old to set charset")
self.query('SET NAMES %s' % charset)
self.store_result()
self.string_decoder.charset = py_charset
self.unicode_literal.charset = py_charset
def set_sql_mode(self, sql_mode):
"""Set the connection sql_mode. See MySQL documentation for
legal values."""
if self._server_version < (4, 1):
raise NotSupportedError("server is too old to set sql_mode")
self.query("SET SESSION sql_mode='%s'" % sql_mode)
self.store_result()
def show_warnings(self):
"""Return detailed information about warnings as a
sequence of tuples of (Level, Code, Message). This
is only supported in MySQL-4.1 and up. If your server
is an earlier version, an empty sequence is returned."""
if self._server_version < (4,1): return ()
self.query("SHOW WARNINGS")
r = self.store_result()
warnings = r.fetch_row(0)
return warnings
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
errorhandler = defaulterrorhandler
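# Minimal usage sketch (connection parameters are illustrative):
#   conn = Connection(host="localhost", user="app", passwd="secret", db="test",
#                     charset="utf8", use_unicode=True)
#   with conn as cursor:          # __enter__ returns a cursor, __exit__ commits or rolls back
#       cursor.execute("SELECT VERSION()")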
|
gpl-3.0
|
bukalov/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
|
115
|
26224
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import StringIO
from webkitpy.common.checkout.scm import CheckoutNeedsUpdate
from webkitpy.common.checkout.scm.scm_mock import MockSCM
from webkitpy.common.net.bugzilla import Attachment
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.commandtest import CommandsTest
from webkitpy.tool.commands.queues import *
from webkitpy.tool.commands.queuestest import QueuesTest
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.common.net.statusserver_mock import MockStatusServer
from webkitpy.tool.mocktool import MockTool, MockOptions
class TestCommitQueue(CommitQueue):
def __init__(self, tool=None):
CommitQueue.__init__(self)
if tool:
self.bind_to_tool(tool)
self._options = MockOptions(confirm=False, parent_command="commit-queue", port=None)
def begin_work_queue(self):
output_capture = OutputCapture()
output_capture.capture_output()
CommitQueue.begin_work_queue(self)
output_capture.restore_output()
class TestQueue(AbstractPatchQueue):
name = "test-queue"
class TestReviewQueue(AbstractReviewQueue):
name = "test-review-queue"
class TestFeederQueue(FeederQueue):
_sleep_duration = 0
class AbstractQueueTest(CommandsTest):
def test_log_directory(self):
self.assertEqual(TestQueue()._log_directory(), os.path.join("..", "test-queue-logs"))
def _assert_run_webkit_patch(self, run_args, port=None):
queue = TestQueue()
tool = MockTool()
tool.status_server.bot_id = "gort"
tool.executive = Mock()
queue.bind_to_tool(tool)
queue._options = Mock()
queue._options.port = port
queue.run_webkit_patch(run_args)
expected_run_args = ["echo", "--status-host=example.com", "--bot-id=gort"]
if port:
expected_run_args.append("--port=%s" % port)
expected_run_args.extend(run_args)
tool.executive.run_command.assert_called_with(expected_run_args, cwd='/mock-checkout')
def test_run_webkit_patch(self):
self._assert_run_webkit_patch([1])
self._assert_run_webkit_patch(["one", 2])
self._assert_run_webkit_patch([1], port="mockport")
def test_iteration_count(self):
queue = TestQueue()
queue._options = Mock()
queue._options.iterations = 3
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
self.assertFalse(queue.should_continue_work_queue())
def test_no_iteration_count(self):
queue = TestQueue()
queue._options = Mock()
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
def _assert_log_message(self, script_error, log_message):
failure_log = AbstractQueue._log_from_script_error_for_upload(script_error, output_limit=10)
self.assertTrue(failure_log.read(), log_message)
def test_log_from_script_error_for_upload(self):
self._assert_log_message(ScriptError("test"), "test")
unicode_tor = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
utf8_tor = unicode_tor.encode("utf-8")
self._assert_log_message(ScriptError(unicode_tor), utf8_tor)
script_error = ScriptError(unicode_tor, output=unicode_tor)
expected_output = "%s\nLast %s characters of output:\n%s" % (utf8_tor, 10, utf8_tor[-10:])
self._assert_log_message(script_error, expected_output)
class FeederQueueTest(QueuesTest):
def test_feeder_queue(self):
queue = TestFeederQueue()
tool = MockTool(log_executive=True)
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("feeder-queue"),
"process_work_item": """Warning, attachment 10001 on bug 50000 has invalid committer ([email protected])
Warning, attachment 10001 on bug 50000 has invalid committer ([email protected])
MOCK setting flag 'commit-queue' to '-' on attachment '10001' with comment 'Rejecting attachment 10001 from commit-queue.\n\[email protected] does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py.
- If you do not have committer rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.
- If you have committer rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed). The commit-queue restarts itself every 2 hours. After restart the commit-queue will correctly respect your committer rights.'
MOCK: update_work_items: commit-queue [10005, 10000]
Feeding commit-queue items [10005, 10000]
Feeding EWS (1 r? patch, 1 new)
MOCK: submit_to_ews: 10002
""",
"handle_unexpected_error": "Mock error message\n",
}
self.assert_queue_outputs(queue, tool=tool, expected_logs=expected_logs)
class AbstractPatchQueueTest(CommandsTest):
def test_next_patch(self):
queue = AbstractPatchQueue()
tool = MockTool()
queue.bind_to_tool(tool)
queue._options = Mock()
queue._options.port = None
self.assertIsNone(queue._next_patch())
tool.status_server = MockStatusServer(work_items=[2, 10000, 10001])
expected_stdout = "MOCK: fetch_attachment: 2 is not a known attachment id\n" # A mock-only message to prevent us from making mistakes.
expected_logs = "MOCK: release_work_item: None 2\n"
patch = OutputCapture().assert_outputs(self, queue._next_patch, expected_stdout=expected_stdout, expected_logs=expected_logs)
# The patch.id() == 2 is ignored because it doesn't exist.
self.assertEqual(patch.id(), 10000)
self.assertEqual(queue._next_patch().id(), 10001)
self.assertEqual(queue._next_patch(), None) # When the queue is empty
class PatchProcessingQueueTest(CommandsTest):
def test_upload_results_archive_for_patch(self):
queue = PatchProcessingQueue()
queue.name = "mock-queue"
tool = MockTool()
queue.bind_to_tool(tool)
queue._options = Mock()
queue._options.port = None
patch = queue._tool.bugs.fetch_attachment(10001)
expected_logs = """MOCK add_attachment_to_bug: bug_id=50000, description=Archive of layout-test-results from bot for mac-snowleopard filename=layout-test-results.zip mimetype=None
-- Begin comment --
The attached test failures were seen while running run-webkit-tests on the mock-queue.
Port: mac-snowleopard Platform: MockPlatform 1.0
-- End comment --
"""
OutputCapture().assert_outputs(self, queue._upload_results_archive_for_patch, [patch, Mock()], expected_logs=expected_logs)
class NeedsUpdateSequence(StepSequence):
def _run(self, tool, options, state):
raise CheckoutNeedsUpdate([], 1, "", None)
class AlwaysCommitQueueTool(object):
def __init__(self):
self.status_server = MockStatusServer()
def command_by_name(self, name):
return CommitQueue
class SecondThoughtsCommitQueue(TestCommitQueue):
def __init__(self, tool=None):
self._reject_patch = False
TestCommitQueue.__init__(self, tool)
def run_command(self, command):
# We want to reject the patch after the first validation,
# so wait to reject it until after some other command has run.
self._reject_patch = True
return CommitQueue.run_command(self, command)
def refetch_patch(self, patch):
if not self._reject_patch:
return self._tool.bugs.fetch_attachment(patch.id())
attachment_dictionary = {
"id": patch.id(),
"bug_id": patch.bug_id(),
"name": "Rejected",
"is_obsolete": True,
"is_patch": False,
"review": "-",
"reviewer_email": "[email protected]",
"commit-queue": "-",
"committer_email": "[email protected]",
"attacher_email": "Contributer1",
}
return Attachment(attachment_dictionary, None)
class CommitQueueTest(QueuesTest):
def _mock_test_result(self, testname):
return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
def test_commit_queue(self):
tool = MockTool()
tool.filesystem.write_text_file('/tmp/layout-test-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
tool.filesystem.write_text_file('/tmp/layout-test-results/webkit_unit_tests_output.xml', '')
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
"process_work_item": """Running: webkit-patch --status-host=example.com clean --port=mac
MOCK: update_status: commit-queue Cleaned working directory
Running: webkit-patch --status-host=example.com update --port=mac
MOCK: update_status: commit-queue Updated working directory
Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000 --port=mac
MOCK: update_status: commit-queue Applied patch
Running: webkit-patch --status-host=example.com validate-changelog --check-oops --non-interactive 10000 --port=mac
MOCK: update_status: commit-queue ChangeLog validated
Running: webkit-patch --status-host=example.com build --no-clean --no-update --build-style=release --port=mac
MOCK: update_status: commit-queue Built patch
Running: webkit-patch --status-host=example.com build-and-test --no-clean --no-update --test --non-interactive --port=mac
MOCK: update_status: commit-queue Passed tests
Running: webkit-patch --status-host=example.com land-attachment --force-clean --non-interactive --parent-command=commit-queue 10000 --port=mac
MOCK: update_status: commit-queue Landed patch
MOCK: update_status: commit-queue Pass
MOCK: release_work_item: commit-queue 10000
""",
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
self.assert_queue_outputs(CommitQueue(), tool=tool, expected_logs=expected_logs)
def test_commit_queue_failure(self):
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
"process_work_item": """MOCK: update_status: commit-queue Cleaned working directory
MOCK: update_status: commit-queue Updated working directory
MOCK: update_status: commit-queue Patch does not apply
MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMOCK script error
Full output: http://dummy_url'
MOCK: update_status: commit-queue Fail
MOCK: release_work_item: commit-queue 10000
""",
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
queue = CommitQueue()
def mock_run_webkit_patch(command):
if command[0] == 'clean' or command[0] == 'update':
# We want cleaning to succeed so we can error out on a step
# that causes the commit-queue to reject the patch.
return
raise ScriptError('MOCK script error')
queue.run_webkit_patch = mock_run_webkit_patch
self.assert_queue_outputs(queue, expected_logs=expected_logs)
def test_commit_queue_failure_with_failing_tests(self):
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
"process_work_item": """MOCK: update_status: commit-queue Cleaned working directory
MOCK: update_status: commit-queue Updated working directory
MOCK: update_status: commit-queue Patch does not apply
MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nNew failing tests:
mock_test_name.html
another_test_name.html
Full output: http://dummy_url'
MOCK: update_status: commit-queue Fail
MOCK: release_work_item: commit-queue 10000
""",
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
queue = CommitQueue()
def mock_run_webkit_patch(command):
if command[0] == 'clean' or command[0] == 'update':
# We want cleaning to succeed so we can error out on a step
# that causes the commit-queue to reject the patch.
return
queue._expected_failures.unexpected_failures_observed = lambda results: ["mock_test_name.html", "another_test_name.html"]
raise ScriptError('MOCK script error')
queue.run_webkit_patch = mock_run_webkit_patch
self.assert_queue_outputs(queue, expected_logs=expected_logs)
def test_rollout(self):
tool = MockTool()
tool.filesystem.write_text_file('/tmp/layout-test-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
tool.filesystem.write_text_file('/tmp/layout-test-results/webkit_unit_tests_output.xml', '')
tool.buildbot.light_tree_on_fire()
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
"process_work_item": """Running: webkit-patch --status-host=example.com clean --port=%(port)s
MOCK: update_status: commit-queue Cleaned working directory
Running: webkit-patch --status-host=example.com update --port=%(port)s
MOCK: update_status: commit-queue Updated working directory
Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000 --port=%(port)s
MOCK: update_status: commit-queue Applied patch
Running: webkit-patch --status-host=example.com validate-changelog --check-oops --non-interactive 10000 --port=%(port)s
MOCK: update_status: commit-queue ChangeLog validated
Running: webkit-patch --status-host=example.com build --no-clean --no-update --build-style=release --port=%(port)s
MOCK: update_status: commit-queue Built patch
Running: webkit-patch --status-host=example.com build-and-test --no-clean --no-update --test --non-interactive --port=%(port)s
MOCK: update_status: commit-queue Passed tests
Running: webkit-patch --status-host=example.com land-attachment --force-clean --non-interactive --parent-command=commit-queue 10000 --port=%(port)s
MOCK: update_status: commit-queue Landed patch
MOCK: update_status: commit-queue Pass
MOCK: release_work_item: commit-queue 10000
""" % {"port": "mac"},
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
self.assert_queue_outputs(CommitQueue(), tool=tool, expected_logs=expected_logs)
def test_rollout_lands(self):
tool = MockTool()
tool.buildbot.light_tree_on_fire()
rollout_patch = tool.bugs.fetch_attachment(10005) # _patch6, a rollout patch.
assert(rollout_patch.is_rollout())
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
"process_work_item": """Running: webkit-patch --status-host=example.com clean --port=%(port)s
MOCK: update_status: commit-queue Cleaned working directory
Running: webkit-patch --status-host=example.com update --port=%(port)s
MOCK: update_status: commit-queue Updated working directory
Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10005 --port=%(port)s
MOCK: update_status: commit-queue Applied patch
Running: webkit-patch --status-host=example.com validate-changelog --check-oops --non-interactive 10005 --port=%(port)s
MOCK: update_status: commit-queue ChangeLog validated
Running: webkit-patch --status-host=example.com land-attachment --force-clean --non-interactive --parent-command=commit-queue 10005 --port=%(port)s
MOCK: update_status: commit-queue Landed patch
MOCK: update_status: commit-queue Pass
MOCK: release_work_item: commit-queue 10005
""" % {"port": "mac"},
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10005' with comment 'Rejecting attachment 10005 from commit-queue.\n\nMock error message'\n",
}
self.assert_queue_outputs(CommitQueue(), tool=tool, work_item=rollout_patch, expected_logs=expected_logs)
def test_auto_retry(self):
queue = CommitQueue()
options = Mock()
options.parent_command = "commit-queue"
tool = AlwaysCommitQueueTool()
sequence = NeedsUpdateSequence(None)
expected_logs = """Commit failed because the checkout is out of date. Please update and try again.
MOCK: update_status: commit-queue Tests passed, but commit failed (checkout out of date). Updating, then landing without building or re-running tests.
"""
state = {'patch': None}
OutputCapture().assert_outputs(self, sequence.run_and_handle_errors, [tool, options, state], expected_exception=TryAgain, expected_logs=expected_logs)
self.assertTrue(options.update)
self.assertFalse(options.build)
self.assertFalse(options.test)
def test_manual_reject_during_processing(self):
queue = SecondThoughtsCommitQueue(MockTool())
queue.begin_work_queue()
queue._tool.filesystem.write_text_file('/tmp/layout-test-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
queue._tool.filesystem.write_text_file('/tmp/layout-test-results/webkit_unit_tests_output.xml', '')
queue._options = Mock()
queue._options.port = None
expected_logs = """Running: webkit-patch --status-host=example.com clean --port=mac
MOCK: update_status: commit-queue Cleaned working directory
Running: webkit-patch --status-host=example.com update --port=mac
MOCK: update_status: commit-queue Updated working directory
Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000 --port=mac
MOCK: update_status: commit-queue Applied patch
Running: webkit-patch --status-host=example.com validate-changelog --check-oops --non-interactive 10000 --port=mac
MOCK: update_status: commit-queue ChangeLog validated
Running: webkit-patch --status-host=example.com build --no-clean --no-update --build-style=release --port=mac
MOCK: update_status: commit-queue Built patch
Running: webkit-patch --status-host=example.com build-and-test --no-clean --no-update --test --non-interactive --port=mac
MOCK: update_status: commit-queue Passed tests
MOCK: update_status: commit-queue Retry
MOCK: release_work_item: commit-queue 10000
"""
self.maxDiff = None
OutputCapture().assert_outputs(self, queue.process_work_item, [QueuesTest.mock_work_item], expected_logs=expected_logs)
def test_report_flaky_tests(self):
queue = TestCommitQueue(MockTool())
expected_logs = """MOCK bug comment: bug_id=50002, cc=None
--- Begin comment ---
The commit-queue just saw foo/bar.html flake (text diff) while processing attachment 10000 on bug 50000.
Port: MockPort Platform: MockPlatform 1.0
--- End comment ---
MOCK add_attachment_to_bug: bug_id=50002, description=Failure diff from bot filename=failure.diff mimetype=None
MOCK bug comment: bug_id=50002, cc=None
--- Begin comment ---
The commit-queue just saw bar/baz.html flake (text diff) while processing attachment 10000 on bug 50000.
Port: MockPort Platform: MockPlatform 1.0
--- End comment ---
bar/baz-diffs.txt does not exist in results archive, uploading entire archive.
MOCK add_attachment_to_bug: bug_id=50002, description=Archive of layout-test-results from bot filename=layout-test-results.zip mimetype=None
MOCK bug comment: bug_id=50000, cc=None
--- Begin comment ---
The commit-queue encountered the following flaky tests while processing attachment 10000:
foo/bar.html bug 50002 (author: [email protected])
bar/baz.html bug 50002 (author: [email protected])
The commit-queue is continuing to process your patch.
--- End comment ---
"""
test_names = ["foo/bar.html", "bar/baz.html"]
test_results = [self._mock_test_result(name) for name in test_names]
class MockZipFile(object):
def __init__(self):
self.fp = StringIO()
def read(self, path):
return ""
def namelist(self):
# This is intentionally missing one diffs.txt to exercise the "upload the whole zip" codepath.
return ['foo/bar-diffs.txt']
OutputCapture().assert_outputs(self, queue.report_flaky_tests, [QueuesTest.mock_work_item, test_results, MockZipFile()], expected_logs=expected_logs)
def test_did_pass_testing_ews(self):
tool = MockTool()
patch = tool.bugs.fetch_attachment(10000)
queue = TestCommitQueue(tool)
self.assertFalse(queue.did_pass_testing_ews(patch))
class StyleQueueTest(QueuesTest):
def test_style_queue_with_style_exception(self):
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("style-queue"),
"process_work_item": """Running: webkit-patch --status-host=example.com clean
MOCK: update_status: style-queue Cleaned working directory
Running: webkit-patch --status-host=example.com update
MOCK: update_status: style-queue Updated working directory
Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000
MOCK: update_status: style-queue Applied patch
Running: webkit-patch --status-host=example.com apply-watchlist-local 50000
MOCK: update_status: style-queue Watchlist applied
Running: webkit-patch --status-host=example.com check-style-local --non-interactive --quiet
MOCK: update_status: style-queue Style checked
MOCK: update_status: style-queue Pass
MOCK: release_work_item: style-queue 10000
""",
"handle_unexpected_error": "Mock error message\n",
"handle_script_error": "MOCK output\n",
}
tool = MockTool(executive_throws_when_run=set(['check-style']))
self.assert_queue_outputs(StyleQueue(), expected_logs=expected_logs, tool=tool)
def test_style_queue_with_watch_list_exception(self):
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("style-queue"),
"process_work_item": """Running: webkit-patch --status-host=example.com clean
MOCK: update_status: style-queue Cleaned working directory
Running: webkit-patch --status-host=example.com update
MOCK: update_status: style-queue Updated working directory
Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000
MOCK: update_status: style-queue Applied patch
Running: webkit-patch --status-host=example.com apply-watchlist-local 50000
Exception for ['echo', '--status-host=example.com', 'apply-watchlist-local', 50000]
MOCK command output
MOCK: update_status: style-queue Unabled to apply watchlist
Running: webkit-patch --status-host=example.com check-style-local --non-interactive --quiet
MOCK: update_status: style-queue Style checked
MOCK: update_status: style-queue Pass
MOCK: release_work_item: style-queue 10000
""",
"handle_unexpected_error": "Mock error message\n",
"handle_script_error": "MOCK output\n",
}
tool = MockTool(executive_throws_when_run=set(['apply-watchlist-local']))
self.assert_queue_outputs(StyleQueue(), expected_logs=expected_logs, tool=tool)
|
bsd-3-clause
|
mbauskar/omnitech-erpnext
|
erpnext/setup/page/setup_wizard/setup_wizard.py
|
1
|
17852
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json, copy
from frappe.utils import cstr, flt, getdate
from frappe import _
from frappe.utils.file_manager import save_file
from frappe.translate import (set_default_language, get_dict,
get_lang_dict, send_translations, get_language_from_code)
from frappe.geo.country_info import get_country_info
from frappe.utils.nestedset import get_root_of
from .default_website import website_maker
import install_fixtures
from .sample_data import make_sample_data
from erpnext.accounts.utils import FiscalYearError
from erpnext.accounts.doctype.account.account import RootNotEditable
# TODO
# remove roles
# remove modules
@frappe.whitelist()
def setup_account(args=None):
try:
if frappe.db.sql("select name from tabCompany"):
frappe.throw(_("Setup Already Complete!!"))
if not args:
args = frappe.local.form_dict
if isinstance(args, basestring):
args = json.loads(args)
args = frappe._dict(args)
if args.language and args.language != "english":
set_default_language(args.language)
frappe.clear_cache()
install_fixtures.install(args.get("country"))
update_user_name(args)
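# message_log is cleared after each step below, presumably so that messages
# raised by one step are not replayed alongside the next step's response.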
frappe.local.message_log = []
create_fiscal_year_and_company(args)
frappe.local.message_log = []
create_users(args)
frappe.local.message_log = []
set_defaults(args)
frappe.local.message_log = []
create_territories()
frappe.local.message_log = []
create_price_lists(args)
frappe.local.message_log = []
create_feed_and_todo()
frappe.local.message_log = []
create_email_digest()
frappe.local.message_log = []
create_letter_head(args)
frappe.local.message_log = []
create_taxes(args)
frappe.local.message_log = []
create_items(args)
frappe.local.message_log = []
create_customers(args)
frappe.local.message_log = []
create_suppliers(args)
frappe.local.message_log = []
frappe.db.set_default('desktop:home_page', 'desktop')
website_maker(args.company_name.strip(), args.company_tagline, args.name)
create_logo(args)
frappe.db.commit()
login_as_first_user(args)
frappe.db.commit()
frappe.clear_cache()
if args.get("add_sample_data"):
try:
make_sample_data()
frappe.clear_cache()
except FiscalYearError:
pass
except:
if args:
traceback = frappe.get_traceback()
for hook in frappe.get_hooks("setup_wizard_exception"):
frappe.get_attr(hook)(traceback, args)
raise
else:
for hook in frappe.get_hooks("setup_wizard_success"):
frappe.get_attr(hook)(args)
def update_user_name(args):
if args.get("email"):
args['name'] = args.get("email")
_mute_emails, frappe.flags.mute_emails = frappe.flags.mute_emails, True
doc = frappe.get_doc({
"doctype":"User",
"email": args.get("email"),
"first_name": args.get("first_name"),
"last_name": args.get("last_name")
})
doc.flags.no_welcome_mail = True
doc.insert()
frappe.flags.mute_emails = _mute_emails
from frappe.auth import _update_password
_update_password(args.get("email"), args.get("password"))
else:
args['name'] = frappe.session.user
# Update User
if not args.get('last_name') or args.get('last_name')=='None':
args['last_name'] = None
frappe.db.sql("""update `tabUser` SET first_name=%(first_name)s,
last_name=%(last_name)s WHERE name=%(name)s""", args)
if args.get("attach_user"):
attach_user = args.get("attach_user").split(",")
if len(attach_user)==3:
filename, filetype, content = attach_user
fileurl = save_file(filename, content, "User", args.get("name"), decode=True).file_url
frappe.db.set_value("User", args.get("name"), "user_image", fileurl)
add_all_roles_to(args.get("name"))
def create_fiscal_year_and_company(args):
curr_fiscal_year = get_fy_details(args.get('fy_start_date'), args.get('fy_end_date'))
frappe.get_doc({
"doctype":"Fiscal Year",
'year': curr_fiscal_year,
'year_start_date': args.get('fy_start_date'),
'year_end_date': args.get('fy_end_date'),
}).insert()
# Company
frappe.get_doc({
"doctype":"Company",
'domain': args.get("industry"),
'company_name':args.get('company_name').strip(),
'abbr':args.get('company_abbr'),
'default_currency':args.get('currency'),
'country': args.get('country'),
'chart_of_accounts': args.get(('chart_of_accounts')),
}).insert()
# Bank Account
args["curr_fiscal_year"] = curr_fiscal_year
def create_price_lists(args):
for pl_type, pl_name in (("Selling", _("Standard Selling")), ("Buying", _("Standard Buying"))):
frappe.get_doc({
"doctype": "Price List",
"price_list_name": pl_name,
"enabled": 1,
"buying": 1 if pl_type == "Buying" else 0,
"selling": 1 if pl_type == "Selling" else 0,
"currency": args["currency"]
}).insert()
def set_defaults(args):
# enable default currency
frappe.db.set_value("Currency", args.get("currency"), "enabled", 1)
global_defaults = frappe.get_doc("Global Defaults", "Global Defaults")
global_defaults.update({
'current_fiscal_year': args.curr_fiscal_year,
'default_currency': args.get('currency'),
'default_company':args.get('company_name').strip(),
"country": args.get("country"),
})
global_defaults.save()
number_format = get_country_info(args.get("country")).get("number_format", "#,###.##")
# these are currency number formats with 0 precision,
# so replace them with float-friendly number formats
if number_format=="#.###":
number_format = "#.###,##"
elif number_format=="#,###":
number_format = "#,###.##"
system_settings = frappe.get_doc("System Settings", "System Settings")
system_settings.update({
"language": args.get("language"),
"time_zone": args.get("timezone"),
"float_precision": 3,
"email_footer_address": args.get("company"),
'date_format': frappe.db.get_value("Country", args.get("country"), "date_format"),
'number_format': number_format,
'enable_scheduler': 1 if not frappe.flags.in_test else 0
})
system_settings.save()
accounts_settings = frappe.get_doc("Accounts Settings")
accounts_settings.auto_accounting_for_stock = 1
accounts_settings.save()
stock_settings = frappe.get_doc("Stock Settings")
stock_settings.item_naming_by = "Item Code"
stock_settings.valuation_method = "FIFO"
stock_settings.stock_uom = _("Nos")
stock_settings.auto_indent = 1
stock_settings.auto_insert_price_list_rate_if_missing = 1
stock_settings.automatically_set_serial_nos_based_on_fifo = 1
stock_settings.save()
selling_settings = frappe.get_doc("Selling Settings")
selling_settings.cust_master_name = "Customer Name"
selling_settings.so_required = "No"
selling_settings.dn_required = "No"
selling_settings.save()
buying_settings = frappe.get_doc("Buying Settings")
buying_settings.supp_master_name = "Supplier Name"
buying_settings.po_required = "No"
buying_settings.pr_required = "No"
buying_settings.maintain_same_rate = 1
buying_settings.save()
notification_control = frappe.get_doc("Notification Control")
notification_control.quotation = 1
notification_control.sales_invoice = 1
notification_control.purchase_order = 1
notification_control.save()
hr_settings = frappe.get_doc("HR Settings")
hr_settings.emp_created_by = "Naming Series"
hr_settings.save()
def create_feed_and_todo():
"""update Activity feed and create todo for creation of item, customer, vendor"""
frappe.get_doc({
"doctype": "Feed",
"feed_type": "Comment",
"subject": "ERPNext Setup Complete!"
}).insert(ignore_permissions=True)
def create_email_digest():
from frappe.utils.user import get_system_managers
system_managers = get_system_managers(only_name=True)
if not system_managers:
return
companies = frappe.db.sql_list("select name FROM `tabCompany`")
for company in companies:
if not frappe.db.exists("Email Digest", "Default Weekly Digest - " + company):
edigest = frappe.get_doc({
"doctype": "Email Digest",
"name": "Default Weekly Digest - " + company,
"company": company,
"frequency": "Weekly",
"recipient_list": "\n".join(system_managers)
})
for df in edigest.meta.get("fields", {"fieldtype": "Check"}):
if df.fieldname != "scheduler_errors":
edigest.set(df.fieldname, 1)
edigest.insert()
# scheduler errors digest
if companies:
edigest = frappe.new_doc("Email Digest")
edigest.update({
"name": "Scheduler Errors",
"company": companies[0],
"frequency": "Daily",
"recipient_list": "\n".join(system_managers),
"scheduler_errors": 1,
"enabled": 1
})
edigest.insert()
def get_fy_details(fy_start_date, fy_end_date):
start_year = getdate(fy_start_date).year
if start_year == getdate(fy_end_date).year:
fy = cstr(start_year)
else:
fy = cstr(start_year) + '-' + cstr(start_year + 1)
return fy
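# Illustrative examples: get_fy_details("2015-04-01", "2016-03-31") returns
# "2015-2016", while get_fy_details("2015-01-01", "2015-12-31") returns "2015".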
def create_taxes(args):
for i in xrange(1,6):
if args.get("tax_" + str(i)):
# replace % in case someone also enters the % symbol
tax_rate = (args.get("tax_rate_" + str(i)) or "").replace("%", "")
try:
tax_group = frappe.db.get_value("Account", {"company": args.get("company_name"),
"is_group": 1, "account_type": "Tax", "root_type": "Liability"})
if tax_group:
account = make_tax_head(args, i, tax_group, tax_rate)
make_sales_and_purchase_tax_templates(account)
except frappe.NameError, e:
if e.args[2][0]==1062:
pass
else:
raise
except RootNotEditable, e:
pass
def make_tax_head(args, i, tax_group, tax_rate):
return frappe.get_doc({
"doctype":"Account",
"company": args.get("company_name").strip(),
"parent_account": tax_group,
"account_name": args.get("tax_" + str(i)),
"is_group": 0,
"report_type": "Balance Sheet",
"account_type": "Tax",
"tax_rate": flt(tax_rate) if tax_rate else None
}).insert(ignore_permissions=True)
def make_sales_and_purchase_tax_templates(account):
doc = {
"doctype": "Sales Taxes and Charges Template",
"title": account.name,
"taxes": [{
"category": "Valuation and Total",
"charge_type": "On Net Total",
"account_head": account.name,
"description": "{0} @ {1}".format(account.account_name, account.tax_rate),
"rate": account.tax_rate
}]
}
# Sales
frappe.get_doc(copy.deepcopy(doc)).insert()
# Purchase
doc["doctype"] = "Purchase Taxes and Charges Template"
frappe.get_doc(copy.deepcopy(doc)).insert()
def create_items(args):
for i in xrange(1,6):
item = args.get("item_" + str(i))
if item:
item_group = args.get("item_group_" + str(i))
is_sales_item = args.get("is_sales_item_" + str(i))
is_purchase_item = args.get("is_purchase_item_" + str(i))
is_stock_item = item_group!=_("Services")
default_warehouse = ""
if is_stock_item:
default_warehouse = frappe.db.get_value("Warehouse", filters={
"warehouse_name": _("Finished Goods") if is_sales_item else _("Stores"),
"company": args.get("company_name").strip()
})
try:
frappe.get_doc({
"doctype":"Item",
"item_code": item,
"item_name": item,
"description": item,
"is_sales_item": 1 if is_sales_item else 0,
"is_purchase_item": 1 if is_purchase_item else 0,
"show_in_website": 1,
"is_stock_item": is_stock_item and 1 or 0,
"item_group": item_group,
"stock_uom": args.get("item_uom_" + str(i)),
"default_warehouse": default_warehouse
}).insert()
if args.get("item_img_" + str(i)):
item_image = args.get("item_img_" + str(i)).split(",")
if len(item_image)==3:
filename, filetype, content = item_image
fileurl = save_file(filename, content, "Item", item, decode=True).file_url
frappe.db.set_value("Item", item, "image", fileurl)
if args.get("item_price_" + str(i)):
item_price = flt(args.get("item_price_" + str(i)))
if is_sales_item:
price_list_name = frappe.db.get_value("Price List", {"selling": 1})
make_item_price(item, price_list_name, item_price)
if is_purchase_item:
price_list_name = frappe.db.get_value("Price List", {"buying": 1})
make_item_price(item, price_list_name, item_price)
except frappe.NameError:
pass
def make_item_price(item, price_list_name, item_price):
frappe.get_doc({
"doctype": "Item Price",
"price_list": price_list_name,
"item_code": item,
"price_list_rate": item_price
}).insert()
def create_customers(args):
for i in xrange(1,6):
customer = args.get("customer_" + str(i))
if customer:
try:
frappe.get_doc({
"doctype":"Customer",
"customer_name": customer,
"customer_type": "Company",
"customer_group": _("Commercial"),
"territory": args.get("country"),
"company": args.get("company_name").strip()
}).insert()
if args.get("customer_contact_" + str(i)):
create_contact(args.get("customer_contact_" + str(i)),
"customer", customer)
except frappe.NameError:
pass
def create_suppliers(args):
for i in xrange(1,6):
supplier = args.get("supplier_" + str(i))
if supplier:
try:
frappe.get_doc({
"doctype":"Supplier",
"supplier_name": supplier,
"supplier_type": _("Local"),
"company": args.get("company_name").strip()
}).insert()
if args.get("supplier_contact_" + str(i)):
create_contact(args.get("supplier_contact_" + str(i)),
"supplier", supplier)
except frappe.NameError:
pass
def create_contact(contact, party_type, party):
"""Create contact based on given contact name"""
contact = contact.strip().split(" ")
frappe.get_doc({
"doctype":"Contact",
party_type: party,
"first_name":contact[0],
"last_name": len(contact) > 1 and contact[1] or ""
}).insert()
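# Illustrative example: create_contact("Jane Doe", "customer", "Acme") inserts a
# Contact with first_name "Jane", last_name "Doe" and its "customer" field set to "Acme".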
def create_letter_head(args):
if args.get("attach_letterhead"):
frappe.get_doc({
"doctype":"Letter Head",
"letter_head_name": _("Standard"),
"is_default": 1
}).insert()
attach_letterhead = args.get("attach_letterhead").split(",")
if len(attach_letterhead)==3:
filename, filetype, content = attach_letterhead
fileurl = save_file(filename, content, "Letter Head", _("Standard"), decode=True).file_url
frappe.db.set_value("Letter Head", _("Standard"), "content", "<img src='%s' style='max-width: 100%%;'>" % fileurl)
def create_logo(args):
if args.get("attach_logo"):
attach_logo = args.get("attach_logo").split(",")
if len(attach_logo)==3:
filename, filetype, content = attach_logo
fileurl = save_file(filename, content, "Website Settings", "Website Settings",
decode=True).file_url
frappe.db.set_value("Website Settings", "Website Settings", "brand_html",
"<img src='{0}' style='max-width: 40px; max-height: 25px;'> {1}".format(fileurl, args.get("company_name").strip()))
# def add_all_roles_to(name):
# user = frappe.get_doc("User", name)
# for role in frappe.db.sql("""select name from tabRole"""):
# if role[0] not in ["Administrator", "Guest", "All", "Customer", "Supplier", "Partner", "Employee"]:
# d = user.append("user_roles")
# d.role = role[0]
# # user.save()
# TODO check admin roles
def add_all_roles_to(name):
user = frappe.get_doc("User", name)
d = user.append("user_roles")
d.role = "System Manager"
user.save()
def create_territories():
"""create two default territories, one for home country and one named Rest of the World"""
from frappe.utils.nestedset import get_root_of
country = frappe.db.get_default("country")
root_territory = get_root_of("Territory")
for name in (country, _("Rest Of The World")):
if name and not frappe.db.exists("Territory", name):
frappe.get_doc({
"doctype": "Territory",
"territory_name": name.replace("'", ""),
"parent_territory": root_territory,
"is_group": "No"
}).insert()
def login_as_first_user(args):
if args.get("email") and hasattr(frappe.local, "login_manager"):
frappe.local.login_manager.login_as(args.get("email"))
def create_users(args):
# create employee for self
emp = frappe.get_doc({
"doctype": "Employee",
"full_name": " ".join(filter(None, [args.get("first_name"), args.get("last_name")])),
"user_id": frappe.session.user,
"status": "Active",
"company": args.get("company_name")
})
emp.flags.ignore_mandatory = True
emp.insert(ignore_permissions = True)
for i in xrange(1,5):
email = args.get("user_email_" + str(i))
fullname = args.get("user_fullname_" + str(i))
if email:
if not fullname:
fullname = email.split("@")[0]
parts = fullname.split(" ", 1)
user = frappe.get_doc({
"doctype": "User",
"email": email,
"first_name": parts[0],
"last_name": parts[1] if len(parts) > 1 else "",
"enabled": 1,
"user_type": "System User"
})
# default roles
# user.append_roles("Projects User", "Stock User", "Support Team")
# if args.get("user_sales_" + str(i)):
# user.append_roles("Sales User", "Sales Manager", "Accounts User")
# if args.get("user_purchaser_" + str(i)):
# user.append_roles("Purchase User", "Purchase Manager", "Accounts User")
# if args.get("user_accountant_" + str(i)):
# user.append_roles("Accounts Manager", "Accounts User")
user.flags.delay_emails = True
if not frappe.db.get_value("User", email):
user.insert(ignore_permissions=True)
# create employee
emp = frappe.get_doc({
"doctype": "Employee",
"full_name": fullname,
"user_id": email,
"status": "Active",
"company": args.get("company_name")
})
emp.flags.ignore_mandatory = True
emp.insert(ignore_permissions = True)
@frappe.whitelist()
def load_messages(language):
frappe.clear_cache()
set_default_language(language)
m = get_dict("page", "setup-wizard")
m.update(get_dict("boot"))
send_translations(m)
return frappe.local.lang
@frappe.whitelist()
def load_languages():
return {
"default_language": get_language_from_code(frappe.local.lang),
"languages": sorted(get_lang_dict().keys())
}
|
agpl-3.0
|
YUPlayGodDev/platform_kernel_cyanogen_msm8916
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
11180
|
2058
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
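# syscalls is a nested autodict keyed as syscalls[comm][pid][syscall id][ret] -> count,
# where ret is the (negative) errno; it is filled in by raw_syscalls__sys_exit() below.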
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
gpl-2.0
|
leki75/ansible
|
lib/ansible/modules/network/nxos/nxos_nxapi.py
|
25
|
9768
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: nxos_nxapi
extends_documentation_fragment: nxos
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage NXAPI configuration on an NXOS device.
description:
- Configures the NXAPI feature on devices running Cisco NXOS. The
NXAPI feature is absent from the configuration by default. Since
this module manages the NXAPI feature, it only supports the use
of the C(Cli) transport.
options:
http_port:
description:
- Configure the port on which the HTTP server will listen
for requests. By default, NXAPI will bind the HTTP service
to the standard HTTP port 80. This argument accepts valid
port values in the range of 1 to 65535.
required: false
default: 80
http:
description:
- Controls the operating state of the HTTP protocol as one of the
underlying transports for NXAPI. By default, NXAPI will enable
the HTTP transport when the feature is first configured. To
disable the use of the HTTP transport, set the value of this
argument to False.
required: false
default: yes
choices: ['yes', 'no']
aliases: ['enable_http']
https_port:
description:
- Configure the port on which the HTTPS server will listen
for requests. By default, NXAPI will bind the HTTPS service
to the standard HTTPS port 443. This argument accepts valid
port values in the range of 1 to 65535.
required: false
default: 443
https:
description:
- Controls the operating state of the HTTPS protocol as one of the
underlying transports for NXAPI. By default, NXAPI will disable
the HTTPS transport when the feature is first configured. To
enable the use of the HTTPS transport, set the value of this
argument to True.
required: false
default: no
choices: ['yes', 'no']
aliases: ['enable_https']
sandbox:
description:
- The NXAPI feature provides a web-based UI for developers to
enter commands. This feature is initially disabled when
the NXAPI feature is configured for the first time. When the
C(sandbox) argument is set to True, the developer sandbox URL
will accept requests, and when the value is set to False, the
sandbox URL is unavailable.
required: false
default: no
choices: ['yes', 'no']
aliases: ['enable_sandbox']
state:
description:
- The C(state) argument controls whether or not the NXAPI
feature is configured on the remote device. When the value
is C(present) the NXAPI feature configuration is present in
the device running-config. When the value is C(absent) the
feature configuration is removed from the running-config.
choices: ['present', 'absent']
required: false
default: present
"""
EXAMPLES = """
- name: Enable NXAPI access with default configuration
nxos_nxapi:
state: present
- name: Enable NXAPI with no HTTP, HTTPS at port 9443 and sandbox disabled
nxos_nxapi:
enable_http: false
https_port: 9443
https: yes
enable_sandbox: no
- name: remove NXAPI configuration
nxos_nxapi:
state: absent
"""
RETURN = """
updates:
description:
- Returns the list of commands that need to be pushed into the remote
device to satisfy the arguments
returned: always
type: list
sample: ['no feature nxapi']
"""
import re
from ansible.module_utils.nxos import run_commands, load_config
from ansible.module_utils.nxos import nxos_argument_spec
from ansible.module_utils.nxos import check_args as nxos_check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
def check_args(module, warnings):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
if 'nxapi' in (transport, provider_transport):
module.fail_json(msg='transport=nxapi is not supported when configuring nxapi')
nxos_check_args(module, warnings)
state = module.params['state']
if state == 'started':
module.params['state'] = 'present'
warnings.append('state=started is deprecated and will be removed in a '
'future release. Please use state=present instead')
elif state == 'stopped':
module.params['state'] = 'absent'
warnings.append('state=stopped is deprecated and will be removed in a '
'future release. Please use state=absent instead')
if module.params['transport'] == 'nxapi':
module.fail_json(msg='module not supported over nxapi transport')
for key in ['config']:
if module.params[key]:
warnings.append('argument %s is deprecated and will be ignored' % key)
for key in ['http_port', 'https_port']:
if module.params[key] is not None:
if not 1 <= module.params[key] <= 65535:
module.fail_json(msg='%s must be between 1 and 65535' % key)
return warnings
def map_obj_to_commands(want, have, module):
commands = list()
needs_update = lambda x: want.get(x) is not None and (want.get(x) != have.get(x))
if needs_update('state'):
if want['state'] == 'absent':
return ['no feature nxapi']
commands.append('feature nxapi')
if needs_update('http') or (have.get('http') and needs_update('http_port')):
if want['http'] is True or (want['http'] is None and have['http'] is True):
port = want['http_port'] or 80
commands.append('nxapi http port %s' % port)
elif want['http'] is False:
commands.append('no nxapi http')
if needs_update('https') or (have.get('https') and needs_update('https_port')):
if want['https'] is True or (want['https'] is None and have['https'] is True):
port = want['https_port'] or 443
commands.append('nxapi https port %s' % port)
elif want['https'] is False:
commands.append('no nxapi https')
if needs_update('sandbox'):
cmd = 'nxapi sandbox'
if not want['sandbox']:
cmd = 'no %s' % cmd
commands.append(cmd)
return commands
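# Illustrative example (values hypothetical): with have == {'state': 'absent'} and
# want == {'state': 'present', 'http': True, 'http_port': 8080, 'https': None,
# 'https_port': None, 'sandbox': None}, map_obj_to_commands() would return
# ['feature nxapi', 'nxapi http port 8080'].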
def parse_http(data):
http_res = [r'HTTP Port:\s+(\d+)', r'HTTP Listen on port (\d+)']
http_port = None
for regex in http_res:
match = re.search(regex, data, re.M)
if match:
http_port = int(match.group(1))
break
return {'http': http_port is not None, 'http_port': http_port}
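# e.g. parse_http("HTTP Port:  8080") would return {'http': True, 'http_port': 8080};
# text with no matching HTTP port line yields {'http': False, 'http_port': None}.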
def parse_https(data):
https_res = [r'HTTPS Port:\s+(\d+)', r'HTTPS Listen on port (\d+)']
https_port = None
for regex in https_res:
match = re.search(regex, data, re.M)
if match:
https_port = int(match.group(1))
break
return {'https': https_port is not None, 'https_port': https_port}
def parse_sandbox(data):
match = re.search(r'Sandbox:\s+(.+)$', data, re.M)
value = False
if match:
value = match.group(1) == 'Enabled'
return {'sandbox': value}
def map_config_to_obj(module):
out = run_commands(module, ['show nxapi'], check_rc=False)[0]
if out == '':
return {'state': 'absent'}
out = str(out).strip()
obj = {'state': 'present'}
obj.update(parse_http(out))
obj.update(parse_https(out))
obj.update(parse_sandbox(out))
return obj
def map_params_to_obj(module):
obj = {
'http': module.params['http'],
'http_port': module.params['http_port'],
'https': module.params['https'],
'https_port': module.params['https_port'],
'sandbox': module.params['sandbox'],
'state': module.params['state']
}
return obj
def main():
""" main entry point for module execution
"""
argument_spec = dict(
http=dict(aliases=['enable_http'], type='bool'),
http_port=dict(type='int'),
https=dict(aliases=['enable_https'], type='bool'),
https_port=dict(type='int'),
sandbox=dict(aliases=['enable_sandbox'], type='bool'),
# deprecated (Ansible 2.3) arguments
config=dict(),
state=dict(default='present', choices=['started', 'stopped', 'present', 'absent'])
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
sbusso/rethinkdb
|
drivers/python/rethinkdb/errors.py
|
8
|
4930
|
# Copyright 2010-2014 RethinkDB, all rights reserved.
__all__ = ['RqlError',
'RqlClientError',
'RqlCompileError',
'RqlRuntimeError',
'RqlInternalError',
'RqlResourceError',
'RqlLogicError',
'RqlNonExistenceError',
'RqlOpError',
'RqlOpFailedError',
'RqlOpIndeterminateError',
'RqlUserError',
'RqlDriverError',
'RqlTimeoutError',
'RqlCursorEmpty']
import sys
try:
unicode
def convertForPrint(inputString):
if type(inputString) == unicode:
return inputString.encode(sys.stdout.encoding or 'utf-8',
'replace')
else:
return str(inputString)
except NameError:
def convertForPrint(inputString):
return inputString
try:
{}.iteritems
dict_items = lambda d: d.iteritems()
except AttributeError:
dict_items = lambda d: d.items()
class RqlError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return convertForPrint(self.message)
def __repr__(self):
return self.__class__.__name__ + "(" + repr(self.message) + ")"
class RqlServerError(RqlError):
def __init__(self, message, term, frames):
RqlError.__init__(self, message)
self.frames = frames
self.query_printer = QueryPrinter(term, self.frames)
def __str__(self):
return convertForPrint(self.__class__.__name__ + ": " + self.message +
" in:\n" + self.query_printer.print_query() +
'\n' + self.query_printer.print_carrots())
class RqlClientError(RqlServerError):
pass
class RqlCompileError(RqlServerError):
pass
class RqlRuntimeError(RqlServerError):
def __str__(self):
return convertForPrint(self.message + " in:\n" +
self.query_printer.print_query() + '\n' +
self.query_printer.print_carrots())
class RqlInternalError(RqlRuntimeError):
pass
class RqlResourceError(RqlRuntimeError):
pass
class RqlLogicError(RqlRuntimeError):
pass
class RqlNonExistenceError(RqlLogicError):
pass
class RqlOpError(RqlRuntimeError):
pass
class RqlOpFailedError(RqlOpError):
pass
class RqlOpIndeterminateError(RqlOpError):
pass
class RqlUserError(RqlRuntimeError):
pass
class RqlDriverError(RqlError):
pass
try:
class RqlTimeoutError(RqlError, TimeoutError):
def __init__(self):
RqlError.__init__(self, 'Operation timed out.')
except NameError:
class RqlTimeoutError(RqlError):
def __init__(self):
RqlError.__init__(self, 'Operation timed out.')
class RqlCursorEmpty(RqlServerError):
def __init__(self, term):
RqlServerError.__init__(self, 'Cursor is empty.', term, [])
class QueryPrinter(object):
def __init__(self, root, frames=[]):
self.root = root
self.frames = frames
def print_query(self):
return ''.join(self.compose_term(self.root))
def print_carrots(self):
return ''.join(self.compose_carrots(self.root, self.frames))
def compose_term(self, term):
args = [self.compose_term(a) for a in term.args]
optargs = {}
for k, v in dict_items(term.optargs):
optargs[k] = self.compose_term(v)
return term.compose(args, optargs)
def compose_carrots(self, term, frames):
# This term is the cause of the error
if len(frames) == 0:
return ['^' for i in self.compose_term(term)]
cur_frame = frames[0]
args = [self.compose_carrots(arg, frames[1:])
if cur_frame == i else self.compose_term(arg)
for i, arg in enumerate(term.args)]
optargs = {}
for k, v in dict_items(term.optargs):
if cur_frame == k:
optargs[k] = self.compose_carrots(v, frames[1:])
else:
optargs[k] = self.compose_term(v)
return [' ' if i != '^' else '^' for i in term.compose(args, optargs)]
# This 'enhanced' tuple recursively iterates over its elements, allowing us to
# construct nested hierarchies that insert subsequences into the tree. It's used
# to construct the query representation used by the pretty printer.
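# For example, ''.join(T('foo(', T('1', '2', intsp=', '), ')')) yields 'foo(1, 2)'.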
class T(object):
# N.B Python 2.x doesn't allow keyword default arguments after *seq
# In Python 3.x we can rewrite this as `__init__(self, *seq, intsp='')`
def __init__(self, *seq, **opts):
self.seq = seq
self.intsp = opts.pop('intsp', '')
def __iter__(self):
itr = iter(self.seq)
for sub in next(itr):
yield sub
for token in itr:
for sub in self.intsp:
yield sub
for sub in token:
yield sub
|
agpl-3.0
|
utecuy/edx-platform
|
cms/lib/xblock/test/test_authoring_mixin.py
|
105
|
6282
|
"""
Tests for the Studio authoring XBlock mixin.
"""
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
class AuthoringMixinTestCase(ModuleStoreTestCase):
"""
Tests the studio authoring XBlock mixin.
"""
def setUp(self):
"""
Create a simple course with a video component.
"""
super(AuthoringMixinTestCase, self).setUp()
self.course = CourseFactory.create()
chapter = ItemFactory.create(
category='chapter',
parent_location=self.course.location,
display_name='Test Chapter'
)
sequential = ItemFactory.create(
category='sequential',
parent_location=chapter.location,
display_name='Test Sequential'
)
vertical = ItemFactory.create(
category='vertical',
parent_location=sequential.location,
display_name='Test Vertical'
)
video = ItemFactory.create(
category='video',
parent_location=vertical.location,
display_name='Test Vertical'
)
self.vertical_location = vertical.location
self.video_location = video.location
self.pet_groups = [Group(1, 'Cat Lovers'), Group(2, 'Dog Lovers')]
def create_content_groups(self, content_groups):
"""
Create a cohorted user partition with the specified content groups.
"""
# pylint: disable=attribute-defined-outside-init
self.content_partition = UserPartition(
1,
'Content Groups',
'Contains Groups for Cohorted Courseware',
content_groups,
scheme_id='cohort'
)
self.course.user_partitions = [self.content_partition]
self.store.update_item(self.course, self.user.id)
def create_verification_user_partitions(self, checkpoint_names):
"""
Create user partitions for verification checkpoints.
"""
scheme = UserPartition.get_scheme("verification")
self.course.user_partitions = [
UserPartition(
id=0,
name=checkpoint_name,
description="Verification checkpoint",
scheme=scheme,
groups=[
Group(scheme.ALLOW, "Completed verification at {}".format(checkpoint_name)),
Group(scheme.DENY, "Did not complete verification at {}".format(checkpoint_name)),
],
)
for checkpoint_name in checkpoint_names
]
self.store.update_item(self.course, self.user.id)
def set_staff_only(self, item_location):
"""Make an item visible to staff only."""
item = self.store.get_item(item_location)
item.visible_to_staff_only = True
self.store.update_item(item, self.user.id)
def set_group_access(self, item_location, group_ids):
"""
Set group_access for the specified item to the specified group
ids within the content partition.
"""
item = self.store.get_item(item_location)
item.group_access[self.content_partition.id] = group_ids
self.store.update_item(item, self.user.id)
def verify_visibility_view_contains(self, item_location, substrings):
"""
Verify that an item's visibility view returns an html string
containing all the expected substrings.
"""
item = self.store.get_item(item_location)
html = item.visibility_view().body_html()
for string in substrings:
self.assertIn(string, html)
def test_html_no_partition(self):
self.verify_visibility_view_contains(self.video_location, 'No content groups exist')
def test_html_empty_partition(self):
self.create_content_groups([])
self.verify_visibility_view_contains(self.video_location, 'No content groups exist')
def test_html_populated_partition(self):
self.create_content_groups(self.pet_groups)
self.verify_visibility_view_contains(self.video_location, ['Cat Lovers', 'Dog Lovers'])
def test_html_no_partition_staff_locked(self):
self.set_staff_only(self.vertical_location)
self.verify_visibility_view_contains(self.video_location, ['No content groups exist'])
def test_html_empty_partition_staff_locked(self):
self.create_content_groups([])
self.set_staff_only(self.vertical_location)
self.verify_visibility_view_contains(self.video_location, 'No content groups exist')
def test_html_populated_partition_staff_locked(self):
self.create_content_groups(self.pet_groups)
self.set_staff_only(self.vertical_location)
self.verify_visibility_view_contains(
self.video_location,
['The Unit this component is contained in is hidden from students.', 'Cat Lovers', 'Dog Lovers']
)
def test_html_false_content_group(self):
self.create_content_groups(self.pet_groups)
self.set_group_access(self.video_location, ['false_group_id'])
self.verify_visibility_view_contains(
self.video_location, ['Cat Lovers', 'Dog Lovers', 'Content group no longer exists.']
)
def test_html_false_content_group_staff_locked(self):
self.create_content_groups(self.pet_groups)
self.set_staff_only(self.vertical_location)
self.set_group_access(self.video_location, ['false_group_id'])
self.verify_visibility_view_contains(
self.video_location,
[
'Cat Lovers',
'Dog Lovers',
'The Unit this component is contained in is hidden from students.',
'Content group no longer exists.'
]
)
def test_html_verification_checkpoints(self):
self.create_verification_user_partitions(["Midterm A", "Midterm B"])
self.verify_visibility_view_contains(
self.video_location,
[
"Verification Checkpoint",
"Midterm A",
"Midterm B",
]
)
|
agpl-3.0
|
utecuy/edx-platform
|
lms/djangoapps/notifier_api/tests.py
|
115
|
9872
|
import itertools
import ddt
from django.conf import settings
from django.test.client import RequestFactory
from django.test.utils import override_settings
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from django_comment_common.models import Role, Permission
from lang_pref import LANGUAGE_KEY
from notification_prefs import NOTIFICATION_PREF_KEY
from notifier_api.views import NotifierUsersViewSet
from opaque_keys.edx.locator import CourseLocator
from student.models import CourseEnrollment
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from openedx.core.djangoapps.user_api.models import UserPreference
from openedx.core.djangoapps.user_api.tests.factories import UserPreferenceFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
@override_settings(EDX_API_KEY="test_api_key")
class NotifierUsersViewSetTest(UrlResetMixin, ModuleStoreTestCase):
def setUp(self):
super(NotifierUsersViewSetTest, self).setUp()
self.courses = []
self.cohorts = []
self.user = UserFactory()
self.notification_pref = UserPreferenceFactory(
user=self.user,
key=NOTIFICATION_PREF_KEY,
value="notification pref test value"
)
self.list_view = NotifierUsersViewSet.as_view({"get": "list"})
self.detail_view = NotifierUsersViewSet.as_view({"get": "retrieve"})
def _set_up_course(self, is_course_cohorted, is_user_cohorted, is_moderator):
cohort_config = {"cohorted": True} if is_course_cohorted else {}
course = CourseFactory(
number=("TestCourse{}".format(len(self.courses))),
cohort_config=cohort_config
)
self.courses.append(course)
CourseEnrollmentFactory(user=self.user, course_id=course.id)
if is_user_cohorted:
cohort = CourseUserGroup.objects.create(
name="Test Cohort",
course_id=course.id,
group_type=CourseUserGroup.COHORT
)
cohort.users.add(self.user)
self.cohorts.append(cohort)
if is_moderator:
moderator_perm, _ = Permission.objects.get_or_create(name="see_all_cohorts")
moderator_role = Role.objects.create(name="Moderator", course_id=course.id)
moderator_role.permissions.add(moderator_perm)
self.user.roles.add(moderator_role)
def _assert_basic_user_info_correct(self, user, result_user):
self.assertEqual(result_user["id"], user.id)
self.assertEqual(result_user["email"], user.email)
self.assertEqual(result_user["name"], user.profile.name)
def test_without_api_key(self):
request = RequestFactory().get("dummy")
for view in [self.list_view, self.detail_view]:
response = view(request)
self.assertEqual(response.status_code, 403)
# Detail view tests
def _make_detail_request(self):
request = RequestFactory().get("dummy", HTTP_X_EDX_API_KEY=settings.EDX_API_KEY)
return self.detail_view(
request,
**{NotifierUsersViewSet.lookup_field: str(self.user.id)}
)
def _get_detail(self):
response = self._make_detail_request()
self.assertEqual(response.status_code, 200)
self.assertEqual(
set(response.data.keys()),
{"id", "email", "name", "preferences", "course_info"}
)
return response.data
def test_detail_invalid_user(self):
UserPreference.objects.all().delete()
response = self._make_detail_request()
self.assertEqual(response.status_code, 404)
def test_basic_user_info(self):
result = self._get_detail()
self._assert_basic_user_info_correct(self.user, result)
def test_course_info(self):
expected_course_info = {}
for is_course_cohorted, is_user_cohorted, is_moderator in (
itertools.product([True, False], [True, False], [True, False])
):
self._set_up_course(is_course_cohorted, is_user_cohorted, is_moderator)
expected_course_info[unicode(self.courses[-1].id)] = {
"cohort_id": self.cohorts[-1].id if is_user_cohorted else None,
"see_all_cohorts": is_moderator or not is_course_cohorted
}
result = self._get_detail()
self.assertEqual(result["course_info"], expected_course_info)
def test_course_info_unenrolled(self):
self._set_up_course(False, False, False)
course_id = self.courses[0].id
CourseEnrollment.unenroll(self.user, course_id)
result = self._get_detail()
self.assertNotIn(unicode(course_id), result["course_info"])
def test_course_info_no_enrollments(self):
result = self._get_detail()
self.assertEqual(result["course_info"], {})
def test_course_info_non_existent_course_enrollment(self):
CourseEnrollmentFactory(
user=self.user,
course_id=CourseLocator(org="dummy", course="dummy", run="non_existent")
)
result = self._get_detail()
self.assertEqual(result["course_info"], {})
def test_preferences(self):
lang_pref = UserPreferenceFactory(
user=self.user,
key=LANGUAGE_KEY,
value="language pref test value"
)
UserPreferenceFactory(user=self.user, key="non_included_key")
result = self._get_detail()
self.assertEqual(
result["preferences"],
{
NOTIFICATION_PREF_KEY: self.notification_pref.value,
LANGUAGE_KEY: lang_pref.value,
}
)
# List view tests
def _make_list_request(self, page, page_size):
request = RequestFactory().get(
"dummy",
{"page": page, "page_size": page_size},
HTTP_X_EDX_API_KEY=settings.EDX_API_KEY
)
return self.list_view(request)
def _get_list(self, page=1, page_size=None):
response = self._make_list_request(page, page_size)
self.assertEqual(response.status_code, 200)
self.assertEqual(
set(response.data.keys()),
{"count", "next", "previous", "results"}
)
return response.data["results"]
def test_no_users(self):
UserPreference.objects.all().delete()
results = self._get_list()
self.assertEqual(len(results), 0)
def test_multiple_users(self):
other_user = UserFactory()
other_notification_pref = UserPreferenceFactory(
user=other_user,
key=NOTIFICATION_PREF_KEY,
value="other value"
)
self._set_up_course(is_course_cohorted=True, is_user_cohorted=True, is_moderator=False)
self._set_up_course(is_course_cohorted=False, is_user_cohorted=False, is_moderator=False)
# Users have different sets of enrollments
CourseEnrollmentFactory(user=other_user, course_id=self.courses[0].id)
result_map = {result["id"]: result for result in self._get_list()}
self.assertEqual(set(result_map.keys()), {self.user.id, other_user.id})
for user in [self.user, other_user]:
self._assert_basic_user_info_correct(user, result_map[user.id])
self.assertEqual(
result_map[self.user.id]["preferences"],
{NOTIFICATION_PREF_KEY: self.notification_pref.value}
)
self.assertEqual(
result_map[other_user.id]["preferences"],
{NOTIFICATION_PREF_KEY: other_notification_pref.value}
)
self.assertEqual(
result_map[self.user.id]["course_info"],
{
unicode(self.courses[0].id): {
"cohort_id": self.cohorts[0].id,
"see_all_cohorts": False,
},
unicode(self.courses[1].id): {
"cohort_id": None,
"see_all_cohorts": True,
},
}
)
self.assertEqual(
result_map[other_user.id]["course_info"],
{
unicode(self.courses[0].id): {
"cohort_id": None,
"see_all_cohorts": False,
},
}
)
@ddt.data(
3, # Factor of num of results
5, # Non-factor of num of results
12, # Num of results
15 # More than num of results
)
def test_pagination(self, page_size):
num_users = 12
users = [self.user]
while len(users) < num_users:
new_user = UserFactory()
users.append(new_user)
UserPreferenceFactory(user=new_user, key=NOTIFICATION_PREF_KEY)
num_pages = (num_users - 1) / page_size + 1
result_list = []
for i in range(1, num_pages + 1):
result_list.extend(self._get_list(page=i, page_size=page_size))
result_map = {result["id"]: result for result in result_list}
self.assertEqual(len(result_list), num_users)
for user in users:
self._assert_basic_user_info_correct(user, result_map[user.id])
self.assertEqual(
self._make_list_request(page=(num_pages + 1), page_size=page_size).status_code,
404
)
def test_db_access(self):
for _ in range(10):
new_user = UserFactory()
UserPreferenceFactory(user=new_user, key=NOTIFICATION_PREF_KEY)
# The number of queries is one for the users plus one for each prefetch
# in NotifierUsersViewSet (roles__permissions does one for each table).
with self.assertNumQueries(6):
self._get_list()
|
agpl-3.0
|
xsmart/opencvr
|
3rdparty/miniupnpc/pymoduletest.py
|
158
|
1377
|
#! /usr/bin/python
# MiniUPnP project
# Author : Thomas Bernard
# This Sample code is public domain.
# website : http://miniupnp.tuxfamily.org/
# import the python miniupnpc module
import miniupnpc
import sys
# create the object
u = miniupnpc.UPnP()
print 'initial (default) values :'
print ' discoverdelay', u.discoverdelay
print ' lanaddr', u.lanaddr
print ' multicastif', u.multicastif
print ' minissdpdsocket', u.minissdpdsocket
u.discoverdelay = 200;
#u.minissdpdsocket = '../minissdpd/minissdpd.sock'
# discovery process; it usually takes several seconds (2 seconds or more)
print 'Discovering... delay=%ums' % u.discoverdelay
print u.discover(), 'device(s) detected'
# select an igd
try:
u.selectigd()
except Exception, e:
print 'Exception :', e
sys.exit(1)
# display information about the IGD and the internet connection
print 'local ip address :', u.lanaddr
print 'external ip address :', u.externalipaddress()
print u.statusinfo(), u.connectiontype()
#print u.addportmapping(64000, 'TCP',
# '192.168.1.166', 63000, 'port mapping test', '')
#print u.deleteportmapping(64000, 'TCP')
port = 0
proto = 'UDP'
# list the redirections :
i = 0
while True:
p = u.getgenericportmapping(i)
if p==None:
break
print i, p
(port, proto, (ihost,iport), desc, c, d, e) = p
#print port, desc
i = i + 1
print u.getspecificportmapping(port, proto)
|
mit
|
ted-gould/nova
|
nova/tests/unit/volume/test_cinder.py
|
9
|
16912
|
# Copyright 2013 Mirantis, Inc.
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import exceptions as cinder_exception
import mock
from nova import context
from nova import exception
from nova import test
from nova.volume import cinder
class FakeCinderClient(object):
class Volumes(object):
def get(self, volume_id):
return {'id': volume_id}
def list(self, detailed, search_opts=None):
if search_opts is not None and 'id' in search_opts:
return [{'id': search_opts['id']}]
else:
return [{'id': 'id1'}, {'id': 'id2'}]
def create(self, *args, **kwargs):
return {'id': 'created_id'}
def __getattr__(self, item):
return None
def __init__(self):
self.volumes = self.Volumes()
self.volume_snapshots = self.volumes
class FakeVolume(object):
def __init__(self, dict=dict()):
self.id = dict.get('id') or '1234'
self.status = dict.get('status') or 'available'
self.size = dict.get('size') or 1
self.availability_zone = dict.get('availability_zone') or 'cinder'
self.created_at = dict.get('created_at')
self.attach_time = dict.get('attach_time')
self.mountpoint = dict.get('mountpoint')
self.display_name = dict.get('display_name') or 'volume-' + self.id
self.display_description = dict.get('display_description') or 'fake'
self.volume_type_id = dict.get('volume_type_id')
self.snapshot_id = dict.get('snapshot_id')
self.metadata = dict.get('volume_metadata') or {}
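# FakeCinderClient and FakeVolume above stub out just enough of the
# python-cinderclient surface for these tests; FakeVolume's dict=dict() default
# is shared between calls but only read, never mutated, so reuse is harmless.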
class CinderApiTestCase(test.NoDBTestCase):
def setUp(self):
super(CinderApiTestCase, self).setUp()
self.api = cinder.API()
self.cinderclient = FakeCinderClient()
self.ctx = context.get_admin_context()
self.mox.StubOutWithMock(cinder, 'cinderclient')
self.mox.StubOutWithMock(cinder, '_untranslate_volume_summary_view')
self.mox.StubOutWithMock(cinder, '_untranslate_snapshot_summary_view')
def test_get(self):
volume_id = 'volume_id1'
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_volume_summary_view(self.ctx, {'id': 'volume_id1'})
self.mox.ReplayAll()
self.api.get(self.ctx, volume_id)
def test_get_failed(self):
volume_id = 'volume_id'
cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound(''))
cinder.cinderclient(self.ctx).AndRaise(cinder_exception.BadRequest(''))
cinder.cinderclient(self.ctx).AndRaise(
cinder_exception.ConnectionError(''))
self.mox.ReplayAll()
self.assertRaises(exception.VolumeNotFound,
self.api.get, self.ctx, volume_id)
self.assertRaises(exception.InvalidInput,
self.api.get, self.ctx, volume_id)
self.assertRaises(exception.CinderConnectionFailed,
self.api.get, self.ctx, volume_id)
def test_create(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_volume_summary_view(self.ctx, {'id': 'created_id'})
self.mox.ReplayAll()
self.api.create(self.ctx, 1, '', '')
@mock.patch('nova.volume.cinder.cinderclient')
def test_create_failed(self, mock_cinderclient):
mock_cinderclient.return_value.volumes.create.side_effect = (
cinder_exception.BadRequest(''))
self.assertRaises(exception.InvalidInput,
self.api.create, self.ctx, 1, '', '')
@mock.patch('nova.volume.cinder.cinderclient')
def test_create_over_quota_failed(self, mock_cinderclient):
mock_cinderclient.return_value.volumes.create.side_effect = (
cinder_exception.OverLimit(413))
self.assertRaises(exception.OverQuota, self.api.create, self.ctx,
1, '', '')
mock_cinderclient.return_value.volumes.create.assert_called_once_with(
1, user_id=None, imageRef=None, availability_zone=None,
volume_type=None, description='', snapshot_id=None, name='',
project_id=None, metadata=None)
def test_get_all(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_volume_summary_view(self.ctx,
{'id': 'id1'}).AndReturn('id1')
cinder._untranslate_volume_summary_view(self.ctx,
{'id': 'id2'}).AndReturn('id2')
self.mox.ReplayAll()
self.assertEqual(['id1', 'id2'], self.api.get_all(self.ctx))
def test_get_all_with_search(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_volume_summary_view(self.ctx,
{'id': 'id1'}).AndReturn('id1')
self.mox.ReplayAll()
self.assertEqual(['id1'], self.api.get_all(self.ctx,
search_opts={'id': 'id1'}))
def test_check_attach_volume_status_error(self):
volume = {'id': 'fake', 'status': 'error'}
self.assertRaises(exception.InvalidVolume,
self.api.check_attach, self.ctx, volume)
def test_check_attach_volume_already_attached(self):
volume = {'id': 'fake', 'status': 'available'}
volume['attach_status'] = "attached"
self.assertRaises(exception.InvalidVolume,
self.api.check_attach, self.ctx, volume)
def test_check_attach_availability_zone_differs(self):
volume = {'id': 'fake', 'status': 'available'}
volume['attach_status'] = "detached"
instance = {'id': 'fake',
'availability_zone': 'zone1', 'host': 'fakehost'}
with mock.patch.object(cinder.az, 'get_instance_availability_zone',
side_effect=lambda context,
instance: 'zone1') as mock_get_instance_az:
cinder.CONF.set_override('cross_az_attach', False, group='cinder')
volume['availability_zone'] = 'zone1'
self.assertIsNone(self.api.check_attach(self.ctx,
volume, instance))
mock_get_instance_az.assert_called_once_with(self.ctx, instance)
mock_get_instance_az.reset_mock()
volume['availability_zone'] = 'zone2'
self.assertRaises(exception.InvalidVolume,
self.api.check_attach, self.ctx, volume, instance)
mock_get_instance_az.assert_called_once_with(self.ctx, instance)
mock_get_instance_az.reset_mock()
del instance['host']
volume['availability_zone'] = 'zone1'
self.assertIsNone(self.api.check_attach(
self.ctx, volume, instance))
mock_get_instance_az.assert_called_once_with(self.ctx, instance)
mock_get_instance_az.reset_mock()
volume['availability_zone'] = 'zone2'
self.assertRaises(exception.InvalidVolume,
self.api.check_attach, self.ctx, volume, instance)
mock_get_instance_az.assert_called_once_with(self.ctx, instance)
cinder.CONF.reset()
def test_check_attach(self):
volume = {'status': 'available'}
volume['attach_status'] = "detached"
volume['availability_zone'] = 'zone1'
instance = {'availability_zone': 'zone1', 'host': 'fakehost'}
cinder.CONF.set_override('cross_az_attach', False, group='cinder')
with mock.patch.object(cinder.az, 'get_instance_availability_zone',
side_effect=lambda context, instance: 'zone1'):
self.assertIsNone(self.api.check_attach(
self.ctx, volume, instance))
cinder.CONF.reset()
def test_check_detach(self):
volume = {'id': 'fake', 'status': 'available'}
self.assertRaises(exception.InvalidVolume,
self.api.check_detach, self.ctx, volume)
volume['status'] = 'non-available'
self.assertIsNone(self.api.check_detach(self.ctx, volume))
def test_reserve_volume(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'reserve',
use_mock_anything=True)
self.cinderclient.volumes.reserve('id1')
self.mox.ReplayAll()
self.api.reserve_volume(self.ctx, 'id1')
def test_unreserve_volume(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'unreserve',
use_mock_anything=True)
self.cinderclient.volumes.unreserve('id1')
self.mox.ReplayAll()
self.api.unreserve_volume(self.ctx, 'id1')
def test_begin_detaching(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'begin_detaching',
use_mock_anything=True)
self.cinderclient.volumes.begin_detaching('id1')
self.mox.ReplayAll()
self.api.begin_detaching(self.ctx, 'id1')
def test_roll_detaching(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'roll_detaching',
use_mock_anything=True)
self.cinderclient.volumes.roll_detaching('id1')
self.mox.ReplayAll()
self.api.roll_detaching(self.ctx, 'id1')
@mock.patch('nova.volume.cinder.cinderclient')
def test_attach(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.attach(self.ctx, 'id1', 'uuid', 'point')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point',
mode='rw')
@mock.patch('nova.volume.cinder.cinderclient')
def test_attach_with_mode(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.attach(self.ctx, 'id1', 'uuid', 'point', mode='ro')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point',
mode='ro')
def test_detach(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'detach',
use_mock_anything=True)
self.cinderclient.volumes.detach('id1')
self.mox.ReplayAll()
self.api.detach(self.ctx, 'id1')
def test_initialize_connection(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'initialize_connection',
use_mock_anything=True)
self.cinderclient.volumes.initialize_connection('id1', 'connector')
self.mox.ReplayAll()
self.api.initialize_connection(self.ctx, 'id1', 'connector')
def test_terminate_connection(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'terminate_connection',
use_mock_anything=True)
self.cinderclient.volumes.terminate_connection('id1', 'connector')
self.mox.ReplayAll()
self.api.terminate_connection(self.ctx, 'id1', 'connector')
def test_delete(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'delete',
use_mock_anything=True)
self.cinderclient.volumes.delete('id1')
self.mox.ReplayAll()
self.api.delete(self.ctx, 'id1')
def test_update(self):
self.assertRaises(NotImplementedError,
self.api.update, self.ctx, '', '')
def test_get_snapshot(self):
snapshot_id = 'snapshot_id'
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_snapshot_summary_view(self.ctx,
{'id': snapshot_id})
self.mox.ReplayAll()
self.api.get_snapshot(self.ctx, snapshot_id)
def test_get_snapshot_failed(self):
snapshot_id = 'snapshot_id'
cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound(''))
cinder.cinderclient(self.ctx).AndRaise(
cinder_exception.ConnectionError(''))
self.mox.ReplayAll()
self.assertRaises(exception.SnapshotNotFound,
self.api.get_snapshot, self.ctx, snapshot_id)
self.assertRaises(exception.CinderConnectionFailed,
self.api.get_snapshot, self.ctx, snapshot_id)
def test_get_all_snapshots(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_snapshot_summary_view(self.ctx,
{'id': 'id1'}).AndReturn('id1')
cinder._untranslate_snapshot_summary_view(self.ctx,
{'id': 'id2'}).AndReturn('id2')
self.mox.ReplayAll()
self.assertEqual(['id1', 'id2'], self.api.get_all_snapshots(self.ctx))
def test_create_snapshot(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_snapshot_summary_view(self.ctx,
{'id': 'created_id'})
self.mox.ReplayAll()
self.api.create_snapshot(self.ctx, {'id': 'id1'}, '', '')
def test_create_force(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_snapshot_summary_view(self.ctx,
{'id': 'created_id'})
self.mox.ReplayAll()
self.api.create_snapshot_force(self.ctx, {'id': 'id1'}, '', '')
def test_delete_snapshot(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volume_snapshots,
'delete',
use_mock_anything=True)
self.cinderclient.volume_snapshots.delete('id1')
self.mox.ReplayAll()
self.api.delete_snapshot(self.ctx, 'id1')
def test_update_snapshot_status(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volume_snapshots,
'update_snapshot_status',
use_mock_anything=True)
self.cinderclient.volume_snapshots.update_snapshot_status(
'id1', {'status': 'error', 'progress': '90%'})
self.mox.ReplayAll()
self.api.update_snapshot_status(self.ctx, 'id1', 'error')
def test_get_volume_encryption_metadata(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'get_encryption_metadata',
use_mock_anything=True)
self.cinderclient.volumes.\
get_encryption_metadata({'encryption_key_id': 'fake_key'})
self.mox.ReplayAll()
self.api.get_volume_encryption_metadata(self.ctx,
{'encryption_key_id':
'fake_key'})
|
apache-2.0
|
quarkslab/irma
|
frontend/extras/migration/versions/2cc69d5c53eb_db_revision_creation.py
|
3
|
8295
|
"""DB revision creation
Revision ID: 2cc69d5c53eb
Revises:
Create Date: 2015-05-20 13:54:25.433439
"""
# revision identifiers, used by Alembic.
revision = '2cc69d5c53eb'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
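# Usage sketch (not part of the generated revision): since down_revision is
# None this is the initial revision, and it would normally be applied and
# reverted with the standard Alembic CLI, e.g.:
#
#   alembic upgrade 2cc69d5c53eb   # or: alembic upgrade head
#   alembic downgrade base         # runs downgrade() below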
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('irma_file',
sa.Column('id',
sa.Integer(),
nullable=False),
sa.Column('sha256',
sa.String(length=64),
nullable=True),
sa.Column('sha1',
sa.String(length=40),
nullable=True),
sa.Column('md5',
sa.String(length=32),
nullable=True),
sa.Column('timestamp_first_scan',
sa.Float(precision=2),
nullable=False),
sa.Column('timestamp_last_scan',
sa.Float(precision=2),
nullable=False),
sa.Column('size',
sa.Integer(),
nullable=True),
sa.Column('path',
sa.String(length=255),
nullable=True),
sa.PrimaryKeyConstraint('id'),
sqlite_autoincrement=True
)
op.create_index(op.f('ix_irma_file_md5'),
'irma_file', ['md5'],
unique=False)
op.create_index(op.f('ix_irma_file_sha1'),
'irma_file', ['sha1'],
unique=False)
op.create_index(op.f('ix_irma_file_sha256'),
'irma_file', ['sha256'],
unique=False)
op.create_table('irma_scan',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('external_id', sa.String(length=36),
nullable=False),
sa.Column('date', sa.Integer(), nullable=False),
sa.Column('ip', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sqlite_autoincrement=True
)
op.create_index(op.f('ix_irma_scan_external_id'),
'irma_scan',
['external_id'],
unique=False)
op.create_table('irma_submission',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('external_id', sa.String(length=36),
nullable=False),
sa.Column('os_name', sa.String(), nullable=False),
sa.Column('username', sa.String(), nullable=False),
sa.Column('ip', sa.String(), nullable=False),
sa.Column('date', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sqlite_autoincrement=True
)
op.create_index(op.f('ix_irma_submission_external_id'),
'irma_submission',
['external_id'],
unique=False)
op.create_table('irma_tag',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sqlite_autoincrement=True
)
op.create_table('irma_fileAgent',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('submission_path',
sa.String(length=255),
nullable=False),
sa.Column('id_file', sa.Integer(), nullable=False),
sa.Column('id_s', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['id_file'], ['irma_file.id'], ),
sa.ForeignKeyConstraint(['id_s'],
['irma_submission.id'],),
sa.PrimaryKeyConstraint('id'),
sqlite_autoincrement=True
)
op.create_table('irma_fileWeb',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('scan_file_idx', sa.Integer(), nullable=False),
sa.Column('id_file', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('id_scan', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['id_file'], ['irma_file.id'], ),
sa.ForeignKeyConstraint(['id_scan'], ['irma_scan.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id_scan', 'scan_file_idx')
)
op.create_table('irma_probeResult',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('type', sa.String(), nullable=True),
sa.Column('name', sa.String(), nullable=False),
sa.Column('nosql_id', sa.String(), nullable=True),
sa.Column('status', sa.Integer(), nullable=True),
sa.Column('id_file', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['id_file'], ['irma_file.id'], ),
sa.PrimaryKeyConstraint('id'),
sqlite_autoincrement=True
)
op.create_table('irma_scanEvents',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('status', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.Float(precision=2),
nullable=False),
sa.Column('id_scan', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['id_scan'], ['irma_scan.id'], ),
sa.PrimaryKeyConstraint('id'),
sqlite_autoincrement=True
)
op.create_index(op.f('ix_irma_scanEvents_id_scan'),
'irma_scanEvents',
['id_scan'], unique=False)
op.create_table('irma_tag_file',
sa.Column('id_tag', sa.Integer(), nullable=True),
sa.Column('id_file', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['id_file'], ['irma_file.id'], ),
sa.ForeignKeyConstraint(['id_tag'], ['irma_tag.id'], )
)
op.create_table('irma_probeResult_fileWeb',
sa.Column('id_fw', sa.Integer(), nullable=True),
sa.Column('id_pr', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['id_fw'], ['irma_fileWeb.id'], ),
sa.ForeignKeyConstraint(['id_pr'],
['irma_probeResult.id'],)
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('irma_probeResult_fileWeb')
op.drop_table('irma_tag_file')
op.drop_index(op.f('ix_irma_scanEvents_id_scan'),
table_name='irma_scanEvents')
op.drop_table('irma_scanEvents')
op.drop_table('irma_probeResult')
op.drop_table('irma_fileWeb')
op.drop_table('irma_fileAgent')
op.drop_table('irma_tag')
op.drop_index(op.f('ix_irma_submission_external_id'),
table_name='irma_submission')
op.drop_table('irma_submission')
op.drop_index(op.f('ix_irma_scan_external_id'),
table_name='irma_scan')
op.drop_table('irma_scan')
op.drop_index(op.f('ix_irma_file_sha256'),
table_name='irma_file')
op.drop_index(op.f('ix_irma_file_sha1'),
table_name='irma_file')
op.drop_index(op.f('ix_irma_file_md5'),
table_name='irma_file')
op.drop_table('irma_file')
### end Alembic commands ###
|
apache-2.0
|
elennon/ndMVC
|
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/xcode.py
|
1363
|
58344
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import gyp.xcode_ninja
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'ios_app_extension',
'ios_watch_app',
'ios_watchkit_extension',
'mac_bundle',
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
'mac_xctest_bundle',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
]
generator_filelist_paths = None
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
'$(SDKROOT)/usr/lib',
'$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
if len(configuration_names) == 0:
configuration_names = ['Default']
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
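# A small illustrative sketch of how CreateXCConfigurationList behaves
# (the configuration names here are hypothetical):
#
#   xccl = CreateXCConfigurationList(['Debug', 'Release'])
#   # -> an XCConfigurationList holding one XCBuildConfiguration per name,
#   #    with defaultConfigurationName set to 'Debug' (the first name).
#   CreateXCConfigurationList([])
#   # -> falls back to a single 'Default' configuration.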
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
# various targets in the file. It is strongly advised that every target
# in a project (even across multiple project files) use the same set of
# configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings and config files. This
# is intended to be used very sparingly. Really, almost everything should
# go into target-specific build settings sections. The project-wide
# settings are only intended to be used in cases where Xcode attempts to
# resolve variable references in a project context as opposed to a target
# context, such as when resolving sourceTree references while building up
# the tree view for UI display.
# Any values set globally are applied to all configurations, then any
# per-configuration values are applied.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in self.build_file_dict:
config_ref = self.project.AddOrGetFileInRootGroup(
self.build_file_dict['xcode_config_file'])
xccl.SetBaseConfiguration(config_ref)
build_file_configurations = self.build_file_dict.get('configurations', {})
if build_file_configurations:
for config_name in configurations:
build_file_configuration_named = \
build_file_configurations.get(config_name, {})
if build_file_configuration_named:
xcc = xccl.ConfigurationNamed(config_name)
for xck, xcv in build_file_configuration_named.get('xcode_settings',
{}).iteritems():
xcc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in build_file_configuration_named:
config_ref = self.project.AddOrGetFileInRootGroup(
build_file_configurations[config_name]['xcode_config_file'])
xcc.SetBaseConfiguration(config_ref)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
# targets is the full list of targets in the project.
targets = []
# does it define its own "all"?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
has_custom_all = True
# If this target has a 'run_as' attribute, add its target to the
# targets, and add it to the test targets.
if target.get('run_as'):
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
command = target['run_as']
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
# Some tests end up using sockets, files on disk, etc. and can get
# confused if more than one test runs at a time. The generator flag
# 'xcode_serialize_all_test_runs' controls whether all test runs are
# forced to be serial. It defaults to True. To get serial runs, this
# little bit of Python does the same as the Linux flock utility to
# make sure only one runs at a time.
command_prefix = ''
if serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
# and generate a second target that will run the tests runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
if hasattr(dependency_xct, 'test_runner'):
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
def AddSourceToTarget(source, type, pbxp, xct):
# TODO(mark): Perhaps source_extensions and library_extensions can be made a
# little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's', 'swift']
# .o is conceptually more of a "source" than a "library," but Xcode thinks
# of "sources" as things to compile and "libraries" (or "frameworks") as
# things to link with. Adding an object file to an Xcode target's frameworks
# phase works properly.
library_extensions = ['a', 'dylib', 'framework', 'o']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext:
ext = ext[1:].lower()
if ext in source_extensions and type != 'none':
xct.SourcesPhase().AddFile(source)
elif ext in library_extensions and type != 'none':
xct.FrameworksPhase().AddFile(source)
else:
# Files that aren't added to a sources or frameworks build phase can still
# go into the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
settings = '{ATTRIBUTES = (%s, ); }' % ('Private', 'Public')[is_public]
xct.HeadersPhase().AddFile(header, settings)
_xcode_variable_re = re.compile(r'(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
if matches is None:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
if not variable in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
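# A minimal usage sketch of ExpandXcodeVariables, assuming an expansions dict
# shaped like the rule_input_dict built for rule sources in GenerateOutput
# (the file names here are illustrative):
#
#   rule_input_dict = {'INPUT_FILE_BASE': 'two', 'INPUT_FILE_SUFFIX': '.ext'}
#   ExpandXcodeVariables('$(INPUT_FILE_BASE).cc', rule_input_dict)
#   # -> 'two.cc'
#   ExpandXcodeVariables('$(UNKNOWN_VAR).cc', rule_input_dict)
#   # -> '$(UNKNOWN_VAR).cc' (variables not in the dict are left untouched)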
_xcode_define_re = re.compile(r'([\\\"\' ])')
def EscapeXcodeDefine(s):
"""We must escape the defines that we give to XCode so that it knows not to
split on spaces and to respect backslash and quote literals. However, we
must not quote the define, or Xcode will incorrectly intepret variables
especially $(inherited)."""
return re.sub(_xcode_define_re, r'\\\1', s)
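# A quick illustrative sketch of EscapeXcodeDefine (the define values are
# made up): spaces, quotes and backslashes each gain a single backslash
# prefix, which keeps Xcode from splitting the define while leaving it
# unquoted.
#
#   EscapeXcodeDefine('NDEBUG')      # -> 'NDEBUG' (nothing to escape)
#   EscapeXcodeDefine('MSG="a b"')   # -> 'MSG=\\"a\\ b\\"'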
def PerformBuild(data, configurations, params):
options = params['options']
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
for config in configurations:
arguments = ['xcodebuild', '-project', xcodeproj_path]
arguments += ['-configuration', config]
print "Building [%s]: %s" % (config, arguments)
subprocess.check_call(arguments)
def CalculateGeneratorInputInfo(params):
toplevel = params['options'].toplevel_dir
if params.get('flavor') == 'ninja':
generator_dir = os.path.relpath(params['options'].generator_output or '.')
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
output_dir = os.path.normpath(os.path.join(generator_dir, output_dir))
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, output_dir, 'gypfiles-xcode-ninja'))
else:
output_dir = os.path.normpath(os.path.join(toplevel, 'xcodebuild'))
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
# Optionally configure each spec to use ninja as the external builder.
ninja_wrapper = params.get('flavor') == 'ninja'
if ninja_wrapper:
(target_list, target_dicts, data) = \
gyp.xcode_ninja.CreateWrapper(target_list, target_dicts, data, params)
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
upgrade_check_project_version = \
generator_flags.get('xcode_upgrade_check_project_version', None)
# Format upgrade_check_project_version with leading zeros as needed.
if upgrade_check_project_version:
upgrade_check_project_version = str(upgrade_check_project_version)
while len(upgrade_check_project_version) < 4:
upgrade_check_project_version = '0' + upgrade_check_project_version
skip_excluded_files = \
not generator_flags.get('xcode_list_excluded_files', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
# Set project-level attributes from multiple options
project_attributes = {}
if parallel_builds:
project_attributes['BuildIndependentTargetsInParallel'] = 'YES'
if upgrade_check_project_version:
project_attributes['LastUpgradeCheck'] = upgrade_check_project_version
project_attributes['LastTestingUpgradeCheck'] = \
upgrade_check_project_version
project_attributes['LastSwiftUpdateCheck'] = \
upgrade_check_project_version
pbxp.SetProperty('attributes', project_attributes)
# Add gyp/gypi files to project
if not generator_flags.get('standalone'):
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. The type with
# "+bundle" appended will be used if the target has "mac_bundle" set.
# loadable_modules not in a mac_bundle are mapped to
# com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
# to create a single-file mh_bundle.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.googlecode.gyp.xcode.bundle',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'mac_kernel_extension': 'com.apple.product-type.kernel-extension',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test',
'shared_library+bundle': 'com.apple.product-type.framework',
'executable+extension+bundle': 'com.apple.product-type.app-extension',
'executable+watch+extension+bundle':
'com.apple.product-type.watchkit-extension',
'executable+watch+bundle':
'com.apple.product-type.application.watchapp',
'mac_kernel_extension+bundle': 'com.apple.product-type.kernel-extension',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_xctest = int(spec.get('mac_xctest_bundle', 0))
is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest
is_app_extension = int(spec.get('ios_app_extension', 0))
is_watchkit_extension = int(spec.get('ios_watchkit_extension', 0))
is_watch_app = int(spec.get('ios_watch_app', 0))
if type != 'none':
type_bundle_key = type
if is_xctest:
type_bundle_key += '+xctest'
assert type == 'loadable_module', (
'mac_xctest_bundle targets must have type loadable_module '
'(target %s)' % target_name)
elif is_app_extension:
assert is_bundle, ('ios_app_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+extension+bundle'
elif is_watchkit_extension:
assert is_bundle, ('ios_watchkit_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+extension+bundle'
elif is_watch_app:
assert is_bundle, ('ios_watch_app flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+bundle'
elif is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
assert not is_bundle, (
'mac_bundle targets cannot have type none (target "%s")' %
target_name)
assert not is_xctest, (
'mac_xctest_bundle targets cannot have type none (target "%s")' %
target_name)
target_product_name = spec.get('product_name')
if target_product_name is not None:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_outdir=spec.get('product_dir'),
force_prefix=spec.get('product_prefix'),
force_extension=spec.get('product_extension'))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
# The Xcode "issues" don't affect xcode-ninja builds, since the dependency
# logic all happens in ninja. Don't bother creating the extra targets in
# that case.
if type != 'none' and (spec_actions or spec_rules) and not ninja_wrapper:
support_xccl = CreateXCConfigurationList(configuration_names)
support_target_suffix = generator_flags.get(
'support_target_suffix', ' Support')
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + support_target_suffix,
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, type, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
# Apple radar IDs. I hope that these shortcomings are addressed, I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
# variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_dirname, rule_source_basename = \
posixpath.split(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
'INPUT_FILE_DIRNAME': rule_source_dirname,
}
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
# Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, type, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = ExpandXcodeVariables(message, rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
# TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s.make' % re.sub(
'[^a-zA-Z0-9_]', '_' , '%s_%s' % (target_name, rule['rule_name']))
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# Otherwise, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
if len(concrete_output_dirs) > 0:
makefile.write('\t@mkdir -p "%s"\n' %
'" "'.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
# Mark it with note: so Xcode picks it up in build output.
makefile.write('\t@echo note: %s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
groups = ['inputs', 'inputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for group in groups:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, type, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" and "mac_framework_private_headers" if
# it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
for header in spec.get('mac_framework_private_headers', []):
AddHeaderToTarget(header, pbxp, xct, False)
# Add "mac_framework_headers". These can be valid for both frameworks
# and static libraries.
if is_bundle or type == 'static_library':
for header in spec.get('mac_framework_headers', []):
AddHeaderToTarget(header, pbxp, xct, True)
# Add "copies".
pbxcp_dict = {}
for copy_group in spec.get('copies', []):
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
code_sign = int(copy_group.get('xcode_code_sign', 0))
settings = (None, '{ATTRIBUTES = (CodeSignOnCopy, ); }')[code_sign]
# Coalesce multiple "copies" sections in the same target with the same
# "destination" property into the same PBXCopyFilesBuildPhase, otherwise
# they'll wind up with ID collisions.
pbxcp = pbxcp_dict.get(dest, None)
if pbxcp is None:
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
pbxcp_dict[dest] = pbxcp
for file in copy_group['files']:
pbxcp.AddFile(file, settings)
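# Illustration only (hypothetical 'copies' input): two entries sharing a
# destination are coalesced into one PBXCopyFilesBuildPhase, e.g.
#
#   'copies': [
#     {'destination': 'resources', 'files': ['a.png']},
#     {'destination': 'resources', 'files': ['b.png']},
#   ]
#
# produces a single "Copy to resources" phase containing both files.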
# Excluded files can also go into the project file.
if not skip_excluded_files:
for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
'mac_framework_private_headers']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for action in spec.get('actions', []):
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
# Make the postbuild step depend on the output of ld or ar from this
# target. Apparently putting the script step after the link step isn't
# sufficient to ensure proper ordering in all cases. With an input
# declared but no outputs, the script step should run every time, as
# desired.
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
library_dir = posixpath.dirname(library)
if library_dir not in xcode_standard_library_dirs and (
not xct.HasBuildSetting(_library_search_paths_var) or
library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
xct.AppendBuildSetting(_library_search_paths_var, library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
for library_dir in configuration.get('library_dirs', []):
if library_dir not in xcode_standard_library_dirs and (
not xcbc.HasBuildSetting(_library_search_paths_var) or
library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)):
xcbc.AppendBuildSetting(_library_search_paths_var, library_dir)
if 'defines' in configuration:
for define in configuration['defines']:
set_define = EscapeXcodeDefine(define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in configuration:
config_ref = pbxp.AddOrGetFileInRootGroup(
configuration['xcode_config_file'])
xcbc.SetBaseConfiguration(config_ref)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
|
mit
|
kasioumis/invenio
|
invenio/modules/formatter/api.py
|
6
|
13012
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Formatter API."""
import zlib
from sqlalchemy.exc import SQLAlchemyError
from invenio.ext.sqlalchemy import db
from invenio.modules.records.models import Record as Bibrec
from invenio.modules.search.models import Tag
from invenio.utils.date import convert_datetime_to_utc_string, strftime
from .models import Format, Formatname, Bibfmt
def get_creation_date(sysno, fmt="%Y-%m-%dT%H:%M:%SZ"):
"""
Returns the creation date of the record 'sysno'.
:param sysno: the record ID for which we want to retrieve creation date
:param fmt: output format for the returned date
:return: creation date of the record
:rtype: string
"""
try:
return convert_datetime_to_utc_string(
Bibrec.query.get(sysno).creation_date, fmt)
except SQLAlchemyError:
return ""
def get_modification_date(sysno, fmt="%Y-%m-%dT%H:%M:%SZ"):
"""
Returns the date of last modification for the record 'sysno'.
:param sysno: the record ID for which we want to retrieve modification date
:param fmt: output format for the returned date
:return: modification date of the record
:rtype: string
"""
try:
return convert_datetime_to_utc_string(
Bibrec.query.get(sysno).modification_date, fmt)
except SQLAlchemyError:
return ""
# XML Marc related functions
def get_tag_from_name(name):
"""
Returns the marc code corresponding to the given name
:param name: name for which we want to retrieve the tag
:return: a tag corresponding to X{name} or None if not found
"""
try:
return Tag.query.filter(Tag.name.like(name)).one().value
except SQLAlchemyError:
return None
def get_tags_from_name(name):
"""
Returns the marc codes corresponding to the given name,
ordered by value
:param name: name for which we want to retrieve the tags
:return: list of tags corresponding to X{name} or None if not found
"""
try:
return [tag.value for tag in
Tag.query.filter(Tag.name.like(name))
.order_by(Tag.value).all()]
except SQLAlchemyError:
return None
def tag_exists_for_name(name):
"""
Returns True if a tag exists for name in 'tag' table.
:param name: name for which we want to check if a tag exists
:return: True if a tag exists for X{name}, False otherwise
"""
return (Tag.query.filter(Tag.name.like(name)).count() > 0)
def get_name_from_tag(tag):
"""
Returns the name corresponding to a marc code
:param tag: tag to consider
:return: a name corresponding to X{tag}
"""
try:
return Tag.query.filter(Tag.value.like(tag)).one().name
except SQLAlchemyError:
return None
def name_exists_for_tag(tag):
"""
Returns True if a name exists for tag in 'tag' table.
:param tag: tag for which we want to check if a name exists
:return: True if a name exists for X{tag}, False otherwise
"""
return (Tag.query.filter(Tag.value.like(tag)).count() > 0)
def get_all_name_tag_mappings():
"""
Return the list of mappings name<->tag from 'tag' table.
The returned object is a dict with name as key (if two names are the same,
we take the value of one of them, as we cannot tell them apart
in format templates)
:return: a dict containing list of mapping in 'tag' table
"""
result = dict()
for tag in Tag.query.all():
result[tag.name] = tag.value
return result
# Output formats related functions
def get_format_by_code(code):
"""
Returns the output format object given by code in the database.
Output formats are located inside 'format' table
:param code: the code of an output format
:return: Format object with given ID. None if not found
"""
f_code = code
if len(code) > 6:
f_code = code[:6]
try:
return Format.query.filter(Format.code == f_code.lower()).one()
except SQLAlchemyError:
return None
def get_format_property(code, property_name, default_value=None):
"""
Returns the value of a property of the output format given by code.
If code or property does not exist, return default_value
:param code: the code of the output format to get the value from
:param property_name: name of property to return
:param default_value: value to be returned if format not found
:return: output format property value
"""
return getattr(get_format_by_code(code), property_name, default_value)
def set_format_property(code, property_name, value):
"""
Sets the property of an output format, given by its code
If 'code' does not exist, create format
:param code: the code of the output format to update
:param property_name: name of property to set
:param value: value to assign
"""
format = get_format_by_code(code)
if format is None:
format = Format()
setattr(format, property_name, value)
if property_name == 'name':
format.set_name(value)
db.session.add(format)
db.session.commit()
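# Illustrative usage only (the 'hb' code is hypothetical; per the docstring
# above, a format is created when 'code' does not exist yet):
#
#     set_format_property('hb', 'description', 'HTML brief')
#     get_format_property('hb', 'description', default_value='')  # -> 'HTML brief'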
def get_output_format_id(code):
"""
Returns the id of output format given by code in the database.
Output formats are located inside 'format' table
:param code: the code of an output format
:return: the id in the database of the output format. None if not found
"""
return get_format_property(code, 'id', None)
def add_output_format(code, name="", description="",
content_type="text/html", visibility=1):
"""
Add output format into format table.
If format with given code already exists, do nothing
:param code: the code of the new format
:param name: a name for the new format
:param description: a description for the new format
:param content_type: the content_type (if applicable)
of the new output format
:param visibility: if the output format is shown to users (1) or not (0)
:return: None
"""
format = get_format_by_code(code)
if format is None:
format = Format()
format.code = code.lower()
format.description = description
format.content_type = content_type
format.visibility = visibility
format.set_name(name)
db.session.add(format)
db.session.commit()
def remove_output_format(code):
"""
Removes the output format with 'code'
If code does not exist in database, do nothing
The function also removes all localized names in formatname table
:param code: the code of the output format to remove
:return: None
"""
format = get_format_by_code(code)
if format is not None:
db.session.query(Formatname)\
.filter(Formatname.id_format == format.id).delete()
db.session.delete(format)
db.session.commit()
def get_output_format_description(code):
"""
Returns the description of the output format given by code
If code or description does not exist, return empty string
:param code: the code of the output format to get the description from
:return: output format description
"""
return get_format_property(code, 'description', '')
def set_output_format_description(code, description):
"""
Sets the description of an output format, given by its code
If 'code' does not exist, create format
:param code: the code of the output format to update
:param description: the new description
:return: None
"""
set_format_property(code, 'description', description)
def get_output_format_visibility(code):
"""
Returns the visibility of the output format, given by its code
If code does not exist, return 0
:param code: the code of an output format
:return: output format visibility (0 if not visible, 1 if visible)
"""
visibility = get_format_property(code, 'visibility', 0)
if visibility is not None and int(visibility) in range(0, 2):
return int(visibility)
else:
return 0
def set_output_format_visibility(code, visibility):
"""
Sets the visibility of an output format, given by its code
If 'code' does not exist, create format
:param code: the code of the output format to update
:param visibility: the new visibility (0: not visible, 1: visible)
:return: None
"""
set_format_property(code, 'visibility', visibility)
def get_output_format_content_type(code):
"""
Returns the content_type of the output format given by code
If code or content_type does not exist, return empty string
:param code: the code of the output format to get the description from
:return: output format content_type
"""
return get_format_property(code, 'content_type', '') or ''
def set_output_format_content_type(code, content_type):
"""
Sets the content_type of an output format, given by its code
If 'code' does not exist, create format
:param code: the code of the output format to update
:param content_type: the content type for the format
:return: None
"""
set_format_property(code, 'content_type', content_type)
def set_output_format_name(code, name, lang="generic", type='ln'):
"""
Sets the name of an output format given by code.
If 'type' is different from 'ln' or 'sn', do nothing.
If 'name' exceeds 256 chars, 'name' is truncated to its first 256 chars.
If 'code' does not correspond to an existing output format,
create the format if "generic" is given as lang.
The localized names of output formats are located in formatname table.
:param code: the code of an output format
:param type: either 'ln' (for long name) or 'sn' (for short name)
:param lang: the language in which the name is given
:param name: the name to give to the output format
:return: None
"""
if type.lower() != "sn" and type.lower() != "ln":
return
format = get_format_by_code(code)
if format is None and lang == "generic" and type.lower() == "ln":
# Create output format inside table if it did not exist
# Happens when the output format was not added through the web interface
format = Format()
if format is not None:
format.set_name(name, lang, type)
def change_output_format_code(old_code, new_code):
"""
Change the code of an output format
:param old_code: the code of the output format to change
:param new_code: the new code
:return: None
"""
set_format_property(old_code, 'code', new_code.lower())
def get_preformatted_record(recID, of, decompress=zlib.decompress):
"""
Returns the preformatted record with id 'recID' and format 'of'
If corresponding record does not exist for given output format,
returns None
:param recID: the id of the record to fetch
:param of: the output format code
:param decompress: the method used to decompress
the preformatted record in database
:return: formatted record as String, or None if it does not exist
"""
try:
value = Bibfmt.query\
.filter(Bibfmt.id_bibrec == recID)\
.filter(Bibfmt.format == of)\
.one().value
return str(decompress(value))
except SQLAlchemyError:
return None
# Decide whether to use DB slave:
# if of in ('xm', 'recstruct'):
# run_on_slave = False # for master formats, use DB master
# else:
# run_on_slave = True # for other formats, we can use DB slave
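# Illustrative usage only (record id 10 and format 'xm' are hypothetical and
# assumed to have a cached entry in bibfmt):
#
#     xml = get_preformatted_record(10, 'xm')  # decompressed with zlib
#     raw = get_preformatted_record(10, 'xm', decompress=lambda v: v)  # raw blob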
def get_preformatted_record_date(recID, of):
"""
Returns the date of the last update of the cache for the considered
preformatted record in bibfmt
If corresponding record does not exist for given output format,
returns None
:param recID: the id of the record to fetch
:param of: the output format code
:return: the date of the last update of the cache, or None if it does not exist
"""
try:
last_updated = Bibfmt.query\
.filter(Bibfmt.id_bibrec == recID)\
.filter(Bibfmt.format == of)\
.one().last_updated
return strftime("%Y-%m-%d %H:%M:%S", last_updated)
except SQLAlchemyError:
return None
|
gpl-2.0
|
MichaelNedzelsky/intellij-community
|
python/lib/Lib/encodings/mac_centeuro.py
|
593
|
14358
|
""" Python Character Mapping Codec mac_centeuro generated from 'MAPPINGS/VENDORS/APPLE/CENTEURO.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-centeuro',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
u'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
u'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
u'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
u'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
u'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
u'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
u'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
u'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
u'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
u'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
u'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
u'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
u'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
u'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
u'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
u'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
u'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
u'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
u'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
u'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
u'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
u'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
u'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
u'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
u'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
u'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
u'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
u'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
u'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
u'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
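# Illustrative round trip only (assumes this module is reachable through the
# standard 'encodings' codec search, as in a stock Python 2 install):
#
#     u'\u0104'.encode('mac-centeuro')   # -> '\x84'
#     '\x84'.decode('mac-centeuro')      # -> u'\u0104'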
|
apache-2.0
|
zonk1024/moto
|
tests/test_swf/models/test_domain.py
|
3
|
4032
|
from collections import namedtuple
from sure import expect
from moto.swf.exceptions import SWFUnknownResourceFault
from moto.swf.models import Domain
# Fake WorkflowExecution for test purposes
WorkflowExecution = namedtuple(
"WorkflowExecution",
["workflow_id", "run_id", "execution_status", "open"]
)
def test_domain_short_dict_representation():
domain = Domain("foo", "52")
domain.to_short_dict().should.equal({"name":"foo", "status":"REGISTERED"})
domain.description = "foo bar"
domain.to_short_dict()["description"].should.equal("foo bar")
def test_domain_full_dict_representation():
domain = Domain("foo", "52")
domain.to_full_dict()["domainInfo"].should.equal(domain.to_short_dict())
_config = domain.to_full_dict()["configuration"]
_config["workflowExecutionRetentionPeriodInDays"].should.equal("52")
def test_domain_string_representation():
domain = Domain("my-domain", "60")
str(domain).should.equal("Domain(name: my-domain, status: REGISTERED)")
def test_domain_add_to_activity_task_list():
domain = Domain("my-domain", "60")
domain.add_to_activity_task_list("foo", "bar")
domain.activity_task_lists.should.equal({
"foo": ["bar"]
})
def test_domain_activity_tasks():
domain = Domain("my-domain", "60")
domain.add_to_activity_task_list("foo", "bar")
domain.add_to_activity_task_list("other", "baz")
sorted(domain.activity_tasks).should.equal(["bar", "baz"])
def test_domain_add_to_decision_task_list():
domain = Domain("my-domain", "60")
domain.add_to_decision_task_list("foo", "bar")
domain.decision_task_lists.should.equal({
"foo": ["bar"]
})
def test_domain_decision_tasks():
domain = Domain("my-domain", "60")
domain.add_to_decision_task_list("foo", "bar")
domain.add_to_decision_task_list("other", "baz")
sorted(domain.decision_tasks).should.equal(["bar", "baz"])
def test_domain_get_workflow_execution():
domain = Domain("my-domain", "60")
wfe1 = WorkflowExecution(workflow_id="wf-id-1", run_id="run-id-1", execution_status="OPEN", open=True)
wfe2 = WorkflowExecution(workflow_id="wf-id-1", run_id="run-id-2", execution_status="CLOSED", open=False)
wfe3 = WorkflowExecution(workflow_id="wf-id-2", run_id="run-id-3", execution_status="OPEN", open=True)
wfe4 = WorkflowExecution(workflow_id="wf-id-3", run_id="run-id-4", execution_status="CLOSED", open=False)
domain.workflow_executions = [wfe1, wfe2, wfe3, wfe4]
# get workflow execution through workflow_id and run_id
domain.get_workflow_execution("wf-id-1", run_id="run-id-1").should.equal(wfe1)
domain.get_workflow_execution("wf-id-1", run_id="run-id-2").should.equal(wfe2)
domain.get_workflow_execution("wf-id-3", run_id="run-id-4").should.equal(wfe4)
domain.get_workflow_execution.when.called_with(
"wf-id-1", run_id="non-existent"
).should.throw(
SWFUnknownResourceFault,
"Unknown execution: WorkflowExecution=[workflowId=wf-id-1, runId=non-existent]"
)
# get OPEN workflow execution by default if no run_id
domain.get_workflow_execution("wf-id-1").should.equal(wfe1)
domain.get_workflow_execution.when.called_with(
"wf-id-3"
).should.throw(
SWFUnknownResourceFault, "Unknown execution, workflowId = wf-id-3"
)
domain.get_workflow_execution.when.called_with(
"wf-id-non-existent"
).should.throw(
SWFUnknownResourceFault, "Unknown execution, workflowId = wf-id-non-existent"
)
# raise_if_closed attribute
domain.get_workflow_execution("wf-id-1", run_id="run-id-1", raise_if_closed=True).should.equal(wfe1)
domain.get_workflow_execution.when.called_with(
"wf-id-3", run_id="run-id-4", raise_if_closed=True
).should.throw(
SWFUnknownResourceFault,
"Unknown execution: WorkflowExecution=[workflowId=wf-id-3, runId=run-id-4]"
)
# raise_if_none attribute
domain.get_workflow_execution("foo", raise_if_none=False).should.be.none
|
apache-2.0
|
NewpTone/stacklab-nova
|
nova/tests/compute/test_compute.py
|
3
|
234769
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute service"""
import base64
import copy
import datetime
import functools
import sys
import time
import mox
import nova
from nova import compute
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common.notifier import test_notifier
from nova.openstack.common import policy as common_policy
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
import nova.policy
from nova import quota
from nova import test
from nova.tests.compute import fake_resource_tracker
from nova.tests.db.fakes import FakeModel
from nova.tests import fake_network
from nova.tests.image import fake as fake_image
from nova import utils
import nova.volume
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
FAKE_IMAGE_REF = 'fake-image-ref'
def nop_report_driver_status(self):
pass
class FakeSchedulerAPI(object):
def run_instance(self, ctxt, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
filter_properties):
pass
def live_migration(self, ctxt, block_migration, disk_over_commit,
instance, dest):
pass
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
notification_driver=[test_notifier.__name__],
network_manager='nova.network.manager.FlatManager')
self.compute = importutils.import_object(FLAGS.compute_manager)
# override tracker with a version that doesn't need the database:
self.compute.resource_tracker = \
fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver)
self.compute.update_available_resource(
context.get_admin_context())
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id)
test_notifier.NOTIFICATIONS = []
def fake_show(meh, context, id):
return {'id': id, 'min_disk': None, 'min_ram': None,
'name': 'fake_name',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id',
'something_else': 'meow'}}
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
fake_rpcapi = FakeSchedulerAPI()
self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi)
fake_network.set_stub_network_methods(self.stubs)
def tearDown(self):
fake_image.FakeImageService_reset()
instances = db.instance_get_all(self.context.elevated())
notifier_api._reset_drivers()
for instance in instances:
db.instance_destroy(self.context.elevated(), instance['uuid'])
super(BaseTestCase, self).tearDown()
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
"""Create a test instance"""
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
type_id = instance_types.get_instance_type_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
inst.update(params)
_create_service_entries(self.context.elevated(),
{'fake_zone': [inst['host']]})
return db.instance_create(self.context, inst)
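# Illustrative usage only: values passed in 'params' override the defaults
# set above, e.g. to build a larger fake instance for a single test:
#
#     instance = self._create_fake_instance({'memory_mb': 2048, 'vcpus': 2})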
def _create_instance(self, params=None, type_name='m1.tiny'):
"""Create a test instance. Returns uuid"""
return self._create_fake_instance(params, type_name=type_name)
def _create_instance_type(self, params=None):
"""Create a test instance type"""
if not params:
params = {}
context = self.context.elevated()
inst = {}
inst['name'] = 'm1.small'
inst['memory_mb'] = 1024
inst['vcpus'] = 1
inst['root_gb'] = 20
inst['ephemeral_gb'] = 10
inst['flavorid'] = '1'
inst['swap'] = 2048
inst['rxtx_factor'] = 1
inst.update(params)
return db.instance_type_create(context, inst)['id']
def _create_group(self):
values = {'name': 'testgroup',
'description': 'testgroup',
'user_id': self.user_id,
'project_id': self.project_id}
return db.security_group_create(self.context, values)
class ComputeTestCase(BaseTestCase):
def setUp(self):
def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
spectacular=True)
super(ComputeTestCase, self).setUp()
self.stubs.Set(nova.network.API, 'get_instance_nw_info',
fake_get_nw_info)
self.stubs.Set(nova.network.API, 'allocate_for_instance',
fake_get_nw_info)
self.compute_api = compute.API()
def tearDown(self):
super(ComputeTestCase, self).tearDown()
timeutils.clear_time_override()
def test_wrap_instance_fault(self):
inst = {"uuid": "fake_uuid"}
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@nova.compute.manager.wrap_instance_fault
def failer(self2, context, instance):
raise NotImplementedError()
self.assertRaises(NotImplementedError, failer,
self.compute, self.context, instance=inst)
self.assertTrue(called['fault_added'])
def test_wrap_instance_fault_no_instance(self):
inst_uuid = "fake_uuid"
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@nova.compute.manager.wrap_instance_fault
def failer(self2, context, instance_uuid):
raise exception.InstanceNotFound()
self.assertRaises(exception.InstanceNotFound, failer,
self.compute, self.context, inst_uuid)
self.assertFalse(called['fault_added'])
def test_create_instance_with_img_ref_associates_config_drive(self):
"""Make sure create associates a config drive."""
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'config_drive': '1234', }))
try:
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(context.get_admin_context())
instance = instances[0]
self.assertTrue(instance.config_drive)
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_associates_config_drive(self):
"""Make sure create associates a config drive."""
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'config_drive': '1234', }))
try:
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(context.get_admin_context())
instance = instances[0]
self.assertTrue(instance.config_drive)
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_unlimited_memory(self):
"""Default of memory limit=None is unlimited"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.compute.resource_tracker.update_available_resource(self.context)
params = {"memory_mb": 999999999999}
filter_properties = {'limits': {'memory_mb': None}}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
self.assertEqual(999999999999,
self.compute.resource_tracker.compute_node['memory_mb_used'])
def test_create_instance_unlimited_disk(self):
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.compute.resource_tracker.update_available_resource(self.context)
params = {"root_gb": 999999999999,
"ephemeral_gb": 99999999999}
filter_properties = {'limits': {'disk_gb': None}}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
def test_create_multiple_instances_then_starve(self):
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.compute.resource_tracker.update_available_resource(self.context)
filter_properties = {'limits': {'memory_mb': 4096, 'disk_gb': 1000}}
params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
self.assertEquals(1024,
self.compute.resource_tracker.compute_node['memory_mb_used'])
self.assertEquals(256,
self.compute.resource_tracker.compute_node['local_gb_used'])
params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
self.assertEquals(3072,
self.compute.resource_tracker.compute_node['memory_mb_used'])
self.assertEquals(768,
self.compute.resource_tracker.compute_node['local_gb_used'])
params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
instance = self._create_fake_instance(params)
self.assertRaises(exception.ComputeResourcesUnavailable,
self.compute.run_instance, self.context, instance=instance,
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_ram(self):
"""Test passing of oversubscribed ram policy from the scheduler."""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.compute.resource_tracker.update_available_resource(self.context)
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource()
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
instance_mb = int(total_mem_mb * 1.45)
# build an instance, specifying an amount of memory that exceeds
# total_mem_mb, but is less than the oversubscribed limit:
params = {"memory_mb": instance_mb, "root_gb": 128,
"ephemeral_gb": 128}
instance = self._create_fake_instance(params)
limits = {'memory_mb': oversub_limit_mb}
filter_properties = {'limits': limits}
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
self.assertEqual(instance_mb,
self.compute.resource_tracker.compute_node['memory_mb_used'])
def test_create_instance_with_oversubscribed_ram_fail(self):
"""Test passing of oversubscribed ram policy from the scheduler, but
with insufficient memory.
"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.compute.resource_tracker.update_available_resource(self.context)
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource()
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
instance_mb = int(total_mem_mb * 1.55)
# build an instance, specifying an amount of memory that exceeds
# total_mem_mb, but is less than the oversubscribed limit:
params = {"memory_mb": instance_mb, "root_gb": 128,
"ephemeral_gb": 128}
instance = self._create_fake_instance(params)
filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}
self.assertRaises(exception.ComputeResourcesUnavailable,
self.compute.run_instance, self.context, instance=instance,
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_cpu(self):
"""Test passing of oversubscribed cpu policy from the scheduler."""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.compute.resource_tracker.update_available_resource(self.context)
limits = {'vcpu': 3}
filter_properties = {'limits': limits}
# get vcpu count as reported by virt driver:
resources = self.compute.driver.get_available_resource()
self.assertEqual(1, resources['vcpus'])
# build an instance, specifying more vcpus than are physically available
# but less than the oversubscribed vcpu limit:
params = {"memory_mb": 10, "root_gb": 1,
"ephemeral_gb": 1, "vcpus": 2}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
self.assertEqual(2,
self.compute.resource_tracker.compute_node['vcpus_used'])
# create one more instance:
params = {"memory_mb": 10, "root_gb": 1,
"ephemeral_gb": 1, "vcpus": 1}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
self.assertEqual(3,
self.compute.resource_tracker.compute_node['vcpus_used'])
# delete the instance:
instance['vm_state'] = vm_states.DELETED
self.compute.resource_tracker.update_usage(self.context,
instance=instance)
self.assertEqual(2,
self.compute.resource_tracker.compute_node['vcpus_used'])
# now oversubscribe vcpus and fail:
params = {"memory_mb": 10, "root_gb": 1,
"ephemeral_gb": 1, "vcpus": 2}
instance = self._create_fake_instance(params)
limits = {'vcpu': 3}
filter_properties = {'limits': limits}
self.assertRaises(exception.ComputeResourcesUnavailable,
self.compute.run_instance, self.context, instance=instance,
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_disk(self):
"""Test passing of oversubscribed disk policy from the scheduler."""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.compute.resource_tracker.update_available_resource(self.context)
# get total disk as reported by virt driver:
resources = self.compute.driver.get_available_resource()
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
instance_gb = int(total_disk_gb * 1.45)
# build an instance, specifying an amount of disk that exceeds
# total_disk_gb, but is less than the oversubscribed limit:
params = {"root_gb": instance_gb, "memory_mb": 10}
instance = self._create_fake_instance(params)
limits = {'disk_gb': oversub_limit_gb}
filter_properties = {'limits': limits}
self.compute.run_instance(self.context, instance=instance,
filter_properties=filter_properties)
self.assertEqual(instance_gb,
self.compute.resource_tracker.compute_node['local_gb_used'])
def test_create_instance_with_oversubscribed_disk_fail(self):
"""Test passing of oversubscribed disk policy from the scheduler, but
with insufficient disk.
"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.compute.resource_tracker.update_available_resource(self.context)
# get total disk as reported by virt driver:
resources = self.compute.driver.get_available_resource()
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
instance_gb = int(total_disk_gb * 1.55)
# build an instance, specifying an amount of disk that exceeds
# total_disk_gb, but is less than the oversubscribed limit:
params = {"root_gb": instance_gb, "memory_mb": 10}
instance = self._create_fake_instance(params)
limits = {'disk_gb': oversub_limit_gb}
filter_properties = {'limits': limits}
self.assertRaises(exception.ComputeResourcesUnavailable,
self.compute.run_instance, self.context, instance=instance,
filter_properties=filter_properties)
def test_default_access_ip(self):
self.flags(default_access_ip_network_name='test1')
fake_network.unset_stub_network_methods(self.stubs)
instance = jsonutils.to_primitive(self._create_fake_instance())
try:
self.compute.run_instance(self.context, instance=instance,
is_first_time=True)
instances = db.instance_get_all(context.get_admin_context())
instance = instances[0]
self.assertEqual(instance.access_ip_v4, '192.168.1.100')
self.assertEqual(instance.access_ip_v6, '2001:db8:0:1::1')
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_no_default_access_ip(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
try:
self.compute.run_instance(self.context, instance=instance,
is_first_time=True)
instances = db.instance_get_all(context.get_admin_context())
instance = instances[0]
self.assertFalse(instance.access_ip_v4)
self.assertFalse(instance.access_ip_v6)
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_fail_to_schedule_persists(self):
"""check the persistence of the ERROR(scheduling) state"""
self._create_instance(params={'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING})
#check state is failed even after the periodic poll
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING})
def test_run_instance_setup_block_device_mapping_fail(self):
""" block device mapping failure test.
Make sure that when there is a block device mapping problem,
the instance goes to ERROR state, keeping the task state
"""
def fake(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(nova.compute.manager.ComputeManager,
'_setup_block_device_mapping', fake)
instance = self._create_instance()
self.assertRaises(test.TestingException, self.compute.run_instance,
self.context, instance=instance)
#check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
def test_run_instance_spawn_fail(self):
""" spawn failure test.
Make sure that when there is a spawning problem,
the instance goes to ERROR state, keeping the task state"""
def fake(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'spawn', fake)
instance = self._create_instance()
self.assertRaises(test.TestingException, self.compute.run_instance,
self.context, instance=instance)
#check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
def test_can_terminate_on_error_state(self):
"""Make sure that the instance can be terminated in ERROR state"""
elevated = context.get_admin_context()
#check failed to schedule --> terminate
instance = self._create_instance(params={'vm_state': vm_states.ERROR})
self.compute.terminate_instance(self.context, instance=instance)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
elevated, instance['uuid'])
def test_run_terminate(self):
"""Make sure it is possible to run and terminate instance"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context, instance=instance)
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
def test_run_terminate_with_vol_attached(self):
"""Make sure it is possible to run and terminate instance with volume
attached
"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
def fake_check_attach(*args, **kwargs):
pass
def fake_reserve_volume(*args, **kwargs):
pass
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
self.stubs.Set(nova.volume.api.API, 'get', fake_volume_get)
self.stubs.Set(nova.volume.api.API, 'check_attach', fake_check_attach)
self.stubs.Set(nova.volume.api.API, 'reserve_volume',
fake_reserve_volume)
self.compute_api.attach_volume(self.context, instance, 1,
'/dev/vdc')
self.compute.terminate_instance(self.context, instance=instance)
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
bdms = db.block_device_mapping_get_all_by_instance(self.context,
instance['uuid'])
self.assertEqual(len(bdms), 0)
def test_terminate_no_network(self):
# This is as reported in LP bug 1008875
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
# Make it look like there is no instance
self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
self.compute._get_instance_nw_info(
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(exception.NetworkNotFound())
self.mox.ReplayAll()
self.compute.terminate_instance(self.context, instance=instance)
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
def test_terminate_failure_leaves_task_state(self):
"""Ensure that a failure in terminate_instance does not result
in the task state being reverted from DELETING (see LP 1046236).
"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
# Network teardown fails ungracefully
self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
self.compute._get_instance_nw_info(
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(TypeError())
self.mox.ReplayAll()
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.DELETING})
try:
self.compute.terminate_instance(self.context, instance=instance)
except TypeError:
pass
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['task_state'], 'deleting')
def test_run_terminate_timestamps(self):
"""Make sure timestamps are set for launched and destroyed"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.assertEqual(instance['launched_at'], None)
self.assertEqual(instance['deleted_at'], None)
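# Capture a timestamp just before launch so launched_at can be bounded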
launch = timeutils.utcnow()
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assert_(instance['launched_at'] > launch)
self.assertEqual(instance['deleted_at'], None)
terminate = timeutils.utcnow()
self.compute.terminate_instance(self.context, instance=instance)
context = self.context.elevated(read_deleted="only")
instance = db.instance_get_by_uuid(context, instance['uuid'])
self.assert_(instance['launched_at'] < terminate)
self.assert_(instance['deleted_at'] > terminate)
def test_stop(self):
"""Ensure instance can be stopped"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.STOPPING})
self.compute.stop_instance(self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_start(self):
"""Ensure instance can be started"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.STOPPING})
self.compute.stop_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.STARTING})
self.compute.start_instance(self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_rescue(self):
"""Ensure instance can be rescued and unrescued"""
called = {'rescued': False,
'unrescued': False}
def fake_rescue(self, context, instance_ref, network_info, image_meta,
rescue_password):
called['rescued'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
def fake_unrescue(self, instance_ref, network_info):
called['unrescued'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
fake_unrescue)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.RESCUING})
self.compute.rescue_instance(self.context, instance=instance)
self.assertTrue(called['rescued'])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.UNRESCUING})
self.compute.unrescue_instance(self.context, instance=instance)
self.assertTrue(called['unrescued'])
self.compute.terminate_instance(self.context, instance=instance)
def test_power_on(self):
"""Ensure instance can be powered on"""
called = {'power_on': False}
def fake_driver_power_on(self, instance):
called['power_on'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on',
fake_driver_power_on)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_ON})
self.compute.power_on_instance(self.context, instance=instance)
self.assertTrue(called['power_on'])
self.compute.terminate_instance(self.context, instance=instance)
def test_power_off(self):
"""Ensure instance can be powered off"""
called = {'power_off': False}
def fake_driver_power_off(self, instance):
called['power_off'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
fake_driver_power_off)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
self.compute.power_off_instance(self.context, instance=instance)
self.assertTrue(called['power_off'])
self.compute.terminate_instance(self.context, instance=instance)
def test_pause(self):
"""Ensure instance can be paused and unpaused"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.PAUSING})
self.compute.pause_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.UNPAUSING})
self.compute.unpause_instance(self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_suspend(self):
"""ensure instance can be suspended and resumed"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.SUSPENDING})
self.compute.suspend_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.RESUMING})
self.compute.resume_instance(self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_suspend_error(self):
"""Ensure vm_state is ERROR when suspend error occurs"""
def fake(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'suspend', fake)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(test.TestingException,
self.compute.suspend_instance,
self.context,
instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ERROR)
self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild(self):
"""Ensure instance can be rebuilt"""
instance = jsonutils.to_primitive(self._create_fake_instance())
image_ref = instance['image_ref']
sys_metadata = db.instance_system_metadata_get(self.context,
instance['uuid'])
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context, instance,
image_ref, image_ref,
injected_files=[],
new_pass="new_password",
orig_sys_metadata=sys_metadata)
self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild_launch_time(self):
"""Ensure instance can be rebuilt"""
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
image_ref = instance['image_ref']
self.compute.run_instance(self.context, instance=instance)
timeutils.set_time_override(cur_time)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context, instance,
image_ref, image_ref,
injected_files=[],
new_pass="new_password")
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEquals(cur_time, instance['launched_at'])
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_reboot_soft(self):
"""Ensure instance can be soft rebooted"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{'task_state': task_states.REBOOTING})
reboot_type = "SOFT"
self.compute.reboot_instance(self.context,
instance=instance,
reboot_type=reboot_type)
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
self.assertEqual(inst_ref['task_state'], None)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_reboot_hard(self):
"""Ensure instance can be hard rebooted"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{'task_state': task_states.REBOOTING_HARD})
reboot_type = "HARD"
self.compute.reboot_instance(self.context, instance=instance,
reboot_type=reboot_type)
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
self.assertEqual(inst_ref['task_state'], None)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_set_admin_password(self):
"""Ensure instance can have its admin password set"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{'task_state': task_states.UPDATING_PASSWORD})
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'], task_states.UPDATING_PASSWORD)
self.compute.set_admin_password(self.context, instance=instance)
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'], None)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_set_admin_password_bad_state(self):
"""Test setting password while instance is rebuilding."""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'], {
"power_state": power_state.NOSTATE,
})
instance = jsonutils.to_primitive(db.instance_get_by_uuid(
self.context, instance['uuid']))
self.assertEqual(instance['power_state'], power_state.NOSTATE)
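# Make the driver report NOSTATE so the password change is rejected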
def fake_driver_get_info(self2, _instance):
return {'state': power_state.NOSTATE,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
'cpu_time': 0}
self.stubs.Set(nova.virt.fake.FakeDriver, 'get_info',
fake_driver_get_info)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.UPDATING_PASSWORD})
self.assertRaises(exception.InstancePasswordSetFailed,
self.compute.set_admin_password,
self.context,
instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def _do_test_set_admin_password_driver_error(self, exc, expected_vm_state,
expected_task_state):
"""Ensure expected exception is raised if set_admin_password fails"""
def fake_sleep(_time):
pass
self.stubs.Set(time, 'sleep', fake_sleep)
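# Have the fake driver raise the supplied exception from set_admin_password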
def fake_driver_set_pass(self2, _instance, _pwd):
raise exc
self.stubs.Set(nova.virt.fake.FakeDriver, 'set_admin_password',
fake_driver_set_pass)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{'task_state': task_states.UPDATING_PASSWORD})
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'], task_states.UPDATING_PASSWORD)
# An error raised from the driver should not reveal internal information,
# so a new error is raised instead
self.assertRaises(exception.InstancePasswordSetFailed,
self.compute.set_admin_password,
self.context,
instance=jsonutils.to_primitive(inst_ref))
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['vm_state'], expected_vm_state)
self.assertEqual(inst_ref['task_state'], expected_task_state)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_set_admin_password_driver_not_authorized(self):
"""
Ensure expected exception is raised if set_admin_password not
authorized.
"""
exc = exception.NotAuthorized(_('Internal error'))
self._do_test_set_admin_password_driver_error(exc,
vm_states.ERROR,
None)
def test_set_admin_password_driver_not_implemented(self):
"""
Ensure expected exception is raised if set_admin_password not
implemented by driver.
"""
exc = NotImplementedError()
self._do_test_set_admin_password_driver_error(exc,
vm_states.ACTIVE,
None)
def test_inject_file(self):
"""Ensure we can write a file to an instance"""
called = {'inject': False}
def fake_driver_inject_file(self2, instance, path, contents):
self.assertEqual(path, "/tmp/test")
self.assertEqual(contents, "File Contents")
called['inject'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_file',
fake_driver_inject_file)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.compute.inject_file(self.context, "/tmp/test",
"File Contents", instance=instance)
self.assertTrue(called['inject'])
self.compute.terminate_instance(self.context, instance=instance)
def test_inject_network_info(self):
"""Ensure we can inject network info"""
called = {'inject': False}
def fake_driver_inject_network(self, instance, network_info):
called['inject'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_network_info',
fake_driver_inject_network)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.compute.inject_network_info(self.context, instance=instance)
self.assertTrue(called['inject'])
self.compute.terminate_instance(self.context, instance=instance)
def test_reset_network(self):
"""Ensure we can reset networking on an instance"""
called = {'count': 0}
def fake_driver_reset_network(self, instance):
called['count'] += 1
self.stubs.Set(nova.virt.fake.FakeDriver, 'reset_network',
fake_driver_reset_network)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
self.compute.reset_network(self.context, instance=instance)
self.assertEqual(called['count'], 1)
self.compute.terminate_instance(self.context, instance=instance)
def test_snapshot(self):
"""Ensure instance can be snapshotted"""
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
name = "myfakesnapshot"
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.IMAGE_SNAPSHOT})
self.compute.snapshot_instance(self.context, name, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_snapshot_fails(self):
"""Ensure task_state is set to None if snapshot fails"""
def fake_snapshot(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.IMAGE_SNAPSHOT})
self.assertRaises(test.TestingException,
self.compute.snapshot_instance,
self.context, "failing_snapshot", instance=instance)
self._assert_state({'task_state': None})
self.compute.terminate_instance(self.context, instance=instance)
def _assert_state(self, state_dict):
"""Assert state of VM is equal to state passed as parameter"""
instances = db.instance_get_all(context.get_admin_context())
self.assertEqual(len(instances), 1)
if 'vm_state' in state_dict:
self.assertEqual(state_dict['vm_state'], instances[0]['vm_state'])
if 'task_state' in state_dict:
self.assertEqual(state_dict['task_state'],
instances[0]['task_state'])
if 'power_state' in state_dict:
self.assertEqual(state_dict['power_state'],
instances[0]['power_state'])
def test_console_output(self):
"""Make sure we can get console output from instance"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
output = self.compute.get_console_output(self.context,
instance=instance)
self.assertEqual(output, 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
self.compute.terminate_instance(self.context, instance=instance)
def test_console_output_tail(self):
"""Make sure we can get console output from instance"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
output = self.compute.get_console_output(self.context,
instance=instance, tail_length=2)
self.assertEqual(output, 'ANOTHER\nLAST LINE')
self.compute.terminate_instance(self.context, instance=instance)
def test_novnc_vnc_console(self):
"""Make sure we can a vnc console for an instance."""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
# Try with the full instance
console = self.compute.get_vnc_console(self.context, 'novnc',
instance=instance)
self.assert_(console)
self.compute.terminate_instance(self.context, instance=instance)
def test_xvpvnc_vnc_console(self):
"""Make sure we can a vnc console for an instance."""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
console = self.compute.get_vnc_console(self.context, 'xvpvnc',
instance=instance)
self.assert_(console)
self.compute.terminate_instance(self.context, instance=instance)
def test_invalid_vnc_console_type(self):
"""Raise useful error if console type is an unrecognised string"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_vnc_console,
self.context, 'invalid', instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_missing_vnc_console_type(self):
"""Raise useful error is console type is None"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_vnc_console,
self.context, None, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_diagnostics(self):
"""Make sure we can get diagnostics for an instance."""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
diagnostics = self.compute.get_diagnostics(self.context,
instance=instance)
self.assertEqual(diagnostics, 'FAKE_DIAGNOSTICS')
diagnostics = self.compute.get_diagnostics(self.context,
instance=instance)
self.assertEqual(diagnostics, 'FAKE_DIAGNOSTICS')
self.compute.terminate_instance(self.context, instance=instance)
def test_add_fixed_ip_usage_notification(self):
def dummy(*args, **kwargs):
pass
self.stubs.Set(nova.network.API, 'add_fixed_ip_to_instance',
dummy)
self.stubs.Set(nova.compute.manager.ComputeManager,
'inject_network_info', dummy)
self.stubs.Set(nova.compute.manager.ComputeManager,
'reset_network', dummy)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
self.compute.add_fixed_ip_to_instance(self.context, network_id=1,
instance=instance)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
self.compute.terminate_instance(self.context, instance=instance)
def test_remove_fixed_ip_usage_notification(self):
def dummy(*args, **kwargs):
pass
self.stubs.Set(nova.network.API, 'remove_fixed_ip_from_instance',
dummy)
self.stubs.Set(nova.compute.manager.ComputeManager,
'inject_network_info', dummy)
self.stubs.Set(nova.compute.manager.ComputeManager,
'reset_network', dummy)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
self.compute.remove_fixed_ip_from_instance(self.context, 1,
instance=instance)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
self.compute.terminate_instance(self.context, instance=instance)
def test_run_instance_usage_notification(self):
"""Ensure run instance generates appropriate usage notification"""
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['event_type'], 'compute.instance.create.start')
self.assertEquals(msg['payload']['image_name'], 'fake_name')
# The last event is the one with the sugar in it.
msg = test_notifier.NOTIFICATIONS[1]
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.create.end')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['image_name'], 'fake_name')
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], inst_ref.uuid)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
self.assertEquals(payload['state'], 'active')
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertTrue(payload['launched_at'])
image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_terminate_usage_notification(self):
"""Ensure terminate_instance generates correct usage notification"""
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
test_notifier.NOTIFICATIONS = []
timeutils.set_time_override(cur_time)
self.compute.terminate_instance(self.context, instance=instance)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 4)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.delete.start')
msg1 = test_notifier.NOTIFICATIONS[1]
self.assertEquals(msg1['event_type'],
'compute.instance.shutdown.start')
msg1 = test_notifier.NOTIFICATIONS[2]
self.assertEquals(msg1['event_type'], 'compute.instance.shutdown.end')
msg1 = test_notifier.NOTIFICATIONS[3]
self.assertEquals(msg1['event_type'], 'compute.instance.delete.end')
payload = msg1['payload']
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance['uuid'])
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertTrue('deleted_at' in payload)
self.assertEqual(payload['deleted_at'], str(cur_time))
image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_run_instance_existing(self):
"""Ensure failure when running an instance that already exists"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(exception.Invalid,
self.compute.run_instance,
self.context,
instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_instance_set_to_error_on_uncaught_exception(self):
"""Test that instance is set to error state when exception is raised"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.mox.StubOutWithMock(self.compute.network_api,
"allocate_for_instance")
self.compute.network_api.allocate_for_instance(
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None,
vpn=False).AndRaise(rpc_common.RemoteError())
fake_network.unset_stub_network_methods(self.stubs)
self.mox.ReplayAll()
self.assertRaises(rpc_common.RemoteError,
self.compute.run_instance,
self.context,
instance=instance)
instance = db.instance_get_by_uuid(context.get_admin_context(),
instance['uuid'])
self.assertEqual(vm_states.ERROR, instance['vm_state'])
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_delete_instance_succeeds_on_volume_fail(self):
instance = self._create_fake_instance()
def fake_cleanup_volumes(context, instance):
raise test.TestingException()
self.stubs.Set(self.compute, '_cleanup_volumes',
fake_cleanup_volumes)
self.compute._delete_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_instance_termination_exception_sets_error(self):
"""Test that we handle InstanceTerminationFailure
which is propagated up from the underlying driver.
"""
instance = self._create_fake_instance()
def fake_delete_instance(context, instance):
raise exception.InstanceTerminationFailure(reason='')
self.stubs.Set(self.compute, '_delete_instance',
fake_delete_instance)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
def test_network_is_deallocated_on_spawn_failure(self):
"""When a spawn fails the network must be deallocated"""
instance = jsonutils.to_primitive(self._create_fake_instance())
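# Make block device setup fail so the spawn aborts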
self.mox.StubOutWithMock(self.compute, "_setup_block_device_mapping")
self.compute._setup_block_device_mapping(
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(rpc.common.RemoteError('', '', ''))
self.mox.ReplayAll()
self.assertRaises(rpc.common.RemoteError,
self.compute.run_instance,
self.context, instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
def test_lock(self):
"""ensure locked instance cannot be changed"""
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
non_admin_context = context.RequestContext(None,
None,
is_admin=False)
def check_task_state(task_state):
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_state)
# should fail with locked nonadmin context
self.compute_api.lock(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertRaises(exception.InstanceIsLocked,
self.compute_api.reboot,
non_admin_context, instance, 'SOFT')
check_task_state(None)
# should fail with invalid task state
self.compute_api.unlock(self.context, instance)
instance = db.instance_update(self.context, instance_uuid,
{'task_state': task_states.REBOOTING})
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.reboot,
non_admin_context, instance, 'SOFT')
check_task_state(task_states.REBOOTING)
# should succeed with admin context
instance = db.instance_update(self.context, instance_uuid,
{'task_state': None})
self.compute_api.reboot(self.context, instance, 'SOFT')
check_task_state(task_states.REBOOTING)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def _test_state_revert(self, operation, pre_task_state,
post_task_state):
instance = self._create_fake_instance()
self.compute.run_instance(self.context, instance=instance)
# The API would have set task_state, so do that here to test
# that the state gets reverted on failure
db.instance_update(self.context, instance['uuid'],
{"task_state": pre_task_state})
orig_elevated = self.context.elevated
orig_notify = self.compute._notify_about_instance_usage
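# Stub elevated() and the usage notifier to raise so the operation fails partway through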
def _get_an_exception(*args, **kwargs):
raise Exception("This fails every single time!")
self.stubs.Set(self.context, 'elevated', _get_an_exception)
self.stubs.Set(self.compute,
'_notify_about_instance_usage', _get_an_exception)
raised = False
try:
ret_val = getattr(self.compute, operation)(self.context,
instance=instance)
except Exception:
raised = True
finally:
# self.context.elevated() is called in tearDown()
self.stubs.Set(self.context, 'elevated', orig_elevated)
self.stubs.Set(self.compute,
'_notify_about_instance_usage', orig_notify)
self.assertTrue(raised)
# Fetch the instance's task_state and make sure it went to expected
# post-state
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance["task_state"], post_task_state)
def test_state_revert(self):
"""ensure that task_state is reverted after a failed operation"""
actions = [
("reboot_instance", task_states.REBOOTING, None),
("stop_instance", task_states.STOPPING, None),
("start_instance", task_states.STARTING, None),
("terminate_instance", task_states.DELETING,
task_states.DELETING),
("power_off_instance", task_states.POWERING_OFF, None),
("power_on_instance", task_states.POWERING_ON, None),
("rebuild_instance", task_states.REBUILDING, None),
("set_admin_password", task_states.UPDATING_PASSWORD, None),
("rescue_instance", task_states.RESCUING, None),
("unrescue_instance", task_states.UNRESCUING, None),
("revert_resize", task_states.RESIZE_REVERTING, None),
("prep_resize", task_states.RESIZE_PREP, None),
("resize_instance", task_states.RESIZE_PREP, None),
("pause_instance", task_states.PAUSING, None),
("unpause_instance", task_states.UNPAUSING, None),
("suspend_instance", task_states.SUSPENDING, None),
("resume_instance", task_states.RESUMING, None),
]
for operation, pre_state, post_state in actions:
self._test_state_revert(operation, pre_state, post_state)
def _ensure_quota_reservations_committed(self):
"""Mock up commit of quota reservations"""
reservations = list('fake_res')
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
nova.quota.QUOTAS.commit(mox.IgnoreArg(), reservations)
self.mox.ReplayAll()
return reservations
def _ensure_quota_reservations_rolledback(self):
"""Mock up rollback of quota reservations"""
reservations = list('fake_res')
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
nova.quota.QUOTAS.rollback(mox.IgnoreArg(), reservations)
self.mox.ReplayAll()
return reservations
def test_finish_resize(self):
"""Contrived test to ensure finish_resize doesn't raise anything"""
def fake(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
reservations = self._ensure_quota_reservations_committed()
context = self.context.elevated()
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_type = instance_types.get_default_instance_type()
db.instance_update(self.context, instance["uuid"],
{"task_state": task_states.RESIZE_PREP})
self.compute.prep_resize(context, instance=instance,
instance_type=instance_type,
image={})
migration_ref = db.migration_get_by_instance_and_status(context,
instance['uuid'], 'pre-migrating')
db.instance_update(self.context, instance["uuid"],
{"task_state": task_states.RESIZE_MIGRATED})
self.compute.finish_resize(context,
migration_id=int(migration_ref['id']),
disk_info={}, image={}, instance=instance,
reservations=reservations)
self.compute.terminate_instance(self.context, instance=instance)
def test_finish_resize_handles_error(self):
"""Make sure we don't leave the instance in RESIZE on error"""
def throw_up(*args, **kwargs):
raise test.TestingException()
def fake(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'finish_migration', throw_up)
reservations = self._ensure_quota_reservations_rolledback()
context = self.context.elevated()
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_type = instance_types.get_default_instance_type()
self.compute.prep_resize(context, instance=instance,
instance_type=instance_type,
image={}, reservations=reservations)
migration_ref = db.migration_get_by_instance_and_status(context,
instance['uuid'], 'pre-migrating')
db.instance_update(self.context, instance["uuid"],
{"task_state": task_states.RESIZE_MIGRATED})
self.assertRaises(test.TestingException, self.compute.finish_resize,
context, migration_id=int(migration_ref['id']),
disk_info={}, image={}, instance=instance,
reservations=reservations)
instance = db.instance_get_by_uuid(context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_rebuild_instance_notification(self):
"""Ensure notifications on instance migrate/resize"""
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
inst_ref = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=inst_ref)
timeutils.set_time_override(cur_time)
test_notifier.NOTIFICATIONS = []
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
orig_sys_metadata = db.instance_system_metadata_get(self.context,
inst_ref['uuid'])
image_ref = instance["image_ref"]
new_image_ref = image_ref + '-new_image_ref'
db.instance_update(self.context, inst_ref['uuid'],
{'image_ref': new_image_ref})
password = "new_password"
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context.elevated(),
jsonutils.to_primitive(instance),
image_ref, new_image_ref,
injected_files=[],
new_pass=password,
orig_sys_metadata=orig_sys_metadata)
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
image_ref_url = utils.generate_image_url(image_ref)
new_image_ref_url = utils.generate_image_url(new_image_ref)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 3)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['event_type'],
'compute.instance.exists')
self.assertEquals(msg['payload']['image_ref_url'], image_ref_url)
msg = test_notifier.NOTIFICATIONS[1]
self.assertEquals(msg['event_type'],
'compute.instance.rebuild.start')
self.assertEquals(msg['payload']['image_ref_url'], new_image_ref_url)
self.assertEquals(msg['payload']['image_name'], 'fake_name')
msg = test_notifier.NOTIFICATIONS[2]
self.assertEquals(msg['event_type'],
'compute.instance.rebuild.end')
self.assertEquals(msg['priority'], 'INFO')
payload = msg['payload']
self.assertEquals(payload['image_name'], 'fake_name')
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], inst_ref['uuid'])
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertEqual(payload['launched_at'], str(cur_time))
self.assertEquals(payload['image_ref_url'], new_image_ref_url)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_finish_resize_instance_notification(self):
"""Ensure notifications on instance migrate/resize"""
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = jsonutils.to_primitive(self._create_fake_instance())
context = self.context.elevated()
old_type_id = instance_types.get_instance_type_by_name(
'm1.tiny')['id']
new_type = instance_types.get_instance_type_by_name('m1.small')
new_type = jsonutils.to_primitive(new_type)
new_type_id = new_type['id']
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'], {'host': 'foo'})
db.instance_update(self.context, instance["uuid"],
{"task_state": task_states.RESIZE_PREP})
self.compute.prep_resize(context, instance=instance,
instance_type=new_type, image={})
migration_ref = db.migration_get_by_instance_and_status(context,
instance['uuid'],
'pre-migrating')
self.compute.resize_instance(context, instance=instance,
migration_id=migration_ref['id'],
image={})
timeutils.set_time_override(cur_time)
test_notifier.NOTIFICATIONS = []
self.compute.finish_resize(context,
migration_id=int(migration_ref['id']), disk_info={}, image={},
instance=instance)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['event_type'],
'compute.instance.finish_resize.start')
msg = test_notifier.NOTIFICATIONS[1]
self.assertEquals(msg['event_type'],
'compute.instance.finish_resize.end')
self.assertEquals(msg['priority'], 'INFO')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance['uuid'])
self.assertEquals(payload['instance_type'], 'm1.small')
self.assertEquals(str(payload['instance_type_id']), str(new_type_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
self.assertEqual(payload['launched_at'], str(cur_time))
image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(context,
instance=jsonutils.to_primitive(instance))
def test_resize_instance_notification(self):
"""Ensure notifications on instance migrate/resize"""
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = jsonutils.to_primitive(self._create_fake_instance())
context = self.context.elevated()
self.compute.run_instance(self.context, instance=instance)
timeutils.set_time_override(cur_time)
test_notifier.NOTIFICATIONS = []
db.instance_update(self.context, instance['uuid'], {'host': 'foo'})
instance_type = instance_types.get_default_instance_type()
self.compute.prep_resize(context, instance=instance,
instance_type=instance_type, image={})
db.migration_get_by_instance_and_status(context,
instance['uuid'],
'pre-migrating')
self.assertEquals(len(test_notifier.NOTIFICATIONS), 3)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['event_type'],
'compute.instance.exists')
msg = test_notifier.NOTIFICATIONS[1]
self.assertEquals(msg['event_type'],
'compute.instance.resize.prep.start')
msg = test_notifier.NOTIFICATIONS[2]
self.assertEquals(msg['event_type'],
'compute.instance.resize.prep.end')
self.assertEquals(msg['priority'], 'INFO')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance['uuid'])
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
self.assertTrue('display_name' in payload)
self.assertTrue('created_at' in payload)
self.assertTrue('launched_at' in payload)
image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(context, instance=instance)
def test_prep_resize_instance_migration_error(self):
"""Ensure prep_resize raise a migration error"""
self.flags(host="foo", allow_resize_to_same_host=False)
instance = jsonutils.to_primitive(self._create_fake_instance())
context = self.context.elevated()
reservations = self._ensure_quota_reservations_rolledback()
self.compute.run_instance(self.context, instance=instance)
new_instance = db.instance_update(self.context, instance['uuid'],
{'host': self.compute.host})
new_instance = jsonutils.to_primitive(new_instance)
instance_type = instance_types.get_default_instance_type()
self.assertRaises(exception.MigrationError, self.compute.prep_resize,
context, instance=new_instance,
instance_type=instance_type, image={},
reservations=reservations)
self.compute.terminate_instance(context, instance=new_instance)
def test_resize_instance_driver_error(self):
"""Ensure instance status set to Error on resize error"""
def throw_up(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
throw_up)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_type = instance_types.get_default_instance_type()
context = self.context.elevated()
reservations = self._ensure_quota_reservations_rolledback()
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'], {'host': 'foo'})
self.compute.prep_resize(context, instance=instance,
instance_type=instance_type, image={},
reservations=reservations)
migration_ref = db.migration_get_by_instance_and_status(context,
instance['uuid'], 'pre-migrating')
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.RESIZE_PREP})
# verify
self.assertRaises(test.TestingException, self.compute.resize_instance,
context, instance=instance,
migration_id=migration_ref['id'], image={},
reservations=reservations)
instance = db.instance_get_by_uuid(context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
self.compute.terminate_instance(context,
instance=jsonutils.to_primitive(instance))
def test_resize_instance(self):
"""Ensure instance can be migrated/resized"""
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_type = instance_types.get_default_instance_type()
context = self.context.elevated()
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{'host': 'foo'})
self.compute.prep_resize(context, instance=instance,
instance_type=instance_type, image={})
migration_ref = db.migration_get_by_instance_and_status(context,
instance['uuid'], 'pre-migrating')
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.RESIZE_PREP})
self.compute.resize_instance(context, instance=instance,
migration_id=migration_ref['id'],
image={})
self.compute.terminate_instance(context,
instance=jsonutils.to_primitive(instance))
def test_finish_revert_resize(self):
"""Ensure that the flavor is reverted to the original on revert"""
def fake(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
self.stubs.Set(self.compute.driver, 'finish_revert_migration', fake)
reservations = self._ensure_quota_reservations_committed()
context = self.context.elevated()
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
# Confirm the instance size before the resize starts
inst_ref = db.instance_get_by_uuid(context, instance['uuid'])
instance_type_ref = db.instance_type_get(context,
inst_ref['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], '1')
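# Resize the instance to the flavor with flavorid 3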
new_inst_ref = db.instance_update(self.context, instance['uuid'],
{'host': 'foo'})
new_instance_type_ref = db.instance_type_get_by_flavor_id(context, 3)
self.compute.prep_resize(context,
instance=jsonutils.to_primitive(new_inst_ref),
instance_type=jsonutils.to_primitive(new_instance_type_ref),
image={}, reservations=reservations)
migration_ref = db.migration_get_by_instance_and_status(context,
inst_ref['uuid'], 'pre-migrating')
instance = jsonutils.to_primitive(inst_ref)
db.instance_update(self.context, instance["uuid"],
{"task_state": task_states.RESIZE_PREP})
self.compute.resize_instance(context, instance=instance,
migration_id=migration_ref['id'],
image={})
self.compute.finish_resize(context,
migration_id=int(migration_ref['id']), disk_info={},
image={}, instance=instance)
# Prove that the instance size is now the new size
inst_ref = db.instance_get_by_uuid(context, instance['uuid'])
instance_type_ref = db.instance_type_get(context,
inst_ref['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], '3')
# Finally, revert and confirm the old flavor has been applied
rpcinst = jsonutils.to_primitive(inst_ref)
db.instance_update(self.context, instance["uuid"],
{"task_state": task_states.RESIZE_REVERTING})
self.compute.revert_resize(context,
migration_id=migration_ref['id'], instance=rpcinst,
reservations=reservations)
self.compute.finish_revert_resize(context,
migration_id=migration_ref['id'], instance=rpcinst,
reservations=reservations)
instance = db.instance_get_by_uuid(context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance['task_state'], None)
inst_ref = db.instance_get_by_uuid(context, instance['uuid'])
instance_type_ref = db.instance_type_get(context,
inst_ref['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], '1')
self.assertEqual(inst_ref['host'], migration_ref['source_compute'])
self.compute.terminate_instance(context,
instance=jsonutils.to_primitive(inst_ref))
def test_get_by_flavor_id(self):
type = instance_types.get_instance_type_by_flavor_id(1)
self.assertEqual(type['name'], 'm1.tiny')
def test_resize_same_source_fails(self):
"""Ensure instance fails to migrate when source and destination are
the same host"""
reservations = self._ensure_quota_reservations_rolledback()
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
instance_type = instance_types.get_default_instance_type()
self.assertRaises(exception.MigrationError, self.compute.prep_resize,
self.context, instance=instance,
instance_type=instance_type, image={},
reservations=reservations)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_resize_instance_handles_migration_error(self):
"""Ensure vm_state is ERROR when error occurs"""
def raise_migration_failure(*args):
raise test.TestingException()
self.stubs.Set(self.compute.driver,
'migrate_disk_and_power_off',
raise_migration_failure)
reservations = self._ensure_quota_reservations_rolledback()
inst_ref = jsonutils.to_primitive(self._create_fake_instance())
instance_type = instance_types.get_default_instance_type()
context = self.context.elevated()
self.compute.run_instance(self.context, instance=inst_ref)
db.instance_update(self.context, inst_ref['uuid'], {'host': 'foo'})
self.compute.prep_resize(context, instance=inst_ref,
instance_type=instance_type,
image={}, reservations=reservations)
migration_ref = db.migration_get_by_instance_and_status(context,
inst_ref['uuid'], 'pre-migrating')
db.instance_update(self.context, inst_ref['uuid'],
{"task_state": task_states.RESIZE_PREP})
self.assertRaises(test.TestingException, self.compute.resize_instance,
context, instance=inst_ref,
migration_id=migration_ref['id'], image={},
reservations=reservations)
inst_ref = db.instance_get_by_uuid(context, inst_ref['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ERROR)
self.compute.terminate_instance(context,
instance=jsonutils.to_primitive(inst_ref))
def test_check_can_live_migrate_source_works_correctly(self):
"""Confirm check_can_live_migrate_source works on positive path"""
context = self.context.elevated()
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
inst_id = inst_ref["id"]
dest = "fake_host_1"
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_source')
dest_check_data = {"test": "data"}
self.compute.driver.check_can_live_migrate_source(context,
inst_ref,
dest_check_data)
self.mox.ReplayAll()
self.compute.check_can_live_migrate_source(context,
dest_check_data=dest_check_data, instance=inst_ref)
def test_check_can_live_migrate_destination_works_correctly(self):
"""Confirm check_can_live_migrate_destination works on positive path"""
context = self.context.elevated()
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
inst_id = inst_ref["id"]
dest = "fake_host_1"
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_destination')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'check_can_live_migrate_source')
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_destination_cleanup')
dest_check_data = {"test": "data"}
self.compute.driver.check_can_live_migrate_destination(context,
inst_ref, True, False).AndReturn(dest_check_data)
self.compute.compute_rpcapi.check_can_live_migrate_source(context,
inst_ref, dest_check_data)
self.compute.driver.check_can_live_migrate_destination_cleanup(
context, dest_check_data)
self.mox.ReplayAll()
self.compute.check_can_live_migrate_destination(context,
block_migration=True, disk_over_commit=False,
instance=inst_ref)
def test_check_can_live_migrate_destination_fails_dest_check(self):
"""Confirm check_can_live_migrate_destination works on positive path"""
context = self.context.elevated()
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
inst_id = inst_ref["id"]
dest = "fake_host_1"
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_destination')
self.compute.driver.check_can_live_migrate_destination(context,
inst_ref, True, False).AndRaise(exception.Invalid())
self.mox.ReplayAll()
self.assertRaises(exception.Invalid,
self.compute.check_can_live_migrate_destination,
context, block_migration=True,
disk_over_commit=False, instance=inst_ref)
def test_check_can_live_migrate_destination_fails_source(self):
"""Confirm check_can_live_migrate_destination works on positive path"""
context = self.context.elevated()
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
inst_id = inst_ref["id"]
dest = "fake_host_1"
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_destination')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'check_can_live_migrate_source')
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_destination_cleanup')
dest_check_data = {"test": "data"}
self.compute.driver.check_can_live_migrate_destination(context,
inst_ref, True, False).AndReturn(dest_check_data)
self.compute.compute_rpcapi.check_can_live_migrate_source(context,
inst_ref, dest_check_data).AndRaise(exception.Invalid())
self.compute.driver.check_can_live_migrate_destination_cleanup(
context, dest_check_data)
self.mox.ReplayAll()
self.assertRaises(exception.Invalid,
self.compute.check_can_live_migrate_destination,
context, block_migration=True,
disk_over_commit=False, instance=inst_ref)
def test_pre_live_migration_instance_has_no_fixed_ip(self):
"""Confirm raising exception if instance doesn't have fixed_ip."""
# creating instance testdata
context = self.context.elevated()
instance = jsonutils.to_primitive(self._create_fake_instance())
inst_id = instance["id"]
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpNotFoundForInstance,
self.compute.pre_live_migration, context,
instance=instance)
def test_pre_live_migration_works_correctly(self):
"""Confirm setup_compute_volume is called when volume is mounted."""
def stupid(*args, **kwargs):
return fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
self.stubs.Set(nova.compute.manager.ComputeManager,
'_get_instance_nw_info', stupid)
# creating instance testdata
instance = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'dummy'}))
inst_id = instance['id']
c = context.get_admin_context()
nw_info = fake_network.fake_get_instance_nw_info(self.stubs)
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
{'block_device_mapping': []},
mox.IgnoreArg())
self.mox.StubOutWithMock(self.compute.driver,
'ensure_filtering_rules_for_instance')
self.compute.driver.ensure_filtering_rules_for_instance(
mox.IsA(instance), nw_info)
# start test
self.mox.ReplayAll()
ret = self.compute.pre_live_migration(c, instance=instance)
self.assertEqual(ret, None)
# cleanup
db.instance_destroy(c, instance['uuid'])
def test_live_migration_dest_raises_exception(self):
"""Confirm exception when pre_live_migration fails."""
# creating instance testdata
instance_ref = self._create_fake_instance({'host': 'dummy'})
instance = jsonutils.to_primitive(instance_ref)
inst_uuid = instance['uuid']
inst_id = instance['id']
c = context.get_admin_context()
topic = rpc.queue_get_for(c, FLAGS.compute_topic, instance['host'])
# creating volume testdata
volume_id = db.volume_create(c, {'size': 1})['id']
values = {'instance_uuid': inst_uuid, 'device_name': '/dev/vdc',
'delete_on_termination': False, 'volume_id': volume_id}
db.block_device_mapping_create(c, values)
# creating mocks
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(self.compute.driver,
'get_instance_disk_info')
self.compute.driver.get_instance_disk_info(instance['name'])
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'pre_live_migration')
self.compute.compute_rpcapi.pre_live_migration(c,
mox.IsA(instance), True, None, instance['host']).AndRaise(
rpc.common.RemoteError('', '', ''))
db.instance_update(self.context, instance['uuid'],
{'task_state': task_states.MIGRATING})
# mocks for rollback
rpc.call(c, 'network', {'method': 'setup_networks_on_host',
'args': {'instance_id': inst_id,
'host': self.compute.host,
'teardown': False}})
rpcinst = jsonutils.to_primitive(
db.instance_get_by_uuid(self.context, instance['uuid']))
rpc.call(c, topic,
{"method": "remove_volume_connection",
"args": {'instance': rpcinst,
'volume_id': volume_id},
"version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
None)
rpc.cast(c, topic,
{"method": "rollback_live_migration_at_destination",
"args": {'instance': rpcinst},
"version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
# start test
self.mox.ReplayAll()
self.assertRaises(rpc_common.RemoteError,
self.compute.live_migration,
c, dest=instance['host'], block_migration=True,
instance=rpcinst)
# cleanup
for bdms in db.block_device_mapping_get_all_by_instance(
c, inst_uuid):
db.block_device_mapping_destroy(c, bdms['id'])
db.volume_destroy(c, volume_id)
db.instance_destroy(c, inst_uuid)
def test_live_migration_works_correctly(self):
"""Confirm live_migration() works as expected correctly."""
# creating instance testdata
c = context.get_admin_context()
instance_ref = self._create_fake_instance({'host': 'dummy'})
inst_uuid = instance_ref['uuid']
inst_id = instance_ref['id']
instance = jsonutils.to_primitive(db.instance_get(c, inst_id))
# creating mocks
self.mox.StubOutWithMock(rpc, 'call')
topic = rpc.queue_get_for(c, FLAGS.compute_topic, instance['host'])
rpc.call(c, topic,
{"method": "pre_live_migration",
"args": {'instance': instance,
'block_migration': False,
'disk': None},
"version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
None)
# start test
self.mox.ReplayAll()
ret = self.compute.live_migration(c, dest=instance['host'],
instance=instance)
self.assertEqual(ret, None)
# cleanup
db.instance_destroy(c, inst_uuid)
def test_post_live_migration_working_correctly(self):
"""Confirm post_live_migration() works as expected correctly."""
dest = 'desthost'
flo_addr = '1.2.1.2'
# creating testdata
c = context.get_admin_context()
inst_ref = jsonutils.to_primitive(self._create_fake_instance({
'state_description': 'migrating',
'state': power_state.PAUSED}))
inst_uuid = inst_ref['uuid']
inst_id = inst_ref['id']
db.instance_update(c, inst_uuid,
{'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED})
v_ref = db.volume_create(c, {'size': 1, 'instance_id': inst_id})
fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
'instance_uuid': inst_ref['uuid']})
fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
db.floating_ip_create(c, {'address': flo_addr,
'fixed_ip_id': fix_ref['id']})
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(inst_ref, [])
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, rpc.queue_get_for(c, FLAGS.compute_topic, dest),
{"method": "post_live_migration_at_destination",
"args": {'instance': inst_ref, 'block_migration': False},
"version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
None)
self.mox.StubOutWithMock(self.compute.driver, 'unplug_vifs')
self.compute.driver.unplug_vifs(inst_ref, [])
rpc.call(c, 'network', {'method': 'setup_networks_on_host',
'args': {'instance_id': inst_id,
'host': self.compute.host,
'teardown': True}})
# start test
self.mox.ReplayAll()
self.compute._post_live_migration(c, inst_ref, dest)
# make sure floating ips are rewritten to destination hostname.
flo_refs = db.floating_ip_get_all_by_host(c, dest)
self.assertTrue(flo_refs)
self.assertEqual(flo_refs[0]['address'], flo_addr)
# cleanup
db.instance_destroy(c, inst_uuid)
db.volume_destroy(c, v_ref['id'])
db.floating_ip_destroy(c, flo_addr)
def test_run_kill_vm(self):
"""Detect when a vm is terminated behind the scenes"""
self.stubs.Set(compute_manager.ComputeManager,
'_report_driver_status', nop_report_driver_status)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
instance_name = instances[0].name
self.compute.driver.test_remove_vm(instance_name)
# Force the compute manager to do its periodic poll
ctxt = context.get_admin_context()
self.compute._sync_power_states(ctxt)
instances = db.instance_get_all(ctxt)
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.assertEqual(task_states.STOPPING, instances[0]['task_state'])
def test_add_instance_fault(self):
exc_info = None
instance_uuid = str(utils.gen_uuid())
def fake_db_fault_create(ctxt, values):
self.assertTrue(values['details'].startswith('test'))
self.assertTrue('raise NotImplementedError' in values['details'])
del values['details']
expected = {
'code': 500,
'message': 'NotImplementedError',
'instance_uuid': instance_uuid,
}
self.assertEquals(expected, values)
try:
raise NotImplementedError('test')
except Exception:
exc_info = sys.exc_info()
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt, instance_uuid,
NotImplementedError('test'),
exc_info)
def test_add_instance_fault_user_error(self):
exc_info = None
instance_uuid = str(utils.gen_uuid())
def fake_db_fault_create(ctxt, values):
expected = {
'code': 400,
'message': 'Invalid',
'details': 'fake details',
'instance_uuid': instance_uuid,
}
self.assertEquals(expected, values)
user_exc = exception.Invalid('fake details', code=400)
try:
raise user_exc
except Exception:
exc_info = sys.exc_info()
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt, instance_uuid,
user_exc, exc_info)
def test_add_instance_fault_no_exc_info(self):
instance_uuid = str(utils.gen_uuid())
def fake_db_fault_create(ctxt, values):
expected = {
'code': 500,
'message': 'NotImplementedError',
'details': 'test',
'instance_uuid': instance_uuid,
}
self.assertEquals(expected, values)
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt, instance_uuid,
NotImplementedError('test'))
def test_cleanup_running_deleted_instances(self):
admin_context = context.get_admin_context()
deleted_at = (timeutils.utcnow() -
datetime.timedelta(hours=1, minutes=5))
instance = self._create_fake_instance({"deleted_at": deleted_at,
"deleted": True})
self.compute.host = instance['host']
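        # The driver still reports the soft-deleted instance as running.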
self.mox.StubOutWithMock(self.compute.driver, 'list_instances')
self.compute.driver.list_instances().AndReturn([instance['name']])
FLAGS.running_deleted_instance_timeout = 3600
FLAGS.running_deleted_instance_action = 'reap'
self.mox.StubOutWithMock(self.compute.db, "instance_get_all_by_host")
self.compute.db.instance_get_all_by_host(admin_context,
self.compute.host
).AndReturn([instance])
self.mox.StubOutWithMock(self.compute, "_shutdown_instance")
self.compute._shutdown_instance(admin_context,
instance).AndReturn(None)
self.mox.StubOutWithMock(self.compute, "_cleanup_volumes")
self.compute._cleanup_volumes(admin_context,
instance['uuid']).AndReturn(None)
self.mox.ReplayAll()
self.compute._cleanup_running_deleted_instances(admin_context)
def test_running_deleted_instances(self):
self.mox.StubOutWithMock(self.compute.driver, 'list_instances')
self.compute.driver.list_instances().AndReturn(['herp', 'derp'])
self.compute.host = 'host'
instance1 = mox.MockAnything()
instance1.name = 'herp'
instance1.deleted = True
instance1.deleted_at = "sometimeago"
instance2 = mox.MockAnything()
instance2.name = 'derp'
instance2.deleted = False
instance2.deleted_at = None
self.mox.StubOutWithMock(timeutils, 'is_older_than')
timeutils.is_older_than('sometimeago',
FLAGS.running_deleted_instance_timeout).AndReturn(True)
self.mox.StubOutWithMock(self.compute.db, "instance_get_all_by_host")
self.compute.db.instance_get_all_by_host('context',
'host').AndReturn(
[instance1,
instance2])
self.mox.ReplayAll()
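        # Only instance1 is deleted and older than the timeout, so it should
        # be the only instance considered 'running deleted'.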
val = self.compute._running_deleted_instances('context')
self.assertEqual(val, [instance1])
def test_heal_instance_info_cache(self):
# Update on every call for the test
self.flags(heal_instance_info_cache_interval=-1)
ctxt = context.get_admin_context()
instance_map = {}
instances = []
for x in xrange(5):
uuid = 'fake-uuid-%s' % x
instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host}
instances.append(instance_map[uuid])
call_info = {'get_all_by_host': 0, 'get_by_uuid': 0,
'get_nw_info': 0, 'expected_instance': None}
def fake_instance_get_all_by_host(context, host):
call_info['get_all_by_host'] += 1
return instances[:]
def fake_instance_get_by_uuid(context, instance_uuid):
if instance_uuid not in instance_map:
                raise exception.InstanceNotFound(instance_id=instance_uuid)
call_info['get_by_uuid'] += 1
return instance_map[instance_uuid]
# NOTE(comstud): Override the stub in setUp()
def fake_get_instance_nw_info(context, instance):
# Note that this exception gets caught in compute/manager
# and is ignored. However, the below increment of
# 'get_nw_info' won't happen, and you'll get an assert
# failure checking it below.
self.assertEqual(instance, call_info['expected_instance'])
call_info['get_nw_info'] += 1
self.stubs.Set(db, 'instance_get_all_by_host',
fake_instance_get_all_by_host)
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
self.stubs.Set(self.compute.network_api, 'get_instance_nw_info',
fake_get_instance_nw_info)
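        # The first call builds the uuid list with a single DB query and
        # heals instances[0] without an extra get_by_uuid lookup.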
call_info['expected_instance'] = instances[0]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(call_info['get_all_by_host'], 1)
self.assertEqual(call_info['get_by_uuid'], 0)
self.assertEqual(call_info['get_nw_info'], 1)
call_info['expected_instance'] = instances[1]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(call_info['get_all_by_host'], 1)
self.assertEqual(call_info['get_by_uuid'], 1)
self.assertEqual(call_info['get_nw_info'], 2)
# Make an instance switch hosts
instances[2]['host'] = 'not-me'
# Make an instance disappear
instance_map.pop(instances[3]['uuid'])
# '2' and '3' should be skipped..
call_info['expected_instance'] = instances[4]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(call_info['get_all_by_host'], 1)
# Incremented for '2' and '4'.. '3' caused a raise above.
self.assertEqual(call_info['get_by_uuid'], 3)
self.assertEqual(call_info['get_nw_info'], 3)
# Should be no more left.
self.assertEqual(len(self.compute._instance_uuids_to_heal), 0)
# This should cause a DB query now so we get first instance
# back again
call_info['expected_instance'] = instances[0]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(call_info['get_all_by_host'], 2)
        # Stays the same, because the instance came from the DB
self.assertEqual(call_info['get_by_uuid'], 3)
self.assertEqual(call_info['get_nw_info'], 4)
def test_poll_unconfirmed_resizes(self):
instances = [{'uuid': 'fake_uuid1', 'vm_state': vm_states.RESIZED,
'task_state': None},
{'uuid': 'noexist'},
{'uuid': 'fake_uuid2', 'vm_state': vm_states.ERROR,
'task_state': None},
{'uuid': 'fake_uuid3', 'vm_state': vm_states.ACTIVE,
'task_state': task_states.REBOOTING},
{'uuid': 'fake_uuid4', 'vm_state': vm_states.RESIZED,
'task_state': None},
{'uuid': 'fake_uuid5', 'vm_state': vm_states.ACTIVE,
'task_state': None},
{'uuid': 'fake_uuid6', 'vm_state': vm_states.RESIZED,
'task_state': 'deleting'}]
expected_migration_status = {'fake_uuid1': 'confirmed',
'noexist': 'error',
'fake_uuid2': 'error',
'fake_uuid3': 'error',
'fake_uuid4': None,
'fake_uuid5': 'error',
'fake_uuid6': 'error'}
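        # RESIZED instances with no conflicting task_state get confirmed;
        # missing instances, wrong vm_states and conflicting task_states mark
        # the migration as 'error'; fake_uuid4 stays None because
        # confirm_resize raises for it.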
migrations = []
for i, instance in enumerate(instances, start=1):
migrations.append({'id': i,
'instance_uuid': instance['uuid'],
'status': None})
def fake_instance_get_by_uuid(context, instance_uuid):
# raise InstanceNotFound exception for uuid 'noexist'
if instance_uuid == 'noexist':
raise exception.InstanceNotFound(instance_id=instance_uuid)
for instance in instances:
if instance['uuid'] == instance_uuid:
return instance
def fake_migration_get_unconfirmed_by_dest_compute(context,
resize_confirm_window, dest_compute):
self.assertEqual(dest_compute, FLAGS.host)
return migrations
def fake_migration_update(context, migration_id, values):
for migration in migrations:
if migration['id'] == migration_id and 'status' in values:
migration['status'] = values['status']
def fake_confirm_resize(context, instance):
# raise exception for 'fake_uuid4' to check migration status
# does not get set to 'error' on confirm_resize failure.
if instance['uuid'] == 'fake_uuid4':
raise test.TestingException
for migration in migrations:
if migration['instance_uuid'] == instance['uuid']:
migration['status'] = 'confirmed'
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
self.stubs.Set(db, 'migration_get_unconfirmed_by_dest_compute',
fake_migration_get_unconfirmed_by_dest_compute)
self.stubs.Set(db, 'migration_update',
fake_migration_update)
self.stubs.Set(self.compute.compute_api, 'confirm_resize',
fake_confirm_resize)
def fetch_instance_migration_status(instance_uuid):
for migration in migrations:
if migration['instance_uuid'] == instance_uuid:
return migration['status']
self.flags(resize_confirm_window=60)
ctxt = context.get_admin_context()
self.compute._poll_unconfirmed_resizes(ctxt)
for uuid, status in expected_migration_status.iteritems():
self.assertEqual(status, fetch_instance_migration_status(uuid))
def test_instance_build_timeout_disabled(self):
self.flags(instance_build_timeout=0)
ctxt = context.get_admin_context()
called = {'get_all': False, 'set_error_state': 0}
created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
def fake_instance_get_all_by_filters(*args, **kwargs):
called['get_all'] = True
return instances[:]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_instance_get_all_by_filters)
def fake_set_instance_error_state(_ctxt, instance_uuid, **kwargs):
called['set_error_state'] += 1
self.stubs.Set(self.compute, '_set_instance_error_state',
fake_set_instance_error_state)
instance_map = {}
instances = []
for x in xrange(5):
uuid = 'fake-uuid-%s' % x
instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host,
'vm_state': vm_states.BUILDING,
'created_at': created_at}
instances.append(instance_map[uuid])
self.compute._check_instance_build_time(ctxt)
self.assertFalse(called['get_all'])
self.assertEqual(called['set_error_state'], 0)
def test_instance_build_timeout(self):
self.flags(instance_build_timeout=30)
ctxt = context.get_admin_context()
called = {'get_all': False, 'set_error_state': 0}
created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
def fake_instance_get_all_by_filters(*args, **kwargs):
called['get_all'] = True
return instances[:]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_instance_get_all_by_filters)
def fake_set_instance_error_state(_ctxt, instance_uuid, **kwargs):
called['set_error_state'] += 1
self.stubs.Set(self.compute, '_set_instance_error_state',
fake_set_instance_error_state)
instance_map = {}
instances = []
for x in xrange(5):
uuid = 'fake-uuid-%s' % x
instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host,
'vm_state': vm_states.BUILDING,
'created_at': created_at}
instances.append(instance_map[uuid])
self.compute._check_instance_build_time(ctxt)
self.assertTrue(called['get_all'])
self.assertEqual(called['set_error_state'], 5)
def test_instance_build_timeout_mixed_instances(self):
self.flags(instance_build_timeout=30)
ctxt = context.get_admin_context()
called = {'get_all': False, 'set_error_state': 0}
created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
def fake_instance_get_all_by_filters(*args, **kwargs):
called['get_all'] = True
return instances[:]
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_instance_get_all_by_filters)
def fake_set_instance_error_state(_ctxt, instance_uuid, **kwargs):
called['set_error_state'] += 1
self.stubs.Set(self.compute, '_set_instance_error_state',
fake_set_instance_error_state)
instance_map = {}
instances = []
        # expired instances
for x in xrange(4):
uuid = 'fake-uuid-%s' % x
instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host,
'vm_state': vm_states.BUILDING,
'created_at': created_at}
instances.append(instance_map[uuid])
        # not expired
uuid = 'fake-uuid-5'
instance_map[uuid] = {
'uuid': uuid,
'host': FLAGS.host,
'vm_state': vm_states.BUILDING,
'created_at': timeutils.utcnow(),
}
instances.append(instance_map[uuid])
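        # Only the four expired instances should be put into the error state.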
self.compute._check_instance_build_time(ctxt)
self.assertTrue(called['get_all'])
self.assertEqual(called['set_error_state'], 4)
class ComputeAPITestCase(BaseTestCase):
def setUp(self):
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
spectacular=True)
super(ComputeAPITestCase, self).setUp()
self.stubs.Set(nova.network.API, 'get_instance_nw_info',
fake_get_nw_info)
self.security_group_api = compute.api.SecurityGroupAPI()
self.compute_api = compute.API(
security_group_api=self.security_group_api)
self.fake_image = {
'id': 1,
'name': 'fake_name',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id'},
}
def _run_instance(self, params=None):
instance = jsonutils.to_primitive(self._create_fake_instance(params))
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
return instance, instance_uuid
def test_create_with_too_little_ram(self):
"""Test an instance type with too little memory"""
inst_type = instance_types.get_default_instance_type()
inst_type['memory_mb'] = 1
def fake_show(*args):
img = copy.copy(self.fake_image)
img['min_ram'] = 2
return img
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
self.assertRaises(exception.InstanceTypeMemoryTooSmall,
self.compute_api.create, self.context, inst_type, None)
# Now increase the inst_type memory and make sure all is fine.
inst_type['memory_mb'] = 2
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, None)
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_too_little_disk(self):
"""Test an instance type with too little disk space"""
inst_type = instance_types.get_default_instance_type()
inst_type['root_gb'] = 1
def fake_show(*args):
img = copy.copy(self.fake_image)
img['min_disk'] = 2
return img
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
self.assertRaises(exception.InstanceTypeDiskTooSmall,
self.compute_api.create, self.context, inst_type, None)
# Now increase the inst_type disk space and make sure all is fine.
inst_type['root_gb'] = 2
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, None)
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_just_enough_ram_and_disk(self):
"""Test an instance type with just enough ram and disk space"""
inst_type = instance_types.get_default_instance_type()
inst_type['root_gb'] = 2
inst_type['memory_mb'] = 2
def fake_show(*args):
img = copy.copy(self.fake_image)
img['min_ram'] = 2
img['min_disk'] = 2
img['name'] = 'fake_name'
return img
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, None)
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_no_ram_and_disk_reqs(self):
"""Test an instance type with no min_ram or min_disk"""
inst_type = instance_types.get_default_instance_type()
inst_type['root_gb'] = 1
inst_type['memory_mb'] = 1
def fake_show(*args):
return copy.copy(self.fake_image)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, None)
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
for instance in cases:
(ref, resv_id) = self.compute_api.create(self.context,
instance_types.get_default_instance_type(), None, **instance)
try:
self.assertNotEqual(ref[0]['display_name'], None)
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_sets_system_metadata(self):
"""Make sure image properties are copied into system metadata."""
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
image_href=None)
try:
sys_metadata = db.instance_system_metadata_get(self.context,
ref[0]['uuid'])
image_props = {'image_kernel_id': 'fake_kernel_id',
'image_ramdisk_id': 'fake_ramdisk_id',
'image_something_else': 'meow', }
for key, value in image_props.iteritems():
self.assertTrue(key in sys_metadata)
self.assertEqual(value, sys_metadata[key])
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_associates_security_groups(self):
"""Make sure create associates security groups"""
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
image_href=None,
security_group=['testgroup'])
try:
self.assertEqual(len(db.security_group_get_by_instance(
self.context, ref[0]['id'])), 1)
group = db.security_group_get(self.context, group['id'])
self.assert_(len(group.instances) == 1)
finally:
db.security_group_destroy(self.context, group['id'])
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_with_invalid_security_group_raises(self):
instance_type = instance_types.get_default_instance_type()
pre_build_len = len(db.instance_get_all(context.get_admin_context()))
self.assertRaises(exception.SecurityGroupNotFoundForProject,
self.compute_api.create,
self.context,
instance_type=instance_type,
image_href=None,
security_group=['this_is_a_fake_sec_group'])
self.assertEqual(pre_build_len,
len(db.instance_get_all(context.get_admin_context())))
def test_create_with_large_user_data(self):
"""Test an instance type with too much user data."""
inst_type = instance_types.get_default_instance_type()
def fake_show(*args):
img = copy.copy(self.fake_image)
img['min_ram'] = 2
return img
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
self.assertRaises(exception.InstanceUserDataTooLarge,
self.compute_api.create, self.context, inst_type, None,
user_data=('1' * 65536))
def test_create_with_malformed_user_data(self):
"""Test an instance type with malformed user data."""
inst_type = instance_types.get_default_instance_type()
def fake_show(*args):
img = copy.copy(self.fake_image)
img['min_ram'] = 2
return img
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
self.assertRaises(exception.InstanceUserDataMalformed,
self.compute_api.create, self.context, inst_type, None,
user_data='banana')
def test_create_with_base64_user_data(self):
"""Test an instance type with ok much user data."""
inst_type = instance_types.get_default_instance_type()
def fake_show(*args):
img = copy.copy(self.fake_image)
img['min_ram'] = 2
return img
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
# NOTE(mikal): a string of length 48510 encodes to 65532 characters of
# base64
(refs, resv_id) = self.compute_api.create(
self.context, inst_type, None,
user_data=base64.encodestring('1' * 48510))
db.instance_destroy(self.context, refs[0]['uuid'])
def test_default_hostname_generator(self):
fake_uuids = [str(utils.gen_uuid()) for x in xrange(4)]
orig_populate = self.compute_api._populate_instance_for_create
def _fake_populate(base_options, *args, **kwargs):
base_options['uuid'] = fake_uuids.pop(0)
return orig_populate(base_options, *args, **kwargs)
self.stubs.Set(self.compute_api,
'_populate_instance_for_create',
_fake_populate)
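        # display_name is sanitized into a hostname: None falls back to
        # 'server-<uuid>', punctuation and control characters are stripped,
        # the name is lowercased, and spaces/underscores become dashes.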
cases = [(None, 'server-%s' % fake_uuids[0]),
('Hello, Server!', 'hello-server'),
('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
('hello_server', 'hello-server')]
for display_name, hostname in cases:
(ref, resv_id) = self.compute_api.create(self.context,
instance_types.get_default_instance_type(), None,
display_name=display_name)
try:
self.assertEqual(ref[0]['hostname'], hostname)
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_destroy_instance_disassociates_security_groups(self):
"""Make sure destroying disassociates security groups"""
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
image_href=None,
security_group=['testgroup'])
try:
db.instance_destroy(self.context, ref[0]['uuid'])
group = db.security_group_get(self.context, group['id'])
self.assert_(len(group.instances) == 0)
finally:
db.security_group_destroy(self.context, group['id'])
def test_destroy_security_group_disassociates_instances(self):
"""Make sure destroying security groups disassociates instances"""
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
image_href=None,
security_group=['testgroup'])
try:
db.security_group_destroy(self.context, group['id'])
admin_deleted_context = context.get_admin_context(
read_deleted="only")
group = db.security_group_get(admin_deleted_context, group['id'])
self.assert_(len(group.instances) == 0)
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_start(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.STOPPING})
self.compute.stop_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
self.compute_api.start(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.STARTING)
db.instance_destroy(self.context, instance['uuid'])
def test_stop(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
self.compute_api.stop(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.STOPPING)
db.instance_destroy(self.context, instance['uuid'])
def test_start_shutdown(self):
def check_state(instance_uuid, power_state_, vm_state_, task_state_):
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['power_state'], power_state_)
self.assertEqual(instance['vm_state'], vm_state_)
self.assertEqual(instance['task_state'], task_state_)
def start_check_state(instance_uuid,
power_state_, vm_state_, task_state_):
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.compute_api.start(self.context, instance)
check_state(instance_uuid, power_state_, vm_state_, task_state_)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
check_state(instance['uuid'], power_state.RUNNING, vm_states.ACTIVE,
None)
# NOTE(yamahata): emulate compute.manager._sync_power_state() that
# the instance is shutdown by itself
db.instance_update(self.context, instance['uuid'],
{'power_state': power_state.NOSTATE,
'vm_state': vm_states.STOPPED})
check_state(instance['uuid'], power_state.NOSTATE, vm_states.STOPPED,
None)
start_check_state(instance['uuid'], power_state.NOSTATE,
vm_states.STOPPED, task_states.STARTING)
db.instance_destroy(self.context, instance['uuid'])
def test_delete(self):
instance, instance_uuid = self._run_instance(params={
'host': FLAGS.host})
self.compute_api.delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.DELETING)
db.instance_destroy(self.context, instance['uuid'])
def test_repeated_delete_quota(self):
in_use = {'instances': 1}
def fake_reserve(context, **deltas):
return dict(deltas.iteritems())
self.stubs.Set(QUOTAS, 'reserve', fake_reserve)
def fake_commit(context, deltas):
for k, v in deltas.iteritems():
in_use[k] = in_use.get(k, 0) + v
self.stubs.Set(QUOTAS, 'commit', fake_commit)
instance, instance_uuid = self._run_instance(params={
'host': FLAGS.host})
self.compute_api.delete(self.context, instance)
self.compute_api.delete(self.context, instance)
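        # Deleting the same instance twice should only decrement the
        # instance quota once.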
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.DELETING)
self.assertEquals(in_use['instances'], 0)
db.instance_destroy(self.context, instance['uuid'])
def test_delete_fast_if_host_not_set(self):
instance = self._create_fake_instance({'host': None})
self.compute_api.delete(self.context, instance)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
self.context, instance['uuid'])
def test_delete_handles_host_setting_race_condition(self):
instance, instance_uuid = self._run_instance(params={
'host': FLAGS.host})
instance['host'] = None # make it think host was never set
self.compute_api.delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.DELETING)
db.instance_destroy(self.context, instance['uuid'])
def test_delete_fail(self):
instance, instance_uuid = self._run_instance(params={
'host': FLAGS.host})
instance = db.instance_update(self.context, instance_uuid,
{'disable_terminate': True})
self.compute_api.delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
db.instance_destroy(self.context, instance['uuid'])
def test_delete_soft(self):
instance, instance_uuid = self._run_instance()
self.compute_api.soft_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
db.instance_destroy(self.context, instance['uuid'])
def test_delete_soft_fail(self):
instance, instance_uuid = self._run_instance()
instance = db.instance_update(self.context, instance_uuid,
{'disable_terminate': True})
self.compute_api.soft_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
db.instance_destroy(self.context, instance['uuid'])
def test_force_delete(self):
"""Ensure instance can be deleted after a soft delete"""
instance = jsonutils.to_primitive(self._create_fake_instance(params={
'host': FLAGS.host}))
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.compute_api.soft_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
# set the state that the instance gets when soft_delete finishes
instance = db.instance_update(self.context, instance['uuid'],
{'vm_state': vm_states.SOFT_DELETED,
'task_state': None})
self.compute_api.force_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.DELETING)
def test_suspend(self):
"""Ensure instance can be suspended"""
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
self.assertEqual(instance['task_state'], None)
self.compute_api.suspend(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.SUSPENDING)
db.instance_destroy(self.context, instance['uuid'])
def test_resume(self):
"""Ensure instance can be resumed (if suspended)"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
{'vm_state': vm_states.SUSPENDED})
instance = db.instance_get(self.context, instance['id'])
self.assertEqual(instance['task_state'], None)
self.compute_api.resume(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['task_state'], task_states.RESUMING)
db.instance_destroy(self.context, instance['uuid'])
def test_pause(self):
"""Ensure instance can be paused"""
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
self.assertEqual(instance['task_state'], None)
self.compute_api.pause(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.PAUSING)
db.instance_destroy(self.context, instance['uuid'])
def test_unpause(self):
"""Ensure instance can be unpaused"""
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
self.assertEqual(instance['task_state'], None)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.PAUSING})
self.compute.pause_instance(self.context, instance=instance)
# set the state that the instance gets when pause finishes
instance = db.instance_update(self.context, instance['uuid'],
{'vm_state': vm_states.PAUSED})
self.compute_api.unpause(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.UNPAUSING)
db.instance_destroy(self.context, instance['uuid'])
def test_restore(self):
"""Ensure instance can be restored from a soft delete"""
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.compute_api.soft_delete(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.POWERING_OFF)
# set the state that the instance gets when soft_delete finishes
instance = db.instance_update(self.context, instance['uuid'],
{'vm_state': vm_states.SOFT_DELETED,
'task_state': None})
self.compute_api.restore(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.POWERING_ON)
db.instance_destroy(self.context, instance['uuid'])
def test_rebuild(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
# Set some image metadata that should get wiped out and reset
# as well as some other metadata that should be preserved.
db.instance_system_metadata_update(self.context, instance_uuid,
{'image_kernel_id': 'old-data',
'image_ramdisk_id': 'old_data',
'image_something_else': 'old-data',
'image_should_remove': 'bye-bye',
'preserved': 'preserve this!'},
True)
# Make sure Compute API updates the image_ref before casting to
# compute manager.
orig_update = self.compute_api.update
info = {'image_ref': None}
def update_wrapper(*args, **kwargs):
if 'image_ref' in kwargs:
info['image_ref'] = kwargs['image_ref']
return orig_update(*args, **kwargs)
self.stubs.Set(self.compute_api, 'update', update_wrapper)
image_ref = instance["image_ref"] + '-new_image_ref'
password = "new_password"
self.compute_api.rebuild(self.context, instance, image_ref, password)
self.assertEqual(info['image_ref'], image_ref)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.REBUILDING)
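        # Image-derived system metadata should be reset from the new image
        # while non-image keys are preserved.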
sys_metadata = db.instance_system_metadata_get(self.context,
instance_uuid)
self.assertEqual(sys_metadata,
{'image_kernel_id': 'fake_kernel_id',
'image_ramdisk_id': 'fake_ramdisk_id',
'image_something_else': 'meow',
'preserved': 'preserve this!'})
db.instance_destroy(self.context, instance['uuid'])
def test_reboot_soft(self):
"""Ensure instance can be soft rebooted"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['task_state'], None)
reboot_type = "SOFT"
self.compute_api.reboot(self.context, inst_ref, reboot_type)
inst_ref = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
self.assertEqual(inst_ref['task_state'], task_states.REBOOTING)
db.instance_destroy(self.context, inst_ref['uuid'])
def test_reboot_hard(self):
"""Ensure instance can be hard rebooted"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['task_state'], None)
reboot_type = "HARD"
self.compute_api.reboot(self.context, inst_ref, reboot_type)
inst_ref = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
self.assertEqual(inst_ref['task_state'], task_states.REBOOTING_HARD)
db.instance_destroy(self.context, inst_ref['uuid'])
def test_hard_reboot_of_soft_rebooting_instance(self):
"""Ensure instance can be hard rebooted while soft rebooting"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBOOTING})
reboot_type = "HARD"
self.compute_api.reboot(self.context, inst_ref, reboot_type)
inst_ref = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
self.assertEqual(inst_ref['task_state'], task_states.REBOOTING_HARD)
db.instance_destroy(self.context, inst_ref['uuid'])
def test_soft_reboot_of_rebooting_instance(self):
"""Ensure instance can't be soft rebooted while rebooting"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBOOTING})
inst_ref = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
reboot_type = "SOFT"
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.reboot,
self.context,
inst_ref,
reboot_type)
def test_hostname_create(self):
"""Ensure instance hostname is set during creation."""
inst_type = instance_types.get_instance_type_by_name('m1.tiny')
(instances, _) = self.compute_api.create(self.context,
inst_type,
None,
display_name='test host')
self.assertEqual('test-host', instances[0]['hostname'])
def test_set_admin_password(self):
"""Ensure instance can have its admin password set"""
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'], None)
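        # set_admin_password should use a blocking rpc.call (do_cast=False).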
def fake_rpc_method(context, topic, msg, do_cast=True):
self.assertFalse(do_cast)
self.stubs.Set(rpc, 'call', fake_rpc_method)
self.compute_api.set_admin_password(self.context, inst_ref)
inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'],
task_states.UPDATING_PASSWORD)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(inst_ref))
def test_rescue_unrescue(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance['task_state'], None)
self.compute_api.rescue(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance['task_state'], task_states.RESCUING)
params = {'vm_state': vm_states.RESCUED, 'task_state': None}
db.instance_update(self.context, instance_uuid, params)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.compute_api.unrescue(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.RESCUED)
self.assertEqual(instance['task_state'], task_states.UNRESCUING)
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
def test_snapshot(self):
"""Ensure a snapshot of an instance can be created"""
instance = self._create_fake_instance()
image = self.compute_api.snapshot(self.context, instance, 'snap1',
{'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
properties = image['properties']
self.assertTrue('backup_type' not in properties)
self.assertEqual(properties['image_type'], 'snapshot')
self.assertEqual(properties['instance_uuid'], instance['uuid'])
self.assertEqual(properties['extra_param'], 'value1')
db.instance_destroy(self.context, instance['uuid'])
def test_snapshot_minram_mindisk_VHD(self):
"""Ensure a snapshots min_ram and min_disk are correct.
A snapshot of a non-shrinkable VHD should have min_ram
and min_disk set to that of the original instances flavor.
"""
def fake_show(*args):
img = copy.copy(self.fake_image)
img['disk_format'] = 'vhd'
return img
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
instance = self._create_fake_instance()
inst_params = {'root_gb': 2, 'memory_mb': 256}
instance['instance_type'].update(inst_params)
image = self.compute_api.snapshot(self.context, instance, 'snap1',
{'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
self.assertEqual(image['min_ram'], 256)
self.assertEqual(image['min_disk'], 2)
properties = image['properties']
self.assertTrue('backup_type' not in properties)
self.assertEqual(properties['image_type'], 'snapshot')
self.assertEqual(properties['instance_uuid'], instance['uuid'])
self.assertEqual(properties['extra_param'], 'value1')
db.instance_destroy(self.context, instance['uuid'])
def test_snapshot_minram_mindisk(self):
"""Ensure a snapshots min_ram and min_disk are correct.
A snapshot of an instance should have min_ram and min_disk
set to that of the instances original image unless that
image had a disk format of vhd.
"""
def fake_show(*args):
img = copy.copy(self.fake_image)
img['disk_format'] = 'raw'
img['min_ram'] = 512
img['min_disk'] = 1
return img
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
instance = self._create_fake_instance()
image = self.compute_api.snapshot(self.context, instance, 'snap1',
{'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
self.assertEqual(image['min_ram'], 512)
self.assertEqual(image['min_disk'], 1)
properties = image['properties']
self.assertTrue('backup_type' not in properties)
self.assertEqual(properties['image_type'], 'snapshot')
self.assertEqual(properties['instance_uuid'], instance['uuid'])
self.assertEqual(properties['extra_param'], 'value1')
db.instance_destroy(self.context, instance['uuid'])
def test_snapshot_minram_mindisk_img_missing_minram(self):
"""Ensure a snapshots min_ram and min_disk are correct.
Do not show an attribute that the orig img did not have.
"""
def fake_show(*args):
img = copy.copy(self.fake_image)
img['disk_format'] = 'raw'
img['min_disk'] = 1
return img
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
instance = self._create_fake_instance()
image = self.compute_api.snapshot(self.context, instance, 'snap1',
{'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
self.assertFalse('min_ram' in image)
self.assertEqual(image['min_disk'], 1)
properties = image['properties']
self.assertTrue('backup_type' not in properties)
self.assertEqual(properties['image_type'], 'snapshot')
self.assertEqual(properties['instance_uuid'], instance['uuid'])
self.assertEqual(properties['extra_param'], 'value1')
db.instance_destroy(self.context, instance['uuid'])
def test_snapshot_minram_mindisk_no_image(self):
"""Ensure a snapshots min_ram and min_disk are correct.
A snapshots min_ram and min_disk should be set to default if
an instances original image cannot be found.
"""
def fake_show(*args):
raise exception.ImageNotFound
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
instance = self._create_fake_instance()
image = self.compute_api.snapshot(self.context, instance, 'snap1',
{'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
# min_ram and min_disk are not returned when set to default
self.assertFalse('min_ram' in image)
self.assertFalse('min_disk' in image)
properties = image['properties']
self.assertTrue('backup_type' not in properties)
self.assertEqual(properties['image_type'], 'snapshot')
self.assertEqual(properties['instance_uuid'], instance['uuid'])
self.assertEqual(properties['extra_param'], 'value1')
db.instance_destroy(self.context, instance['uuid'])
def test_backup(self):
"""Can't backup an instance which is already being backed up."""
instance = self._create_fake_instance()
image = self.compute_api.backup(self.context, instance,
'backup1', 'DAILY', None,
{'extra_param': 'value1'})
self.assertEqual(image['name'], 'backup1')
properties = image['properties']
self.assertEqual(properties['backup_type'], 'DAILY')
self.assertEqual(properties['image_type'], 'backup')
self.assertEqual(properties['instance_uuid'], instance['uuid'])
self.assertEqual(properties['extra_param'], 'value1')
db.instance_destroy(self.context, instance['uuid'])
def test_backup_conflict(self):
"""Can't backup an instance which is already being backed up."""
instance = self._create_fake_instance()
instance_values = {'task_state': task_states.IMAGE_BACKUP}
db.instance_update(self.context, instance['uuid'], instance_values)
instance = self.compute_api.get(self.context, instance['uuid'])
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.backup,
self.context,
instance,
None,
None,
None)
db.instance_destroy(self.context, instance['uuid'])
def test_snapshot_conflict(self):
"""Can't snapshot an instance which is already being snapshotted."""
instance = self._create_fake_instance()
instance_values = {'task_state': task_states.IMAGE_SNAPSHOT}
db.instance_update(self.context, instance['uuid'], instance_values)
instance = self.compute_api.get(self.context, instance['uuid'])
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context,
instance,
None)
db.instance_destroy(self.context, instance['uuid'])
def test_resize_confirm_through_api(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
context = self.context.elevated()
self.compute.run_instance(self.context, instance=instance)
instance = db.instance_get_by_uuid(context, instance['uuid'])
self.compute_api.resize(context, instance, '4')
# create a fake migration record (manager does this)
db.migration_create(context,
{'instance_uuid': instance['uuid'],
'status': 'finished'})
# set the state that the instance gets when resize finishes
instance = db.instance_update(self.context, instance['uuid'],
{'task_state': None,
'vm_state': vm_states.RESIZED})
self.compute_api.confirm_resize(context, instance)
self.compute.terminate_instance(context,
instance=jsonutils.to_primitive(instance))
def test_resize_revert_through_api(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
context = self.context.elevated()
instance = db.instance_get_by_uuid(context, instance['uuid'])
self.compute.run_instance(self.context, instance=instance)
self.compute_api.resize(context, instance, '4')
# create a fake migration record (manager does this)
db.migration_create(context,
{'instance_uuid': instance['uuid'],
'status': 'finished'})
# set the state that the instance gets when resize finishes
instance = db.instance_update(self.context, instance['uuid'],
{'task_state': None,
'vm_state': vm_states.RESIZED})
self.compute_api.revert_resize(context, instance)
instance = db.instance_get_by_uuid(context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.RESIZED)
self.assertEqual(instance['task_state'], task_states.RESIZE_REVERTING)
self.compute.terminate_instance(context,
instance=jsonutils.to_primitive(instance))
def test_resize_invalid_flavor_fails(self):
"""Ensure invalid flavors raise"""
instance = self._create_fake_instance()
context = self.context.elevated()
instance = db.instance_get_by_uuid(context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(exception.NotFound, self.compute_api.resize,
context, instance, 200)
self.compute.terminate_instance(context, instance=instance)
def test_resize_same_flavor_fails(self):
"""Ensure invalid flavors raise"""
context = self.context.elevated()
instance = self._create_fake_instance()
instance = db.instance_get_by_uuid(context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(exception.CannotResizeToSameFlavor,
self.compute_api.resize, context, instance, 1)
self.compute.terminate_instance(context, instance=instance)
def test_migrate(self):
context = self.context.elevated()
instance = self._create_fake_instance()
instance = db.instance_get_by_uuid(context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
self.compute.run_instance(self.context, instance=instance)
# Migrate simply calls resize() without a flavor_id.
self.compute_api.resize(context, instance, None)
self.compute.terminate_instance(context, instance=instance)
def test_resize_request_spec(self):
def _fake_cast(context, topic, msg):
request_spec = msg['args']['request_spec']
filter_properties = msg['args']['filter_properties']
instance_properties = request_spec['instance_properties']
# resize with flavor_id = None will still send instance_type
self.assertEqual(request_spec['instance_type'],
orig_instance_type)
self.assertEqual(request_spec['instance_uuids'],
[instance['uuid']])
self.assertEqual(instance_properties['uuid'], instance['uuid'])
self.assertEqual(instance_properties['host'], 'host2')
# Ensure the instance passed to us has been updated with
# progress set to 0 and task_state set to RESIZE_PREP.
self.assertEqual(instance_properties['task_state'],
task_states.RESIZE_PREP)
self.assertEqual(instance_properties['progress'], 0)
self.assertIn('host2', filter_properties['ignore_hosts'])
self.stubs.Set(rpc, 'cast', _fake_cast)
context = self.context.elevated()
instance = self._create_fake_instance(dict(host='host2'))
instance = db.instance_get_by_uuid(context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
orig_instance_type = instance['instance_type']
self.compute.run_instance(self.context, instance=instance)
# We need to set the host to something 'known'. Unfortunately,
# the compute manager is using a cached copy of FLAGS.host,
# so we can't just self.flags(host='host2') before calling
# run_instance above. Also, set progress to 10 so we ensure
# it is reset to 0 in compute_api.resize(). (verified in
# _fake_cast above).
instance = db.instance_update(self.context, instance['uuid'],
dict(host='host2', progress=10))
# different host
self.flags(host='host3')
try:
self.compute_api.resize(context, instance, None)
finally:
self.compute.terminate_instance(context, instance=instance)
def test_resize_request_spec_noavoid(self):
def _fake_cast(context, topic, msg):
request_spec = msg['args']['request_spec']
filter_properties = msg['args']['filter_properties']
instance_properties = request_spec['instance_properties']
self.assertEqual(instance_properties['host'], 'host2')
# Ensure the instance passed to us has been updated with
# progress set to 0 and task_state set to RESIZE_PREP.
self.assertEqual(instance_properties['task_state'],
task_states.RESIZE_PREP)
self.assertEqual(instance_properties['progress'], 0)
self.assertNotIn('host2', filter_properties['ignore_hosts'])
self.stubs.Set(rpc, 'cast', _fake_cast)
self.flags(allow_resize_to_same_host=True)
context = self.context.elevated()
instance = self._create_fake_instance(dict(host='host2'))
instance = db.instance_get_by_uuid(context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
self.compute.run_instance(self.context, instance=instance)
# We need to set the host to something 'known'. Unfortunately,
# the compute manager is using a cached copy of FLAGS.host,
# so we can't just self.flags(host='host2') before calling
# run_instance above. Also, set progress to 10 so we ensure
# it is reset to 0 in compute_api.resize(). (verified in
# _fake_cast above).
instance = db.instance_update(self.context, instance['uuid'],
dict(host='host2', progress=10))
# different host
try:
self.compute_api.resize(context, instance, None)
finally:
self.compute.terminate_instance(context, instance=instance)
def test_get(self):
"""Test get instance"""
c = context.get_admin_context()
exp_instance = self._create_fake_instance()
expected = dict(exp_instance.iteritems())
expected['name'] = exp_instance['name']
def fake_db_get(context, instance_uuid):
return exp_instance
self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
instance = self.compute_api.get(c, exp_instance['uuid'])
self.assertEquals(expected, instance)
def test_get_with_integer_id(self):
"""Test get instance with an integer id"""
c = context.get_admin_context()
exp_instance = self._create_fake_instance()
expected = dict(exp_instance.iteritems())
expected['name'] = exp_instance['name']
def fake_db_get(context, instance_id):
return exp_instance
self.stubs.Set(db, 'instance_get', fake_db_get)
instance = self.compute_api.get(c, exp_instance['id'])
self.assertEquals(expected, instance)
def test_get_all_by_name_regexp(self):
"""Test searching instances by name (display_name)"""
c = context.get_admin_context()
instance1 = self._create_fake_instance({'display_name': 'woot'})
instance2 = self._create_fake_instance({
'display_name': 'woo'})
instance3 = self._create_fake_instance({
'display_name': 'not-woot'})
instances = self.compute_api.get_all(c,
search_opts={'name': '^woo.*'})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance1['uuid'] in instance_uuids)
self.assertTrue(instance2['uuid'] in instance_uuids)
instances = self.compute_api.get_all(c,
search_opts={'name': '^woot.*'})
instance_uuids = [instance['uuid'] for instance in instances]
self.assertEqual(len(instances), 1)
self.assertTrue(instance1['uuid'] in instance_uuids)
instances = self.compute_api.get_all(c,
search_opts={'name': '.*oot.*'})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance1['uuid'] in instance_uuids)
self.assertTrue(instance3['uuid'] in instance_uuids)
instances = self.compute_api.get_all(c,
search_opts={'name': '^n.*'})
self.assertEqual(len(instances), 1)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance3['uuid'] in instance_uuids)
instances = self.compute_api.get_all(c,
search_opts={'name': 'noth.*'})
self.assertEqual(len(instances), 0)
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_multiple_options_at_once(self):
"""Test searching by multiple options at once"""
c = context.get_admin_context()
network_manager = fake_network.FakeNetworkManager()
self.stubs.Set(self.compute_api.network_api,
'get_instance_uuids_by_ip_filter',
network_manager.get_instance_uuids_by_ip_filter)
instance1 = self._create_fake_instance({
'display_name': 'woot',
'id': 0,
'uuid': '00000000-0000-0000-0000-000000000010'})
instance2 = self._create_fake_instance({
'display_name': 'woo',
'id': 20,
'uuid': '00000000-0000-0000-0000-000000000020'})
instance3 = self._create_fake_instance({
'display_name': 'not-woot',
'id': 30,
'uuid': '00000000-0000-0000-0000-000000000030'})
# ip ends up matching 2nd octet here.. so all 3 match ip
# but 'name' only matches one
instances = self.compute_api.get_all(c,
search_opts={'ip': '.*\.1', 'name': 'not.*'})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance3['uuid'])
        # ip ends up matching any ip with a '1' in the last octet..
        # so instances 1 and 3.. but 'name' should only match instance 1
instances = self.compute_api.get_all(c,
search_opts={'ip': '.*\.1$', 'name': '^woo.*'})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance1['uuid'])
        # same as above but no match overall (name matches instance1
        # but the ip query doesn't)
instances = self.compute_api.get_all(c,
search_opts={'ip': '.*\.2$', 'name': '^woot.*'})
self.assertEqual(len(instances), 0)
# ip matches all 3... ipv6 matches #2+#3...name matches #3
instances = self.compute_api.get_all(c,
search_opts={'ip': '.*\.1',
'name': 'not.*',
'ip6': '^.*12.*34.*'})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance3['uuid'])
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_image(self):
"""Test searching instances by image"""
c = context.get_admin_context()
instance1 = self._create_fake_instance({'image_ref': '1234'})
instance2 = self._create_fake_instance({'image_ref': '4567'})
instance3 = self._create_fake_instance({'image_ref': '4567'})
instances = self.compute_api.get_all(c, search_opts={'image': '123'})
self.assertEqual(len(instances), 0)
instances = self.compute_api.get_all(c, search_opts={'image': '1234'})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance1['uuid'])
instances = self.compute_api.get_all(c, search_opts={'image': '4567'})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance2['uuid'] in instance_uuids)
self.assertTrue(instance3['uuid'] in instance_uuids)
# Test passing a list as search arg
instances = self.compute_api.get_all(c,
search_opts={'image': ['1234', '4567']})
self.assertEqual(len(instances), 3)
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_flavor(self):
"""Test searching instances by image"""
c = context.get_admin_context()
instance1 = self._create_fake_instance({'instance_type_id': 1})
instance2 = self._create_fake_instance({'instance_type_id': 2})
instance3 = self._create_fake_instance({'instance_type_id': 2})
# NOTE(comstud): Migrations set up the instance_types table
# for us. Therefore, we assume the following is true for
# these tests:
# instance_type_id 1 == flavor 3
# instance_type_id 2 == flavor 1
# instance_type_id 3 == flavor 4
# instance_type_id 4 == flavor 5
# instance_type_id 5 == flavor 2
instances = self.compute_api.get_all(c,
search_opts={'flavor': 5})
self.assertEqual(len(instances), 0)
# ensure unknown filter maps to an empty list, not an exception
instances = self.compute_api.get_all(c, search_opts={'flavor': 99})
self.assertEqual(instances, [])
instances = self.compute_api.get_all(c, search_opts={'flavor': 3})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['id'], instance1['id'])
instances = self.compute_api.get_all(c, search_opts={'flavor': 1})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance2['uuid'] in instance_uuids)
self.assertTrue(instance3['uuid'] in instance_uuids)
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_state(self):
"""Test searching instances by state"""
c = context.get_admin_context()
instance1 = self._create_fake_instance({
'power_state': power_state.SHUTDOWN,
})
instance2 = self._create_fake_instance({
'power_state': power_state.RUNNING,
})
instance3 = self._create_fake_instance({
'power_state': power_state.RUNNING,
})
instances = self.compute_api.get_all(c,
search_opts={'power_state': power_state.SUSPENDED})
self.assertEqual(len(instances), 0)
instances = self.compute_api.get_all(c,
search_opts={'power_state': power_state.SHUTDOWN})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance1['uuid'])
instances = self.compute_api.get_all(c,
search_opts={'power_state': power_state.RUNNING})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance2['uuid'] in instance_uuids)
self.assertTrue(instance3['uuid'] in instance_uuids)
# Test passing a list as search arg
instances = self.compute_api.get_all(c,
search_opts={'power_state': [power_state.SHUTDOWN,
power_state.RUNNING]})
self.assertEqual(len(instances), 3)
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_metadata(self):
"""Test searching instances by metadata"""
c = context.get_admin_context()
instance0 = self._create_fake_instance()
instance1 = self._create_fake_instance({
'metadata': {'key1': 'value1'}})
instance2 = self._create_fake_instance({
'metadata': {'key2': 'value2'}})
instance3 = self._create_fake_instance({
'metadata': {'key3': 'value3'}})
instance4 = self._create_fake_instance({
'metadata': {'key3': 'value3',
'key4': 'value4'}})
# get all instances
instances = self.compute_api.get_all(c,
search_opts={'metadata': {}})
self.assertEqual(len(instances), 5)
# wrong key/value combination
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key1': 'value3'}})
self.assertEqual(len(instances), 0)
# non-existing keys
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key5': 'value1'}})
self.assertEqual(len(instances), 0)
# find existing instance
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key2': 'value2'}})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance2['uuid'])
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key3': 'value3'}})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertTrue(instance3['uuid'] in instance_uuids)
self.assertTrue(instance4['uuid'] in instance_uuids)
# multiple criteria as a dict
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key3': 'value3',
'key4': 'value4'}})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance4['uuid'])
# multiple criteria as a list
instances = self.compute_api.get_all(c,
search_opts={'metadata': [{'key4': 'value4'},
{'key3': 'value3'}]})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance4['uuid'])
db.instance_destroy(c, instance0['uuid'])
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
db.instance_destroy(c, instance4['uuid'])
def test_instance_metadata(self):
meta_changes = [None]
self.flags(notify_on_any_change=True)
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
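# Explanatory note (not part of the original test): the diff recorded in
# meta_changes is assumed to follow change_instance_metadata's convention of
# '+' for keys that were added or changed and '-' for keys that were removed,
# e.g. {'key2': ['+', 'value2']} or {'key1': ['-']}. The assertions below
# check meta_changes against exactly this format.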
_context = context.get_admin_context()
instance = self._create_fake_instance({'metadata': {'key1': 'value1'}})
instance = dict(instance)
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, {'key1': 'value1'})
self.compute_api.update_instance_metadata(_context, instance,
{'key2': 'value2'})
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, {'key1': 'value1', 'key2': 'value2'})
self.assertEqual(meta_changes, [{'key2': ['+', 'value2']}])
self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
msg = test_notifier.NOTIFICATIONS[0]
payload = msg['payload']
self.assertTrue('metadata' in payload)
self.assertEquals(payload['metadata'], metadata)
new_metadata = {'key2': 'bah', 'key3': 'value3'}
self.compute_api.update_instance_metadata(_context, instance,
new_metadata, delete=True)
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, new_metadata)
self.assertEqual(meta_changes, [{
'key1': ['-'],
'key2': ['+', 'bah'],
'key3': ['+', 'value3'],
}])
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
msg = test_notifier.NOTIFICATIONS[1]
payload = msg['payload']
self.assertTrue('metadata' in payload)
self.assertEquals(payload['metadata'], metadata)
self.compute_api.delete_instance_metadata(_context, instance, 'key2')
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, {'key3': 'value3'})
self.assertEqual(meta_changes, [{'key2': ['-']}])
self.assertEquals(len(test_notifier.NOTIFICATIONS), 3)
msg = test_notifier.NOTIFICATIONS[2]
payload = msg['payload']
self.assertTrue('metadata' in payload)
self.assertEquals(payload['metadata'], {})
db.instance_destroy(_context, instance['uuid'])
def test_get_instance_faults(self):
"""Get an instances latest fault"""
instance = self._create_fake_instance()
fault_fixture = {
'code': 404,
'instance_uuid': instance['uuid'],
'message': "HTTPNotFound",
'details': "Stock details for test",
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
}
def return_fault(_ctxt, instance_uuids):
return dict.fromkeys(instance_uuids, [fault_fixture])
self.stubs.Set(nova.db,
'instance_fault_get_by_instance_uuids',
return_fault)
_context = context.get_admin_context()
output = self.compute_api.get_instance_faults(_context, [instance])
expected = {instance['uuid']: [fault_fixture]}
self.assertEqual(output, expected)
db.instance_destroy(_context, instance['uuid'])
@staticmethod
def _parse_db_block_device_mapping(bdm_ref):
attr_list = ('delete_on_termination', 'device_name', 'no_device',
'virtual_name', 'volume_id', 'volume_size', 'snapshot_id')
bdm = {}
for attr in attr_list:
val = bdm_ref.get(attr, None)
if val:
bdm[attr] = val
return bdm
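# Illustrative sketch (not part of the original tests): given a DB row such as
#     {'id': 1, 'device_name': '/dev/sdb1', 'virtual_name': 'swap',
#      'volume_size': 1, 'created_at': None}
# the helper above keeps only the whitelisted, truthy attributes and returns
#     {'device_name': '/dev/sdb1', 'virtual_name': 'swap', 'volume_size': 1}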
def test_update_block_device_mapping(self):
swap_size = 1
instance_type = {'swap': swap_size}
instance = self._create_fake_instance()
mappings = [
{'virtual': 'ami', 'device': 'sda1'},
{'virtual': 'root', 'device': '/dev/sda1'},
{'virtual': 'swap', 'device': 'sdb4'},
{'virtual': 'swap', 'device': 'sdb3'},
{'virtual': 'swap', 'device': 'sdb2'},
{'virtual': 'swap', 'device': 'sdb1'},
{'virtual': 'ephemeral0', 'device': 'sdc1'},
{'virtual': 'ephemeral1', 'device': 'sdc2'},
{'virtual': 'ephemeral2', 'device': 'sdc3'}]
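# Clarifying note (not part of the original test): the image mappings above use
# the {'virtual': ..., 'device': ...} style taken from image properties, while
# block_device_mapping below uses per-device dicts keyed by 'device_name' with
# 'snapshot_id', 'no_device', 'delete_on_termination', etc.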
block_device_mapping = [
# root
{'device_name': '/dev/sda1',
'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
'delete_on_termination': False},
# overwrite swap
{'device_name': '/dev/sdb2',
'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
'delete_on_termination': False},
{'device_name': '/dev/sdb3',
'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
{'device_name': '/dev/sdb4',
'no_device': True},
# overwrite ephemeral
{'device_name': '/dev/sdc2',
'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
'delete_on_termination': False},
{'device_name': '/dev/sdc3',
'snapshot_id': '44444444-aaaa-bbbb-cccc-444444444444'},
{'device_name': '/dev/sdc4',
'no_device': True},
# volume
{'device_name': '/dev/sdd1',
'snapshot_id': '55555555-aaaa-bbbb-cccc-555555555555',
'delete_on_termination': False},
{'device_name': '/dev/sdd2',
'snapshot_id': '66666666-aaaa-bbbb-cccc-666666666666'},
{'device_name': '/dev/sdd3',
'snapshot_id': '77777777-aaaa-bbbb-cccc-777777777777'},
{'device_name': '/dev/sdd4',
'no_device': True}]
self.compute_api._update_image_block_device_mapping(
self.context, instance_type, instance['uuid'], mappings)
bdms = [self._parse_db_block_device_mapping(bdm_ref)
for bdm_ref in db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid'])]
expected_result = [
{'virtual_name': 'swap', 'device_name': '/dev/sdb1',
'volume_size': swap_size},
{'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1'},
# NOTE(yamahata): ATM only ephemeral0 is supported;
# ephemeral1 and ephemeral2 are ignored for now
#{'virtual_name': 'ephemeral1', 'device_name': '/dev/sdc2'},
#{'virtual_name': 'ephemeral2', 'device_name': '/dev/sdc3'}
]
bdms.sort()
expected_result.sort()
self.assertDictListMatch(bdms, expected_result)
self.compute_api._update_block_device_mapping(
self.context, instance_types.get_default_instance_type(),
instance['uuid'], block_device_mapping)
bdms = [self._parse_db_block_device_mapping(bdm_ref)
for bdm_ref in db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid'])]
expected_result = [
{'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
'device_name': '/dev/sda1'},
{'virtual_name': 'swap', 'device_name': '/dev/sdb1',
'volume_size': swap_size},
{'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
'device_name': '/dev/sdb2'},
{'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222',
'device_name': '/dev/sdb3'},
{'no_device': True, 'device_name': '/dev/sdb4'},
{'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1'},
{'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
'device_name': '/dev/sdc2'},
{'snapshot_id': '44444444-aaaa-bbbb-cccc-444444444444',
'device_name': '/dev/sdc3'},
{'no_device': True, 'device_name': '/dev/sdc4'},
{'snapshot_id': '55555555-aaaa-bbbb-cccc-555555555555',
'device_name': '/dev/sdd1'},
{'snapshot_id': '66666666-aaaa-bbbb-cccc-666666666666',
'device_name': '/dev/sdd2'},
{'snapshot_id': '77777777-aaaa-bbbb-cccc-777777777777',
'device_name': '/dev/sdd3'},
{'no_device': True, 'device_name': '/dev/sdd4'}]
bdms.sort()
expected_result.sort()
self.assertDictListMatch(bdms, expected_result)
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid']):
db.block_device_mapping_destroy(self.context, bdm['id'])
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.compute.terminate_instance(self.context, instance)
def test_volume_size(self):
ephemeral_size = 2
swap_size = 3
inst_type = {'ephemeral_gb': ephemeral_size, 'swap': swap_size}
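# Expected behaviour exercised below (explanatory comment, not part of the
# original test): _volume_size returns the flavor's swap size for 'swap',
# ephemeral_gb for 'ephemeral0', and 0 for any other ephemeral name.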
self.assertEqual(self.compute_api._volume_size(inst_type,
'ephemeral0'),
ephemeral_size)
self.assertEqual(self.compute_api._volume_size(inst_type,
'ephemeral1'),
0)
self.assertEqual(self.compute_api._volume_size(inst_type,
'swap'),
swap_size)
def test_reservation_id_one_instance(self):
"""Verify building an instance has a reservation_id that
matches return value from create"""
(refs, resv_id) = self.compute_api.create(self.context,
instance_types.get_default_instance_type(), None)
try:
self.assertEqual(len(refs), 1)
self.assertEqual(refs[0]['reservation_id'], resv_id)
finally:
db.instance_destroy(self.context, refs[0]['uuid'])
def test_reservation_ids_two_instances(self):
"""Verify building 2 instances at once results in a
reservation_id being returned equal to reservation id set
in both instances
"""
(refs, resv_id) = self.compute_api.create(self.context,
instance_types.get_default_instance_type(), None,
min_count=2, max_count=2)
try:
self.assertEqual(len(refs), 2)
self.assertNotEqual(resv_id, None)
finally:
for instance in refs:
self.assertEqual(instance['reservation_id'], resv_id)
db.instance_destroy(self.context, refs[0]['uuid'])
def test_instance_architecture(self):
"""Test the instance architecture"""
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['architecture'], 'x86_64')
db.instance_destroy(self.context, i_ref['uuid'])
def test_instance_unknown_architecture(self):
"""Test if the architecture is unknown."""
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'architecture': ''}))
try:
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(context.get_admin_context())
instance = instances[0]
self.assertNotEqual(instance['architecture'], 'Unknown')
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_instance_name_template(self):
"""Test the instance_name template"""
self.flags(instance_name_template='instance-%d')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], 'instance-%d' % i_ref['id'])
db.instance_destroy(self.context, i_ref['uuid'])
self.flags(instance_name_template='instance-%(uuid)s')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], 'instance-%s' % i_ref['uuid'])
db.instance_destroy(self.context, i_ref['uuid'])
self.flags(instance_name_template='%(id)d-%(uuid)s')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], '%d-%s' %
(i_ref['id'], i_ref['uuid']))
db.instance_destroy(self.context, i_ref['uuid'])
# '%(name)s' is not allowed in the template; the name defaults to the uuid
self.flags(instance_name_template='%(name)s')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], i_ref['uuid'])
db.instance_destroy(self.context, i_ref['uuid'])
def test_add_remove_fixed_ip(self):
instance = self._create_fake_instance(params={'host': FLAGS.host})
self.compute_api.add_fixed_ip(self.context, instance, '1')
self.compute_api.remove_fixed_ip(self.context, instance, '192.168.1.1')
self.compute_api.delete(self.context, instance)
def test_attach_volume_invalid(self):
self.assertRaises(exception.InvalidDevicePath,
self.compute_api.attach_volume,
self.context,
{'locked': False},
None,
'/invalid')
def test_vnc_console(self):
"""Make sure we can a vnc console for an instance."""
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
fake_console_type = "novnc"
fake_connect_info = {'token': 'fake_token',
'console_type': fake_console_type,
'host': 'fake_console_host',
'port': 'fake_console_port',
'internal_access_path': 'fake_access_path'}
fake_connect_info2 = copy.deepcopy(fake_connect_info)
fake_connect_info2['access_url'] = 'fake_console_url'
self.mox.StubOutWithMock(rpc, 'call')
rpc_msg1 = {'method': 'get_vnc_console',
'args': {'instance': fake_instance,
'console_type': fake_console_type},
'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}
rpc_msg2 = {'method': 'authorize_console',
'args': fake_connect_info,
'version': '1.0'}
rpc.call(self.context, 'compute.%s' % fake_instance['host'],
rpc_msg1, None).AndReturn(fake_connect_info2)
rpc.call(self.context, FLAGS.consoleauth_topic,
rpc_msg2, None).AndReturn(None)
self.mox.ReplayAll()
console = self.compute_api.get_vnc_console(self.context,
fake_instance, fake_console_type)
self.assertEqual(console, {'url': 'fake_console_url'})
def test_get_vnc_console_no_host(self):
instance = self._create_fake_instance(params={'host': ''})
self.assertRaises(exception.InstanceNotReady,
self.compute_api.get_vnc_console,
self.context, instance, 'novnc')
db.instance_destroy(self.context, instance['uuid'])
def test_console_output(self):
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
fake_tail_length = 699
fake_console_output = 'fake console output'
self.mox.StubOutWithMock(rpc, 'call')
rpc_msg = {'method': 'get_console_output',
'args': {'instance': fake_instance,
'tail_length': fake_tail_length},
'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}
rpc.call(self.context, 'compute.%s' % fake_instance['host'],
rpc_msg, None).AndReturn(fake_console_output)
self.mox.ReplayAll()
output = self.compute_api.get_console_output(self.context,
fake_instance, tail_length=fake_tail_length)
self.assertEqual(output, fake_console_output)
def test_attach_volume(self):
"""Ensure instance can be soft rebooted"""
called = {}
def fake_check_attach(*args, **kwargs):
called['fake_check_attach'] = True
def fake_reserve_volume(*args, **kwargs):
called['fake_reserve_volume'] = True
def fake_volume_get(self, context, volume_id):
called['fake_volume_get'] = True
return {'id': volume_id}
def fake_rpc_attach_volume(self, context, **kwargs):
called['fake_rpc_attach_volume'] = True
self.stubs.Set(nova.volume.api.API, 'get', fake_volume_get)
self.stubs.Set(nova.volume.api.API, 'check_attach', fake_check_attach)
self.stubs.Set(nova.volume.api.API, 'reserve_volume',
fake_reserve_volume)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
fake_rpc_attach_volume)
instance = self._create_fake_instance()
self.compute_api.attach_volume(self.context, instance, 1, '/dev/vdb')
self.assertTrue(called.get('fake_check_attach'))
self.assertTrue(called.get('fake_reserve_volume'))
self.assertTrue(called.get('fake_volume_get'))
self.assertTrue(called.get('fake_rpc_attach_volume'))
def test_attach_volume_no_device(self):
called = {}
def fake_check_attach(*args, **kwargs):
called['fake_check_attach'] = True
def fake_reserve_volume(*args, **kwargs):
called['fake_reserve_volume'] = True
def fake_volume_get(self, context, volume_id):
called['fake_volume_get'] = True
return {'id': volume_id}
def fake_rpc_attach_volume(self, context, **kwargs):
called['fake_rpc_attach_volume'] = True
self.stubs.Set(nova.volume.api.API, 'get', fake_volume_get)
self.stubs.Set(nova.volume.api.API, 'check_attach', fake_check_attach)
self.stubs.Set(nova.volume.api.API, 'reserve_volume',
fake_reserve_volume)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
fake_rpc_attach_volume)
instance = self._create_fake_instance()
self.compute_api.attach_volume(self.context, instance, 1, device=None)
self.assertTrue(called.get('fake_check_attach'))
self.assertTrue(called.get('fake_reserve_volume'))
self.assertTrue(called.get('fake_volume_get'))
self.assertTrue(called.get('fake_rpc_attach_volume'))
def test_inject_network_info(self):
instance = self._create_fake_instance(params={'host': FLAGS.host})
self.compute.run_instance(self.context,
instance=jsonutils.to_primitive(instance))
instance = self.compute_api.get(self.context, instance['uuid'])
self.compute_api.inject_network_info(self.context, instance)
self.compute_api.delete(self.context, instance)
def test_reset_network(self):
instance = self._create_fake_instance()
self.compute.run_instance(self.context,
instance=jsonutils.to_primitive(instance))
instance = self.compute_api.get(self.context, instance['uuid'])
self.compute_api.reset_network(self.context, instance)
def test_lock(self):
instance = self._create_fake_instance()
self.compute_api.lock(self.context, instance)
self.compute_api.delete(self.context, instance)
def test_unlock(self):
instance = self._create_fake_instance()
self.compute_api.unlock(self.context, instance)
self.compute_api.delete(self.context, instance)
def test_get_lock(self):
instance = self._create_fake_instance()
self.assertFalse(self.compute_api.get_lock(self.context, instance))
db.instance_update(self.context, instance['uuid'], {'locked': True})
self.assertTrue(self.compute_api.get_lock(self.context, instance))
def test_add_remove_security_group(self):
instance = self._create_fake_instance()
self.compute.run_instance(self.context,
instance=jsonutils.to_primitive(instance))
instance = self.compute_api.get(self.context, instance['uuid'])
security_group_name = self._create_group()['name']
self.security_group_api.add_to_instance(self.context,
instance,
security_group_name)
self.security_group_api.remove_from_instance(self.context,
instance,
security_group_name)
def test_get_diagnostics(self):
instance = self._create_fake_instance()
self.compute_api.get_diagnostics(self.context, instance)
self.compute_api.delete(self.context, instance)
def test_inject_file(self):
"""Ensure we can write a file to an instance"""
instance = self._create_fake_instance()
self.compute_api.inject_file(self.context, instance,
"/tmp/test", "File Contents")
db.instance_destroy(self.context, instance['uuid'])
def test_secgroup_refresh(self):
instance = self._create_fake_instance()
def rule_get(*args, **kwargs):
mock_rule = FakeModel({'parent_group_id': 1})
return [mock_rule]
def group_get(*args, **kwargs):
mock_group = FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(
self.compute_api.db,
'security_group_rule_get_by_security_group_grantee',
rule_get)
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
topic = rpc.queue_get_for(self.context, FLAGS.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
"args": {'instance': jsonutils.to_primitive(instance)},
"version":
compute_rpcapi.SecurityGroupAPI.BASE_RPC_API_VERSION})
self.mox.ReplayAll()
self.security_group_api.trigger_members_refresh(self.context, [1])
def test_secgroup_refresh_once(self):
instance = self._create_fake_instance()
def rule_get(*args, **kwargs):
mock_rule = FakeModel({'parent_group_id': 1})
return [mock_rule]
def group_get(*args, **kwargs):
mock_group = FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(
self.compute_api.db,
'security_group_rule_get_by_security_group_grantee',
rule_get)
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
topic = rpc.queue_get_for(self.context, FLAGS.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
"args": {'instance': jsonutils.to_primitive(instance)},
"version":
compute_rpcapi.SecurityGroupAPI.BASE_RPC_API_VERSION})
self.mox.ReplayAll()
self.security_group_api.trigger_members_refresh(self.context, [1, 2])
def test_secgroup_refresh_none(self):
def rule_get(*args, **kwargs):
mock_rule = FakeModel({'parent_group_id': 1})
return [mock_rule]
def group_get(*args, **kwargs):
mock_group = FakeModel({'instances': []})
return mock_group
self.stubs.Set(
self.compute_api.db,
'security_group_rule_get_by_security_group_grantee',
rule_get)
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
self.mox.ReplayAll()
self.security_group_api.trigger_members_refresh(self.context, [1])
def test_secrule_refresh(self):
instance = self._create_fake_instance()
def group_get(*args, **kwargs):
mock_group = FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
topic = rpc.queue_get_for(self.context, FLAGS.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
"args": {'instance': jsonutils.to_primitive(instance)},
"version":
compute_rpcapi.SecurityGroupAPI.BASE_RPC_API_VERSION})
self.mox.ReplayAll()
self.security_group_api.trigger_rules_refresh(self.context, [1])
def test_secrule_refresh_once(self):
instance = self._create_fake_instance()
def group_get(*args, **kwargs):
mock_group = FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
topic = rpc.queue_get_for(self.context, FLAGS.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
"args": {'instance': jsonutils.to_primitive(instance)},
"version":
compute_rpcapi.SecurityGroupAPI.BASE_RPC_API_VERSION})
self.mox.ReplayAll()
self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
def test_secrule_refresh_none(self):
def group_get(*args, **kwargs):
mock_group = FakeModel({'instances': []})
return mock_group
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
self.mox.ReplayAll()
self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
def test_live_migrate(self):
instance, instance_uuid = self._run_instance()
self.compute_api.live_migrate(self.context, instance,
block_migration=True,
disk_over_commit=True,
host='fake_dest_host')
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.MIGRATING)
db.instance_destroy(self.context, instance['uuid'])
def fake_rpc_method(context, topic, msg, do_cast=True):
pass
def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
'fake_host2'],
'avail_zone2': ['fake_host3'], }):
for avail_zone, hosts in values.iteritems():
for host in hosts:
db.service_create(context,
{'host': host,
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0,
'availability_zone': avail_zone})
return values
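# Note (not part of the original tests): _create_service_entries returns the
# same availability-zone -> hosts mapping it was given, after creating one
# nova-compute service row per host in each zone.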
class ComputeAPIAggrTestCase(BaseTestCase):
"""This is for unit coverage of aggregate-related methods
defined in nova.compute.api."""
def setUp(self):
super(ComputeAPIAggrTestCase, self).setUp()
self.api = compute_api.AggregateAPI()
self.context = context.get_admin_context()
self.stubs.Set(rpc, 'call', fake_rpc_method)
self.stubs.Set(rpc, 'cast', fake_rpc_method)
def test_create_invalid_availability_zone(self):
"""Ensure InvalidAggregateAction is raised with wrong avail_zone."""
self.assertRaises(exception.InvalidAggregateAction,
self.api.create_aggregate,
self.context, 'fake_aggr', 'fake_avail_zone')
def test_update_aggregate_metadata(self):
"""Ensure metadata can be updated"""
_create_service_entries(self.context, {'fake_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
metadata = {'foo_key1': 'foo_value1',
'foo_key2': 'foo_value2', }
aggr = self.api.update_aggregate_metadata(self.context, aggr['id'],
metadata)
metadata['foo_key1'] = None
expected = self.api.update_aggregate_metadata(self.context,
aggr['id'], metadata)
self.assertDictMatch(expected['metadata'], {'foo_key2': 'foo_value2'})
def test_delete_aggregate(self):
"""Ensure we can delete an aggregate."""
_create_service_entries(self.context, {'fake_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.api.delete_aggregate(self.context, aggr['id'])
expected = db.aggregate_get(self.context.elevated(read_deleted='yes'),
aggr['id'])
self.assertRaises(exception.AggregateNotFound,
self.api.delete_aggregate, self.context, aggr['id'])
def test_delete_non_empty_aggregate(self):
"""Ensure InvalidAggregateAction is raised when non empty aggregate."""
_create_service_entries(self.context,
{'fake_availability_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_availability_zone')
self.api.add_host_to_aggregate(self.context, aggr['id'], 'fake_host')
self.assertRaises(exception.InvalidAggregateAction,
self.api.delete_aggregate, self.context, aggr['id'])
def test_add_host_to_aggregate(self):
"""Ensure we can add a host to an aggregate."""
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], fake_host)
self.assertEqual(len(aggr['hosts']), 1)
def test_add_host_to_aggregate_multiple(self):
"""Ensure we can add multiple hosts to an aggregate."""
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
for host in values[fake_zone]:
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], host)
self.assertEqual(len(aggr['hosts']), len(values[fake_zone]))
def test_add_host_to_aggregate_zones_mismatch(self):
"""Ensure InvalidAggregateAction is raised when zones don't match."""
_create_service_entries(self.context, {'fake_zoneX': ['fake_host1'],
'fake_zoneY': ['fake_host2']})
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', 'fake_zoneY')
self.assertRaises(exception.InvalidAggregateAction,
self.api.add_host_to_aggregate,
self.context, aggr['id'], 'fake_host1')
def test_add_host_to_aggregate_raise_not_found(self):
"""Ensure ComputeHostNotFound is raised when adding invalid host."""
_create_service_entries(self.context, {'fake_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.assertRaises(exception.ComputeHostNotFound,
self.api.add_host_to_aggregate,
self.context, aggr['id'], 'invalid_host')
def test_remove_host_from_aggregate_active(self):
"""Ensure we can remove a host from an aggregate."""
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
for host in values[fake_zone]:
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], host)
expected = self.api.remove_host_from_aggregate(self.context,
aggr['id'],
values[fake_zone][0])
self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
def test_remove_host_from_aggregate_raise_not_found(self):
"""Ensure ComputeHostNotFound is raised when removing invalid host."""
_create_service_entries(self.context, {'fake_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.assertRaises(exception.ComputeHostNotFound,
self.api.remove_host_from_aggregate,
self.context, aggr['id'], 'invalid_host')
class ComputeAggrTestCase(BaseTestCase):
"""This is for unit coverage of aggregate-related methods
defined in nova.compute.manager."""
def setUp(self):
super(ComputeAggrTestCase, self).setUp()
self.context = context.get_admin_context()
values = {'name': 'test_aggr',
'availability_zone': 'test_zone'}
self.aggr = db.aggregate_create(self.context, values)
def test_add_aggregate_host(self):
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
fake_driver_add_to_aggregate.called = True
return {"foo": "bar"}
self.stubs.Set(self.compute.driver, "add_to_aggregate",
fake_driver_add_to_aggregate)
self.compute.add_aggregate_host(self.context, self.aggr.id, "host")
self.assertTrue(fake_driver_add_to_aggregate.called)
def test_remove_aggregate_host(self):
def fake_driver_remove_from_aggregate(context, aggregate, host,
**_ignore):
fake_driver_remove_from_aggregate.called = True
self.assertEqual("host", host, "host")
return {"foo": "bar"}
self.stubs.Set(self.compute.driver, "remove_from_aggregate",
fake_driver_remove_from_aggregate)
self.compute.remove_aggregate_host(self.context, self.aggr.id, "host")
self.assertTrue(fake_driver_remove_from_aggregate.called)
def test_add_aggregate_host_passes_slave_info_to_driver(self):
def driver_add_to_aggregate(context, aggregate, host, **kwargs):
self.assertEquals(self.context, context)
self.assertEquals(aggregate.id, self.aggr.id)
self.assertEquals(host, "the_host")
self.assertEquals("SLAVE_INFO", kwargs.get("slave_info"))
self.stubs.Set(self.compute.driver, "add_to_aggregate",
driver_add_to_aggregate)
self.compute.add_aggregate_host(self.context, self.aggr.id,
"the_host", slave_info="SLAVE_INFO")
def test_remove_from_aggregate_passes_slave_info_to_driver(self):
def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
self.assertEquals(self.context, context)
self.assertEquals(aggregate.id, self.aggr.id)
self.assertEquals(host, "the_host")
self.assertEquals("SLAVE_INFO", kwargs.get("slave_info"))
self.stubs.Set(self.compute.driver, "remove_from_aggregate",
driver_remove_from_aggregate)
self.compute.remove_aggregate_host(self.context,
self.aggr.id, "the_host", slave_info="SLAVE_INFO")
class ComputePolicyTestCase(BaseTestCase):
def setUp(self):
super(ComputePolicyTestCase, self).setUp()
nova.policy.reset()
nova.policy.init()
self.compute_api = compute.API()
def tearDown(self):
super(ComputePolicyTestCase, self).tearDown()
nova.policy.reset()
def _set_rules(self, rules):
common_policy.set_brain(common_policy.HttpBrain(rules))
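# Policy rules used below follow the common_policy brain format: an empty list
# ([]) allows the action for everyone, while [["false:false"]] is a check that
# can never pass and therefore denies it (explanatory comment, not part of the
# original tests).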
def test_actions_are_prefixed(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
nova.policy.enforce(self.context, 'compute:reboot', {})
self.mox.ReplayAll()
nova.compute.api.check_policy(self.context, 'reboot', {})
def test_wrapped_method(self):
instance = self._create_fake_instance(params={'host': None})
# force delete to fail
rules = {"compute:delete": [["false:false"]]}
self._set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.delete, self.context, instance)
# reset rules to allow deletion
rules = {"compute:delete": []}
self._set_rules(rules)
self.compute_api.delete(self.context, instance)
def test_create_fail(self):
rules = {"compute:create": [["false:false"]]}
self._set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1')
def test_create_attach_volume_fail(self):
rules = {
"compute:create": [],
"compute:create:attach_network": [["false:false"]],
"compute:create:attach_volume": [],
}
self._set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1',
requested_networks='blah',
block_device_mapping='blah')
def test_create_attach_network_fail(self):
rules = {
"compute:create": [],
"compute:create:attach_network": [],
"compute:create:attach_volume": [["false:false"]],
}
self._set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1',
requested_networks='blah',
block_device_mapping='blah')
def test_get_fail(self):
instance = self._create_fake_instance()
rules = {
"compute:get": [["false:false"]],
}
self._set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get, self.context, instance['uuid'])
def test_get_all_fail(self):
rules = {
"compute:get_all": [["false:false"]],
}
self._set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_all, self.context)
def test_get_instance_faults(self):
instance1 = self._create_fake_instance()
instance2 = self._create_fake_instance()
instances = [instance1, instance2]
rules = {
"compute:get_instance_faults": [["false:false"]],
}
self._set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_instance_faults,
self.context, instances)
class ComputeHostAPITestCase(BaseTestCase):
def setUp(self):
super(ComputeHostAPITestCase, self).setUp()
self.host_api = compute_api.HostAPI()
def _rpc_call_stub(self, call_info):
def fake_rpc_call(context, topic, msg, timeout=None):
call_info['context'] = context
call_info['topic'] = topic
call_info['msg'] = msg
self.stubs.Set(rpc, 'call', fake_rpc_call)
def test_set_host_enabled(self):
ctxt = context.RequestContext('fake', 'fake')
call_info = {}
self._rpc_call_stub(call_info)
self.host_api.set_host_enabled(ctxt, 'fake_host', 'fake_enabled')
self.assertEqual(call_info['context'], ctxt)
self.assertEqual(call_info['topic'], 'compute.fake_host')
self.assertEqual(call_info['msg'],
{'method': 'set_host_enabled',
'args': {'enabled': 'fake_enabled'},
'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
def test_get_host_uptime(self):
ctxt = context.RequestContext('fake', 'fake')
call_info = {}
self._rpc_call_stub(call_info)
self.host_api.get_host_uptime(ctxt, 'fake_host')
self.assertEqual(call_info['context'], ctxt)
self.assertEqual(call_info['topic'], 'compute.fake_host')
self.assertEqual(call_info['msg'],
{'method': 'get_host_uptime',
'args': {},
'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
def test_host_power_action(self):
ctxt = context.RequestContext('fake', 'fake')
call_info = {}
self._rpc_call_stub(call_info)
self.host_api.host_power_action(ctxt, 'fake_host', 'fake_action')
self.assertEqual(call_info['context'], ctxt)
self.assertEqual(call_info['topic'], 'compute.fake_host')
self.assertEqual(call_info['msg'],
{'method': 'host_power_action',
'args': {'action': 'fake_action'},
'version':
compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
def test_set_host_maintenance(self):
ctxt = context.RequestContext('fake', 'fake')
call_info = {}
self._rpc_call_stub(call_info)
self.host_api.set_host_maintenance(ctxt, 'fake_host', 'fake_mode')
self.assertEqual(call_info['context'], ctxt)
self.assertEqual(call_info['topic'], 'compute.fake_host')
self.assertEqual(call_info['msg'],
{'method': 'host_maintenance_mode',
'args': {'host': 'fake_host', 'mode': 'fake_mode'},
'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
class KeypairAPITestCase(BaseTestCase):
def setUp(self):
super(KeypairAPITestCase, self).setUp()
self.keypair_api = compute_api.KeypairAPI()
self.ctxt = context.RequestContext('fake', 'fake')
self._keypair_db_call_stubs()
self.existing_key_name = 'fake existing key name'
self.pub_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLnVkqJu9WVf'
'/5StU3JCrBR2r1s1j8K1tux+5XeSvdqaM8lMFNorzbY5iyoBbR'
'S56gy1jmm43QsMPJsrpfUZKcJpRENSe3OxIIwWXRoiapZe78u/'
'a9xKwj0avFYMcws9Rk9iAB7W4K1nEJbyCPl5lRBoyqeHBqrnnu'
'XWEgGxJCK0Ah6wcOzwlEiVjdf4kxzXrwPHyi7Ea1qvnNXTziF8'
'yYmUlH4C8UXfpTQckwSwpDyxZUc63P8q+vPbs3Q2kw+/7vvkCK'
'HJAXVI+oCiyMMfffoTq16M1xfV58JstgtTqAXG+ZFpicGajREU'
'E/E3hO5MGgcHmyzIrWHKpe1n3oEGuz')
self.fingerprint = '4e:48:c6:a0:4a:f9:dd:b5:4c:85:54:5a:af:43:47:5a'
def _keypair_db_call_stubs(self):
def db_key_pair_get_all_by_user(self, user_id):
return []
def db_key_pair_create(self, keypair):
pass
def db_key_pair_destroy(context, user_id, name):
pass
def db_key_pair_get(context, user_id, name):
if name == self.existing_key_name:
return {'name': self.existing_key_name,
'public_key': self.pub_key,
'fingerprint': self.fingerprint}
else:
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
self.stubs.Set(db, "key_pair_get",
db_key_pair_get)
def test_create_keypair(self):
keypair = self.keypair_api.create_key_pair(self.ctxt,
self.ctxt.user_id, 'foo')
self.assertEqual('foo', keypair['name'])
def test_create_keypair_name_too_long(self):
self.assertRaises(exception.InvalidKeypair,
self.keypair_api.create_key_pair,
self.ctxt, self.ctxt.user_id, 'x' * 256)
def test_create_keypair_invalid_chars(self):
self.assertRaises(exception.InvalidKeypair,
self.keypair_api.create_key_pair,
self.ctxt, self.ctxt.user_id, '* BAD CHARACTERS! *')
def test_create_keypair_already_exists(self):
self.assertRaises(exception.KeyPairExists,
self.keypair_api.create_key_pair,
self.ctxt, self.ctxt.user_id,
self.existing_key_name)
def test_create_keypair_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return FLAGS.quota_key_pairs
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
self.assertRaises(exception.KeypairLimitExceeded,
self.keypair_api.create_key_pair,
self.ctxt, self.ctxt.user_id, 'foo')
def test_import_keypair(self):
keypair = self.keypair_api.import_key_pair(self.ctxt,
self.ctxt.user_id,
'foo',
self.pub_key)
self.assertEqual('foo', keypair['name'])
self.assertEqual(self.fingerprint, keypair['fingerprint'])
self.assertEqual(self.pub_key, keypair['public_key'])
def test_import_keypair_bad_public_key(self):
self.assertRaises(exception.InvalidKeypair,
self.keypair_api.import_key_pair,
self.ctxt, self.ctxt.user_id, 'foo', 'bad key data')
def test_import_keypair_name_too_long(self):
self.assertRaises(exception.InvalidKeypair,
self.keypair_api.import_key_pair,
self.ctxt, self.ctxt.user_id, 'x' * 256,
self.pub_key)
def test_import_keypair_invalid_chars(self):
self.assertRaises(exception.InvalidKeypair,
self.keypair_api.import_key_pair,
self.ctxt, self.ctxt.user_id,
'* BAD CHARACTERS! *', self.pub_key)
def test_import_keypair_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return FLAGS.quota_key_pairs
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
self.assertRaises(exception.KeypairLimitExceeded,
self.keypair_api.import_key_pair,
self.ctxt, self.ctxt.user_id, 'foo', self.pub_key)
def test_get_keypair(self):
keypair = self.keypair_api.get_key_pair(self.ctxt,
self.ctxt.user_id,
self.existing_key_name)
self.assertEqual(self.existing_key_name, keypair['name'])
class DisabledInstanceTypesTestCase(BaseTestCase):
"""
Some instance-types are marked 'disabled', which means they will not
show up in customer-facing listings. We do, however, want those
instance-types to be available for emergency migrations and for
rebuilding existing instances.
One legitimate use of the 'disabled' field is phasing out a particular
instance-type. We still want customers to be able to use an instance of
the old type, and we want Ops to be able to perform migrations against
it, but we *don't* want customers building new instances with the
phased-out instance-type.
"""
def setUp(self):
super(DisabledInstanceTypesTestCase, self).setUp()
self.compute_api = compute.API()
self.inst_type = instance_types.get_default_instance_type()
def test_can_build_instance_from_visible_instance_type(self):
self.inst_type['disabled'] = False
self.assertNotRaises(exception.InstanceTypeNotFound,
self.compute_api.create, self.context, self.inst_type, None,
exc_msg="Visible instance-types can be built from")
def test_cannot_build_instance_from_disabled_instance_type(self):
self.inst_type['disabled'] = True
self.assertRaises(exception.InstanceTypeNotFound,
self.compute_api.create, self.context, self.inst_type, None)
def test_can_rebuild_instance_from_visible_instance_type(self):
instance = self._create_fake_instance()
image_href = None
admin_password = 'blah'
instance['instance_type']['disabled'] = False
# Assert no errors were raised
self.assertNotRaises(None,
self.compute_api.rebuild, self.context, instance, image_href,
admin_password,
exc_msg="Visible instance-types can be rebuilt from")
def test_can_rebuild_instance_from_disabled_instance_type(self):
"""
A rebuild or a restore should only change the 'image',
not the 'instance_type'. Therefore, it should be allowed even
when the instance is already on a disabled instance-type.
"""
instance = self._create_fake_instance()
image_href = None
admin_password = 'blah'
instance['instance_type']['disabled'] = True
# Assert no errors were raised
self.assertNotRaises(None,
self.compute_api.rebuild, self.context, instance, image_href,
admin_password,
exc_msg="Disabled instance-types can be rebuilt from")
def test_can_resize_to_visible_instance_type(self):
instance = self._create_fake_instance()
orig_get_instance_type_by_flavor_id =\
instance_types.get_instance_type_by_flavor_id
def fake_get_instance_type_by_flavor_id(flavor_id):
instance_type = orig_get_instance_type_by_flavor_id(flavor_id)
instance_type['disabled'] = False
return instance_type
self.stubs.Set(instance_types, 'get_instance_type_by_flavor_id',
fake_get_instance_type_by_flavor_id)
# FIXME(sirp): for legacy reasons this raises FlavorNotFound instead of
# InstanceTypeNotFound; we should eventually make it raise
# InstanceTypeNotFound for consistency.
self.assertNotRaises(exception.FlavorNotFound,
self.compute_api.resize, self.context, instance, '4',
exc_msg="Visible flavors can be resized to")
def test_cannot_resize_to_disabled_instance_type(self):
instance = self._create_fake_instance()
orig_get_instance_type_by_flavor_id = \
instance_types.get_instance_type_by_flavor_id
def fake_get_instance_type_by_flavor_id(flavor_id):
instance_type = orig_get_instance_type_by_flavor_id(flavor_id)
instance_type['disabled'] = True
return instance_type
self.stubs.Set(instance_types, 'get_instance_type_by_flavor_id',
fake_get_instance_type_by_flavor_id)
# FIXME(sirp): for legacy reasons this raises FlavorNotFound instead of
# InstanceTypeNotFound; we should eventually make it raise
# InstanceTypeNotFound for consistency.
self.assertRaises(exception.FlavorNotFound,
self.compute_api.resize, self.context, instance, '4')
def test_can_migrate_to_visible_instance_type(self):
instance = self._create_fake_instance()
instance['instance_type']['disabled'] = False
# FIXME(sirp): for legacy reasons this raises FlavorNotFound instead of
# InstanceTypeNotFound; we should eventually make it raise
# InstanceTypeNotFound for consistency.
self.assertNotRaises(exception.FlavorNotFound,
self.compute_api.resize, self.context, instance, None,
exc_msg="Visible flavors can be migrated to")
def test_can_migrate_to_disabled_instance_type(self):
"""
We don't want to require a customer's instance-type to change when Ops
is migrating a failed server.
"""
instance = self._create_fake_instance()
instance['instance_type']['disabled'] = True
# FIXME(sirp): for legacy reasons this raises FlavorNotFound instead of
# InstanceTypeNotFound; we should eventually make it raise
# InstanceTypeNotFound for consistency.
self.assertNotRaises(exception.FlavorNotFound,
self.compute_api.resize, self.context, instance, None,
exc_msg="Disabled flavors can be migrated to")
class ComputeReschedulingTestCase(BaseTestCase):
"""Tests related to re-scheduling build requests"""
def setUp(self):
super(ComputeReschedulingTestCase, self).setUp()
self._reschedule = self._reschedule_partial()
def fake_update(*args, **kwargs):
self.updated_task_state = kwargs.get('task_state')
self.stubs.Set(self.compute, '_instance_update', fake_update)
def _reschedule_partial(self):
uuid = "12-34-56-78-90"
requested_networks = None
admin_password = None
injected_files = None
is_first_time = False
return functools.partial(self.compute._reschedule, self.context, uuid,
requested_networks, admin_password, injected_files,
is_first_time, request_spec=None, filter_properties={})
def test_reschedule_no_filter_properties(self):
"""no filter_properties will disable re-scheduling"""
self.assertFalse(self._reschedule())
def test_reschedule_no_retry_info(self):
"""no retry info will also disable re-scheduling"""
filter_properties = {}
self.assertFalse(self._reschedule(filter_properties=filter_properties))
def test_reschedule_no_request_spec(self):
"""no request spec will also disable re-scheduling"""
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
self.assertFalse(self._reschedule(filter_properties=filter_properties))
def test_reschedule_success(self):
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
request_spec = {'instance_uuids': ['foo', 'bar']}
self.assertTrue(self._reschedule(filter_properties=filter_properties,
request_spec=request_spec))
self.assertEqual(1, len(request_spec['instance_uuids']))
self.assertEqual(self.updated_task_state, task_states.SCHEDULING)
class ThatsNoOrdinaryRabbitException(Exception):
pass
class ComputeReschedulingExceptionTestCase(BaseTestCase):
"""Tests for re-scheduling exception handling logic"""
def setUp(self):
super(ComputeReschedulingExceptionTestCase, self).setUp()
# cause _spawn to raise an exception to test the exception logic:
def exploding_spawn(*args, **kwargs):
raise ThatsNoOrdinaryRabbitException()
self.stubs.Set(self.compute, '_spawn',
exploding_spawn)
self.fake_instance = jsonutils.to_primitive(
self._create_fake_instance())
self.instance_uuid = self.fake_instance['uuid']
def test_exception_with_rescheduling_disabled(self):
"""Spawn fails and re-scheduling is disabled."""
# this won't be re-scheduled:
self.assertRaises(ThatsNoOrdinaryRabbitException,
self.compute._run_instance, self.context,
None, {}, None, None, None, None, self.fake_instance)
def test_exception_with_rescheduling_enabled(self):
"""Spawn fails and re-scheduling is enabled. Original exception
should *not* be re-raised.
"""
# provide the expected status so that this one will be re-scheduled:
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
request_spec = dict(num_attempts=1)
self.assertNotRaises(ThatsNoOrdinaryRabbitException,
self.compute._run_instance, self.context,
filter_properties=filter_properties, request_spec=request_spec,
instance=self.fake_instance)
def test_exception_context_cleared(self):
"""Test with no rescheduling and an additional exception occurs
clearing the original build error's exception context.
"""
# clears the original exception context:
class FleshWoundException(Exception):
pass
def reschedule_explode(*args, **kwargs):
raise FleshWoundException()
self.stubs.Set(self.compute, '_reschedule', reschedule_explode)
# the original exception should now be raised:
self.assertRaises(ThatsNoOrdinaryRabbitException,
self.compute._run_instance, self.context,
None, {}, None, None, None, None, self.fake_instance)
|
apache-2.0
|
xzturn/tensorflow
|
tensorflow/python/distribute/parameter_server_strategy_test.py
|
3
|
33048
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ParameterServerStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import threading
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import central_storage_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.estimator import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
CHIEF = run_config.TaskType.CHIEF
WORKER = run_config.TaskType.WORKER
PS = run_config.TaskType.PS
def _get_replica_id_integer():
replica_id = ds_context.get_replica_context().replica_id_in_sync_group
if isinstance(replica_id, ops.Tensor):
replica_id = tensor_util.constant_value(replica_id)
return replica_id
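# Illustrative sketch (not part of the original test file): the integer replica
# id returned above is typically used to build per-replica device strings, e.g.
#
#   replica_id = _get_replica_id_integer()
#   device = '/device:GPU:%d' % replica_id
#
# which mirrors how model_fn picks devices in the tests below.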
def create_test_objects(cluster_spec=None,
task_type=None,
task_id=None,
num_gpus=None,
sess_config=None):
sess_config = sess_config or config_pb2.ConfigProto()
if num_gpus is None:
num_gpus = context.num_gpus()
if cluster_spec and task_type and task_id is not None:
cluster_resolver = SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={'GPU': num_gpus})
distribution = parameter_server_strategy.ParameterServerStrategy(
cluster_resolver)
target = 'grpc://' + cluster_spec[WORKER][task_id]
else:
distribution = (
central_storage_strategy.CentralStorageStrategy._from_num_gpus(num_gpus)
)
target = ''
sess_config = copy.deepcopy(sess_config)
sess_config = distribution.update_config_proto(sess_config)
return distribution, target, sess_config
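# Example usage (hypothetical values, shown for illustration only):
#
#   cluster_spec = {'chief': ['host0:2222'], 'worker': ['host1:2222'],
#                   'ps': ['host2:2222']}
#   strategy, target, config = create_test_objects(
#       cluster_spec=cluster_spec, task_type='worker', task_id=0, num_gpus=0)
#
# With no cluster_spec, a local CentralStorageStrategy and an empty target
# string are returned instead.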
class ParameterServerStrategyTestBase(
multi_worker_test_base.MultiWorkerTestBase):
def setUp(self):
self._result = 0
self._lock = threading.Lock()
self._init_condition = threading.Condition()
self._init_reached = 0
self._finish_condition = threading.Condition()
self._finish_reached = 0
self._sess_config = config_pb2.ConfigProto(allow_soft_placement=True)
super(ParameterServerStrategyTestBase, self).setUp()
def _get_test_objects(self, task_type, task_id, num_gpus):
return create_test_objects(
cluster_spec=self._cluster_spec,
task_type=task_type,
task_id=task_id,
num_gpus=num_gpus,
sess_config=self._sess_config)
def _test_device_assignment_distributed(self, task_type, task_id, num_gpus):
worker_device = '/job:%s/replica:0/task:%d' % (task_type, task_id)
d, _, sess_config = self._get_test_objects(task_type, task_id, num_gpus)
with ops.Graph().as_default(), \
self.cached_session(target=self._default_target,
config=sess_config) as sess, \
d.scope():
# Define a variable outside the call_for_each_replica scope.
n = variable_scope.get_variable('n', initializer=10.0)
self.assertEqual(n.device, '/job:ps/task:0')
def model_fn():
if num_gpus == 0:
last_part_device = 'device:CPU:0'
else:
replica_id = _get_replica_id_integer()
last_part_device = ('device:GPU:%d' % replica_id)
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
c = a + b
self.assertEqual(a.device, worker_device + '/' + last_part_device)
self.assertEqual(b.device, worker_device + '/' + last_part_device)
self.assertEqual(c.device, worker_device + '/' + last_part_device)
# The device scope is ignored for variables but not for normal ops.
with ops.device('/job:worker/task:0'):
x = variable_scope.get_variable(
'x', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
x_add = x.assign_add(c)
e = a + c
# The variable x is on task 1 since the device_function has been
# called once before the model_fn.
self.assertEqual(x.device, '/job:ps/task:1')
self.assertEqual(x_add.device, x.device)
self.assertEqual(e.device,
'/job:worker/replica:0/task:0/%s' % last_part_device)
# The colocate_vars_with can override the distribution's device.
with d.extended.colocate_vars_with(x):
y = variable_scope.get_variable(
'y', initializer=20.0,
aggregation=variable_scope.VariableAggregation.SUM)
# We add an identity here to avoid complaints about summing
# non-distributed values.
y_add = y.assign_add(array_ops.identity(x_add))
self.assertEqual(y.device, '/job:ps/task:1')
self.assertEqual(y_add.device, y.device)
self.assertEqual(y.device, x.device)
z = variable_scope.get_variable(
'z', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertEqual(z.device, '/job:ps/task:0')
self.assertNotEqual(z.device, x.device)
with ops.control_dependencies([y_add]):
# We add an identity here to avoid complaints about summing
# non-distributed values.
z_add = z.assign_add(array_ops.identity(y))
with ops.control_dependencies([z_add]):
f = z + c
self.assertEqual(f.device, worker_device + '/' + last_part_device)
# The device scope would merge with the default worker device.
with ops.device('/CPU:1'):
g = e + 1.0
self.assertEqual(g.device, worker_device + '/device:CPU:1')
# The ops.colocate_with will be ignored when defining a variable but not
# for a normal tensor.
with ops.colocate_with(x):
u = variable_scope.get_variable('u', initializer=30.0)
v = variable_scope.get_variable('v', initializer=30.0)
h = f + 1.0
self.assertIn('/job:ps/', u.device)
self.assertIn('/job:ps/', v.device)
# u and v are on different parameter servers.
self.assertTrue(u.device != x.device or v.device != x.device)
self.assertTrue(u.device == x.device or v.device == x.device)
# Because h is colocated with x, it ends up on a parameter server rather
# than on the worker. Note that h.device is canonical while x.device is not.
self.assertIn('/job:ps/', h.device)
return y_add, z_add, f
y, z, f = d.extended.call_for_each_replica(model_fn)
self.assertNotEqual(y, None)
self.assertNotEqual(z, None)
self.assertNotEqual(f, None)
if context.num_gpus() >= 1 and num_gpus <= 1:
variables.global_variables_initializer().run()
y_val, z_val, f_val = sess.run([y, z, f])
self.assertEqual(y_val, 33.0)
self.assertEqual(z_val, 43.0)
self.assertEqual(f_val, 46.0)
def _test_device_assignment_distributed_enable_partitioner(
self, task_type, task_id, num_gpus):
d, _, sess_config = self._get_test_objects(task_type, task_id, num_gpus)
num_shards = len(d.extended.parameter_devices)
partitioner = partitioned_variables.fixed_size_partitioner(num_shards)
with ops.Graph().as_default(), \
self.cached_session(target=self._default_target,
config=sess_config) as sess, \
d.scope():
n = variable_scope.get_variable(
'n',
initializer=constant_op.constant([10.0, 20.0]),
aggregation=variable_scope.VariableAggregation.SUM,
partitioner=partitioner)
for part_id, var in enumerate(n):
self.assertEqual(var.device, '/job:ps/task:%d' % part_id)
def model_fn():
a = constant_op.constant([3.0, 5.0])
# The device scope is ignored for variables but not for normal ops.
with ops.device('/job:worker/task:0'):
x = variable_scope.get_variable(
'x',
initializer=constant_op.constant([10.0, 20.0]),
aggregation=variable_scope.VariableAggregation.SUM,
partitioner=partitioner)
x_add = x.assign_add(a, name='x_add')
# The variable x is on task 1 since the device_function has been
# called once before the model_fn.
for part_id, var in enumerate(x):
self.assertEqual(var.device, '/job:ps/task:%d' % part_id)
self.assertEqual(var.device, x_add[part_id].device)
return x_add
x = d.extended.call_for_each_replica(model_fn)
if context.num_gpus() >= 1:
variables.global_variables_initializer().run()
x_val = sess.run(x)
if num_gpus < 1:
self.assertEqual(x_val, [13.0, 25.0])
else:
x_expect = [10.0 + 3 * num_gpus, 20.0 + 5 * num_gpus]
self.assertEqual(x_val, x_expect)
def _test_device_assignment_local(self,
d,
compute_device='CPU',
variable_device='CPU',
num_gpus=0):
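    """Verify op and variable placement for a single-machine strategy."""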
with ops.Graph().as_default(), \
self.cached_session(target=self._default_target,
config=self._sess_config) as sess, \
d.scope():
def model_fn():
if 'CPU' in compute_device:
replica_compute_device = '/device:CPU:0'
else:
replica_id = _get_replica_id_integer()
replica_compute_device = ('/device:GPU:%d' % replica_id)
replica_compute_device = device_util.canonicalize(
replica_compute_device)
if 'CPU' in variable_device:
replica_variable_device = '/device:CPU:0'
else:
replica_id = _get_replica_id_integer()
replica_variable_device = ('/device:GPU:%d' % replica_id)
replica_variable_device = device_util.canonicalize(
replica_variable_device)
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
c = a + b
self.assertEqual(a.device, replica_compute_device)
self.assertEqual(b.device, replica_compute_device)
self.assertEqual(c.device, replica_compute_device)
# The device scope is ignored for variables but not for normal ops.
with ops.device('/device:GPU:2'):
x = variable_scope.get_variable(
'x', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
x_add = x.assign_add(c)
e = a + c
self.assertEqual(
device_util.canonicalize(x.device), replica_variable_device)
self.assertEqual(x_add.device, x.device)
self.assertEqual(e.device, device_util.canonicalize('/device:GPU:2'))
# The colocate_vars_with can override the distribution's device.
with d.extended.colocate_vars_with(x):
y = variable_scope.get_variable(
'y', initializer=20.0,
aggregation=variable_scope.VariableAggregation.SUM)
# We add an identity here to avoid complaints about summing
# non-distributed values.
y_add = y.assign_add(array_ops.identity(x_add))
self.assertEqual(
device_util.canonicalize(y.device), replica_variable_device)
self.assertEqual(y_add.device, y.device)
self.assertEqual(y.device, x.device)
z = variable_scope.get_variable(
'z', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertEqual(
device_util.canonicalize(z.device), replica_variable_device)
with ops.control_dependencies([y_add]):
# We add an identity here to avoid complaints about summing
# non-distributed values.
z_add = z.assign_add(array_ops.identity(y))
with ops.control_dependencies([z_add]):
f = z + c
self.assertEqual(f.device, replica_compute_device)
# The device scope would merge with the default worker device.
with ops.device('/CPU:1'):
g = e + 1.0
self.assertEqual(g.device, device_util.canonicalize('/device:CPU:1'))
        # The ops.colocate_with will be ignored when defining a variable but not
        # for a normal tensor.
with ops.colocate_with(x):
u = variable_scope.get_variable('u', initializer=30.0)
h = f + 1.0
self.assertEqual(
device_util.canonicalize(u.device), replica_variable_device)
self.assertEqual(
device_util.canonicalize(x.device),
device_util.canonicalize(h.device))
return y_add, z_add, f
y, z, f = d.extended.call_for_each_replica(model_fn)
self.assertNotEqual(y, None)
self.assertNotEqual(z, None)
self.assertNotEqual(f, None)
if context.num_gpus() >= 1 and num_gpus <= 1:
variables.global_variables_initializer().run()
y_val, z_val, f_val = sess.run([y, z, f])
self.assertEqual(y_val, 33.0)
self.assertEqual(z_val, 43.0)
self.assertEqual(f_val, 46.0)
def _test_simple_increment(self, task_type, task_id, num_gpus):
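    """Increment shared variables on each replica and check the totals."""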
d, master_target, sess_config = self._get_test_objects(
task_type, task_id, num_gpus)
if d.extended._cluster_spec:
num_workers = len(d.extended._cluster_spec.as_dict().get(WORKER))
if 'chief' in d.extended._cluster_spec.as_dict():
num_workers += 1
else:
num_workers = 1
with ops.Graph().as_default(), \
self.cached_session(target=master_target,
config=sess_config) as sess, \
d.scope():
def model_fn():
x = variable_scope.get_variable(
'x', initializer=10.0,
aggregation=variable_scope.VariableAggregation.SUM)
y = variable_scope.get_variable(
'y', initializer=20.0,
aggregation=variable_scope.VariableAggregation.SUM)
z = variable_scope.get_variable(
'z', initializer=30.0,
aggregation=variable_scope.VariableAggregation.ONLY_FIRST_REPLICA)
# We explicitly make a constant tensor here to avoid complaints about
# summing non-distributed values.
one = constant_op.constant(1.0)
x_add = x.assign_add(one, use_locking=True)
y_add = y.assign_add(one, use_locking=True)
z_add = z.assign_add(one, use_locking=True)
train_op = control_flow_ops.group(x_add, y_add, z_add)
return x, y, z, train_op
x, y, z, train_op = d.extended.call_for_each_replica(model_fn)
train_op = d.group(train_op)
if task_id == 0:
variables.global_variables_initializer().run()
      # Workers wait for the chief worker to finish initializing variables.
self._init_condition.acquire()
self._init_reached += 1
while self._init_reached != num_workers:
self._init_condition.wait()
self._init_condition.notify_all()
self._init_condition.release()
sess.run(train_op)
# Wait for other workers to finish training.
self._finish_condition.acquire()
self._finish_reached += 1
while self._finish_reached != num_workers:
self._finish_condition.wait()
self._finish_condition.notify_all()
self._finish_condition.release()
x_val, y_val, z_val = sess.run([x, y, z])
self.assertEqual(x_val, 10.0 + 1.0 * num_workers * d.num_replicas_in_sync)
self.assertEqual(y_val, 20.0 + 1.0 * num_workers * d.num_replicas_in_sync)
self.assertEqual(z_val, 30.0 + 1.0 * num_workers)
def _test_minimize_loss_graph(self, task_type, task_id, num_gpus):
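    """Run a few manual SGD steps and check that the error decreases."""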
d, master_target, sess_config = self._get_test_objects(
task_type, task_id, num_gpus)
if task_type:
# Multi-worker
assert hasattr(d.extended, '_cluster_spec') and d.extended._cluster_spec
num_workers = len(d.extended._cluster_spec.as_dict().get(WORKER))
if CHIEF in d.extended._cluster_spec.as_dict():
num_workers += 1
else:
# local
num_workers = 1
with ops.Graph().as_default(), \
self.cached_session(target=master_target,
config=sess_config) as sess, \
d.scope():
l = core.Dense(1, use_bias=False)
def loss_fn(x):
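        """Return the squared error of the dense layer against target 1."""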
y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
return y * y
# TODO(yuefengz, apassos): eager.backprop.implicit_grad is not safe for
# multiple graphs (b/111216820).
def grad_fn(x):
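        """Compute the loss and return (gradient, variable) pairs."""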
loss = loss_fn(x)
var_list = (
variables.trainable_variables() + ops.get_collection(
ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
grads = gradients.gradients(loss, var_list)
ret = list(zip(grads, var_list))
return ret
def update(v, g):
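        """Apply one SGD update with a fixed 0.05 learning rate."""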
return v.assign_sub(0.05 * g, use_locking=True)
one = constant_op.constant([[1.]])
def step():
"""Perform one optimization step."""
# Run forward & backward to get gradients, variables list.
g_v = d.extended.call_for_each_replica(grad_fn, args=(one,))
# Update the variables using the gradients and the update() function.
before_list = []
after_list = []
for g, v in g_v:
fetched = d.extended.read_var(v)
before_list.append(fetched)
with ops.control_dependencies([fetched]):
# TODO(yuefengz): support non-Mirrored variable as destinations.
g = d.extended.reduce_to(
reduce_util.ReduceOp.SUM, g, destinations=v)
with ops.control_dependencies(
d.extended.update(v, update, args=(g,), group=False)):
after_list.append(d.extended.read_var(v))
return before_list, after_list
before_out, after_out = step()
if (not task_type or
multi_worker_util.is_chief(
d.extended._cluster_spec, task_type, task_id)):
variables.global_variables_initializer().run()
      # Workers wait for the chief worker to finish initializing variables.
self._init_condition.acquire()
self._init_reached += 1
while self._init_reached != num_workers:
self._init_condition.wait()
self._init_condition.notify_all()
self._init_condition.release()
for i in range(10):
b, a = sess.run((before_out, after_out))
if i == 0:
before, = b
after, = a
error_before = abs(before - 1)
error_after = abs(after - 1)
# Error should go down
self.assertLess(error_after, error_before)
def _test_input_fn_iterator(self,
task_type,
task_id,
num_gpus,
input_fn,
expected_values,
test_reinitialize=True,
ignore_order=False):
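    """Check the per-replica values produced by an input_fn iterator."""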
distribution, master_target, config = self._get_test_objects(
task_type, task_id, num_gpus)
devices = distribution.extended.worker_devices
with ops.Graph().as_default(), \
self.cached_session(config=config,
target=master_target) as sess:
iterator = distribution.make_input_fn_iterator(input_fn)
sess.run(iterator.initializer)
for expected_value in expected_values:
next_element = iterator.get_next()
computed_value = sess.run([values.select_replica(r, next_element)
for r in range(len(devices))])
if ignore_order:
self.assertCountEqual(expected_value, computed_value)
else:
self.assertEqual(expected_value, computed_value)
with self.assertRaises(errors.OutOfRangeError):
next_element = iterator.get_next()
sess.run([values.select_replica(r, next_element)
for r in range(len(devices))])
# After re-initializing the iterator, should be able to iterate again.
if test_reinitialize:
sess.run(iterator.initializer)
for expected_value in expected_values:
next_element = iterator.get_next()
computed_value = sess.run([values.select_replica(r, next_element)
for r in range(len(devices))])
if ignore_order:
self.assertCountEqual(expected_value, computed_value)
else:
self.assertEqual(expected_value, computed_value)
class ParameterServerStrategyTest(
ParameterServerStrategyTestBase,
strategy_test_lib.DistributionTestBase,
strategy_test_lib.TwoDeviceDistributionTestBase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=2)
cls._default_target = 'grpc://' + cls._cluster_spec[WORKER][0]
@combinations.generate(combinations.combine(mode=['graph']))
def test_num_replicas_in_sync(self):
strategy, _, _ = create_test_objects(num_gpus=2)
    # All the devices on a given worker are in sync, which in this case equals
    # the number of GPUs on each worker.
self.assertEqual(2, strategy.num_replicas_in_sync)
@combinations.generate(combinations.combine(mode=['graph']))
def testDeviceAssignmentLocalCPU(self):
strategy, _, _ = create_test_objects(num_gpus=0)
self._test_device_assignment_local(
strategy, compute_device='CPU', variable_device='CPU', num_gpus=0)
@combinations.generate(combinations.combine(mode=['graph']))
def testDeviceAssignmentLocalOneGPU(self):
strategy, _, _ = create_test_objects(num_gpus=1)
self._test_device_assignment_local(
strategy, compute_device='GPU', variable_device='GPU', num_gpus=1)
@combinations.generate(combinations.combine(mode=['graph']))
def testDeviceAssignmentLocalTwoGPUs(self):
strategy, _, _ = create_test_objects(num_gpus=2)
self._test_device_assignment_local(
strategy, compute_device='GPU', variable_device='CPU', num_gpus=2)
@combinations.generate(
combinations.combine(mode=['graph'], num_gpus=[0, 1, 2]))
def testDeviceAssignmentDistributed(self, num_gpus):
self._test_device_assignment_distributed('worker', 1, num_gpus)
@combinations.generate(
combinations.combine(mode=['graph'], num_gpus=[0, 1, 2]))
def testDeviceAssignmentDistributedEnablePartitioner(self, num_gpus):
self._test_device_assignment_distributed_enable_partitioner(
'worker', 1, num_gpus)
@combinations.generate(combinations.combine(mode=['graph']))
def testSimpleBetweenGraph(self):
self._run_between_graph_clients(self._test_simple_increment,
self._cluster_spec, context.num_gpus())
@combinations.generate(
combinations.combine(mode=['graph'], required_gpus=[0, 1, 2]))
def testLocalSimpleIncrement(self, required_gpus):
self._test_simple_increment(None, 0, required_gpus)
@combinations.generate(
combinations.combine(mode=['graph'], required_gpus=[0, 1, 2]))
def testMinimizeLossGraphDistributed(self, required_gpus):
self._run_between_graph_clients(self._test_minimize_loss_graph,
self._cluster_spec, required_gpus)
@combinations.generate(
combinations.combine(mode=['graph'], required_gpus=[0, 1, 2]))
def testMinimizeLossGraphLocal(self, required_gpus):
self._test_minimize_loss_graph(None, None, required_gpus)
# TODO(priyag): Refactor this and other multi worker tests.
@combinations.generate(
combinations.combine(
mode=['graph'], required_gpus=[1, 2], use_dataset=[True, False]))
def testMakeInputFnIteratorDistributed(self, required_gpus, use_dataset):
if use_dataset:
fn = lambda: dataset_ops.Dataset.range(100)
else:
def fn():
dataset = dataset_ops.Dataset.range(100)
it = dataset_ops.make_one_shot_iterator(dataset)
return it.get_next
expected_values = [[i + j
for j in range(required_gpus)]
for i in range(0, 100, required_gpus)]
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=required_gpus,
expected_num_input_pipelines=3,
expected_input_pipeline_id=1) # because task_id = 1
self._test_input_fn_iterator(
'worker',
1,
required_gpus,
input_fn,
expected_values,
test_reinitialize=use_dataset,
ignore_order=not use_dataset)
@combinations.generate(
combinations.combine(
mode=['graph'], required_gpus=[1, 2], use_dataset=[True, False]))
def testMakeInputFnIteratorLocal(self, required_gpus, use_dataset):
if use_dataset:
fn = lambda: dataset_ops.Dataset.range(100)
else:
def fn():
dataset = dataset_ops.Dataset.range(100)
it = dataset_ops.make_one_shot_iterator(dataset)
return it.get_next
expected_values = [[i + j
for j in range(required_gpus)]
for i in range(0, 100, required_gpus)]
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=required_gpus,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0) # only one worker and pipeline for local.
self._test_input_fn_iterator(
None,
None,
required_gpus,
input_fn,
expected_values,
test_reinitialize=use_dataset,
ignore_order=not use_dataset)
@combinations.generate(combinations.combine(mode=['graph']))
def testGlobalStepUpdate(self):
strategy, _, _ = create_test_objects()
self._test_global_step_update(strategy)
@combinations.generate(combinations.combine(mode=['graph']))
def testUpdateConfigProtoMultiWorker(self):
strategy, _, _ = create_test_objects(
cluster_spec=self._cluster_spec,
task_type='worker',
task_id=1,
num_gpus=2)
config_proto = config_pb2.ConfigProto(device_filters=['to_be_overridden'])
new_config = strategy.update_config_proto(config_proto)
# Verify device filters.
self.assertEqual(['/job:worker/task:1', '/job:ps'],
new_config.device_filters)
# Verify isolate_session_state
self.assertFalse(new_config.isolate_session_state)
@combinations.generate(combinations.combine(mode=['graph']))
def testUpdateConfigProtoLocal(self):
strategy, _, _ = create_test_objects(num_gpus=2)
config_proto = config_pb2.ConfigProto()
new_config = strategy.update_config_proto(config_proto)
# Verify isolate_session_state
self.assertTrue(new_config.isolate_session_state)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInMultiWorkerMode(self):
strategy, _, _ = create_test_objects(
cluster_spec=self._cluster_spec,
task_type='worker',
task_id=1,
num_gpus=0)
self.assertTrue(strategy.extended._in_multi_worker_mode())
class ParameterServerStrategyWithChiefTest(ParameterServerStrategyTestBase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=2, has_chief=True)
cls._default_target = 'grpc://' + cls._cluster_spec[CHIEF][0]
@combinations.generate(
combinations.combine(mode=['graph'], required_gpus=[0, 1, 2]))
def testSimpleBetweenGraph(self, required_gpus):
self._run_between_graph_clients(self._test_simple_increment,
self._cluster_spec, required_gpus)
@combinations.generate(
combinations.combine(mode=['graph'], num_gpus=[0, 1, 2]))
def testMinimizeLossGraph(self, num_gpus):
self._run_between_graph_clients(self._test_minimize_loss_graph,
self._cluster_spec, num_gpus)
@combinations.generate(combinations.combine(mode=['graph']))
def testGlobalStepIsWrappedOnTwoGPUs(self):
strategy, _, _ = create_test_objects(num_gpus=2)
with ops.Graph().as_default(), strategy.scope():
created_step = training_util.create_global_step()
get_step = training_util.get_global_step()
self.assertEqual(created_step, get_step,
msg=('created_step %s type %s vs. get_step %s type %s' %
(id(created_step), created_step.__class__.__name__,
id(get_step), get_step.__class__.__name__)))
self.assertIs(values.AggregatingVariable, type(created_step))
self.assertIs(values.AggregatingVariable, type(get_step))
self.assertIs(strategy, created_step.distribute_strategy)
@combinations.generate(combinations.combine(mode=['graph']))
def testGlobalStepIsNotWrappedOnOneGPU(self):
strategy, _, _ = create_test_objects(num_gpus=1)
with ops.Graph().as_default(), strategy.scope():
created_step = training_util.create_global_step()
get_step = training_util.get_global_step()
self.assertEqual(created_step, get_step,
msg=('created_step %s type %s vs. get_step %s type %s' %
(id(created_step), created_step.__class__.__name__,
id(get_step), get_step.__class__.__name__)))
self.assertIs(resource_variable_ops.ResourceVariable, type(created_step))
self.assertIs(resource_variable_ops.ResourceVariable, type(get_step))
      # All variables have a _distribute_strategy parameter. Only variable
      # subclasses in distribution strategy expose it publicly.
self.assertFalse(hasattr(strategy, 'distribute_strategy'))
self.assertIs(strategy, created_step._distribute_strategy)
@combinations.generate(combinations.combine(mode=['graph'], required_gpus=2))
def testValueContainer(self):
strategy, _, _ = create_test_objects(num_gpus=2)
with ops.Graph().as_default(), strategy.scope():
def f():
with backprop.GradientTape() as tape:
v = variable_scope.get_variable('v', initializer=10.0)
_ = v * v
v, = tape.watched_variables()
w = strategy.extended.value_container(v)
self.assertIs(values.AggregatingVariable, type(w))
strategy.extended.call_for_each_replica(f)
class CentralStorageStrategyTest(strategy_test_lib.DistributionTestBase,
parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager'],
required_gpus=2))
def testNumpyDataset(self):
strategy, _, _ = create_test_objects(num_gpus=2)
self._test_numpy_dataset(strategy)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testInMultiWorkerMode(self):
strategy, _, _ = create_test_objects(num_gpus=0)
self.assertFalse(strategy.extended._in_multi_worker_mode())
if __name__ == '__main__':
test.main()
|
apache-2.0
|
MITPERG/oilsands-mop
|
env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/charsetgroupprober.py
|
2929
|
3791
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
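    # A composite prober: feeds input to a group of child probers and reports
    # the charset of the most confident one.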
def __init__(self):
CharSetProber.__init__(self)
self._mActiveNum = 0
self._mProbers = []
self._mBestGuessProber = None
def reset(self):
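        # Reactivate every child prober and clear the current best guess.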
CharSetProber.reset(self)
self._mActiveNum = 0
for prober in self._mProbers:
if prober:
prober.reset()
prober.active = True
self._mActiveNum += 1
self._mBestGuessProber = None
def get_charset_name(self):
if not self._mBestGuessProber:
self.get_confidence()
if not self._mBestGuessProber:
return None
# self._mBestGuessProber = self._mProbers[0]
return self._mBestGuessProber.get_charset_name()
def feed(self, aBuf):
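        # Forward the buffer to each active child prober.  Stop early if one
        # becomes certain (eFoundIt) or if all have ruled themselves out.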
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
continue
st = prober.feed(aBuf)
if not st:
continue
if st == constants.eFoundIt:
self._mBestGuessProber = prober
return self.get_state()
elif st == constants.eNotMe:
prober.active = False
self._mActiveNum -= 1
if self._mActiveNum <= 0:
self._mState = constants.eNotMe
return self.get_state()
return self.get_state()
def get_confidence(self):
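        # Return a fixed high/low confidence once the group state is decided,
        # otherwise the best confidence among the active child probers.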
st = self.get_state()
if st == constants.eFoundIt:
return 0.99
elif st == constants.eNotMe:
return 0.01
bestConf = 0.0
self._mBestGuessProber = None
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
if constants._debug:
sys.stderr.write(prober.get_charset_name()
+ ' not active\n')
continue
cf = prober.get_confidence()
if constants._debug:
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(), cf))
if bestConf < cf:
bestConf = cf
self._mBestGuessProber = prober
if not self._mBestGuessProber:
return 0.0
return bestConf
# else:
# self._mBestGuessProber = self._mProbers[0]
# return self._mBestGuessProber.get_confidence()
|
mit
|