# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write MPEG-4 audio files with iTunes metadata.
This module will read MPEG-4 audio information and metadata,
as found in Apple's MP4 (aka M4A, M4B, M4P) files.
There is no official specification for this format. The source code
for TagLib, FAAD, and various MPEG specifications at
* http://developer.apple.com/documentation/QuickTime/QTFF/
* http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt
* http://standards.iso.org/ittf/PubliclyAvailableStandards/\
c041828_ISO_IEC_14496-12_2005(E).zip
* http://wiki.multimedia.cx/index.php?title=Apple_QuickTime
were all consulted.
"""
import struct
import sys
from mutagen import FileType, Tags, StreamInfo, PaddingInfo
from mutagen._constants import GENRES
from mutagen._util import cdata, insert_bytes, DictProxy, MutagenError, \
hashable, enum, get_size, resize_bytes, loadfile, convert_error
from mutagen._compat import (reraise, PY2, string_types, text_type, chr_,
iteritems, PY3, cBytesIO, izip, xrange)
from ._atom import Atoms, Atom, AtomError
from ._util import parse_full_atom
from ._as_entry import AudioSampleEntry, ASEntryError
class error(MutagenError):
pass
class MP4MetadataError(error):
pass
class MP4StreamInfoError(error):
pass
class MP4NoTrackError(MP4StreamInfoError):
pass
class MP4MetadataValueError(ValueError, MP4MetadataError):
pass
__all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm', 'AtomDataType']
@enum
class AtomDataType(object):
"""Enum for ``dataformat`` attribute of MP4FreeForm.
.. versionadded:: 1.25
"""
IMPLICIT = 0
"""for use with tags for which no type needs to be indicated because
only one type is allowed"""
UTF8 = 1
"""without any count or null terminator"""
UTF16 = 2
"""also known as UTF-16BE"""
SJIS = 3
"""deprecated unless it is needed for special Japanese characters"""
HTML = 6
"""the HTML file header specifies which HTML version"""
XML = 7
"""the XML header must identify the DTD or schemas"""
UUID = 8
"""also known as GUID; stored as 16 bytes in binary (valid as an ID)"""
ISRC = 9
"""stored as UTF-8 text (valid as an ID)"""
MI3P = 10
"""stored as UTF-8 text (valid as an ID)"""
GIF = 12
"""(deprecated) a GIF image"""
JPEG = 13
"""a JPEG image"""
PNG = 14
"""PNG image"""
URL = 15
"""absolute, in UTF-8 characters"""
DURATION = 16
"""in milliseconds, 32-bit integer"""
DATETIME = 17
"""in UTC, counting seconds since midnight, January 1, 1904;
32 or 64-bits"""
GENRES = 18
"""a list of enumerated values"""
INTEGER = 21
"""a signed big-endian integer with length one of { 1,2,3,4,8 } bytes"""
RIAA_PA = 24
"""RIAA parental advisory; { -1=no, 1=yes, 0=unspecified },
8-bit integer"""
UPC = 25
"""Universal Product Code, in text UTF-8 format (valid as an ID)"""
BMP = 27
"""Windows bitmap image"""
@hashable
class MP4Cover(bytes):
"""A cover artwork.
Attributes:
imageformat (`AtomDataType`): format of the image
(either FORMAT_JPEG or FORMAT_PNG)
"""
FORMAT_JPEG = AtomDataType.JPEG
FORMAT_PNG = AtomDataType.PNG
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __init__(self, data, imageformat=FORMAT_JPEG):
self.imageformat = imageformat
__hash__ = bytes.__hash__
def __eq__(self, other):
if not isinstance(other, MP4Cover):
return bytes(self) == other
return (bytes(self) == bytes(other) and
self.imageformat == other.imageformat)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__, bytes(self),
AtomDataType(self.imageformat))
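# Illustrative sketch (not part of the module): building a cover value from
# raw image bytes; the file name here is an assumption.
def _example_cover(path="cover.png"):
    with open(path, "rb") as h:
        return MP4Cover(h.read(), imageformat=MP4Cover.FORMAT_PNG)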
@hashable
class MP4FreeForm(bytes):
"""A freeform value.
Attributes:
dataformat (`AtomDataType`): format of the data (see AtomDataType)
"""
FORMAT_DATA = AtomDataType.IMPLICIT # deprecated
FORMAT_TEXT = AtomDataType.UTF8 # deprecated
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __init__(self, data, dataformat=AtomDataType.UTF8, version=0):
self.dataformat = dataformat
self.version = version
__hash__ = bytes.__hash__
def __eq__(self, other):
if not isinstance(other, MP4FreeForm):
return bytes(self) == other
return (bytes(self) == bytes(other) and
self.dataformat == other.dataformat and
self.version == other.version)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__, bytes(self),
AtomDataType(self.dataformat))
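# Illustrative sketch (not part of the module): a freeform value carrying
# UTF-8 text, as stored under a '----:mean:name' key.
def _example_freeform(text=u"example value"):
    return MP4FreeForm(text.encode("utf-8"), dataformat=AtomDataType.UTF8)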
def _name2key(name):
if PY2:
return name
return name.decode("latin-1")
def _key2name(key):
if PY2:
return key
return key.encode("latin-1")
def _find_padding(atom_path):
# Check for padding "free" atom
# XXX: we only use them if they are adjacent to ilst, and only one.
# and there also is a top level free atom which we could use maybe..?
meta, ilst = atom_path[-2:]
assert meta.name == b"meta" and ilst.name == b"ilst"
index = meta.children.index(ilst)
try:
prev = meta.children[index - 1]
if prev.name == b"free":
return prev
except IndexError:
pass
try:
next_ = meta.children[index + 1]
if next_.name == b"free":
return next_
except IndexError:
pass
def _item_sort_key(key, value):
# iTunes always writes the tags in order of "relevance", try
# to copy it as closely as possible.
order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb",
"\xa9gen", "gnre", "trkn", "disk",
"\xa9day", "cpil", "pgap", "pcst", "tmpo",
"\xa9too", "----", "covr", "\xa9lyr"]
order = dict(izip(order, xrange(len(order))))
last = len(order)
# If there's no key-based way to distinguish, order by length.
# If there's still no way, go by string comparison on the
# values, so we at least have something deterministic.
return (order.get(key[:4], last), len(repr(value)), repr(value))
class MP4Tags(DictProxy, Tags):
r"""MP4Tags()
Dictionary containing Apple iTunes metadata list key/values.
Keys are four byte identifiers, except for freeform ('----')
keys. Values are usually unicode strings, but some atoms have a
special structure:
Text values (multiple values per key are supported):
* '\\xa9nam' -- track title
* '\\xa9alb' -- album
* '\\xa9ART' -- artist
* 'aART' -- album artist
* '\\xa9wrt' -- composer
* '\\xa9day' -- year
* '\\xa9cmt' -- comment
* 'desc' -- description (usually used in podcasts)
* 'purd' -- purchase date
* '\\xa9grp' -- grouping
* '\\xa9gen' -- genre
* '\\xa9lyr' -- lyrics
* 'purl' -- podcast URL
* 'egid' -- podcast episode GUID
* 'catg' -- podcast category
* 'keyw' -- podcast keywords
* '\\xa9too' -- encoded by
* 'cprt' -- copyright
* 'soal' -- album sort order
* 'soaa' -- album artist sort order
* 'soar' -- artist sort order
* 'sonm' -- title sort order
* 'soco' -- composer sort order
* 'sosn' -- show sort order
* 'tvsh' -- show name
* '\\xa9wrk' -- work
* '\\xa9mvn' -- movement
Boolean values:
* 'cpil' -- part of a compilation
* 'pgap' -- part of a gapless album
* 'pcst' -- podcast (iTunes reads this only on import)
Tuples of ints (multiple values per key are supported):
* 'trkn' -- track number, total tracks
* 'disk' -- disc number, total discs
Integer values:
* 'tmpo' -- tempo/BPM
* '\\xa9mvc' -- Movement Count
* '\\xa9mvi' -- Movement Index
* 'shwm' -- work/movement
* 'stik' -- Media Kind
* 'rtng' -- Content Rating
* 'tves' -- TV Episode
* 'tvsn' -- TV Season
* 'plID', 'cnID', 'geID', 'atID', 'sfID', 'cmID', 'akID' -- Various iTunes
Internal IDs
Others:
* 'covr' -- cover artwork, list of MP4Cover objects (which are
tagged strs)
* 'gnre' -- ID3v1 genre. Not supported, use '\\xa9gen' instead.
The freeform '----' frames use a key in the format '----:mean:name'
where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique
identifier for this frame. The value is a str, but is probably
text that can be decoded as UTF-8. Multiple values per key are
supported.
MP4 tag data cannot exist outside of the structure of an MP4 file,
so this class should not be manually instantiated.
Unknown non-text tags and tags that failed to parse will be written
back as is.
"""
def __init__(self, *args, **kwargs):
self._failed_atoms = {}
super(MP4Tags, self).__init__()
if args or kwargs:
self.load(*args, **kwargs)
def load(self, atoms, fileobj):
try:
path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
except KeyError as key:
raise MP4MetadataError(key)
free = _find_padding(path)
self._padding = free.datalength if free is not None else 0
ilst = path[-1]
for atom in ilst.children:
ok, data = atom.read(fileobj)
if not ok:
raise MP4MetadataError("Not enough data")
try:
if atom.name in self.__atoms:
info = self.__atoms[atom.name]
info[0](self, atom, data)
else:
# unknown atom, try as text
self.__parse_text(atom, data, implicit=False)
except MP4MetadataError:
# parsing failed, save them so we can write them back
key = _name2key(atom.name)
self._failed_atoms.setdefault(key, []).append(data)
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("key has to be str")
self._render(key, value)
super(MP4Tags, self).__setitem__(key, value)
@classmethod
def _can_load(cls, atoms):
return b"moov.udta.meta.ilst" in atoms
def _render(self, key, value):
atom_name = _key2name(key)[:4]
if atom_name in self.__atoms:
render_func = self.__atoms[atom_name][1]
render_args = self.__atoms[atom_name][2:]
else:
render_func = type(self).__render_text
render_args = []
return render_func(self, key, value, *render_args)
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething, padding=None):
values = []
items = sorted(self.items(), key=lambda kv: _item_sort_key(*kv))
for key, value in items:
try:
values.append(self._render(key, value))
except (TypeError, ValueError) as s:
reraise(MP4MetadataValueError, s, sys.exc_info()[2])
for key, failed in iteritems(self._failed_atoms):
# don't write atoms back if we have added a new one with
# the same name, this excludes freeform which can have
# multiple atoms with the same key (most parsers seem to be able
# to handle that)
if key in self:
assert _key2name(key) != b"----"
continue
for data in failed:
values.append(Atom.render(_key2name(key), data))
data = Atom.render(b"ilst", b"".join(values))
# Find the old atoms.
try:
atoms = Atoms(filething.fileobj)
except AtomError as err:
reraise(error, err, sys.exc_info()[2])
self.__save(filething.fileobj, atoms, data, padding)
def __save(self, fileobj, atoms, data, padding):
try:
path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
except KeyError:
self.__save_new(fileobj, atoms, data, padding)
else:
self.__save_existing(fileobj, atoms, path, data, padding)
def __save_new(self, fileobj, atoms, ilst_data, padding_func):
hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9)
meta_data = b"\x00\x00\x00\x00" + hdlr + ilst_data
try:
path = atoms.path(b"moov", b"udta")
except KeyError:
path = atoms.path(b"moov")
offset = path[-1]._dataoffset
# ignoring some atom overhead... but we don't have padding left anyway
# and padding_size is guaranteed to be less than zero
content_size = get_size(fileobj) - offset
padding_size = -len(meta_data)
assert padding_size < 0
info = PaddingInfo(padding_size, content_size)
new_padding = info._get_padding(padding_func)
new_padding = min(0xFFFFFFFF, new_padding)
free = Atom.render(b"free", b"\x00" * new_padding)
meta = Atom.render(b"meta", meta_data + free)
if path[-1].name != b"udta":
# moov.udta not found -- create one
data = Atom.render(b"udta", meta)
else:
data = meta
insert_bytes(fileobj, len(data), offset)
fileobj.seek(offset)
fileobj.write(data)
self.__update_parents(fileobj, path, len(data))
self.__update_offsets(fileobj, atoms, len(data), offset)
def __save_existing(self, fileobj, atoms, path, ilst_data, padding_func):
# Replace the old ilst atom.
ilst = path[-1]
offset = ilst.offset
length = ilst.length
# Use adjacent free atom if there is one
free = _find_padding(path)
if free is not None:
offset = min(offset, free.offset)
length += free.length
# Always add a padding atom to make things easier
padding_overhead = len(Atom.render(b"free", b""))
content_size = get_size(fileobj) - (offset + length)
padding_size = length - (len(ilst_data) + padding_overhead)
info = PaddingInfo(padding_size, content_size)
new_padding = info._get_padding(padding_func)
# Limit padding size so we can be sure the free atom overhead is as we
# calculated above (see Atom.render)
new_padding = min(0xFFFFFFFF, new_padding)
ilst_data += Atom.render(b"free", b"\x00" * new_padding)
resize_bytes(fileobj, length, len(ilst_data), offset)
delta = len(ilst_data) - length
fileobj.seek(offset)
fileobj.write(ilst_data)
self.__update_parents(fileobj, path[:-1], delta)
self.__update_offsets(fileobj, atoms, delta, offset)
def __update_parents(self, fileobj, path, delta):
"""Update all parent atoms with the new size."""
if delta == 0:
return
for atom in path:
fileobj.seek(atom.offset)
size = cdata.uint_be(fileobj.read(4))
if size == 1: # 64bit
# skip name (4B) and read size (8B)
size = cdata.ulonglong_be(fileobj.read(12)[4:])
fileobj.seek(atom.offset + 8)
fileobj.write(cdata.to_ulonglong_be(size + delta))
else: # 32bit
fileobj.seek(atom.offset)
fileobj.write(cdata.to_uint_be(size + delta))
def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
"""Update offset table in the specified atom."""
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 12)
data = fileobj.read(atom.length - 12)
fmt = fmt % cdata.uint_be(data[:4])
offsets = struct.unpack(fmt, data[4:])
offsets = [o + (0, delta)[offset < o] for o in offsets]
fileobj.seek(atom.offset + 16)
fileobj.write(struct.pack(fmt, *offsets))
def __update_tfhd(self, fileobj, atom, delta, offset):
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 9)
data = fileobj.read(atom.length - 9)
flags = cdata.uint_be(b"\x00" + data[:3])
if flags & 1:
o = cdata.ulonglong_be(data[7:15])
if o > offset:
o += delta
fileobj.seek(atom.offset + 16)
fileobj.write(cdata.to_ulonglong_be(o))
def __update_offsets(self, fileobj, atoms, delta, offset):
"""Update offset tables in all 'stco' and 'co64' atoms."""
if delta == 0:
return
moov = atoms[b"moov"]
for atom in moov.findall(b'stco', True):
self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
for atom in moov.findall(b'co64', True):
self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
try:
for atom in atoms[b"moof"].findall(b'tfhd', True):
self.__update_tfhd(fileobj, atom, delta, offset)
except KeyError:
pass
def __parse_data(self, atom, data):
pos = 0
while pos < atom.length - 8:
head = data[pos:pos + 12]
if len(head) != 12:
raise MP4MetadataError("truncated atom % r" % atom.name)
length, name = struct.unpack(">I4s", head[:8])
version = ord(head[8:9])
flags = struct.unpack(">I", b"\x00" + head[9:12])[0]
if name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (name, atom.name))
chunk = data[pos + 16:pos + length]
if len(chunk) != length - 16:
raise MP4MetadataError("truncated atom % r" % atom.name)
yield version, flags, chunk
pos += length
def __add(self, key, value, single=False):
assert isinstance(key, str)
if single:
self[key] = value
else:
self.setdefault(key, []).extend(value)
def __render_data(self, key, version, flags, value):
return Atom.render(_key2name(key), b"".join([
Atom.render(
b"data", struct.pack(">2I", version << 24 | flags, 0) + data)
for data in value]))
def __parse_freeform(self, atom, data):
length = cdata.uint_be(data[:4])
mean = data[12:length]
pos = length
length = cdata.uint_be(data[pos:pos + 4])
name = data[pos + 12:pos + length]
pos += length
value = []
while pos < atom.length - 8:
length, atom_name = struct.unpack(">I4s", data[pos:pos + 8])
if atom_name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (atom_name, atom.name))
version = ord(data[pos + 8:pos + 8 + 1])
flags = struct.unpack(">I", b"\x00" + data[pos + 9:pos + 12])[0]
value.append(MP4FreeForm(data[pos + 16:pos + length],
dataformat=flags, version=version))
pos += length
key = _name2key(atom.name + b":" + mean + b":" + name)
self.__add(key, value)
def __render_freeform(self, key, value):
if isinstance(value, bytes):
value = [value]
dummy, mean, name = _key2name(key).split(b":", 2)
mean = struct.pack(">I4sI", len(mean) + 12, b"mean", 0) + mean
name = struct.pack(">I4sI", len(name) + 12, b"name", 0) + name
data = b""
for v in value:
flags = AtomDataType.UTF8
version = 0
if isinstance(v, MP4FreeForm):
flags = v.dataformat
version = v.version
data += struct.pack(
">I4s2I", len(v) + 16, b"data", version << 24 | flags, 0)
data += v
return Atom.render(b"----", mean + name + data)
def __parse_pair(self, atom, data):
key = _name2key(atom.name)
values = [struct.unpack(">2H", d[2:6]) for
version, flags, d in self.__parse_data(atom, data)]
self.__add(key, values)
def __render_pair(self, key, value):
data = []
for v in value:
try:
track, total = v
except TypeError:
raise ValueError
if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
data.append(struct.pack(">4H", 0, track, total, 0))
else:
raise MP4MetadataValueError(
"invalid numeric pair %r" % ((track, total),))
return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)
def __render_pair_no_trailing(self, key, value):
data = []
for (track, total) in value:
if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
data.append(struct.pack(">3H", 0, track, total))
else:
raise MP4MetadataValueError(
"invalid numeric pair %r" % ((track, total),))
return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)
def __parse_genre(self, atom, data):
values = []
for version, flags, data in self.__parse_data(atom, data):
# version = 0, flags = 0
if len(data) != 2:
raise MP4MetadataValueError("invalid genre")
genre = cdata.short_be(data)
# Translate to a freeform genre.
try:
genre = GENRES[genre - 1]
except IndexError:
# this will make us write it back at least
raise MP4MetadataValueError("unknown genre")
values.append(genre)
key = _name2key(b"\xa9gen")
self.__add(key, values)
def __parse_integer(self, atom, data):
values = []
for version, flags, data in self.__parse_data(atom, data):
if version != 0:
raise MP4MetadataValueError("unsupported version")
if flags not in (AtomDataType.IMPLICIT, AtomDataType.INTEGER):
raise MP4MetadataValueError("unsupported type")
if len(data) == 1:
value = cdata.int8(data)
elif len(data) == 2:
value = cdata.int16_be(data)
elif len(data) == 3:
value = cdata.int32_be(data + b"\x00") >> 8
elif len(data) == 4:
value = cdata.int32_be(data)
elif len(data) == 8:
value = cdata.int64_be(data)
else:
raise MP4MetadataValueError(
"invalid value size %d" % len(data))
values.append(value)
key = _name2key(atom.name)
self.__add(key, values)
def __render_integer(self, key, value, min_bytes):
assert min_bytes in (1, 2, 4, 8)
data_list = []
try:
for v in value:
# We default to the int size of the usual values written
# by itunes for compatibility.
if cdata.int8_min <= v <= cdata.int8_max and min_bytes <= 1:
data = cdata.to_int8(v)
elif cdata.int16_min <= v <= cdata.int16_max and min_bytes <= 2:
data = cdata.to_int16_be(v)
elif cdata.int32_min <= v <= cdata.int32_max and \
min_bytes <= 4:
data = cdata.to_int32_be(v)
elif cdata.int64_min <= v <= cdata.int64_max and \
min_bytes <= 8:
data = cdata.to_int64_be(v)
else:
raise MP4MetadataValueError(
"value out of range: %r" % value)
data_list.append(data)
except (TypeError, ValueError, cdata.error) as e:
raise MP4MetadataValueError(e)
return self.__render_data(key, 0, AtomDataType.INTEGER, data_list)
def __parse_bool(self, atom, data):
for version, flags, data in self.__parse_data(atom, data):
if len(data) != 1:
raise MP4MetadataValueError("invalid bool")
value = bool(ord(data))
key = _name2key(atom.name)
self.__add(key, value, single=True)
def __render_bool(self, key, value):
return self.__render_data(
key, 0, AtomDataType.INTEGER, [chr_(bool(value))])
def __parse_cover(self, atom, data):
values = []
pos = 0
while pos < atom.length - 8:
length, name, imageformat = struct.unpack(">I4sI",
data[pos:pos + 12])
if name != b"data":
if name == b"name":
pos += length
continue
raise MP4MetadataError(
"unexpected atom %r inside 'covr'" % name)
if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG):
# Sometimes AtomDataType.IMPLICIT or simply wrong.
# In all cases it was jpeg, so default to it
imageformat = MP4Cover.FORMAT_JPEG
cover = MP4Cover(data[pos + 16:pos + length], imageformat)
values.append(cover)
pos += length
key = _name2key(atom.name)
self.__add(key, values)
def __render_cover(self, key, value):
atom_data = []
for cover in value:
try:
imageformat = cover.imageformat
except AttributeError:
imageformat = MP4Cover.FORMAT_JPEG
atom_data.append(Atom.render(
b"data", struct.pack(">2I", imageformat, 0) + cover))
return Atom.render(_key2name(key), b"".join(atom_data))
def __parse_text(self, atom, data, implicit=True):
# implicit = False, for parsing unknown atoms only take utf8 ones.
# For known ones we can assume the implicit are utf8 too.
values = []
for version, flags, atom_data in self.__parse_data(atom, data):
if implicit:
if flags not in (AtomDataType.IMPLICIT, AtomDataType.UTF8):
raise MP4MetadataError(
"Unknown atom type %r for %r" % (flags, atom.name))
else:
if flags != AtomDataType.UTF8:
raise MP4MetadataError(
"%r is not text, ignore" % atom.name)
try:
text = atom_data.decode("utf-8")
except UnicodeDecodeError as e:
raise MP4MetadataError("%s: %s" % (_name2key(atom.name), e))
values.append(text)
key = _name2key(atom.name)
self.__add(key, values)
def __render_text(self, key, value, flags=AtomDataType.UTF8):
if isinstance(value, string_types):
value = [value]
encoded = []
for v in value:
if not isinstance(v, text_type):
if PY3:
raise TypeError("%r not str" % v)
try:
v = v.decode("utf-8")
except (AttributeError, UnicodeDecodeError) as e:
raise TypeError(e)
encoded.append(v.encode("utf-8"))
return self.__render_data(key, 0, flags, encoded)
def delete(self, filename):
"""Remove the metadata from the given filename."""
self._failed_atoms.clear()
self.clear()
self.save(filename, padding=lambda x: 0)
__atoms = {
b"----": (__parse_freeform, __render_freeform),
b"trkn": (__parse_pair, __render_pair),
b"disk": (__parse_pair, __render_pair_no_trailing),
b"gnre": (__parse_genre, None),
b"plID": (__parse_integer, __render_integer, 8),
b"cnID": (__parse_integer, __render_integer, 4),
b"geID": (__parse_integer, __render_integer, 4),
b"atID": (__parse_integer, __render_integer, 4),
b"sfID": (__parse_integer, __render_integer, 4),
b"cmID": (__parse_integer, __render_integer, 4),
b"akID": (__parse_integer, __render_integer, 1),
b"tvsn": (__parse_integer, __render_integer, 4),
b"tves": (__parse_integer, __render_integer, 4),
b"tmpo": (__parse_integer, __render_integer, 2),
b"\xa9mvi": (__parse_integer, __render_integer, 2),
b"\xa9mvc": (__parse_integer, __render_integer, 2),
b"cpil": (__parse_bool, __render_bool),
b"pgap": (__parse_bool, __render_bool),
b"pcst": (__parse_bool, __render_bool),
b"shwm": (__parse_integer, __render_integer, 1),
b"stik": (__parse_integer, __render_integer, 1),
b"rtng": (__parse_integer, __render_integer, 1),
b"covr": (__parse_cover, __render_cover),
b"purl": (__parse_text, __render_text),
b"egid": (__parse_text, __render_text),
}
# these allow implicit flags and parse as text
for name in [b"\xa9nam", b"\xa9alb", b"\xa9ART", b"aART", b"\xa9wrt",
b"\xa9day", b"\xa9cmt", b"desc", b"purd", b"\xa9grp",
b"\xa9gen", b"\xa9lyr", b"catg", b"keyw", b"\xa9too",
b"cprt", b"soal", b"soaa", b"soar", b"sonm", b"soco",
b"sosn", b"tvsh"]:
__atoms[name] = (__parse_text, __render_text)
def pprint(self):
def to_line(key, value):
assert isinstance(key, text_type)
if isinstance(value, text_type):
return u"%s=%s" % (key, value)
return u"%s=%r" % (key, value)
values = []
for key, value in sorted(iteritems(self)):
if not isinstance(key, text_type):
key = key.decode("latin-1")
if key == "covr":
values.append(u"%s=%s" % (key, u", ".join(
[u"[%d bytes of data]" % len(data) for data in value])))
elif isinstance(value, list):
for v in value:
values.append(to_line(key, v))
else:
values.append(to_line(key, value))
return u"\n".join(values)
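# Illustrative sketch (not part of the module): how the tag keys documented in
# the MP4Tags docstring are typically set through the public MP4 API.  The
# file name, values and the freeform name 'MOOD' are assumptions.
def _example_edit_tags(path="example.m4a"):
    audio = MP4(path)
    audio["\xa9nam"] = [u"Example Title"]      # text values are lists of str
    audio["trkn"] = [(3, 12)]                  # (track number, total tracks)
    audio["cpil"] = True                       # boolean flag
    audio["----:com.apple.iTunes:MOOD"] = [
        MP4FreeForm(b"calm", dataformat=AtomDataType.UTF8)]
    audio.save()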
class MP4Info(StreamInfo):
"""MP4Info()
MPEG-4 stream information.
Attributes:
bitrate (`int`): bitrate in bits per second, as an int
length (`float`): file length in seconds, as a float
channels (`int`): number of audio channels
sample_rate (`int`): audio sampling rate in Hz
bits_per_sample (`int`): bits per sample
codec (`mutagen.text`):
* if starting with ``"mp4a"`` uses an mp4a audio codec
(see the codec parameter in rfc6381 for details e.g.
``"mp4a.40.2"``)
* for everything else see a list of possible values at
http://www.mp4ra.org/codecs.html
e.g. ``"mp4a"``, ``"alac"``, ``"mp4a.40.2"``, ``"ac-3"`` etc.
codec_description (`mutagen.text`):
Name of the codec used (ALAC, AAC LC, AC-3...). Values might
change in the future, use for display purposes only.
"""
bitrate = 0
length = 0.0
channels = 0
sample_rate = 0
bits_per_sample = 0
codec = u""
codec_description = u""
def __init__(self, *args, **kwargs):
if args or kwargs:
self.load(*args, **kwargs)
@convert_error(IOError, MP4StreamInfoError)
def load(self, atoms, fileobj):
try:
moov = atoms[b"moov"]
except KeyError:
raise MP4StreamInfoError("not a MP4 file")
for trak in moov.findall(b"trak"):
hdlr = trak[b"mdia", b"hdlr"]
ok, data = hdlr.read(fileobj)
if not ok:
raise MP4StreamInfoError("Not enough data")
if data[8:12] == b"soun":
break
else:
raise MP4NoTrackError("track has no audio data")
mdhd = trak[b"mdia", b"mdhd"]
ok, data = mdhd.read(fileobj)
if not ok:
raise MP4StreamInfoError("Not enough data")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version == 0:
offset = 8
fmt = ">2I"
elif version == 1:
offset = 16
fmt = ">IQ"
else:
raise MP4StreamInfoError("Unknown mdhd version %d" % version)
end = offset + struct.calcsize(fmt)
unit, length = struct.unpack(fmt, data[offset:end])
try:
self.length = float(length) / unit
except ZeroDivisionError:
self.length = 0
try:
atom = trak[b"mdia", b"minf", b"stbl", b"stsd"]
except KeyError:
pass
else:
self._parse_stsd(atom, fileobj)
def _parse_stsd(self, atom, fileobj):
"""Sets channels, bits_per_sample, sample_rate and optionally bitrate.
Can raise MP4StreamInfoError.
"""
assert atom.name == b"stsd"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid stsd")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version != 0:
raise MP4StreamInfoError("Unsupported stsd version")
try:
num_entries, offset = cdata.uint32_be_from(data, 0)
except cdata.error as e:
raise MP4StreamInfoError(e)
if num_entries == 0:
return
# look at the first entry if there is one
entry_fileobj = cBytesIO(data[offset:])
try:
entry_atom = Atom(entry_fileobj)
except AtomError as e:
raise MP4StreamInfoError(e)
try:
entry = AudioSampleEntry(entry_atom, entry_fileobj)
except ASEntryError as e:
raise MP4StreamInfoError(e)
else:
self.channels = entry.channels
self.bits_per_sample = entry.sample_size
self.sample_rate = entry.sample_rate
self.bitrate = entry.bitrate
self.codec = entry.codec
self.codec_description = entry.codec_description
def pprint(self):
return "MPEG-4 audio (%s), %.2f seconds, %d bps" % (
self.codec_description, self.length, self.bitrate)
class MP4(FileType):
"""MP4(filething)
An MPEG-4 audio file, probably containing AAC.
If more than one track is present in the file, the first is used.
Only audio ('soun') tracks will be read.
Arguments:
filething (filething)
Attributes:
info (`MP4Info`)
tags (`MP4Tags`)
"""
MP4Tags = MP4Tags
_mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"]
@loadfile()
def load(self, filething):
fileobj = filething.fileobj
try:
atoms = Atoms(fileobj)
except AtomError as err:
reraise(error, err, sys.exc_info()[2])
self.info = MP4Info()
try:
self.info.load(atoms, fileobj)
except MP4NoTrackError:
pass
except error:
raise
except Exception as err:
reraise(MP4StreamInfoError, err, sys.exc_info()[2])
if not MP4Tags._can_load(atoms):
self.tags = None
self._padding = 0
else:
try:
self.tags = self.MP4Tags(atoms, fileobj)
except error:
raise
except Exception as err:
reraise(MP4MetadataError, err, sys.exc_info()[2])
else:
self._padding = self.tags._padding
def save(self, *args, **kwargs):
"""save(filething=None, padding=None)"""
super(MP4, self).save(*args, **kwargs)
def add_tags(self):
if self.tags is None:
self.tags = self.MP4Tags()
else:
raise error("an MP4 tag already exists")
@staticmethod
def score(filename, fileobj, header_data):
return (b"ftyp" in header_data) + (b"mp4" in header_data)
Open = MP4
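# Illustrative sketch (not part of the module): loading a file and printing
# its stream info and tags; the path is an assumption.
def _example_pprint(path="example.m4a"):
    audio = Open(path)
    print(audio.info.pprint())
    if audio.tags is not None:
        print(audio.tags.pprint())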
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
""" delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
"""
t = MP4(filething)
filething.fileobj.seek(0)
t.delete(filething)
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'chris'
__version__ = '05.15'
import xml.etree.ElementTree as ET
from collections import OrderedDict as OD
from copy import deepcopy
from re import findall
from json import loads
bib = {}
retr = [28, 8, 18, 20, 43, 97]  # submission ids to skip (presumably retracted/withdrawn)
def sanitize(texs, istex=None):
"""
Escapes characters that collide with LaTeX, corrects stupid unicode
characters, and cleans up some other user/system errors.
:param texs: str abstract (or any) text in unicode
:param istex: bool if set, skip newline flattening and URL footnoting
(used for reference blocks)
:return: str sanitized text ready for LaTeX compiling
"""
# LaTeX collides
cust = {'{': u'\{',
'}': u'\}',
" ''": u" ``",
" '": u" `",
"&": u"\&",
"~": u"\~",
"\%": u"%",
"%": u"\%", # wtf?
"_": u"\_",
# crappy mac unicode stuff
u'\u00A0': u" ",
u'\u00AD': u"",
# system errors
"&amp;": u"&",
"amp;": u''
}
for orig, repl in cust.iteritems():
texs = texs.replace(orig, repl)
if not istex: # text only
texs = texs.replace('\n', ' ')
texs = token_url(texs, True)
return texs
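# Illustrative sketch (not part of the pipeline): typical call on an abstract
# body before it is dropped into the LaTeX template; the input text is assumed.
def _example_sanitize():
    return sanitize(u"Results on {gold} data & related work\nsecond line")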
def token_url(text, fn=False):
"""
Recognizes URLs in text and formats them so that they will be
placed in a footnote underneath the abstract. It also makes sure
that certain stylistic clashes are avoided, for example a comma or
full stop directly before the footnote number.
:param text: str unicode text
:param fn: bool wrap recognized URLs in a \footnote{}
:return: str with footnoted URLs
"""
# TODO: smarter handling of footnote
urls = findall(' ?(?:(?:http|ftp|https):\/\/[a-z./0-9%]|\/?www).*(?:\.[a-z]{2,5}|\/)(?: |\n|\.|\)|,|$)', text)
if urls:
for u in urls:
lm = u[len(u)-1:] # try to trim stuff before the URL
u = u[:-1] if (lm == '.' or lm == ')' or lm == ',') else u # trim
text = text.replace(u, ("\\footnote{" if fn else '') + " \\url{"+u+"}" + ("}" if fn else '')) # insert footnote
if fn:
burls = findall('(?:,|\.)' + ('\\\\footnote\{' if fn else '') + ' \\\\url\{.*\}' + ('\}' if fn else ''), text)
if burls: # if , or . before footnote, switch them
for bu in burls:
text = text.replace(bu, bu[1:]+bu[0])
return text
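# Illustrative sketch (assumed text): with fn=True a recognized URL is wrapped
# in \url{} inside a \footnote{}.
def _example_token_url():
    return token_url(u"details at http://www.example.org/ until Friday", fn=True)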
def format_ref(refs, label):
"""
Given a string with references that can be split by newline,
adds to the global bib a tuple with a unique id and the label it
will use to refer to the page it's mentioned on, as well as a
cleaned-up version of the references. A custom part had to be
implemented because one of the references was itself split up by
newlines.
:param refs: str unicode text snippet with \n splittable refs
:param label: here sec:title is used to pageref to the abstract
:return: None (adds to global bib)
"""
global bib
refs = refs.split('\n')
# TODO: find a way to handle the custom line better
refs = list(' '.join(refs)) if 'Ramage' in ' '.join(refs) else refs # custom!
for n in refs:
if len(n) > 10:
n = n[1:] if n.startswith(' ') else n
n = token_url(n)
bib[(hash(n), label)] = n
def format_text(text, title):
"""
Finds the boundary between the list of references and the
abstract text, provides a label for the abstract, and
pipes the found references to the format_ref function.
:param text: the abstracts text, including refs
:param title: the title of the abstract
:return: str unicode with the abstract text and label, no refs
"""
ref = findall(r'\n(?:[Rr]eference)|(?:REFERENCE)[sS]?[:]?', text)
brf = findall(r'\n\[[0-9]\]', text)
label = 'tit:' + str(hash(title))
if brf or ref:
tl = text.split((brf[-1:] if (brf and not ref) else ref[-1:])[0])
text, ref = tl[0], sanitize(tl[1], True)
format_ref(ref, label)
return '\\noindent '+sanitize(text)+'\n'+'\\label{'+label+'}'
def format_toc(tit, name_l):
"""
Accepts title and list of tuples with names from the authors
of the abstract, and will convert these into a formatted unicode
LaTeX toc entry.
:param tit: str unicode abstract title (no linebreaks)
:param name_l: list str with authors
:return: str unicode ToC entry for the abstract
"""
# TODO: refactor this function to look more like the one on fazzeh.github.io
aut = ', '.join([('\\newline ' if (name_l.index(n) == 4 and len(', '.join([n[0]+' '+n[1] for n in name_l[:3]]))
< 72) else '') + n[0]+' '+n[1] for n in name_l])
aut = aut.split(' \\newline')
tit = tit.replace('\\\\ ', '')
tit = "\\addcontentsline{toc}{section}{\\emph{" + tit + "}} \n" + \
"\\addtocontents{toc}{\\protect\\vspace{0.2em}} \n" + \
"\\addtocontents{toc}{" + aut[0] + " \\hfill" + ("\\newline" if len(aut) > 1 else '') + "} \n"
if len(aut) > 1:
tit += "\\addtocontents{toc}{\\hspace*{1.2em}" + aut[1] + "} \n"
tit += "\\addtocontents{toc}{\\protect\\vspace{1em}} \n"
return tit
def check_prio(tl):
"""
Checks if there is a character in the title which has priority
as being a split marker.
:param tl: str unicode title of the abstract
:return: int index of the priority if exists, else None
"""
mark = [':' if ':' in i else False or '(' if '(' in i else False for i in tl]
if '(' in mark:
if mark.index('(') > 2:
return mark.index('(')
elif ':' in mark:
if mark.index(':') > 2:
return mark.index(':')+1
# TODO: check if for each function it is mentioned if they output LaTeX
def format_title(title):
"""
Will _try_ to make an intelligent decision on where to split
the input title. Please be aware that this is font/point/
page specific as it relies on length: only titles with
62 < len(title) < 96 are split. Will try to turn these bounds
into variables at some point.
:param title: str title without any breaks (yet)
:return: str 'intelligently' split title for LaTeX
"""
# TODO: try to make 62 < x < 96 user handable, check page 41
newline_indicators = ['and', 'the', 'a', 'for', 'in']
title = sanitize(title)
if 62 < len(title) < 96:
title_list = title.split()
title_list.insert(len(title_list)/2 if not check_prio(title_list) else check_prio(title_list), '\\\\')
for word in list(set(title_list) & set(newline_indicators)):
if title_list.index(word)-title_list.index('\\\\') == 1 and ':' not in title_list[title_list.index(word)-2]:
a, b = title_list.index(word), title_list.index('\\\\')
title_list[a], title_list[b] = title_list[b], title_list[a]
title = ' '.join(title_list)
if title[-1:] == '.':
title = title[:-1]
return title
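# Illustrative sketch (assumed title): titles between 62 and 96 characters get
# a manual '\\' break inserted near the middle or after a ':'/'(' marker.
def _example_format_title():
    return format_title(u"A Fairly Long Title About Computational Linguistics "
                        u"and Other Related Matters")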
def lower_dutch_prep(surename):
"""
Converts 'incorrectly' capitalized Dutch surnames to a more
consistent format (lowercased prepositions such as 'van', 'der').
:param surename: str surname as capitalized in the source data
:return: str surname with its prepositions lowercased
"""
prep_list = {'Van ': 'van ', 'Den ': 'den ', 'Der ': 'der ', 'De ': 'de ',
'Het ': 'het ', "'T ": "'t ", 'Des ': 'des ', 'Op ': 'op '}
for ini in prep_list.iterkeys():
if ini in surename:
surename = surename.replace(ini, prep_list[ini])
return surename
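# Illustrative sketch: lowercases Dutch prepositions in a capitalized surname,
# e.g. lower_dutch_prep(u'Van Der Berg') returns u'van der Berg'.
def _example_lower_dutch_prep():
    return lower_dutch_prep(u'Van Der Berg')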
def format_name(name):
"""
Joins and formats the name with an index entry. Might be superfluous.
:param name: tup with (first, last) name.
:return: str joined name with index reference
"""
st_name = name[0]+' '+name[1]
return st_name + " \\index{" + name[1] + ", " + name[0] + "} "
def author_tab(bottom=False):
"""
Outputs table that is utilized to properly format the list
of authors, neatly centered, in the abstract.
:param bottom: bool want the bottom part of the table?
:return: str part of the author table
"""
return '''
\\begin{table}[t!]
\\makebox[\\textwidth]{
\\centering
\\begin{tabular}{---al---}
---ta--- ''' if not bottom else '''
\\end{tabular} }
\\end{table} '''
def format_table(namel, afil, maill):
"""
Pretty elaborate function that determines how to format the
tables used to lay out the author lists. Authors are positioned
two in a row until there is at most one left; that one is then
centered in a new table with an alignment of only one {c}.
:param namel: list with full author names in str format
:param afil: list with author affiliations in str format
:param maill: list with contact emails in str format
:return: one (in case of 1-2 authors) or two (>2) author
LaTeX tables
"""
ltab = []
while len(namel) > 0:
ntab = deepcopy(author_tab())
if len(namel) == 1:
if len(ltab) != 0:
ntab = author_tab(True) + ntab
ntab += author_tab(True)
name_e = format_name(namel.pop(0))
ta = "%s \\\\ {%s} \\\\ {\\texttt{%s}} \\\\" % (name_e, afil.pop(0), maill.pop(0))
al = 'c'
else:
name_e1 = format_name(namel.pop(0))
name_e2 = format_name(namel.pop(0))
ta = "%s & %s \\\\ {%s} & {%s} \\\\ {\\texttt{%s}} & {\\texttt{%s}} \\\\" % \
(name_e1, name_e2, afil.pop(0), afil.pop(0), maill.pop(0), maill.pop(0))
al = 'cc'
if al == 'cc' and len(ltab) >= 1:
ntab = " & \\\\ \n "+ta
else:
ntab = ntab.replace('---ta---', ta)
ntab = ntab.replace('---al---', al)
ltab.append(ntab)
if '\\end{table}' not in ltab[len(ltab)-1]:
ltab.append(author_tab(True))
return '\n'.join(ltab)
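# Illustrative sketch (assumed author data): authors are placed two per row;
# a leftover single author ends up centered in its own one-column table.
def _example_format_table():
    names = [(u'Ada', u'Lovelace'), (u'Alan', u'Turing'), (u'Grace', u'Hopper')]
    affiliations = [u'University A', u'University B', u'University C']
    mails = [u'[email protected]', u'[email protected]', u'[email protected]']
    return format_table(names, affiliations, mails)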
# TODO: make the two tables as in these functions
def agt(a, c):
"""
Formats the table for the agenda, in landscape, and uses
\setlength to center it.
:param a: str the alignments and borders for the table in
LaTeX format
:param c: list of str with table lines constructed in
get_agenda
:return: str LaTeX table with inserted content
"""
return '''
\\begin{landscape}
\\begin{centering}
\\begingroup
\\setlength{\LTleft}{-20cm plus -1fill}
\\setlength{\LTright}{\LTleft}
\\footnotesize
\\begin{longtable}{%s}
\\toprule
%s
\\bottomrule
\\end{longtable}
\\endgroup
\\end{centering}
\\end{landscape}''' % (a, c)
# TODO: it might a nice idea to store the LaTeX commands in a function to call in python
def get_agenda(d):
"""
This will construct a conference programme using a dict with
submission ids, authors and titles, according to the ordering
specified in agenda.json. In the newest version, HTML output is
also incorporated, which makes it an ugly-ass function; please
make sure to generalize and split it into two separate output
functions, using markdown as a base or something.
:param d: dict with int(id): tup(authors, title)
:return: str agenda in a LaTeX table
"""
# TODO: try to generalize this stuff to be LaTeX unspecific
lin, ltml = list(), list()
with open('agenda.json', 'r') as f:
l = loads(f.read())
for entry in sorted(l["agenda"]):
event = l["agenda"][entry]
if "what" in event: # Plenary
lin.append('\\midrule \n %s & \\multicolumn{5}{l}{\\textbf{%s:} %s %s in %s} \\\\' % (event["time"], event["name"], event["what"], event["by"], event["room"]))
ltml.append('<tr><td>%s</td><td colspan="5"><b>%s</b>: %s %s in %s</td></tr> \n' % (event["time"], event["name"], event["what"], event["by"], event["room"]))
elif "sessions" in event: # Talks
namel, rooml, block_m = [], [], []
for s in event["sessions"]:
inf = event["sessions"][s]
namel.append(inf["name"])
rooml.append(inf["room"])
block_m.append(inf["blocks"])
lin.append('\\midrule \n \\textbf{%s} & %s \\\\ \n %s & \\textbf{%s} \\\\' % (event["name"], ' & '.join(rooml), event["time"], '} & \\textbf{'.join(namel)))
ltml.append('<tr><td><b>%s</b></td><td>%s</td></tr>\n<tr><td>%s</td><td><b>%s</b></td></tr> \n' % (event["name"], '</td><td>'.join(rooml), event["time"], '</b></td><td><b>'.join(namel)))
for i in range(0, len(block_m)-1):
lin.append('\\midrule\n')
row = [' \\newline '.join(d[block_m[j][i]]) for j in range(0, len(block_m))]
rtml = [' <br/> '.join(d[block_m[j][i]]) for j in range(0, len(block_m))]
ctml = [x.replace('\\textbf{', '<a href="abstracts#'+str(block_m[j][i])+'">').replace('}', '</a>') for x in rtml]
lin.append(' & %s \\\\' % ' & '.join(row))
ltml.append('<tr><td></td><td>%s</td></tr> \n' % '</td><td>'.join(ctml))
else: # Break etc.
lin.append('\\midrule \n %s & \\textbf{%s} \\\\' % (event["time"], event["name"]))
ltml.append('<tr><td>%s</td><td><b>%s</b></td></tr> \n' % (event["time"], event["name"]))
agd_to_html(['<table>\n']+ltml+['</table>'])
return agt('lp{3.5cm}p{3.5cm}p{3.5cm}p{3.5cm}p{3.5cm}', '\n'.join(lin))
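# Hedged sketch of the agenda.json layout that get_agenda assumes (keys taken
# from the lookups above, values purely illustrative):
#   {"agenda": {
#       "01": {"time": "09:00", "name": "Opening", "what": "welcome",
#              "by": "the chairs", "room": "A1"},
#       "02": {"time": "09:30", "name": "Session 1",
#              "sessions": {"s1": {"name": "Parsing", "room": "A1",
#                                  "blocks": [12, 14]}}},
#       "03": {"time": "10:30", "name": "Coffee break"}}}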
def get_refs():
"""
Constructs an itemized LaTeX list of references with
pagerefs from the _raw_ references that were extracted from
the abstracts in format_text and stored in bib.
:return: str LaTeX list of references
"""
global bib
bib = OD(sorted(bib.items(), key=lambda (k, v): v))
bibt = '\\chapter*{References} \n\\begin{itemize} \n'
for tup, cit in bib.iteritems():
bibt += '\\item[\\pageref{'+tup[1]+'}] '+cit+'\n'
bibt += '\n \\end{itemize}'
return bibt
# TODO: check if this namel tuple makes any sense (used more as string than tuple?)
def clean_info(title, namel):
"""
This will format the title and authors for the conference
programme in the desired format.
:param title: str abstract title
:param namel: str author names
:return: str LaTeX conference programme info
"""
if len(namel) > 5:
namel = namel[:5]
namel.append('et. al')
namel = ', '.join(namel)
title = '\\textbf{'+title.replace(' \\\\ ', ' ')+'}'
return title, namel
def tex(ti, tr, na, te):
"""
This is the main LaTeX building block for each abstract, being a
centered figure with a title, a block for page and ToC
references, a table of authors, and the content of the
abstract.
:param ti: str title with LaTeX line breaks inserted
:param tr: str with pagerefs and custom
toc entries from format_toc
:param na: str table with authors from format_table
:param te: str sanitized text from format_text
:return: str abstract page in LaTeX
"""
return '''
\\newpage
\\begin{figure}[t!]
\\centering
\\large\\textbf{%s}
\\vspace*{0.5cm}
\\end{figure}
%s
%s
%s ''' % (ti, tr, na, te)
def divide_abstracts(ad):
key, pres, demo, post = [], [], [], []
for v in OD(sorted(ad.items(), key=lambda t: t[0])).itervalues():
if 'keynote' in v[0]:
key.append(v[1])
elif 'presentation' in v[0]:
pres.append(v[1])
elif 'demo' in v[0]:
demo.append(v[1])
elif 'poster' in v[0]:
post.append(v[1])
return key, pres, demo, post
def agd_to_html(lin):
o = open('./abstracts.html', 'ab+')
o.write('\n'.join(lin).encode('utf-8'))
def html_abst(aid, title, authors, abstract):
return """
<a name="%s">
<div style="background-color: #411939; color: white; padding-left: 5px;">
<h4>%s</h4>
%s
</div>
%s \n\n""" % (aid, title, authors, abstract)
def xml_to_html(d):
d = OD(sorted(d.items(), key=lambda (k, v): v[0]))
o = open('./abstracts.html', 'ab+')
for aid, infl in d.iteritems():
o.write(html_abst(str(aid), infl[0], ', '.join(infl[1]), infl[2]).encode("utf-8"))
def parse_one(submission):
submission_id = int(submission.attrib['id'])
# keywords = [k.text for k in submission[1]]
decision = submission[3].text
title = format_title(submission[0].text)
names = [(sanitize(entry[0].text), lower_dutch_prep(entry[1].text)) for entry in submission[4]]
namel = [(sanitize(entry[0].text) + ' ' + lower_dutch_prep(entry[1].text)) for entry in submission[4]]
afilliations = [sanitize(entry[3].text) for entry in submission[4]]
mails = [(sanitize(entry[2].text) if entry[2].text else '') for entry in submission[4]]
abstract = tex(title, format_text(submission[2].text, title),
format_toc(title, names), format_table(names, afilliations, mails))
# abstract_dict[submission[0].text] = (decision, abstract)
# agenda_dict[submission_id] = (clean_info(title, namel)) # TODO: clean that out earlier
# html_dict[submission_id] = [submission[0].text, namel, submission[2].text.replace('\n', '<br/>')]
def main():
tree = ET.parse('abstracts.xml')
submissions = tree.getroot()
abstract_dict, agenda_dict, html_dict = {}, {}, {}
for submission in submissions:
if not 'REJECT' in submission[3].text and int(submission.attrib['id']) not in retr:
parse_one(submission)
key, pres, demo, post = divide_abstracts(abstract_dict)
with open('./tex/bos_i.tex', 'r') as i:
o = open('./tex/bos_o.tex', 'w')
i = i.read()
i = i.replace('% agenda', get_agenda(agenda_dict).encode("utf-8"))
i = i.replace('% keynote', '\n'.join(key).encode("utf-8"))
i = i.replace('% presentations', '\n'.join(pres).encode("utf-8"))
i = i.replace('% demos', '\n'.join(demo).encode("utf-8"))
i = i.replace('% posters', '\n'.join(post).encode("utf-8"))
i = i.replace('% refl', get_refs().encode("utf-8"))
o.write(i)
o.close()
xml_to_html(html_dict)
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pysnmp/license.html
#
import sys
from pyasn1.codec.ber import decoder
from pyasn1.codec.ber import eoo
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import univ
from pysnmp import debug
from pysnmp.proto import api
from pysnmp.proto import errind
from pysnmp.proto import error
from pysnmp.proto import rfc1905
from pysnmp.proto import rfc3411
from pysnmp.proto.mpmod.base import AbstractMessageProcessingModel
# API to rfc1905 protocol objects
pMod = api.PROTOCOL_MODULES[api.SNMP_VERSION_2C]
# SNMPv3 message format
class ScopedPDU(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('contextEngineId', univ.OctetString()),
namedtype.NamedType('contextName', univ.OctetString()),
namedtype.NamedType('data', rfc1905.PDUs()))
class ScopedPduData(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('plaintext', ScopedPDU()),
namedtype.NamedType('encryptedPDU', univ.OctetString()))
class HeaderData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType(
'msgID', univ.Integer().subtype(
subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
namedtype.NamedType(
'msgMaxSize', univ.Integer().subtype(
subtypeSpec=constraint.ValueRangeConstraint(484, 2147483647))),
namedtype.NamedType(
'msgFlags', univ.OctetString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, 1))),
# NOTE (etingof): constrain SNMPv3 message to only USM+ security models
# because SNMPv1/v2c seems incompatible in pysnmp implementation, not sure
# if it's intended by the SNMP standard at all...
namedtype.NamedType(
'msgSecurityModel', univ.Integer().subtype(
subtypeSpec=constraint.ValueRangeConstraint(3, 2147483647))))
class SNMPv3Message(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType(
'msgVersion', univ.Integer().subtype(
subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
namedtype.NamedType('msgGlobalData', HeaderData()),
namedtype.NamedType('msgSecurityParameters', univ.OctetString()),
namedtype.NamedType('msgData', ScopedPduData()))
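# Illustrative sketch (not part of the module): decoding a serialized SNMPv3
# message against the ASN.1 spec above, mirroring what prepareDataElements
# does further down.
def _example_decode(wholeMsg):
    msg, rest = decoder.decode(wholeMsg, asn1Spec=SNMPv3Message())
    headerData = msg.getComponentByPosition(1)
    return headerData.getComponentByPosition(0)  # msgID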
# XXX move somewhere?
_snmpErrors = {
(1, 3, 6, 1, 6, 3, 15, 1, 1, 1, 0): errind.unsupportedSecurityLevel,
(1, 3, 6, 1, 6, 3, 15, 1, 1, 2, 0): errind.notInTimeWindow,
(1, 3, 6, 1, 6, 3, 15, 1, 1, 3, 0): errind.unknownUserName,
(1, 3, 6, 1, 6, 3, 15, 1, 1, 4, 0): errind.unknownEngineID,
(1, 3, 6, 1, 6, 3, 15, 1, 1, 5, 0): errind.wrongDigest,
(1, 3, 6, 1, 6, 3, 15, 1, 1, 6, 0): errind.decryptionError
}
class SnmpV3MessageProcessingModel(AbstractMessageProcessingModel):
MESSAGE_PROCESSING_MODEL_ID = univ.Integer(3) # SNMPv3
SNMP_MSG_SPEC = SNMPv3Message
_emptyStr = univ.OctetString('')
_msgFlags = {
0: univ.OctetString('\x00'),
1: univ.OctetString('\x01'),
3: univ.OctetString('\x03'),
4: univ.OctetString('\x04'),
5: univ.OctetString('\x05'),
7: univ.OctetString('\x07')
}
def __init__(self):
AbstractMessageProcessingModel.__init__(self)
self._scopedPDU = ScopedPDU()
self._engineIdCache = {}
self._engineIdCacheExpQueue = {}
self._expirationTimer = 0
def getPeerEngineInfo(self, transportDomain, transportAddress):
k = transportDomain, transportAddress
if k in self._engineIdCache:
return (self._engineIdCache[k]['securityEngineId'],
self._engineIdCache[k]['contextEngineId'],
self._engineIdCache[k]['contextName'])
else:
return None, None, None
# 7.1.1a
def prepareOutgoingMessage(self, snmpEngine, transportDomain,
transportAddress, messageProcessingModel,
securityModel, securityName, securityLevel,
contextEngineId, contextName, pduVersion,
pdu, expectResponse, sendPduHandle):
mibBuilder = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder
snmpEngineID, = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')
snmpEngineID = snmpEngineID.syntax
# 7.1.1b
msgID = self._cache.newMsgID()
debug.logger & debug.FLAG_MP and debug.logger(
'prepareOutgoingMessage: new msgID %s' % msgID)
k = transportDomain, transportAddress
if k in self._engineIdCache:
peerSnmpEngineData = self._engineIdCache[k]
else:
peerSnmpEngineData = None
debug.logger & debug.FLAG_MP and debug.logger(
'prepareOutgoingMessage: peer SNMP engine data %s '
'for transport %s, address '
'%s' % (peerSnmpEngineData, transportDomain, transportAddress))
# 7.1.4
if contextEngineId is None:
if peerSnmpEngineData is None:
contextEngineId = snmpEngineID
else:
contextEngineId = peerSnmpEngineData['contextEngineId']
# Defaulting contextEngineID to securityEngineId should
# probably be done on Agent side (see 7.1.3.d.2,) so this
# is a sort of workaround.
if not contextEngineId:
contextEngineId = peerSnmpEngineData['securityEngineId']
# 7.1.5
if not contextName:
contextName = self._emptyStr
debug.logger & debug.FLAG_MP and debug.logger(
'prepareOutgoingMessage: using contextEngineId %r, contextName '
'%r' % (contextEngineId, contextName))
# 7.1.6
scopedPDU = self._scopedPDU
scopedPDU.setComponentByPosition(0, contextEngineId)
scopedPDU.setComponentByPosition(1, contextName)
scopedPDU.setComponentByPosition(2)
scopedPDU.getComponentByPosition(2).setComponentByType(
pdu.tagSet, pdu, verifyConstraints=False, matchTags=False,
matchConstraints=False)
# 7.1.7
msg = self._snmpMsgSpec
# 7.1.7a
msg.setComponentByPosition(
0, self.MESSAGE_PROCESSING_MODEL_ID, verifyConstraints=False,
matchTags=False, matchConstraints=False)
headerData = msg.setComponentByPosition(1).getComponentByPosition(1)
# 7.1.7b
headerData.setComponentByPosition(
0, msgID, verifyConstraints=False, matchTags=False,
matchConstraints=False)
snmpEngineMaxMessageSize, = mibBuilder.importSymbols(
'__SNMP-FRAMEWORK-MIB', 'snmpEngineMaxMessageSize')
# 7.1.7c
# XXX need to coerce MIB value as it has incompatible constraints set
headerData.setComponentByPosition(
1, snmpEngineMaxMessageSize.syntax, verifyConstraints=False,
matchTags=False, matchConstraints=False)
# 7.1.7d
msgFlags = 0
if securityLevel == 1:
pass
elif securityLevel == 2:
msgFlags |= 0x01
elif securityLevel == 3:
msgFlags |= 0x03
else:
raise error.ProtocolError(
'Unknown securityLevel %s' % securityLevel)
if pdu.tagSet in rfc3411.CONFIRMED_CLASS_PDUS:
msgFlags |= 0x04
headerData.setComponentByPosition(
2, self._msgFlags[msgFlags], verifyConstraints=False,
matchTags=False, matchConstraints=False)
# 7.1.7e
# XXX need to coerce MIB value as it has incompatible constraints set
headerData.setComponentByPosition(3, int(securityModel))
debug.logger & debug.FLAG_MP and debug.logger(
'prepareOutgoingMessage: %s' % (msg.prettyPrint(),))
if securityModel in snmpEngine.securityModels:
smHandler = snmpEngine.securityModels[securityModel]
else:
raise error.StatusInformation(
errorIndication=errind.unsupportedSecurityModel)
# 7.1.9.a
if pdu.tagSet in rfc3411.UNCONFIRMED_CLASS_PDUS:
securityEngineId = snmpEngineID
else:
if peerSnmpEngineData is None:
debug.logger & debug.FLAG_MP and debug.logger(
'prepareOutgoingMessage: peer SNMP engine is not known')
securityEngineId = None
else:
securityEngineId = peerSnmpEngineData['securityEngineId']
debug.logger & debug.FLAG_MP and debug.logger(
'prepareOutgoingMessage: securityModel %r, securityEngineId %r, '
'securityName %r, securityLevel '
'%r' % (securityModel, securityEngineId, securityName, securityLevel))
# 7.1.9.b
securityParameters, wholeMsg = smHandler.generateRequestMsg(
snmpEngine, self.MESSAGE_PROCESSING_MODEL_ID, msg,
snmpEngineMaxMessageSize.syntax, securityModel,
securityEngineId, securityName, securityLevel, scopedPDU
)
# Message size constraint verification
if len(wholeMsg) > snmpEngineMaxMessageSize.syntax:
raise error.StatusInformation(errorIndication=errind.tooBig)
# 7.1.9.c
if pdu.tagSet in rfc3411.CONFIRMED_CLASS_PDUS:
# XXX rfc bug? why stateReference should be created?
self._cache.pushByMsgId(
msgID, sendPduHandle=sendPduHandle,
msgID=msgID, snmpEngineID=snmpEngineID,
securityModel=securityModel,
securityName=securityName,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
transportDomain=transportDomain,
transportAddress=transportAddress)
snmpEngine.observer.storeExecutionContext(
snmpEngine, 'rfc3412.prepareOutgoingMessage',
dict(transportDomain=transportDomain,
transportAddress=transportAddress,
wholeMsg=wholeMsg,
securityModel=securityModel,
securityName=securityName,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
pdu=pdu)
)
snmpEngine.observer.clearExecutionContext(
snmpEngine, 'rfc3412.prepareOutgoingMessage'
)
return transportDomain, transportAddress, wholeMsg
def prepareResponseMessage(self, snmpEngine, messageProcessingModel,
securityModel, securityName, securityLevel,
contextEngineId, contextName, pduVersion,
pdu, maxSizeResponseScopedPDU, stateReference,
statusInformation):
mibBuilder = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder
snmpEngineID, = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')
snmpEngineID = snmpEngineID.syntax
# 7.1.2.b
cachedParams = self._cache.popByStateRef(stateReference)
msgID = cachedParams['msgID']
contextEngineId = cachedParams['contextEngineId']
contextName = cachedParams['contextName']
securityModel = cachedParams['securityModel']
securityName = cachedParams['securityName']
securityLevel = cachedParams['securityLevel']
securityStateReference = cachedParams['securityStateReference']
reportableFlag = cachedParams['reportableFlag']
maxMessageSize = cachedParams['msgMaxSize']
transportDomain = cachedParams['transportDomain']
transportAddress = cachedParams['transportAddress']
debug.logger & debug.FLAG_MP and debug.logger(
'prepareResponseMessage: stateReference %s' % stateReference)
# 7.1.3
if statusInformation is not None and 'oid' in statusInformation:
# 7.1.3a
if pdu is None:
pduType = None
else:
requestID = pdu.getComponentByPosition(0)
pduType = pdu.tagSet
# 7.1.3b
if (pdu is None and not reportableFlag or
pduType is not None and
pduType not in rfc3411.CONFIRMED_CLASS_PDUS):
raise error.StatusInformation(
errorIndication=errind.loopTerminated)
# 7.1.3c
reportPDU = rfc1905.ReportPDU()
pMod.apiPDU.setVarBinds(
reportPDU, ((statusInformation['oid'], statusInformation['val']),))
pMod.apiPDU.setErrorStatus(reportPDU, 0)
pMod.apiPDU.setErrorIndex(reportPDU, 0)
if pdu is None:
pMod.apiPDU.setRequestID(reportPDU, 0)
else:
# noinspection PyUnboundLocalVariable
pMod.apiPDU.setRequestID(reportPDU, requestID)
# 7.1.3d.1
if 'securityLevel' in statusInformation:
securityLevel = statusInformation['securityLevel']
else:
securityLevel = 1
# 7.1.3d.2
if 'contextEngineId' in statusInformation:
contextEngineId = statusInformation['contextEngineId']
else:
contextEngineId = snmpEngineID
# 7.1.3d.3
if 'contextName' in statusInformation:
contextName = statusInformation['contextName']
else:
contextName = ""
# 7.1.3e
pdu = reportPDU
debug.logger & debug.FLAG_MP and debug.logger(
'prepareResponseMessage: prepare report PDU for '
'statusInformation %s' % statusInformation)
# 7.1.4
if not contextEngineId:
contextEngineId = snmpEngineID # XXX impl-dep manner
# 7.1.5
if not contextName:
contextName = self._emptyStr
debug.logger & debug.FLAG_MP and debug.logger(
'prepareResponseMessage: using contextEngineId %r, contextName '
'%r' % (contextEngineId, contextName))
# 7.1.6
scopedPDU = self._scopedPDU
scopedPDU.setComponentByPosition(0, contextEngineId)
scopedPDU.setComponentByPosition(1, contextName)
scopedPDU.setComponentByPosition(2)
scopedPDU.getComponentByPosition(2).setComponentByType(
pdu.tagSet, pdu, verifyConstraints=False, matchTags=False,
matchConstraints=False)
# 7.1.7
msg = self._snmpMsgSpec
# 7.1.7a
msg.setComponentByPosition(
0, self.MESSAGE_PROCESSING_MODEL_ID, verifyConstraints=False,
matchTags=False, matchConstraints=False)
headerData = msg.setComponentByPosition(1).getComponentByPosition(1)
# 7.1.7b
headerData.setComponentByPosition(
0, msgID, verifyConstraints=False, matchTags=False,
matchConstraints=False)
snmpEngineMaxMessageSize, = mibBuilder.importSymbols(
'__SNMP-FRAMEWORK-MIB', 'snmpEngineMaxMessageSize')
# 7.1.7c
# XXX need to coerce MIB value as it has incompatible constraints set
headerData.setComponentByPosition(
1, snmpEngineMaxMessageSize.syntax, verifyConstraints=False,
matchTags=False, matchConstraints=False)
# 7.1.7d
msgFlags = 0
if securityLevel == 1:
pass
elif securityLevel == 2:
msgFlags |= 0x01
elif securityLevel == 3:
msgFlags |= 0x03
else:
raise error.ProtocolError('Unknown securityLevel %s' % securityLevel)
if pdu.tagSet in rfc3411.CONFIRMED_CLASS_PDUS: # XXX not needed?
msgFlags |= 0x04
headerData.setComponentByPosition(
2, self._msgFlags[msgFlags], verifyConstraints=False, matchTags=False,
matchConstraints=False)
# 7.1.7e
headerData.setComponentByPosition(
3, securityModel, verifyConstraints=False, matchTags=False,
matchConstraints=False)
debug.logger & debug.FLAG_MP and debug.logger(
'prepareResponseMessage: %s' % (msg.prettyPrint(),))
if securityModel in snmpEngine.securityModels:
smHandler = snmpEngine.securityModels[securityModel]
else:
raise error.StatusInformation(
errorIndication=errind.unsupportedSecurityModel)
debug.logger & debug.FLAG_MP and debug.logger(
'prepareResponseMessage: securityModel %r, securityEngineId %r, '
'securityName %r, securityLevel %r' % (
securityModel, snmpEngineID, securityName, securityLevel))
# 7.1.8a
try:
securityParameters, wholeMsg = smHandler.generateResponseMsg(
snmpEngine, self.MESSAGE_PROCESSING_MODEL_ID, msg,
snmpEngineMaxMessageSize.syntax, securityModel,
snmpEngineID, securityName, securityLevel, scopedPDU,
securityStateReference)
except error.StatusInformation:
# 7.1.8.b
raise
debug.logger & debug.FLAG_MP and debug.logger(
'prepareResponseMessage: SM finished')
# Message size constraint verification
if len(wholeMsg) > min(snmpEngineMaxMessageSize.syntax, maxMessageSize):
raise error.StatusInformation(errorIndication=errind.tooBig)
snmpEngine.observer.storeExecutionContext(
snmpEngine,
'rfc3412.prepareResponseMessage',
dict(transportDomain=transportDomain,
transportAddress=transportAddress,
securityModel=securityModel,
securityName=securityName,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
securityEngineId=snmpEngineID,
pdu=pdu))
snmpEngine.observer.clearExecutionContext(
snmpEngine, 'rfc3412.prepareResponseMessage')
return transportDomain, transportAddress, wholeMsg
# 7.2.1
def prepareDataElements(self, snmpEngine, transportDomain,
transportAddress, wholeMsg):
# 7.2.2
msg, restOfwholeMsg = decoder.decode(wholeMsg, asn1Spec=self._snmpMsgSpec)
debug.logger & debug.FLAG_MP and debug.logger(
'prepareDataElements: %s' % (msg.prettyPrint(),))
if eoo.endOfOctets.isSameTypeWith(msg):
raise error.StatusInformation(errorIndication=errind.parseError)
mibBuilder = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder
# 7.2.3
headerData = msg.getComponentByPosition(1)
msgVersion = messageProcessingModel = msg.getComponentByPosition(0)
msgID = headerData.getComponentByPosition(0)
msgFlags, = headerData.getComponentByPosition(2).asNumbers()
maxMessageSize = headerData.getComponentByPosition(1)
securityModel = headerData.getComponentByPosition(3)
securityParameters = msg.getComponentByPosition(2)
debug.logger & debug.FLAG_MP and debug.logger(
'prepareDataElements: msg data msgVersion %s msgID %s '
'securityModel %s' % (msgVersion, msgID, securityModel))
# 7.2.4
if securityModel not in snmpEngine.securityModels:
snmpUnknownSecurityModels, = mibBuilder.importSymbols(
'__SNMP-MPD-MIB', 'snmpUnknownSecurityModels')
snmpUnknownSecurityModels.syntax += 1
raise error.StatusInformation(errorIndication=errind.unsupportedSecurityModel)
# 7.2.5
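# The two low-order msgFlags bits encode the securityLevel:
# 0x00 -> noAuthNoPriv (1), 0x01 -> authNoPriv (2), 0x03 -> authPriv (3);
# 0x02 (privacy without authentication) is invalid and bumps snmpInvalidMsgs.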
if msgFlags & 0x03 == 0x00:
securityLevel = 1
elif (msgFlags & 0x03) == 0x01:
securityLevel = 2
elif (msgFlags & 0x03) == 0x03:
securityLevel = 3
else:
snmpInvalidMsgs, = mibBuilder.importSymbols(
'__SNMP-MPD-MIB', 'snmpInvalidMsgs')
snmpInvalidMsgs.syntax += 1
raise error.StatusInformation(errorIndication=errind.invalidMsg)
if msgFlags & 0x04:
reportableFlag = 1
else:
reportableFlag = 0
# 7.2.6
smHandler = snmpEngine.securityModels[securityModel]
try:
(securityEngineId,
securityName,
scopedPDU,
maxSizeResponseScopedPDU,
securityStateReference) = smHandler.processIncomingMsg(
snmpEngine, messageProcessingModel, maxMessageSize,
securityParameters, securityModel, securityLevel,
wholeMsg, msg)
debug.logger & debug.FLAG_MP and debug.logger(
'prepareDataElements: SM succeeded')
except error.StatusInformation as exc:
statusInformation = exc
debug.logger & debug.FLAG_MP and debug.logger(
'prepareDataElements: SM failed, statusInformation '
'%s' % statusInformation)
snmpEngine.observer.storeExecutionContext(
snmpEngine, 'rfc3412.prepareDataElements:sm-failure',
dict(transportDomain=transportDomain,
transportAddress=transportAddress,
securityModel=securityModel,
securityLevel=securityLevel,
securityParameters=securityParameters,
statusInformation=statusInformation))
snmpEngine.observer.clearExecutionContext(
snmpEngine, 'rfc3412.prepareDataElements:sm-failure')
if 'errorIndication' in statusInformation:
# 7.2.6a
if 'oid' in statusInformation:
# 7.2.6a1
securityStateReference = statusInformation['securityStateReference']
contextEngineId = statusInformation['contextEngineId']
contextName = statusInformation['contextName']
if 'scopedPDU' in statusInformation:
scopedPDU = statusInformation['scopedPDU']
pdu = scopedPDU.getComponentByPosition(2).getComponent()
else:
pdu = None
maxSizeResponseScopedPDU = statusInformation['maxSizeResponseScopedPDU']
securityName = None # XXX secmod cache used
# 7.2.6a2
stateReference = self._cache.newStateReference()
self._cache.pushByStateRef(
stateReference, msgVersion=messageProcessingModel,
msgID=msgID, contextEngineId=contextEngineId,
contextName=contextName, securityModel=securityModel,
securityName=securityName, securityLevel=securityLevel,
securityStateReference=securityStateReference,
reportableFlag=reportableFlag,
msgMaxSize=maxMessageSize,
maxSizeResponseScopedPDU=maxSizeResponseScopedPDU,
transportDomain=transportDomain,
transportAddress=transportAddress)
# 7.2.6a3
try:
snmpEngine.msgAndPduDsp.returnResponsePdu(
snmpEngine, 3, securityModel, securityName,
securityLevel, contextEngineId, contextName,
1, pdu, maxSizeResponseScopedPDU, stateReference,
statusInformation)
except error.StatusInformation:
pass
debug.logger & debug.FLAG_MP and debug.logger(
'prepareDataElements: error reported')
# 7.2.6b
if sys.version_info[0] <= 2:
raise statusInformation
else:
origTraceback = sys.exc_info()[2]
try:
raise statusInformation.with_traceback(origTraceback)
finally:
# Break cycle between locals and traceback object
# (seems to be irrelevant on Py3 but just in case)
del origTraceback
else:
# Sniff for engineIdCache
k = transportDomain, transportAddress
if k not in self._engineIdCache:
contextEngineId = scopedPDU[0]
contextName = scopedPDU[1]
pdus = scopedPDU[2]
pdu = pdus.getComponent()
# Here we assume that authentic/default EngineIDs
# come only in the course of engine-to-engine communication.
if pdu.tagSet in rfc3411.INTERNAL_CLASS_PDUS:
self._engineIdCache[k] = {
'securityEngineId': securityEngineId,
'contextEngineId': contextEngineId,
'contextName': contextName
}
timerResolution = snmpEngine.transportDispatcher.getTimerResolution()
expireAt = int(self._expirationTimer + 300 / timerResolution)
if expireAt not in self._engineIdCacheExpQueue:
self._engineIdCacheExpQueue[expireAt] = []
self._engineIdCacheExpQueue[expireAt].append(k)
debug.logger & debug.FLAG_MP and debug.logger(
'prepareDataElements: cache securityEngineId %r for %r %r' % (
securityEngineId, transportDomain, transportAddress))
snmpEngineID, = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')
snmpEngineID = snmpEngineID.syntax
# 7.2.7 XXX PDU would be parsed here?
contextEngineId = scopedPDU[0]
contextName = scopedPDU[1]
pdu = scopedPDU[2]
pdu = pdu.getComponent() # PDUs
# 7.2.8
pduVersion = api.SNMP_VERSION_2C
# 7.2.9
pduType = pdu.tagSet
# 7.2.10
if (pduType in rfc3411.RESPONSE_CLASS_PDUS or
pduType in rfc3411.INTERNAL_CLASS_PDUS):
# 7.2.10a
try:
cachedReqParams = self._cache.popByMsgId(msgID)
except error.ProtocolError:
smHandler.releaseStateInformation(securityStateReference)
raise error.StatusInformation(errorIndication=errind.dataMismatch)
# 7.2.10b
sendPduHandle = cachedReqParams['sendPduHandle']
else:
sendPduHandle = None
debug.logger & debug.FLAG_MP and debug.logger(
'prepareDataElements: using sendPduHandle %s for msgID '
'%s' % (sendPduHandle, msgID))
# 7.2.11
if pduType in rfc3411.INTERNAL_CLASS_PDUS:
# 7.2.11a
varBinds = pMod.apiPDU.getVarBinds(pdu)
if varBinds:
errorIndication = _snmpErrors.get(
varBinds[0][0],
errind.ReportPduReceived(varBinds[0][0].prettyPrint()))
statusInformation = error.StatusInformation(
errorIndication=errorIndication,
oid=varBinds[0][0], val=varBinds[0][1],
sendPduHandle=sendPduHandle)
else:
statusInformation = error.StatusInformation(
sendPduHandle=sendPduHandle)
# 7.2.11b (incomplete implementation)
snmpEngine.observer.storeExecutionContext(
snmpEngine, 'rfc3412.prepareDataElements:internal',
dict(transportDomain=transportDomain,
transportAddress=transportAddress,
securityModel=securityModel,
securityName=securityName,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
securityEngineId=securityEngineId,
pdu=pdu))
snmpEngine.observer.clearExecutionContext(
snmpEngine, 'rfc3412.prepareDataElements:internal')
# 7.2.11c
smHandler.releaseStateInformation(securityStateReference)
# 7.2.11d
# no-op
# 7.2.11e XXX may need to pass Reports up to app in some cases...
raise statusInformation
statusInformation = None # no errors ahead
# 7.2.12
if pduType in rfc3411.RESPONSE_CLASS_PDUS:
# 7.2.12a -> no-op
# 7.2.12b
# noinspection PyUnboundLocalVariable
if (securityModel != cachedReqParams['securityModel'] or
securityName != cachedReqParams['securityName'] or
securityLevel != cachedReqParams['securityLevel'] or
contextEngineId != cachedReqParams['contextEngineId'] or
contextName != cachedReqParams['contextName']):
smHandler.releaseStateInformation(securityStateReference)
raise error.StatusInformation(errorIndication=errind.dataMismatch)
snmpEngine.observer.storeExecutionContext(
snmpEngine, 'rfc3412.prepareDataElements:response',
dict(transportDomain=transportDomain,
transportAddress=transportAddress,
securityModel=securityModel,
securityName=securityName,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
securityEngineId=securityEngineId,
pdu=pdu))
snmpEngine.observer.clearExecutionContext(
snmpEngine, 'rfc3412.prepareDataElements:response')
# 7.2.12c
smHandler.releaseStateInformation(securityStateReference)
stateReference = None
# 7.2.12d
return (messageProcessingModel, securityModel, securityName,
securityLevel, contextEngineId, contextName,
pduVersion, pdu, pduType, sendPduHandle,
maxSizeResponseScopedPDU, statusInformation,
stateReference)
# 7.2.13
if pduType in rfc3411.CONFIRMED_CLASS_PDUS:
# 7.2.13a
if securityEngineId != snmpEngineID:
smHandler.releaseStateInformation(securityStateReference)
raise error.StatusInformation(
errorIndication=errind.engineIDMismatch)
# 7.2.13b
stateReference = self._cache.newStateReference()
self._cache.pushByStateRef(
stateReference, msgVersion=messageProcessingModel,
msgID=msgID, contextEngineId=contextEngineId,
contextName=contextName, securityModel=securityModel,
securityName=securityName, securityLevel=securityLevel,
securityStateReference=securityStateReference,
reportableFlag=reportableFlag, msgMaxSize=maxMessageSize,
maxSizeResponseScopedPDU=maxSizeResponseScopedPDU,
transportDomain=transportDomain,
transportAddress=transportAddress)
debug.logger & debug.FLAG_MP and debug.logger(
'prepareDataElements: new stateReference %s' % stateReference)
snmpEngine.observer.storeExecutionContext(
snmpEngine, 'rfc3412.prepareDataElements:confirmed',
dict(transportDomain=transportDomain,
transportAddress=transportAddress,
securityModel=securityModel,
securityName=securityName,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
securityEngineId=securityEngineId,
pdu=pdu))
snmpEngine.observer.clearExecutionContext(
snmpEngine, 'rfc3412.prepareDataElements:confirmed')
# 7.2.13c
return (messageProcessingModel, securityModel, securityName,
securityLevel, contextEngineId, contextName,
pduVersion, pdu, pduType, sendPduHandle,
maxSizeResponseScopedPDU, statusInformation,
stateReference)
# 7.2.14
if pduType in rfc3411.UNCONFIRMED_CLASS_PDUS:
# Pass new stateReference to let app browse request details
stateReference = self._cache.newStateReference()
snmpEngine.observer.storeExecutionContext(
snmpEngine, 'rfc3412.prepareDataElements:unconfirmed',
dict(transportDomain=transportDomain,
transportAddress=transportAddress,
securityModel=securityModel,
securityName=securityName,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
securityEngineId=securityEngineId,
pdu=pdu))
snmpEngine.observer.clearExecutionContext(
snmpEngine, 'rfc3412.prepareDataElements:unconfirmed')
# This is not specified explicitly in the RFC
smHandler.releaseStateInformation(securityStateReference)
return (messageProcessingModel, securityModel, securityName,
securityLevel, contextEngineId, contextName,
pduVersion, pdu, pduType, sendPduHandle,
maxSizeResponseScopedPDU, statusInformation,
stateReference)
smHandler.releaseStateInformation(securityStateReference)
raise error.StatusInformation(errorIndication=errind.unsupportedPDUtype)
def _expireEnginesInfo(self):
if self._expirationTimer in self._engineIdCacheExpQueue:
for engineKey in self._engineIdCacheExpQueue[self._expirationTimer]:
del self._engineIdCache[engineKey]
debug.logger & debug.FLAG_MP and debug.logger(
'_expireEnginesInfo: expiring %r' % (engineKey,))
del self._engineIdCacheExpQueue[self._expirationTimer]
self._expirationTimer += 1
def receiveTimerTick(self, snmpEngine, timeNow):
self._expireEnginesInfo()
AbstractMessageProcessingModel.receiveTimerTick(self, snmpEngine, timeNow)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2018 RIKEN R-CCS
## \file kmrrungenscript.in.py KMRRUN Job-Script Generator.
import sys
import os
import re
from optparse import OptionParser
kmrhome = '@KMRHOME@'
## Checks file existence.
# If the file does not exist, it prints an error message and exits.
# @param path file path for check.
def _check_exist(path):
if not os.path.exists(path):
print >> sys.stderr, 'Error: file or dir "%s" does not exist.' % path
sys.exit()
return path
## Check if command in the specified command line exists.
# @param cmdline a command line to be executed
# @param sched string that represents scheduler type
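# Example (illustrative): with sched='K', '/home/user/bin/mapper.sh -v'
# is rewritten to './mapper.sh -v' so the staged-in copy of the command is used.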
def check_cmdline(cmdline, sched):
_check_exist(cmdline.split()[0])
if sched.upper() == 'K':
cmdlns = cmdline.split()
cmdlns[0] = './' + os.path.basename(cmdlns[0])
return ' '.join(cmdlns)
else:
return cmdline
## Check if the input directory exists.
# @param dirname name of directory where input files are located
# @param sched string that represents scheduler type
def check_indir(dirname, sched):
_check_exist(dirname)
if sched.upper() == 'K':
_dirname = dirname.rstrip().rstrip('/')
return './' + os.path.basename(_dirname)
else:
return dirname
## Check restart mode.
# It is an error if the requested process count exceeds the checkpointed process count.
# @param restart_basename prefix of checkpoint directory name
# @param procstr string that represents process number
# @param sched string that represents scheduler type
def check_restart(restart_basename, procstr, sched):
if sched.upper() == 'K':
if restart_basename is None: return
ckpt_prefix = restart_basename + '.'
else:
ckpt_prefix = 'ckptdir'
repatter = re.compile(r'^%s\d+$' % ckpt_prefix)
files = os.listdir('./')
count = 0
for file_ in files:
if repatter.match(file_):
count += 1
if count == 0: return
nprocs_file = ckpt_prefix + '00000/nprocs'
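# The nprocs file appears to hold a single 'nprocs=<N>' entry; the value
# after '=' is compared with the number of checkpoint directories found above.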
if not os.path.exists(nprocs_file):
print >> sys.stderr, \
'Error: Checkpoint nprocs file %s does not exist.\n' % nprocs_file
sys.exit()
preprocstr = open(nprocs_file).read()
preproc = preprocstr.split("=")[1]
if count != int(preproc):
print >> sys.stderr, \
'Error: The number of checkpoint files does not match the ' \
'number of executed processes. ***\n'
sys.exit()
proc = k_node_to_int(procstr)
if proc > int(preproc):
print >> sys.stderr, \
'Error: On restart, increasing the number of processes is ' \
'not supported. ***\n'
sys.exit()
if count > proc:
sys.stderr.write("*** Reduction mode. ***\n")
## Parse K node declaration into an integer.
# @param shape_str string that represents K node shape
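# Example (illustrative): '2x3x4' yields 24 and '8:strict' yields 8;
# the optional ':strict' suffix does not affect the computed count.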
def k_node_to_int(shape_str):
m = re.match(r"(\d+)x?(\d+)?x?(\d+)?(:strict)?", shape_str)
prdct = 1
for mstr in m.groups()[0:3]:
if mstr:
prdct *= int(mstr)
return prdct
## Generates job-script for K.
# @param name name of the job
# @param queue queue to submit job
# @param rsctime resource time limit
# @param node number of nodes to execute on.
# @param kmrrun_path path to kmrrun command
# @param kmrrun_parameter parameter for kmrrun
# @param template_path path for template file
# @param shape mpi process shape
# @param proc number of processes to execute
# @param mapper mapper command line
# @param kvgen kv generator command line
# @param reducer reducer command line
# @param indir directory where inputs are located (staged-in)
# @param ckpt enable checkpoint
# @param restart_basename prefix of checkpoint directory name
def k_scheduler(name, queue, rsctime, node, kmrrun_path, kmrrun_parameter,
template_path, shape, proc, mapper, kvgen, reducer, indir,
ckpt, restart_basename):
# Stage in section
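# Each '#PJM --stgin "src dst"' line emitted below is a staging directive for
# the K scheduler: it copies src into the job's working directory as dst
# before the job starts (the exact staging behavior is scheduler-defined).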
stginstr = ''
if mapper:
mapper_cmd = mapper.split()[0]
mapper_cmd_base = os.path.basename(mapper_cmd)
stginstr += '#PJM --stgin "%s %s"' % (mapper_cmd, mapper_cmd_base)
if kvgen:
if len(stginstr):
stginstr += '\n'
kvgen_cmd = kvgen.split()[0]
kvgen_cmd_base = os.path.basename(kvgen_cmd)
stginstr += '#PJM --stgin "%s %s"' % (kvgen_cmd, kvgen_cmd_base)
if reducer:
if len(stginstr):
stginstr += '\n'
reducer_cmd = reducer.split()[0]
reducer_cmd_base = os.path.basename(reducer_cmd)
stginstr += '#PJM --stgin "%s %s"' % (reducer_cmd, reducer_cmd_base)
if len(stginstr):
stginstr += '\n'
indir_stgin = './' + os.path.basename(indir.rstrip().rstrip('/'))
stginstr += '#PJM --stgin "%s/* %s/"' % (indir, indir_stgin)
# Stage in ckpt files
if restart_basename:
fname = os.path.basename(restart_basename) + '.00000/nprocs'
nproc = int(open(fname).read().split('=')[1])
for rank in range(nproc):
stginstr += '\n'
stginstr += '#PJM --stgin "./%s.%05d/* ./ckptdir%05d/"' \
% (restart_basename, rank, rank)
# Stage out section
stgoutstr = "#\n# !!WRITE STGOUT HERE!!\n#"
# Stage out ckpt files
if ckpt or restart_basename:
for rank in range(k_node_to_int(proc)):
stgoutstr += '\n'
stgoutstr += '#PJM --stgout "./ckptdir%05d/* ' \
'./ckptdir_%%j.%05d/"' % (rank, rank)
execstr = 'mpiexec -n %d ./kmrrun %s' % (k_node_to_int(proc), kmrrun_parameter)
template = open(template_path).read()
return template % {'NAME': name, 'QUEUE': queue, 'NODE': node,
'RSCTIME': rsctime, 'KMRRUN': kmrrun_path,
'SHAPE': shape, 'PROC': proc, 'DATASTGIN': stginstr,
'DATASTGOUT': stgoutstr, 'EXEC': execstr}
## Generates job-script for FOCUS supercomputer
# @param name name of the job
# @param queue queue to submit job
# @param rsctime resource time limit
# @param node number of MPI processes to use
# @param kmrrun_path path to kmrrun command
# @param kmrrun_parameter parameter for kmrrun
# @param template_path path for template file
def focus_scheduler(name, queue, rsctime, node, kmrrun_path, kmrrun_parameter,
template_path):
template = open(template_path).read()
return template % {'NAME': name, 'QUEUE': queue, 'NODE': node,
'RSCTIME': rsctime, 'KMRRUN': kmrrun_path,
'KMRRUN_PARAM': kmrrun_parameter}
## Selects job-scheduler.
# @param opts Options to the generator
# @param sched scheduler
def select_scheduler(opts, sched):
# find kmrrun and its job-scheduler templates
template_dir = kmrhome + '/lib'
kmrrun_path = template_dir + '/kmrrun'
if not os.path.exists(kmrrun_path):
# kmrrun does not exist in the install directory. In this case,
# we assume that we are working in the KMRSRC/cmd directory.
template_dir = '.'
kmrrun_path = template_dir + '/../kmrrun/kmrrun'
if not os.path.exists(kmrrun_path):
# error exit
print >> sys.stderr, 'Error: could not find kmrrun utility.'
sys.exit()
# set parameters
queue = opts.queue
node = opts.node
rsctime = opts.rsctime
mapper = check_cmdline(opts.mapper, sched)
kvgen = check_cmdline(opts.kvgen, sched)
reducer = check_cmdline(opts.reducer, sched)
kmrrun_parameter = ''
if opts.taskproc:
kmrrun_parameter += '-n %s ' % (opts.taskproc)
if opts.mapper:
kmrrun_parameter += '-m "%s" ' % (mapper)
if opts.kvgen:
kmrrun_parameter += '-k "%s" ' % (kvgen)
if opts.reducer:
kmrrun_parameter += '-r "%s" ' % (reducer)
if opts.ckpt or opts.restart:
kmrrun_parameter += '--ckpt '
kmrrun_parameter += check_indir(opts.indir, sched)
name = 'kmrrun_job'
if opts.scrfile:
name = opts.scrfile
if sched.upper() == 'K':
script = k_scheduler(name, queue, rsctime, node, kmrrun_path,
kmrrun_parameter,
template_dir + '/kmrrungenscript.template.k',
opts.shape, opts.proc, opts.mapper, opts.kvgen,
opts.reducer, opts.indir,
opts.ckpt, opts.restart)
elif sched.upper() == 'FOCUS':
script = focus_scheduler(name, queue, rsctime, node, kmrrun_path,
kmrrun_parameter,
template_dir + '/kmrrungenscript.template.focus')
# for other schedulers...
else:
print >> sys.stderr, 'Unknown scheduler'
sys.exit()
# output script
if opts.scrfile is None:
print script
else:
out = open(opts.scrfile, "w")
print >> out, script
out.close()
## Warn to write Stage-out section.
# @param opts Options to the generator
def warn_stageout(opts):
if opts.sched != 'K':
return
message = """
#########################################################################
Don't forget to write stage-out directives for MapReduce output files.
"""[1:-1]
if opts.ckpt or opts.restart:
message += """
A job script generated by this program stages-out only checkpoint files.
"""[0:-1]
message += """
#########################################################################
"""
print >> sys.stderr, message
## kmrgenscript main routine.
# It works on Python 2.4 or later.
if __name__ == "__main__":
usage = "usage: %prog [options] -m mapper [-k keygener -r reducer]"
parser = OptionParser(usage)
parser.add_option("-q",
"--queue",
dest="queue",
type="string",
help="queue to submit your job",
metavar="'string'",
default='None')
parser.add_option("-t",
"--resource-time",
dest="rsctime",
type="string",
help="job execution time (default is '00:10:00')",
metavar="'string'",
default='00:10:00')
parser.add_option("-e",
"--number-of-node",
dest="node",
type="string",
help="number of node (default is '12')",
metavar="'string'",
default='12')
parser.add_option("-s",
"--shape",
dest="shape",
type="string",
help="mpi process shape. "
"Valid only on K scheduler. (default is '1')",
metavar="'string'",
default='1')
parser.add_option("-p",
"--proc",
dest="proc",
type="string",
help="number of mpi processes. "
"Valid only on K scheduler. (default is '8')",
metavar="'string'",
default='8')
parser.add_option("-d",
"--inputdir",
dest="indir",
type="string",
help="input file directory. "
"When used on K computer, this directory should be one "
"located in K global storage that is staged-in. "
"(default is './input')",
metavar="'string'",
default='./input')
parser.add_option("-n",
"--task-proc",
dest="taskproc",
type="string",
help="number of processes to run each mapper/reducer "
"(default is 1)",
metavar="number",
default=1)
parser.add_option("-m",
"--mapper",
dest="mapper",
type="string",
help="mapper command path and its arguments",
metavar="'string'")
parser.add_option("-k",
"--kvgen",
dest="kvgen",
type="string",
help="kv generator command path and its arguments",
metavar="'string'")
parser.add_option("-r",
"--reducer",
dest="reducer",
type="string",
help="reducer command path and its arguments",
metavar="'string'")
parser.add_option("-C",
"--ckpt",
dest="ckpt",
action="store_true",
help="enable Checkpoint/Restart (default is false)",
default=False)
parser.add_option("-R",
"--restart-filename",
dest="restart",
type="string",
help="specify prefix of directories where checkpoint "
"files are located. "
"This option should be given when restarting on "
"a system that requires staging. "
"Valid only on K scheduler.",
metavar="'string'")
parser.add_option("-S",
"--scheduler",
dest="sched",
type="string",
help="scheduler type. "
"Specify Scheduler 'K' or 'FOCUS'. "
"'K' supports K computer/FX10 and 'FOCUS' supports "
"Focus supercomputer. (default is 'K')",
metavar="'string'",
default='K')
parser.add_option("-w",
"--write-scriptfile",
dest="scrfile",
type="string",
help="output job script filename",
metavar="'string'")
(options, args) = parser.parse_args()
# check parameters.
if len(args) != 0:
parser.error("Error: unexpected extra arguments")
sys.exit()
if not options.mapper:
print >> sys.stderr, "Error: Mapper is not specified\n"
sys.exit()
if options.reducer and not options.kvgen:
print >> sys.stderr, \
"Error: Specify kv generator when reducer is specified\n"
sys.exit()
if options.ckpt:
if options.sched == 'K':
check_restart(options.restart, options.proc, 'K')
else:
check_restart(options.restart, '1', options.sched)
select_scheduler(options, options.sched)
warn_stageout(options)
# Copyright (C) 2012-2018 RIKEN R-CCS
# This library is distributed WITHOUT ANY WARRANTY. This library can be
# redistributed and/or modified under the terms of the BSD 2-Clause License.
|
|
from uuid import uuid4
from corehq.dbaccessors.couchapps.all_docs import get_doc_count_by_type
from corehq.util.couch_helpers import MultiKeyViewArgsProvider, MultiKwargViewArgsProvider
from corehq.util.doc_processor.interface import DocumentProvider, ProcessorProgressLogger
from corehq.util.pagination import ResumableFunctionIterator
from dimagi.utils.couch.database import retry_on_couch_error
class DocsIterator:
"""Iterate over all documents of the given Couch document class
The number of documents can be counted with `len()`.
Yields unwrapped document dicts.
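Illustrative usage (XFormInstance is only an example document class):
    for doc in DocsIterator(XFormInstance):
        process(doc)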
"""
def __init__(self, couch_class, chunk_size=100):
self.couch_class = couch_class
self.db = couch_class.get_db()
self.doc_type = couch_class.__name__
self.chunk_size = chunk_size
def __len__(self):
if not hasattr(self, "_len"):
self._len = get_doc_count_by_type(self.db, self.doc_type)
return self._len
def __iter__(self):
@retry_on_couch_error
def discard_state():
docs.discard_state()
docs = resumable_view_iterator(
self.db,
uuid4().hex,
'all_docs/by_doc_type',
[[self.doc_type]],
self.chunk_size,
)
try:
yield from docs
finally:
discard_state()
def resumable_view_iterator(db, iteration_key, view_name, view_keys,
chunk_size=100, view_event_handler=None, full_row=False):
"""Perform one-time resumable iteration over a CouchDB View
Iteration can be efficiently stopped and resumed. The iteration may
omit documents that are added after the iteration begins or resumes
and may include deleted documents.
:param db: Couchdb database.
:param iteration_key: A unique key identifying the iteration. This
key will be used to maintain state about an iteration that is in progress.
The state will be maintained indefinitely unless it is removed with `discard_state()`.
:param view_name: The name of the CouchDB view to query
:param view_keys: List of view keys to use when querying the view.
:param chunk_size: Number of documents to yield before updating the
iteration checkpoint. In the worst case about this many documents
that were previously yielded may be yielded again if the iteration
is stopped and later resumed.
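:param view_event_handler: optional callback passed through to the
    underlying ResumableFunctionIterator.
:param full_row: if True, yield full view rows instead of row['doc'].
Illustrative usage (view and key names below are examples only):
    docs = resumable_view_iterator(
        db, 'my-iteration', 'all_docs/by_doc_type', [['XFormInstance']])
    for doc in docs:
        handle(doc)
    docs.discard_state()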
"""
def data_function(**view_kwargs):
view_kwargs["limit"] = chunk_size
return db.view(view_name, **view_kwargs)
if isinstance(view_keys[0], dict):
args_provider = MultiKwargViewArgsProvider(view_keys, include_docs=True)
else:
args_provider = MultiKeyViewArgsProvider(view_keys, include_docs=True)
args_provider.initial_view_kwargs.pop("limit")
class ResumableDocsIterator(ResumableFunctionIterator):
def __iter__(self):
for result in super(ResumableDocsIterator, self).__iter__():
yield result if full_row else result['doc']
return ResumableDocsIterator(iteration_key, data_function, args_provider, view_event_handler)
def resumable_docs_by_type_iterator(db, doc_types, iteration_key, chunk_size=100,
view_event_handler=None, domain=None):
"""Perform one-time resumable iteration over documents by type
Iteration can be efficiently stopped and resumed. The iteration may
omit documents that are added after the iteration begins or resumes
and may include deleted documents.
:param db: Couchdb database.
:param doc_types: A list of doc type names to iterate on (can't be empty).
:param iteration_key: A unique key identifying the iteration. This
key will be used to maintain state about an iteration that is in progress.
The state will be maintained indefinitely unless it is removed with `discard_state()`.
:param chunk_size: Number of documents to yield before updating the
iteration checkpoint. In the worst case about this many documents
that were previously yielded may be yielded again if the iteration
is stopped and later resumed.
:param domain: If the domain is specified only iterate over docs for that domain
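For example (illustrative values), domain='my-domain' with
doc_types=['XFormInstance'] queries 'by_domain_doc_type_date/view' with the
key ['my-domain', 'XFormInstance']; without a domain, 'all_docs/by_doc_type'
is queried with the key ['XFormInstance'].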
"""
view_name = 'by_domain_doc_type_date/view' if domain else 'all_docs/by_doc_type'
def _get_key(doc_type):
if domain:
return [domain, doc_type]
return [doc_type]
keys = [_get_key(doc_type) for doc_type in doc_types]
return resumable_view_iterator(db, iteration_key, view_name, keys, chunk_size, view_event_handler)
class CouchProcessorProgressLogger(ProcessorProgressLogger):
"""
:param doc_types: List of doc_types that are being processed
"""
def __init__(self, doc_types):
super().__init__()
self.doc_types = doc_type_tuples_to_list(doc_types)
def progress_starting(self, total, previously_visited):
print("Processing {} documents{}: {}...".format(
total,
" (~{} already processed)".format(previously_visited) if previously_visited else "",
", ".join(self.doc_types)
))
class CouchDocumentProvider(DocumentProvider):
"""Document provider for couch documents.
All documents must live in the same couch database.
:param iteration_key: unique key to identify the document iterator. Must be unique
across all document iterators.
:param doc_type_tuples: An ordered sequence where each item in the sequence should be
either a doc type class or a tuple ``(doc_type_name_string, doc_type_class)``
if the doc type name is different from the model class name.
Note that the order of the sequence should never change while the iteration is
in progress to avoid skipping doc types.
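Illustrative construction (MyDoc and 'LegacyDocType' are placeholder names):
    CouchDocumentProvider('my-migration', [MyDoc, ('LegacyDocType', MyDoc)])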
"""
def __init__(self, iteration_key, doc_type_tuples, domain=None):
self.iteration_key = iteration_key
self.domain = domain
assert isinstance(doc_type_tuples, list)
self.doc_types = doc_type_tuples_to_list(doc_type_tuples)
self.doc_type_map = doc_type_tuples_to_dict(doc_type_tuples)
if len(doc_type_tuples) != len(self.doc_type_map):
raise ValueError("Invalid (duplicate?) doc types")
self.couchdb = next(iter(self.doc_type_map.values())).get_db()
couchid = lambda db: getattr(db, "dbname", id(db)) # noqa: E731
dbid = couchid(self.couchdb)
assert all(couchid(m.get_db()) == dbid for m in self.doc_type_map.values()), \
"documents must live in same couch db: %s" % repr(self.doc_type_map)
if domain:
for doc_class in self.doc_type_map.values():
properties_by_key = doc_class._properties_by_key
assert 'domain' in properties_by_key, "{} does not have a 'domain' property".format(doc_class)
def get_document_iterator(self, chunk_size, event_handler=None):
return resumable_docs_by_type_iterator(
self.couchdb, self.doc_types, self.iteration_key,
chunk_size=chunk_size, view_event_handler=event_handler,
domain=self.domain
)
def get_total_document_count(self):
from corehq.dbaccessors.couchapps.all_docs import get_doc_count_by_type, get_doc_count_by_domain_type
if self.domain:
return sum(
get_doc_count_by_domain_type(self.couchdb, self.domain, doc_type)
for doc_type in self.doc_type_map
)
else:
return sum(
get_doc_count_by_type(self.couchdb, doc_type)
for doc_type in self.doc_type_map
)
class CouchViewDocumentProvider(DocumentProvider):
def __init__(self, couchdb, iteration_key, view_name, view_keys):
self.couchdb = couchdb
self.iteration_key = iteration_key
self.view_name = view_name
self.view_keys = view_keys
def get_document_iterator(self, chunk_size, event_handler=None):
return resumable_view_iterator(
self.couchdb, self.iteration_key, self.view_name, self.view_keys,
chunk_size=chunk_size, view_event_handler=event_handler
)
def get_total_document_count(self):
return -1
def doc_type_tuples_to_dict(doc_types):
return dict(
t if isinstance(t, tuple) else (t.__name__, t) for t in doc_types
)
def doc_type_tuples_to_list(doc_types):
return sorted(doc_type_tuples_to_dict(doc_types))
|
|
# Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import inspect
import json
import os
import re
import tempfile
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import compute
from cinder import db
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.volume import driver
LOG = logging.getLogger(__name__)
nas_opts = [
# TODO(eharney): deprecate nas_ip and change this to nas_host
cfg.StrOpt('nas_ip',
default='',
help='IP address or Hostname of NAS system.'),
cfg.StrOpt('nas_login',
default='admin',
help='User name to connect to NAS system.'),
cfg.StrOpt('nas_password',
default='',
help='Password to connect to NAS system.',
secret=True),
cfg.IntOpt('nas_ssh_port',
default=22,
help='SSH port to use to connect to NAS system.'),
cfg.StrOpt('nas_private_key',
default='',
help='Filename of private key to use for SSH authentication.'),
cfg.StrOpt('nas_secure_file_operations',
default='auto',
help=('Allow network-attached storage systems to operate in a '
'secure environment where root level access is not '
'permitted. If set to False, access is as the root user '
'and insecure. If set to True, access is not as root. '
'If set to auto, a check is done to determine if this is '
'a new installation: True is used if so, otherwise '
'False. Default is auto.')),
cfg.StrOpt('nas_secure_file_permissions',
default='auto',
help=('Set more secure file permissions on network-attached '
'storage volume files to restrict broad other/world '
'access. If set to False, volumes are created with open '
'permissions. If set to True, volumes are created with '
'permissions for the cinder user and group (660). If '
'set to auto, a check is done to determine if '
'this is a new installation: True is used if so, '
'otherwise False. Default is auto.')),
cfg.StrOpt('nas_share_path',
default='',
help=('Path to the share to use for storing Cinder volumes. '
'For example: "/srv/export1" for an NFS server export '
'available at 10.0.5.10:/srv/export1 .')),
cfg.StrOpt('nas_mount_options',
default=None,
help=('Options used to mount the storage backend file system '
'where Cinder volumes are stored.')),
]
old_vol_type_opts = [cfg.DeprecatedOpt('glusterfs_sparsed_volumes'),
cfg.DeprecatedOpt('glusterfs_qcow2_volumes')]
volume_opts = [
cfg.StrOpt('nas_volume_prov_type',
default='thin',
choices=['thin', 'thick'],
deprecated_opts=old_vol_type_opts,
help=('Provisioning type that will be used when '
'creating volumes.')),
]
CONF = cfg.CONF
CONF.register_opts(nas_opts)
def locked_volume_id_operation(f, external=False):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named
with the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
May be applied to methods of signature:
method(<self>, volume, *, **)
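Illustrative application (sketch only):
    @locked_volume_id_operation
    def extend_volume(self, volume, new_size):
        ...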
"""
def lvo_inner1(inst, *args, **kwargs):
lock_tag = inst.driver_prefix
call_args = inspect.getcallargs(f, inst, *args, **kwargs)
if call_args.get('volume'):
volume_id = call_args['volume']['id']
elif call_args.get('snapshot'):
volume_id = call_args['snapshot']['volume']['id']
else:
err_msg = _('The decorated method must accept either a volume or '
'a snapshot object')
raise exception.VolumeBackendAPIException(data=err_msg)
@utils.synchronized('%s-%s' % (lock_tag, volume_id),
external=external)
def lvo_inner2():
return f(inst, *args, **kwargs)
return lvo_inner2()
return lvo_inner1
class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD):
"""Common base for drivers that work like NFS."""
driver_volume_type = None
driver_prefix = 'remotefs'
volume_backend_name = None
SHARE_FORMAT_REGEX = r'.+:/.+'
def __init__(self, *args, **kwargs):
super(RemoteFSDriver, self).__init__(*args, **kwargs)
self.shares = {}
self._mounted_shares = []
self._execute_as_root = True
self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None)
if self.configuration:
self.configuration.append_config_values(nas_opts)
self.configuration.append_config_values(volume_opts)
def check_for_setup_error(self):
"""Just to override parent behavior."""
pass
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
:param volume: volume reference
:param connector: connector reference
"""
data = {'export': volume['provider_location'],
'name': volume['name']}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
return {
'driver_volume_type': self.driver_volume_type,
'data': data,
'mount_point_base': self._get_mount_point_base()
}
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(RemoteFSDriver, self).do_setup(context)
# Validate the settings for our secure file options.
self.configuration.nas_secure_file_permissions = \
self.configuration.nas_secure_file_permissions.lower()
self.configuration.nas_secure_file_operations = \
self.configuration.nas_secure_file_operations.lower()
valid_secure_opts = ['auto', 'true', 'false']
secure_options = {'nas_secure_file_permissions':
self.configuration.nas_secure_file_permissions,
'nas_secure_file_operations':
self.configuration.nas_secure_file_operations}
for opt_name, opt_value in secure_options.items():
if opt_value not in valid_secure_opts:
err_parms = {'name': opt_name, 'value': opt_value}
msg = _("NAS config '%(name)s=%(value)s' invalid. Must be "
"'auto', 'true', or 'false'") % err_parms
LOG.error(msg)
raise exception.InvalidConfigurationValue(msg)
def _get_provisioned_capacity(self):
"""Returns the provisioned capacity.
Get the sum of sizes of volumes, snapshots and any other
files on the mountpoint.
"""
provisioned_size = 0.0
for share in self.shares.keys():
mount_path = self._get_mount_point_for_share(share)
out, _ = self._execute('du', '--bytes', mount_path,
run_as_root=True)
provisioned_size += int(out.split()[0])
return round(provisioned_size / units.Gi, 2)
def _get_mount_point_base(self):
"""Returns the mount point base for the remote fs.
This method facilitates returning mount point base
for the specific remote fs. Override this method
in the respective driver to return the entry to be
used while attach/detach using brick in cinder.
If not overridden then it returns None without
raising exception to continue working for cases
when not used with brick.
"""
LOG.debug("Driver specific implementation needs to return"
" mount_point_base.")
return None
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
:returns: provider_location update dict for database
"""
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
return {'provider_location': volume['provider_location']}
def _do_create_volume(self, volume):
"""Create a volume on given remote share.
:param volume: volume reference
"""
volume_path = self.local_path(volume)
volume_size = volume['size']
if getattr(self.configuration,
self.driver_prefix + '_sparsed_volumes'):
self._create_sparsed_file(volume_path, volume_size)
else:
self._create_regular_file(volume_path, volume_size)
self._set_rw_permissions(volume_path)
def _ensure_shares_mounted(self):
"""Look for remote shares in the flags and mount them locally."""
mounted_shares = []
self._load_shares_config(getattr(self.configuration,
self.driver_prefix +
'_shares_config'))
for share in self.shares.keys():
try:
self._ensure_share_mounted(share)
mounted_shares.append(share)
except Exception as exc:
LOG.error(_LE('Exception during mounting %s'), exc)
self._mounted_shares = mounted_shares
LOG.debug('Available shares %s', self._mounted_shares)
def delete_volume(self, volume):
"""Deletes a logical volume.
:param volume: volume reference
"""
if not volume['provider_location']:
LOG.warning(_LW('Volume %s does not have '
'provider_location specified, '
'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
mounted_path = self.local_path(volume)
self._delete(mounted_path)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
self._ensure_share_mounted(volume['provider_location'])
def create_export(self, ctx, volume, connector):
"""Exports the volume.
Can optionally return a dictionary of changes
to the volume object to be persisted.
"""
pass
def remove_export(self, ctx, volume):
"""Removes an export for a logical volume."""
pass
def delete_snapshot(self, snapshot):
"""Delete snapshot.
Do nothing for this driver, but allow manager to handle deletion
of snapshot in error state.
"""
pass
def _delete(self, path, force_run_as_root=False):
# Note(lpetrut): this method is needed in order to provide
# interoperability with Windows as it will be overridden.
run_as_root = self._execute_as_root or force_run_as_root
self._execute('rm', '-f', path, run_as_root=run_as_root)
def _create_sparsed_file(self, path, size):
"""Creates a sparse file of a given size in GiB."""
self._execute('truncate', '-s', '%sG' % size,
path, run_as_root=self._execute_as_root)
def _create_regular_file(self, path, size):
"""Creates a regular file of given size in GiB."""
block_size_mb = 1
block_count = size * units.Gi / (block_size_mb * units.Mi)
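# e.g. a 1 GiB volume with 1 MiB blocks gives block_count = 1024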
self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
'bs=%dM' % block_size_mb,
'count=%d' % block_count,
run_as_root=self._execute_as_root)
def _fallocate(self, path, size):
"""Creates a raw file of given size in GiB using fallocate."""
self._execute('fallocate', '--length=%sG' % size,
path, run_as_root=True)
def _create_qcow2_file(self, path, size_gb):
"""Creates a QCOW2 file of a given size in GiB."""
self._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata',
path, str(size_gb * units.Gi),
run_as_root=self._execute_as_root)
def _set_rw_permissions(self, path):
"""Sets access permissions for given NFS path.
Volume file permissions are set based upon the value of
secure_file_permissions: 'true' sets secure access permissions and
'false' sets more open (insecure) access permissions.
:param path: the volume file path.
"""
if self.configuration.nas_secure_file_permissions == 'true':
permissions = '660'
LOG.debug('File path %(path)s is being set with permissions: '
'%(permissions)s',
{'path': path, 'permissions': permissions})
else:
permissions = 'ugo+rw'
LOG.warning(_LW('%(path)s is being set with open permissions: '
'%(perm)s'), {'path': path, 'perm': permissions})
self._execute('chmod', permissions, path,
run_as_root=self._execute_as_root)
def _set_rw_permissions_for_all(self, path):
"""Sets 666 permissions for the path."""
self._execute('chmod', 'ugo+rw', path,
run_as_root=self._execute_as_root)
def _set_rw_permissions_for_owner(self, path):
"""Sets read-write permissions to the owner for the path."""
self._execute('chmod', 'u+rw', path,
run_as_root=self._execute_as_root)
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference
"""
remotefs_share = volume['provider_location']
return os.path.join(self._get_mount_point_for_share(remotefs_share),
volume['name'])
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
run_as_root = self._execute_as_root
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'],
run_as_root=run_as_root)
# NOTE (leseb): Set the virtual size of the image
# the raw conversion overwrote the destination file
# (which had the correct size)
# with the fetched glance image size,
# thus the initial 'size' parameter is not honored
# this sets the size to the one asked in the first place by the user
# and then verify the final virtual size
image_utils.resize_image(self.local_path(volume), volume['size'],
run_as_root=run_as_root)
data = image_utils.qemu_img_info(self.local_path(volume),
run_as_root=run_as_root)
virt_size = data.virtual_size / units.Gi
if virt_size != volume['size']:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=(_("Expected volume size was %d") % volume['size'])
+ (_(" but size is now %d") % virt_size))
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def _read_config_file(self, config_file):
# Returns list of lines in file
with open(config_file) as f:
return f.readlines()
def _load_shares_config(self, share_file=None):
self.shares = {}
if all((self.configuration.nas_ip,
self.configuration.nas_share_path)):
LOG.debug('Using nas_ip and nas_share_path configuration.')
nas_ip = self.configuration.nas_ip
nas_share_path = self.configuration.nas_share_path
share_address = '%s:%s' % (nas_ip, nas_share_path)
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
msg = _("Share %s ignored due to invalid format. Must "
"be of form address:/export. Please check the "
"nas_ip and nas_share_path settings.") % share_address
raise exception.InvalidConfigurationValue(msg)
self.shares[share_address] = self.configuration.nas_mount_options
elif share_file is not None:
LOG.debug('Loading shares from %s.', share_file)
for share in self._read_config_file(share_file):
# A configuration line may be either:
# host:/vol_name
# or
# host:/vol_name -o options=123,rw --other
if not share.strip():
# Skip blank or whitespace-only lines
continue
if share.startswith('#'):
continue
share_info = share.split(' ', 1)
# results in share_info =
# [ 'address:/vol', '-o options=123,rw --other' ]
share_address = share_info[0].strip()
# Replace \040 with a space, to support paths with spaces
share_address = share_address.replace("\\040", " ")
share_opts = None
if len(share_info) > 1:
share_opts = share_info[1].strip()
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
LOG.error(_LE("Share %s ignored due to invalid format. "
"Must be of form address:/export."),
share_address)
continue
self.shares[share_address] = share_opts
LOG.debug("shares loaded: %s", self.shares)
def _get_mount_point_for_share(self, path):
raise NotImplementedError()
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
pass
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
if refresh or not self._stats:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.volume_backend_name
data['vendor_name'] = 'Open Source'
data['driver_version'] = self.get_version()
data['storage_protocol'] = self.driver_volume_type
self._ensure_shares_mounted()
global_capacity = 0
global_free = 0
for share in self._mounted_shares:
capacity, free, used = self._get_capacity_info(share)
global_capacity += capacity
global_free += free
data['total_capacity_gb'] = global_capacity / float(units.Gi)
data['free_capacity_gb'] = global_free / float(units.Gi)
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = False
self._stats = data
def _get_capacity_info(self, share):
raise NotImplementedError()
def _find_share(self, volume_size_in_gib):
raise NotImplementedError()
def _ensure_share_mounted(self, share):
raise NotImplementedError()
def secure_file_operations_enabled(self):
"""Determine if driver is operating in Secure File Operations mode.
The Cinder Volume driver needs to query if this driver is operating
in a secure file mode; check our nas_secure_file_operations flag.
"""
if self.configuration.nas_secure_file_operations == 'true':
return True
return False
def set_nas_security_options(self, is_new_cinder_install):
"""Determine the setting to use for Secure NAS options.
This method must be overridden by child wishing to use secure
NAS file operations. This base method will set the NAS security
options to false.
"""
doc_html = "http://docs.openstack.org/admin-guide-cloud/content" \
"/nfs_backend.html"
self.configuration.nas_secure_file_operations = 'false'
LOG.warning(_LW("The NAS file operations will be run as root: "
"allowing root level access at the storage backend. "
"This is considered an insecure NAS environment. "
"Please see %s for information on a secure NAS "
"configuration."),
doc_html)
self.configuration.nas_secure_file_permissions = 'false'
LOG.warning(_LW("The NAS file permissions mode will be 666 (allowing "
"other/world read & write access). This is considered "
"an insecure NAS environment. Please see %s for "
"information on a secure NFS configuration."),
doc_html)
def _determine_nas_security_option_setting(self, nas_option, mount_point,
is_new_cinder_install):
"""Determine NAS security option setting when 'auto' is assigned.
This method determines the final 'true'/'false' setting of an NAS
security option when the default value of 'auto' has been detected.
If the nas option isn't 'auto' then its current value is used.
:param nas_option: The NAS security option value loaded from config.
:param mount_point: Mount where indicator file is written.
:param is_new_cinder_install: boolean for new Cinder installation.
:return string: 'true' or 'false' for new option setting.
"""
if nas_option == 'auto':
# For auto detection, we first check to see if we have been
# through this process before by checking for the existence of
# the Cinder secure environment indicator file.
file_name = '.cinderSecureEnvIndicator'
file_path = os.path.join(mount_point, file_name)
if os.path.isfile(file_path):
nas_option = 'true'
LOG.info(_LI('Cinder secure environment '
'indicator file exists.'))
else:
# The indicator file does not exist. If it is a new
# installation, set to 'true' and create the indicator file.
if is_new_cinder_install:
nas_option = 'true'
try:
with open(file_path, 'w') as fh:
fh.write('Detector file for Cinder secure '
'environment usage.\n')
fh.write('Do not delete this file.\n')
# Set the permissions on our special marker file to
# protect from accidental removal (owner write only).
self._execute('chmod', '640', file_path,
run_as_root=False)
LOG.info(_LI('New Cinder secure environment indicator'
' file created at path %s.'), file_path)
except IOError as err:
LOG.error(_LE('Failed to create Cinder secure '
'environment indicator file: %s'),
err)
else:
# For existing installs, we default to 'false'. The
# admin can always set the option at the driver config.
nas_option = 'false'
return nas_option
class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD):
"""Base class for remotefs drivers implementing qcow2 snapshots.
Driver must implement:
_local_volume_dir(self, volume)
"""
def __init__(self, *args, **kwargs):
self._remotefsclient = None
self.base = None
self._nova = None
super(RemoteFSSnapDriver, self).__init__(*args, **kwargs)
def do_setup(self, context):
super(RemoteFSSnapDriver, self).do_setup(context)
self._nova = compute.API()
def _local_volume_dir(self, volume):
share = volume['provider_location']
local_dir = self._get_mount_point_for_share(share)
return local_dir
def _local_path_volume(self, volume):
path_to_disk = os.path.join(
self._local_volume_dir(volume),
volume['name'])
return path_to_disk
def _get_new_snap_path(self, snapshot):
vol_path = self.local_path(snapshot['volume'])
snap_path = '%s.%s' % (vol_path, snapshot['id'])
return snap_path
def _local_path_volume_info(self, volume):
return '%s%s' % (self.local_path(volume), '.info')
def _read_file(self, filename):
"""This method is to make it easier to stub out code for testing.
Returns a string representing the contents of the file.
"""
with open(filename, 'r') as f:
return f.read()
def _write_info_file(self, info_path, snap_info):
if 'active' not in snap_info.keys():
msg = _("'active' must be present when writing snap_info.")
raise exception.RemoteFSException(msg)
with open(info_path, 'w') as f:
json.dump(snap_info, f, indent=1, sort_keys=True)
def _qemu_img_info_base(self, path, volume_name, basedir):
"""Sanitize image_utils' qemu_img_info.
This code expects to deal only with relative filenames.
"""
info = image_utils.qemu_img_info(path)
if info.image:
info.image = os.path.basename(info.image)
if info.backing_file:
# Backslashes are replaced so that this check will work with
# Windows paths as well.
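# The pattern accepts the bare volume file name, optionally prefixed by a
# hashed share directory ('<hex>/') and optionally suffixed with a snapshot
# id, e.g. 'volume-<id>.<snap-id>' or 'volume-<id>.tmp-snap-<snap-id>'.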
backing_file_template = \
"(%(basedir)s/[0-9a-f]+/)?%" \
"(volname)s(.(tmp-snap-)?[0-9a-f-]+)?$" % {
'basedir': basedir.replace('\\', '/'),
'volname': volume_name
}
if not re.match(backing_file_template,
info.backing_file.replace('\\', '/')):
msg = _("File %(path)s has invalid backing file "
"%(bfile)s, aborting.") % {'path': path,
'bfile': info.backing_file}
raise exception.RemoteFSException(msg)
info.backing_file = os.path.basename(info.backing_file)
return info
def _qemu_img_info(self, path, volume_name):
raise NotImplementedError()
def _img_commit(self, path):
self._execute('qemu-img', 'commit', path,
run_as_root=self._execute_as_root)
self._delete(path)
def _rebase_img(self, image, backing_file, volume_format):
self._execute('qemu-img', 'rebase', '-u', '-b', backing_file, image,
'-F', volume_format, run_as_root=self._execute_as_root)
def _read_info_file(self, info_path, empty_if_missing=False):
"""Return dict of snapshot information.
:param: info_path: path to file
:param: empty_if_missing: True=return empty dict if no file
"""
if not os.path.exists(info_path):
if empty_if_missing is True:
return {}
return json.loads(self._read_file(info_path))
def _get_backing_chain_for_path(self, volume, path):
"""Returns list of dicts containing backing-chain information.
Includes 'filename', and 'backing-filename' for each
applicable entry.
Consider converting this to use --backing-chain and --output=json
when environment supports qemu-img 1.5.0.
:param volume: volume reference
:param path: path to image file at top of chain
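Example return value (illustrative file names):
    [{'filename': 'volume-abc.snap2', 'backing-filename': 'volume-abc.snap1'},
     {'filename': 'volume-abc.snap1', 'backing-filename': 'volume-abc'},
     {'filename': 'volume-abc', 'backing-filename': None}]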
"""
output = []
info = self._qemu_img_info(path, volume['name'])
new_info = {}
new_info['filename'] = os.path.basename(path)
new_info['backing-filename'] = info.backing_file
output.append(new_info)
while new_info['backing-filename']:
filename = new_info['backing-filename']
path = os.path.join(self._local_volume_dir(volume), filename)
info = self._qemu_img_info(path, volume['name'])
backing_filename = info.backing_file
new_info = {}
new_info['filename'] = filename
new_info['backing-filename'] = backing_filename
output.append(new_info)
return output
def _get_hash_str(self, base_str):
"""Return a string that represents hash of base_str.
Returns string in a hex format.
"""
if isinstance(base_str, six.text_type):
base_str = base_str.encode('utf-8')
return hashlib.md5(base_str).hexdigest()
def _get_mount_point_for_share(self, share):
"""Return mount point for share.
:param share: example 172.18.194.100:/var/fs
"""
return self._remotefsclient.get_mount_point(share)
def _get_available_capacity(self, share):
"""Calculate available space on the share.
:param share: example 172.18.194.100:/var/fs
"""
mount_point = self._get_mount_point_for_share(share)
out, _ = self._execute('df', '--portability', '--block-size', '1',
mount_point,
run_as_root=self._execute_as_root)
out = out.splitlines()[1]
size = int(out.split()[1])
available = int(out.split()[3])
return available, size
def _get_capacity_info(self, remotefs_share):
available, size = self._get_available_capacity(remotefs_share)
return size, available, size - available
def _get_mount_point_base(self):
return self.base
def _ensure_share_writable(self, path):
"""Ensure that the Cinder user can write to the share.
If not, raise an exception.
:param path: path to test
:raises: RemoteFSException
:returns: None
"""
prefix = '.cinder-write-test-' + str(os.getpid()) + '-'
try:
tempfile.NamedTemporaryFile(prefix=prefix, dir=path)
except OSError:
msg = _('Share at %(dir)s is not writable by the '
'Cinder volume service. Snapshot operations will not be '
'supported.') % {'dir': path}
raise exception.RemoteFSException(msg)
def _copy_volume_to_image(self, context, volume, image_service,
image_meta):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
info = self._qemu_img_info(active_file_path, volume['name'])
backing_file = info.backing_file
root_file_fmt = info.file_format
tmp_params = {
'prefix': '%s.temp_image.%s' % (volume['id'], image_meta['id']),
'suffix': '.img'
}
with image_utils.temporary_file(**tmp_params) as temp_path:
if backing_file or (root_file_fmt != 'raw'):
# Convert due to snapshots
# or volume data not being stored in raw format
# (upload_volume assumes raw format input)
image_utils.convert_image(active_file_path, temp_path, 'raw')
upload_path = temp_path
else:
upload_path = active_file_path
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path)
def get_active_image_from_info(self, volume):
"""Returns filename of the active image from the info file."""
info_file = self._local_path_volume_info(volume)
snap_info = self._read_info_file(info_file, empty_if_missing=True)
if not snap_info:
# No info file = no snapshots exist
vol_path = os.path.basename(self.local_path(volume))
return vol_path
return snap_info['active']
def _create_cloned_volume(self, volume, src_vref):
LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s'),
{'src': src_vref['id'],
'dst': volume['id']})
if src_vref['status'] != 'available':
msg = _("Volume status must be 'available'.")
raise exception.InvalidVolume(msg)
volume_name = CONF.volume_name_template % volume['id']
volume_info = {'provider_location': src_vref['provider_location'],
'size': src_vref['size'],
'id': volume['id'],
'name': volume_name,
'status': src_vref['status']}
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % src_vref['id'],
'volume_id': src_vref['id'],
'id': 'tmp-snap-%s' % src_vref['id'],
'volume': src_vref}
self._create_snapshot(temp_snapshot)
try:
self._copy_volume_from_snapshot(temp_snapshot,
volume_info,
volume['size'])
finally:
self._delete_snapshot(temp_snapshot)
return {'provider_location': src_vref['provider_location']}
def _delete_stale_snapshot(self, snapshot):
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
snapshot_file = snap_info[snapshot['id']]
active_file = self.get_active_image_from_info(snapshot['volume'])
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']), snapshot_file)
if (snapshot_file == active_file):
return
LOG.info(_LI('Deleting stale snapshot: %s'), snapshot['id'])
self._delete(snapshot_path)
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
def _delete_snapshot(self, snapshot):
"""Delete a snapshot.
If volume status is 'available', delete snapshot here in Cinder
using qemu-img.
If volume status is 'in-use', calculate what qcow2 files need to
merge, and call to Nova to perform this operation.
:raises: InvalidVolume if status not acceptable
:raises: RemoteFSException(msg) if operation fails
:returns: None
"""
LOG.debug('Deleting snapshot %s:', snapshot['id'])
volume_status = snapshot['volume']['status']
if volume_status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use".')
raise exception.InvalidVolume(msg)
vol_path = self._local_volume_dir(snapshot['volume'])
self._ensure_share_writable(vol_path)
# Determine the true snapshot file for this snapshot
# based on the .info file
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path, empty_if_missing=True)
if snapshot['id'] not in snap_info:
# If snapshot info file is present, but snapshot record does not
# exist, do not attempt to delete.
# (This happens, for example, if snapshot_create failed due to lack
# of permission to write to the share.)
LOG.info(_LI('Snapshot record for %s is not present, allowing '
'snapshot_delete to proceed.'), snapshot['id'])
return
snapshot_file = snap_info[snapshot['id']]
LOG.debug('snapshot_file for this snap is: %s', snapshot_file)
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
snapshot_file)
snapshot_path_img_info = self._qemu_img_info(
snapshot_path,
snapshot['volume']['name'])
base_file = snapshot_path_img_info.backing_file
if base_file is None:
# There should always be at least the original volume
# file as base.
LOG.warning(_LW('No backing file found for %s, allowing '
'snapshot to be deleted.'), snapshot_path)
# Snapshot may be stale, so just delete it and update the
# info file instead of blocking
return self._delete_stale_snapshot(snapshot)
base_path = os.path.join(vol_path, base_file)
base_file_img_info = self._qemu_img_info(base_path,
snapshot['volume']['name'])
# Find what file has this as its backing file
active_file = self.get_active_image_from_info(snapshot['volume'])
active_file_path = os.path.join(vol_path, active_file)
if volume_status == 'in-use':
# Online delete
context = snapshot['context']
new_base_file = base_file_img_info.backing_file
base_id = None
for key, value in snap_info.items():
if value == base_file and key != 'active':
base_id = key
break
if base_id is None:
# This means we are deleting the oldest snapshot
LOG.debug('No %(base_id)s found for %(file)s',
{'base_id': 'base_id', 'file': snapshot_file})
online_delete_info = {
'active_file': active_file,
'snapshot_file': snapshot_file,
'base_file': base_file,
'base_id': base_id,
'new_base_file': new_base_file
}
return self._delete_snapshot_online(context,
snapshot,
online_delete_info)
if snapshot_file == active_file:
# There is no top file
# T0 | T1 |
# base | snapshot_file | None
# (guaranteed to| (being deleted, |
# exist) | committed down) |
self._img_commit(snapshot_path)
# Active file has changed
snap_info['active'] = base_file
else:
# T0 | T1 | T2 | T3
# base | snapshot_file | higher_file | highest_file
# (guaranteed to | (being deleted, | (guaranteed to | (may exist)
# exist, not | committed down) | exist, needs |
# used here) | | ptr update) |
backing_chain = self._get_backing_chain_for_path(
snapshot['volume'], active_file_path)
# This file is guaranteed to exist since we aren't operating on
# the active file.
higher_file = next((os.path.basename(f['filename'])
for f in backing_chain
if f.get('backing-filename', '') ==
snapshot_file),
None)
if higher_file is None:
msg = _('No file found with %s as backing file.') %\
snapshot_file
raise exception.RemoteFSException(msg)
higher_id = next((i for i in snap_info
if snap_info[i] == higher_file
and i != 'active'),
None)
if higher_id is None:
msg = _('No snap found with %s as backing file.') %\
higher_file
raise exception.RemoteFSException(msg)
self._img_commit(snapshot_path)
higher_file_path = os.path.join(vol_path, higher_file)
base_file_fmt = base_file_img_info.file_format
self._rebase_img(higher_file_path, base_file, base_file_fmt)
# Remove snapshot_file from info
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
def _create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
Snapshot must not be the active snapshot. (offline)
"""
if snapshot['status'] != 'available':
msg = _('Snapshot status must be "available" to clone.')
raise exception.InvalidSnapshot(msg)
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
self._do_create_volume(volume)
self._copy_volume_from_snapshot(snapshot,
volume,
volume['size'])
return {'provider_location': volume['provider_location']}
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
raise NotImplementedError()
def _do_create_snapshot(self, snapshot, backing_filename,
new_snap_path):
"""Create a QCOW2 file backed by another file.
:param snapshot: snapshot reference
:param backing_filename: filename of file that will back the
new qcow2 file
:param new_snap_path: filename of new qcow2 file
"""
backing_path_full_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
backing_filename)
command = ['qemu-img', 'create', '-f', 'qcow2', '-o',
'backing_file=%s' % backing_path_full_path, new_snap_path]
self._execute(*command, run_as_root=self._execute_as_root)
info = self._qemu_img_info(backing_path_full_path,
snapshot['volume']['name'])
backing_fmt = info.file_format
command = ['qemu-img', 'rebase', '-u',
'-b', backing_filename,
'-F', backing_fmt,
new_snap_path]
self._execute(*command, run_as_root=self._execute_as_root)
self._set_rw_permissions(new_snap_path)
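# For illustration (paths assumed), creating snapshot file volume-1234.aaaa
# on top of volume-1234 runs roughly:
#   qemu-img create -f qcow2 \
#       -o backing_file=/mnt/<hash>/volume-1234 /mnt/<hash>/volume-1234.aaaa
#   qemu-img rebase -u -b volume-1234 -F qcow2 /mnt/<hash>/volume-1234.aaaa
# The unsafe rebase only rewrites the qcow2 header so the backing file is
# recorded by its relative name, keeping the chain usable if the share is
# mounted at a different path later.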
def _create_snapshot(self, snapshot):
"""Create a snapshot.
If volume is attached, call to Nova to create snapshot, providing a
qcow2 file. Cinder creates and deletes qcow2 files, but Nova is
responsible for transitioning the VM between them and handling live
transfers of data between files as required.
If volume is detached, create locally with qemu-img. Cinder handles
manipulation of qcow2 files.
A file named volume-<uuid>.info is stored with the volume
data and is a JSON table which contains a mapping between
Cinder snapshot UUIDs and filenames, as these associations
will change as snapshots are deleted.
Basic snapshot operation:
1. Initial volume file:
volume-1234
2. Snapshot created:
volume-1234 <- volume-1234.aaaa
volume-1234.aaaa becomes the new "active" disk image.
If the volume is not attached, this filename will be used to
attach the volume to a VM at volume-attach time.
If the volume is attached, the VM will switch to this file as
part of the snapshot process.
Note that volume-1234.aaaa represents changes after snapshot
'aaaa' was created. So the data for snapshot 'aaaa' is actually
in the backing file(s) of volume-1234.aaaa.
This file has a qcow2 header recording the fact that volume-1234 is
its backing file. Delta changes since the snapshot was created are
stored in this file, and the backing file (volume-1234) does not
change.
info file: { 'active': 'volume-1234.aaaa',
'aaaa': 'volume-1234.aaaa' }
3. Second snapshot created:
volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb
volume-1234.bbbb now becomes the "active" disk image, recording
changes made to the volume.
info file: { 'active': 'volume-1234.bbbb', (* changed!)
'aaaa': 'volume-1234.aaaa',
'bbbb': 'volume-1234.bbbb' } (* added!)
4. Snapshot deletion when volume is attached ('in-use' state):
* When first snapshot is deleted, Cinder calls Nova for online
snapshot deletion. Nova deletes snapshot with id "aaaa" and
makes snapshot with id "bbbb" point to the base image.
Snapshot with id "bbbb" is the active image.
volume-1234 <- volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.bbbb'
}
* When second snapshot is deleted, Cinder calls Nova for online
snapshot deletion. Nova deletes snapshot with id "bbbb" by
pulling volume-1234's data into volume-1234.bbbb. This
(logically) removes snapshot with id "bbbb" and the active
file remains the same.
volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb' }
TODO (deepakcs): Change this once Nova supports blockCommit for
in-use volumes.
5. Snapshot deletion when volume is detached ('available' state):
* When first snapshot is deleted, Cinder does the snapshot
deletion. volume-1234.aaaa is removed from the snapshot chain.
The data from it is merged into its parent.
volume-1234.bbbb is rebased, having volume-1234 as its new
parent.
volume-1234 <- volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.bbbb'
}
* When second snapshot is deleted, Cinder does the snapshot
deletion. volume-1234.aaaa is removed from the snapshot chain.
The base image, volume-1234 becomes the active image for this
volume again.
volume-1234
info file: { 'active': 'volume-1234' } (* changed!)
"""
status = snapshot['volume']['status']
if status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use"'
' for snapshot. (is %s)') % status
raise exception.InvalidVolume(msg)
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path, empty_if_missing=True)
backing_filename = self.get_active_image_from_info(
snapshot['volume'])
new_snap_path = self._get_new_snap_path(snapshot)
if status == 'in-use':
self._create_snapshot_online(snapshot,
backing_filename,
new_snap_path)
else:
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
snap_info['active'] = os.path.basename(new_snap_path)
snap_info[snapshot['id']] = os.path.basename(new_snap_path)
self._write_info_file(info_path, snap_info)
def _create_snapshot_online(self, snapshot, backing_filename,
new_snap_path):
# Perform online snapshot via Nova
context = snapshot['context']
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
connection_info = {
'type': 'qcow2',
'new_file': os.path.basename(new_snap_path),
'snapshot_id': snapshot['id']
}
try:
result = self._nova.create_volume_snapshot(
context,
snapshot['volume_id'],
connection_info)
LOG.debug('nova call result: %s', result)
except Exception:
LOG.exception(_LE('Call to Nova to create snapshot failed'))
raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 600
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'creating':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
time.sleep(increment)
seconds_elapsed += increment
elif s['status'] == 'error':
msg = _('Nova returned "error" status '
'while creating snapshot.')
raise exception.RemoteFSException(msg)
LOG.debug('Status of snapshot %(id)s is now %(status)s',
{'id': snapshot['id'],
'status': s['status']})
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for creation of snapshot %s.') % snapshot['id']
raise exception.RemoteFSException(msg)
def _delete_snapshot_online(self, context, snapshot, info):
# Update info over the course of this method
# active file never changes
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
if info['active_file'] == info['snapshot_file']:
# blockRebase/Pull base into active
# info['base'] => snapshot_file
file_to_delete = info['base_file']
if info['base_id'] is None:
# Passing base=none to blockRebase ensures that
# libvirt blanks out the qcow2 backing file pointer
new_base = None
else:
new_base = info['new_base_file']
snap_info[info['base_id']] = info['snapshot_file']
delete_info = {'file_to_merge': new_base,
'merge_target_file': None, # current
'type': 'qcow2',
'volume_id': snapshot['volume']['id']}
del(snap_info[snapshot['id']])
else:
# blockCommit snapshot into base
# info['base'] <= snapshot_file
# delete record of snapshot
file_to_delete = info['snapshot_file']
delete_info = {'file_to_merge': info['snapshot_file'],
'merge_target_file': info['base_file'],
'type': 'qcow2',
'volume_id': snapshot['volume']['id']}
del(snap_info[snapshot['id']])
try:
self._nova.delete_volume_snapshot(
context,
snapshot['id'],
delete_info)
except Exception:
LOG.exception(_LE('Call to Nova delete snapshot failed'))
raise
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 7200
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'deleting':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
else:
LOG.debug('status of snapshot %s is still "deleting"... '
'waiting', snapshot['id'])
time.sleep(increment)
seconds_elapsed += increment
else:
msg = _('Unable to delete snapshot %(id)s, '
'status: %(status)s.') % {'id': snapshot['id'],
'status': s['status']}
raise exception.RemoteFSException(msg)
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for deletion of snapshot %(id)s.') %\
{'id': snapshot['id']}
raise exception.RemoteFSException(msg)
# Write info file updated above
self._write_info_file(info_path, snap_info)
# Delete stale file
path_to_delete = os.path.join(
self._local_volume_dir(snapshot['volume']), file_to_delete)
self._delete(path_to_delete, force_run_as_root=True)
@locked_volume_id_operation
def create_snapshot(self, snapshot):
"""Apply locking to the create snapshot operation."""
return self._create_snapshot(snapshot)
@locked_volume_id_operation
def delete_snapshot(self, snapshot):
"""Apply locking to the delete snapshot operation."""
return self._delete_snapshot(snapshot)
@locked_volume_id_operation
def create_volume_from_snapshot(self, volume, snapshot):
return self._create_volume_from_snapshot(volume, snapshot)
@locked_volume_id_operation
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
return self._create_cloned_volume(volume, src_vref)
@locked_volume_id_operation
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
return self._copy_volume_to_image(context,
volume,
image_service,
image_meta)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matcher interface and Match class.
This module defines the Matcher interface and the Match object. The job of the
matcher is to match row and column indices based on the similarity matrix and
other optional parameters. Each column is matched to at most one row. There
are three possibilities for the matching:
1) match: A column matches a row.
2) no_match: A column does not match any row.
3) ignore: A column that is neither 'match' nor no_match.
The ignore case is regularly encountered in object detection: when an anchor has
a relatively small overlap with a ground-truth box, one wants to consider this
box neither a positive example (match) nor a negative example (no match).
The Match class is used to store the match results and provides simple APIs
to query the results.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
from object_detection.utils import ops
class Match(object):
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results, use_matmul_gather=False):
"""Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
use_matmul_gather: Use matrix multiplication based gather instead of
standard tf.gather. (Default: False).
Raises:
ValueError: if match_results does not have rank 1 or is not an
int32 tensor
"""
if match_results.shape.ndims != 1:
raise ValueError('match_results should have rank 1')
if match_results.dtype != tf.int32:
raise ValueError('match_results should be an int32 tensor')
self._match_results = match_results
self._gather_op = tf.gather
if use_matmul_gather:
self._gather_op = ops.matmul_gather_on_zeroth_axis
@property
def match_results(self):
"""The accessor for match results.
Returns:
the tensor which encodes the match results.
"""
return self._match_results
def matched_column_indices(self):
"""Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1)))
def matched_column_indicator(self):
"""Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.greater_equal(self._match_results, 0)
def num_matched_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.size(self.matched_column_indices())
def unmatched_column_indices(self):
"""Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1)))
def unmatched_column_indicator(self):
"""Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.equal(self._match_results, -1)
def num_unmatched_columns(self):
"""Returns number (int32 scalar tensor) of unmatched columns."""
return tf.size(self.unmatched_column_indices())
def ignored_column_indices(self):
"""Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(self.ignored_column_indicator()))
def ignored_column_indicator(self):
"""Returns boolean column indicator where True means the colum is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column
indices.
"""
return tf.equal(self._match_results, -2)
def num_ignored_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.size(self.ignored_column_indices())
def unmatched_or_ignored_column_indices(self):
"""Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results)))
def matched_row_indices(self):
"""Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence
with the output of matched_column_indicator(). For example if
self.matched_column_indicator() is [0,2], and self.matched_row_indices() is
[7, 3], then we know that column 0 was matched to row 7 and column 2 was
matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.
"""
return self._reshape_and_cast(
self._gather_op(self._match_results, self.matched_column_indices()))
def _reshape_and_cast(self, t):
return tf.cast(tf.reshape(t, [-1]), tf.int32)
def gather_based_on_match(self, input_tensor, unmatched_value,
ignored_value):
"""Gathers elements from `input_tensor` based on match results.
For columns that are matched to a row, gathered_tensor[col] is set to
input_tensor[match_results[col]]. For columns that are unmatched,
gathered_tensor[col] is set to unmatched_value. Finally, for columns that
are ignored gathered_tensor[col] is set to ignored_value.
Note that the input_tensor.shape[1:] must match with unmatched_value.shape
and ignored_value.shape
Args:
input_tensor: Tensor to gather values from.
unmatched_value: Constant tensor value for unmatched columns.
ignored_value: Constant tensor value for ignored columns.
Returns:
gathered_tensor: A tensor containing values gathered from input_tensor.
The shape of the gathered tensor is [match_results.shape[0]] +
input_tensor.shape[1:].
"""
input_tensor = tf.concat([tf.stack([ignored_value, unmatched_value]),
input_tensor], axis=0)
gather_indices = tf.maximum(self.match_results + 2, 0)
gathered_tensor = self._gather_op(input_tensor, gather_indices)
return gathered_tensor
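# Worked example (illustrative values): for match_results = [-2, -1, 0, 3],
# gather_indices = maximum(match_results + 2, 0) = [0, 1, 2, 5]. Row 0 of the
# concatenated tensor holds ignored_value and row 1 holds unmatched_value, so
# the ignored column gathers ignored_value, the unmatched column gathers
# unmatched_value, and the matched columns gather input_tensor[0] and
# input_tensor[3] respectively.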
class Matcher(object):
"""Abstract base class for matcher.
"""
__metaclass__ = ABCMeta
def __init__(self, use_matmul_gather=False):
"""Constructs a Matcher.
Args:
use_matmul_gather: Force constructed match objects to use matrix
multiplication based gather instead of standard tf.gather.
(Default: False).
"""
self._use_matmul_gather = use_matmul_gather
def match(self, similarity_matrix, scope=None, **params):
"""Computes matches among row and column indices and returns the result.
Computes matches among the row and column indices based on the similarity
matrix and optional arguments.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
scope: Op scope name. Defaults to 'Match' if None.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
A Match object with the results of matching.
"""
with tf.name_scope(scope, 'Match', [similarity_matrix, params]) as scope:
return Match(self._match(similarity_matrix, **params),
self._use_matmul_gather)
@abstractmethod
def _match(self, similarity_matrix, **params):
"""Method to be overridden by implementations.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
match_results: Integer tensor of shape [M]: match_results[i]>=0 means
that column i is matched to row match_results[i], match_results[i]=-1
means that the column is not matched. match_results[i]=-2 means that
the column is ignored (usually this happens when there is a very weak
match which one neither wants as positive nor negative example).
"""
pass
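# A minimal sketch of a concrete matcher, added for illustration only; the
# class name and the threshold parameter are assumptions, not part of this
# module's API. Each column is matched to its highest-similarity row, and
# columns whose best similarity falls below the threshold are marked -1
# (unmatched); the -2 (ignore) label is not produced by this sketch.
class _ExampleArgMaxMatcher(Matcher):
    """Illustrative Matcher that thresholds the per-column best similarity."""
    def __init__(self, unmatched_threshold=0.5, use_matmul_gather=False):
        super(_ExampleArgMaxMatcher, self).__init__(use_matmul_gather)
        self._unmatched_threshold = unmatched_threshold
    def _match(self, similarity_matrix, **params):
        # Row index with the highest similarity for every column.
        matches = tf.cast(tf.argmax(similarity_matrix, axis=0), tf.int32)
        best_vals = tf.reduce_max(similarity_matrix, axis=0)
        # Columns whose best similarity is too low become unmatched (-1).
        unmatched = tf.less(best_vals, self._unmatched_threshold)
        return tf.where(unmatched, -1 * tf.ones_like(matches), matches)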
|
|
from algorithms.graph import Tarjan
from algorithms.graph import check_bipartite
from algorithms.graph.dijkstra import Dijkstra
from algorithms.graph import ford_fulkerson
from algorithms.graph import edmonds_karp
from algorithms.graph import dinic
from algorithms.graph import maximum_flow_bfs
from algorithms.graph import maximum_flow_dfs
from algorithms.graph import all_pairs_shortest_path
from algorithms.graph import bellman_ford
from algorithms.graph import count_connected_number_of_component
from algorithms.graph import prims_minimum_spanning
from algorithms.graph import check_digraph_strongly_connected
from algorithms.graph import cycle_detection
from algorithms.graph import find_path
from algorithms.graph import path_between_two_vertices_in_digraph
import unittest
class TestTarjan(unittest.TestCase):
"""
Test for the file tarjan.py
Arguments:
unittest {[type]} -- [description]
"""
def test_tarjan_example_1(self):
# Graph from https://en.wikipedia.org/wiki/File:Scc.png
example = {
'A': ['B'],
'B': ['C', 'E', 'F'],
'C': ['D', 'G'],
'D': ['C', 'H'],
'E': ['A', 'F'],
'F': ['G'],
'G': ['F'],
'H': ['D', 'G']
}
g = Tarjan(example)
self.assertEqual(g.sccs, [['F', 'G'], ['C', 'D', 'H'],
['A', 'B', 'E']])
def test_tarjan_example_2(self):
# Graph from https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm#/media/File:Tarjan%27s_Algorithm_Animation.gif
example = {
'A': ['E'],
'B': ['A'],
'C': ['B', 'D'],
'D': ['C'],
'E': ['B'],
'F': ['B', 'E', 'G'],
'G': ['F', 'C'],
'H': ['G', 'H', 'D']
}
g = Tarjan(example)
self.assertEqual(g.sccs, [['A', 'B', 'E'], ['C', 'D'], ['F', 'G'],
['H']])
class TestCheckBipartite(unittest.TestCase):
def test_check_bipartite(self):
adj_list_1 = [[0, 0, 1], [0, 0, 1], [1, 1, 0]]
self.assertEqual(True, check_bipartite(adj_list_1))
adj_list_2 = [[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]]
self.assertEqual(True, check_bipartite(adj_list_2))
adj_list_3 = [[0, 1, 0, 0], [1, 0, 1, 1], [0, 1, 0, 1], [0, 1, 1, 0]]
self.assertEqual(False, check_bipartite(adj_list_3))
class TestDijkstra(unittest.TestCase):
def test_dijkstra(self):
g = Dijkstra(9)
g.graph = [[0, 4, 0, 0, 0, 0, 0, 8, 0],
[4, 0, 8, 0, 0, 0, 0, 11, 0],
[0, 8, 0, 7, 0, 4, 0, 0, 2],
[0, 0, 7, 0, 9, 14, 0, 0, 0],
[0, 0, 0, 9, 0, 10, 0, 0, 0],
[0, 0, 4, 14, 10, 0, 2, 0, 0],
[0, 0, 0, 0, 0, 2, 0, 1, 6],
[8, 11, 0, 0, 0, 0, 1, 0, 7],
[0, 0, 2, 0, 0, 0, 6, 7, 0]]
self.assertEqual(g.dijkstra(0), [0, 4, 12, 19, 21, 11, 9, 8, 14])
class TestMaximumFlow(unittest.TestCase):
"""
Test for the file maximum_flow.py
Arguments:
unittest {[type]} -- [description]
"""
def test_ford_fulkerson(self):
capacity = [
[0, 10, 10, 0, 0, 0, 0],
[0, 0, 2, 0, 4, 8, 0],
[0, 0, 0, 0, 0, 9, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 10],
[0, 0, 0, 0, 6, 0, 10],
[0, 0, 0, 0, 0, 0, 0]
]
self.assertEqual(19, ford_fulkerson(capacity, 0, 6))
def test_edmonds_karp(self):
capacity = [
[0, 10, 10, 0, 0, 0, 0],
[0, 0, 2, 0, 4, 8, 0],
[0, 0, 0, 0, 0, 9, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 10],
[0, 0, 0, 0, 6, 0, 10],
[0, 0, 0, 0, 0, 0, 0]
]
self.assertEqual(19, edmonds_karp(capacity, 0, 6))
def test_dinic(self):
capacity = [
[0, 10, 10, 0, 0, 0, 0],
[0, 0, 2, 0, 4, 8, 0],
[0, 0, 0, 0, 0, 9, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 10],
[0, 0, 0, 0, 6, 0, 10],
[0, 0, 0, 0, 0, 0, 0]
]
self.assertEqual(19, dinic(capacity, 0, 6))
class TestMaximum_Flow_Bfs(unittest.TestCase):
"""
Test for the file def maximum_flow_bfs.py
Arguments:
unittest {[type]} -- [description]
"""
def test_maximum_flow_bfs(self):
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0]
]
maximum_flow = maximum_flow_bfs(graph)
self.assertEqual(maximum_flow, 23)
class TestMaximum_Flow_Dfs(unittest.TestCase):
"""
Test for the file def maximum_flow_dfs.py
Arguments:
unittest {[type]} -- [description]
"""
def test_maximum_flow_dfs(self):
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0]
]
maximum_flow = maximum_flow_dfs(graph)
self.assertEqual(maximum_flow, 23)
class TestAll_Pairs_Shortest_Path(unittest.TestCase):
def test_all_pairs_shortest_path(self):
graph = [[0, 0.1, 0.101, 0.142, 0.277],
[0.465, 0, 0.191, 0.192, 0.587],
[0.245, 0.554, 0, 0.333, 0.931],
[1.032, 0.668, 0.656, 0, 0.151],
[0.867, 0.119, 0.352, 0.398, 0]]
result = all_pairs_shortest_path(graph)
self.assertEqual(result, [
[0, 0.1, 0.101, 0.142, 0.277],
[0.436, 0, 0.191, 0.192,
0.34299999999999997],
[0.245, 0.345, 0, 0.333, 0.484],
[0.706, 0.27, 0.46099999999999997, 0,
0.151],
[0.5549999999999999, 0.119, 0.31, 0.311,
0],
])
class TestBellmanFord(unittest.TestCase):
def test_bellman_ford(self):
graph1 = {
'a': {'b': 6, 'e': 7},
'b': {'c': 5, 'd': -4, 'e': 8},
'c': {'b': -2},
'd': {'a': 2, 'c': 7},
'e': {'b': -3}
}
self.assertEqual(True, bellman_ford(graph1, 'a'))
graph2 = {
'a': {'d': 3, 'e': 4},
'b': {'a': 7, 'e': 2},
'c': {'a': 12, 'd': 9, 'e': 11},
'd': {'c': 5, 'e': 11},
'e': {'a': 7, 'b': 5, 'd': 1}
}
self.assertEqual(True, bellman_ford(graph2, 'a'))
class TestConnectedComponentInGraph(unittest.TestCase):
"""
Class for testing different cases for connected components in graph
"""
def test_count_connected_components(self):
"""
Test function covering the different cases of counting connected
components.
2----------0 1--------5 3
|
|
4
output = 3
"""
expected_result = 3
# adjacency list representation of graph
l = [[2],
[5],
[0,4],
[],
[2],
[1]]
size = 5
result = count_connected_number_of_component.count_components(l, size)
self.assertEqual(result, expected_result)
def test_connected_components_with_empty_graph(self):
"""
input :
output : 0
"""
l = [[]]
expected_result = 0
size = 0
result = count_connected_number_of_component.count_components(l, size)
self.assertEqual(result, expected_result)
def test_connected_components_without_edges_graph(self):
"""
input : 0 2 3 4
output : 4
"""
l = [[0], [], [2], [3], [4]]
size = 4
expected_result = 4
result = count_connected_number_of_component.count_components(l, size)
self.assertEqual(result, expected_result)
class PrimsMinimumSpanning(unittest.TestCase):
def test_prim_spanning(self):
graph1 = {
1: [[3, 2], [8, 3]],
2: [[3, 1], [5, 4]],
3: [[8, 1], [2, 4], [4, 5]],
4: [[5, 2], [2, 3], [6, 5]],
5: [[4, 3], [6, 4]]
}
self.assertEqual(14, prims_minimum_spanning(graph1))
graph2 = {
1: [[7, 2], [6, 4]],
2: [[7, 1], [9, 4], [6, 3]],
3: [[8, 4], [6, 2]],
4: [[6, 1], [9, 2], [8, 3]]
}
self.assertEqual(19, prims_minimum_spanning(graph2))
class TestDigraphStronglyConnected(unittest.TestCase):
def test_digraph_strongly_connected(self):
g1 = check_digraph_strongly_connected.Graph(5)
g1.add_edge(0, 1)
g1.add_edge(1, 2)
g1.add_edge(2, 3)
g1.add_edge(3, 0)
g1.add_edge(2, 4)
g1.add_edge(4, 2)
self.assertTrue(g1.is_strongly_connected())
g2 = check_digraph_strongly_connected.Graph(4)
g2.add_edge(0, 1)
g2.add_edge(1, 2)
g2.add_edge(2, 3)
self.assertFalse(g2.is_strongly_connected())
class TestCycleDetection(unittest.TestCase):
def test_cycle_detection_with_cycle(self):
graph = {'A': ['B', 'C'],
'B': ['D'],
'C': ['F'],
'D': ['E', 'F'],
'E': ['B'],
'F': []}
self.assertTrue(cycle_detection.contains_cycle(graph))
def test_cycle_detection_with_no_cycle(self):
graph = {'A': ['B', 'C'],
'B': ['D', 'E'],
'C': ['F'],
'D': ['E'],
'E': [],
'F': []}
self.assertFalse(cycle_detection.contains_cycle(graph))
class TestFindPath(unittest.TestCase):
def test_find_all_paths(self):
graph = {'A': ['B', 'C'],
'B': ['C', 'D'],
'C': ['D', 'F'],
'D': ['C'],
'E': ['F'],
'F': ['C']}
paths = find_path.find_all_path(graph, 'A', 'F')
print(paths)
self.assertEqual(sorted(paths), sorted([
['A', 'C', 'F'],
['A', 'B', 'C', 'F'],
['A', 'B', 'D', 'C', 'F'],
]))
class TestPathBetweenTwoVertices(unittest.TestCase):
def test_node_is_reachable(self):
g = path_between_two_vertices_in_digraph.Graph(4)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
self.assertTrue(g.is_reachable(1, 3))
self.assertFalse(g.is_reachable(3, 1))
|
|
"""Module for querying SymPy objects about assumptions."""
from __future__ import print_function, division
from sympy.core import sympify
from sympy.core.cache import cacheit
from sympy.logic.boolalg import (to_cnf, And, Not, Or, Implies, Equivalent,
BooleanFunction, BooleanAtom)
from sympy.logic.inference import satisfiable
from sympy.assumptions.assume import (global_assumptions, Predicate,
AppliedPredicate)
from sympy.core.decorators import deprecated
from sympy.utilities.decorator import memoize_property
# Deprecated predicates should be added to this list
deprecated_predicates = [
'bounded',
'infinity',
'infinitesimal'
]
# Memoization storage for predicates
predicate_storage = {}
predicate_memo = memoize_property(predicate_storage)
# Memoization is necessary for the properties of AssumptionKeys to
# ensure that only one Predicate object is created for each key.
# This is because assumption handlers are registered on those objects.
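# For instance, because of the memoization every access returns the same
# instance, so ``Q.real is Q.real`` holds and any handler registered for
# 'real' is visible through every later lookup of ``Q.real``.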
class AssumptionKeys(object):
"""
This class contains all the supported keys by ``ask``.
"""
@predicate_memo
def hermitian(self):
"""
Hermitian predicate.
``ask(Q.hermitian(x))`` is true iff ``x`` belongs to the set of
Hermitian operators.
References
==========
.. [1] http://mathworld.wolfram.com/HermitianOperator.html
"""
# TODO: Add examples
return Predicate('hermitian')
@predicate_memo
def antihermitian(self):
"""
Antihermitian predicate.
``Q.antihermitian(x)`` is true iff ``x`` belongs to the field of
antihermitian operators, i.e., operators in the form ``x*I``, where
``x`` is Hermitian.
References
==========
.. [1] http://mathworld.wolfram.com/HermitianOperator.html
"""
# TODO: Add examples
return Predicate('antihermitian')
@predicate_memo
def real(self):
r"""
Real number predicate.
``Q.real(x)`` is true iff ``x`` is a real number, i.e., it is in the
interval `(-\infty, \infty)`. Note that, in particular the infinities
are not real. Use ``Q.extended_real`` if you want to consider those as
well.
A few important facts about reals:
- Every real number is positive, negative, or zero. Furthermore,
because these sets are pairwise disjoint, each real number is exactly
one of those three.
- Every real number is also complex.
- Every real number is finite.
- Every real number is either rational or irrational.
- Every real number is either algebraic or transcendental.
- The facts ``Q.negative``, ``Q.zero``, ``Q.positive``,
``Q.nonnegative``, ``Q.nonpositive``, ``Q.nonzero``, ``Q.integer``,
``Q.rational``, and ``Q.irrational`` all imply ``Q.real``, as do all
facts that imply those facts.
- The facts ``Q.algebraic``, and ``Q.transcendental`` do not imply
``Q.real``; they imply ``Q.complex``. An algebraic or transcendental
number may or may not be real.
- The "non" facts (i.e., ``Q.nonnegative``, ``Q.nonzero``,
``Q.nonpositive`` and ``Q.noninteger``) are not equivalent to not the
fact, but rather, not the fact *and* ``Q.real``. For example,
``Q.nonnegative`` means ``~Q.negative & Q.real``. So for example,
``I`` is not nonnegative, nonzero, or nonpositive.
Examples
========
>>> from sympy import Q, ask, symbols
>>> x = symbols('x')
>>> ask(Q.real(x), Q.positive(x))
True
>>> ask(Q.real(0))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Real_number
"""
return Predicate('real')
@predicate_memo
def extended_real(self):
r"""
Extended real predicate.
``Q.extended_real(x)`` is true iff ``x`` is a real number or
`\{-\infty, \infty\}`.
See documentation of ``Q.real`` for more information about related facts.
Examples
========
>>> from sympy import ask, Q, oo, I
>>> ask(Q.extended_real(1))
True
>>> ask(Q.extended_real(I))
False
>>> ask(Q.extended_real(oo))
True
"""
return Predicate('extended_real')
@predicate_memo
def imaginary(self):
"""
Imaginary number predicate.
``Q.imaginary(x)`` is true iff ``x`` can be written as a real
number multiplied by the imaginary unit ``I``. Please note that ``0``
is not considered to be an imaginary number.
Examples
========
>>> from sympy import Q, ask, I
>>> ask(Q.imaginary(3*I))
True
>>> ask(Q.imaginary(2 + 3*I))
False
>>> ask(Q.imaginary(0))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Imaginary_number
"""
return Predicate('imaginary')
@predicate_memo
def complex(self):
"""
Complex number predicate.
``Q.complex(x)`` is true iff ``x`` belongs to the set of complex
numbers. Note that every complex number is finite.
Examples
========
>>> from sympy import Q, Symbol, ask, I, oo
>>> x = Symbol('x')
>>> ask(Q.complex(0))
True
>>> ask(Q.complex(2 + 3*I))
True
>>> ask(Q.complex(oo))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Complex_number
"""
return Predicate('complex')
@predicate_memo
def algebraic(self):
r"""
Algebraic number predicate.
``Q.algebraic(x)`` is true iff ``x`` belongs to the set of
algebraic numbers. ``x`` is algebraic if there is some nonzero polynomial
``p(x) \in \mathbb{Q}[x]`` such that ``p(x) = 0``.
Examples
========
>>> from sympy import ask, Q, sqrt, I, pi
>>> ask(Q.algebraic(sqrt(2)))
True
>>> ask(Q.algebraic(I))
True
>>> ask(Q.algebraic(pi))
False
References
==========
.. [1] http://en.wikipedia.org/wiki/Algebraic_number
"""
return Predicate('algebraic')
@predicate_memo
def transcendental(self):
"""
Transcendental number predicate.
``Q.transcendental(x)`` is true iff ``x`` belongs to the set of
transcendental numbers. A transcendental number is a real
or complex number that is not algebraic.
"""
# TODO: Add examples
return Predicate('transcendental')
@predicate_memo
def integer(self):
"""
Integer predicate.
``Q.integer(x)`` is true iff ``x`` belongs to the set of integer numbers.
Examples
========
>>> from sympy import Q, ask, S
>>> ask(Q.integer(5))
True
>>> ask(Q.integer(S(1)/2))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Integer
"""
return Predicate('integer')
@predicate_memo
def rational(self):
"""
Rational number predicate.
``Q.rational(x)`` is true iff ``x`` belongs to the set of
rational numbers.
Examples
========
>>> from sympy import ask, Q, pi, S
>>> ask(Q.rational(0))
True
>>> ask(Q.rational(S(1)/2))
True
>>> ask(Q.rational(pi))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Rational_number
"""
return Predicate('rational')
@predicate_memo
def irrational(self):
"""
Irrational number predicate.
``Q.irrational(x)`` is true iff ``x`` is any real number that
cannot be expressed as a ratio of integers.
Examples
========
>>> from sympy import ask, Q, pi, S, I
>>> ask(Q.irrational(0))
False
>>> ask(Q.irrational(S(1)/2))
False
>>> ask(Q.irrational(pi))
True
>>> ask(Q.irrational(I))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Irrational_number
"""
return Predicate('irrational')
@predicate_memo
def finite(self):
"""
Finite predicate.
``Q.finite(x)`` is true if ``x`` is neither an infinity
nor a ``NaN``. In other words, ``ask(Q.finite(x))`` is true for all ``x``
having a bounded absolute value.
Examples
========
>>> from sympy import Q, ask, Symbol, S, oo, I
>>> x = Symbol('x')
>>> ask(Q.finite(S.NaN))
False
>>> ask(Q.finite(oo))
False
>>> ask(Q.finite(1))
True
>>> ask(Q.finite(2 + 3*I))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Finite
"""
return Predicate('finite')
@predicate_memo
@deprecated(useinstead="finite", issue=9425, deprecated_since_version="0.7.7")
def bounded(self):
"""
See documentation of ``Q.finite``.
"""
return Predicate('finite')
@predicate_memo
def infinite(self):
"""
Infinite number predicate.
``Q.infinite(x)`` is true iff the absolute value of ``x`` is
infinity.
"""
# TODO: Add examples
return Predicate('infinite')
@predicate_memo
@deprecated(useinstead="infinite", issue=9426, deprecated_since_version="0.7.7")
def infinity(self):
"""
See documentation of ``Q.infinite``.
"""
return Predicate('infinite')
@predicate_memo
@deprecated(useinstead="zero", issue=9675, deprecated_since_version="0.7.7")
def infinitesimal(self):
"""
See documentation of ``Q.zero``.
"""
return Predicate('zero')
@predicate_memo
def positive(self):
r"""
Positive real number predicate.
``Q.positive(x)`` is true iff ``x`` is real and `x > 0`, that is if ``x``
is in the interval `(0, \infty)`. In particular, infinity is not
positive.
A few important facts about positive numbers:
- Note that ``Q.nonpositive`` and ``~Q.positive`` are *not* the same
thing. ``~Q.positive(x)`` simply means that ``x`` is not positive,
whereas ``Q.nonpositive(x)`` means that ``x`` is real and not
positive, i.e., ``Q.nonpositive(x)`` is logically equivalent to
``Q.negative(x) | Q.zero(x)``. So for example, ``~Q.positive(I)`` is
true, whereas ``Q.nonpositive(I)`` is false.
- See the documentation of ``Q.real`` for more information about
related facts.
Examples
========
>>> from sympy import Q, ask, symbols, I
>>> x = symbols('x')
>>> ask(Q.positive(x), Q.real(x) & ~Q.negative(x) & ~Q.zero(x))
True
>>> ask(Q.positive(1))
True
>>> ask(Q.nonpositive(I))
False
>>> ask(~Q.positive(I))
True
"""
return Predicate('positive')
@predicate_memo
def negative(self):
r"""
Negative number predicate.
``Q.negative(x)`` is true iff ``x`` is a real number and :math:`x < 0`, that is,
it is in the interval :math:`(-\infty, 0)`. Note in particular that negative
infinity is not negative.
A few important facts about negative numbers:
- Note that ``Q.nonnegative`` and ``~Q.negative`` are *not* the same
thing. ``~Q.negative(x)`` simply means that ``x`` is not negative,
whereas ``Q.nonnegative(x)`` means that ``x`` is real and not
negative, i.e., ``Q.nonnegative(x)`` is logically equivalent to
``Q.zero(x) | Q.positive(x)``. So for example, ``~Q.negative(I)`` is
true, whereas ``Q.nonnegative(I)`` is false.
- See the documentation of ``Q.real`` for more information about
related facts.
Examples
========
>>> from sympy import Q, ask, symbols, I
>>> x = symbols('x')
>>> ask(Q.negative(x), Q.real(x) & ~Q.positive(x) & ~Q.zero(x))
True
>>> ask(Q.negative(-1))
True
>>> ask(Q.nonnegative(I))
False
>>> ask(~Q.negative(I))
True
"""
return Predicate('negative')
@predicate_memo
def zero(self):
"""
Zero number predicate.
``ask(Q.zero(x))`` is true iff the value of ``x`` is zero.
Examples
========
>>> from sympy import ask, Q, oo, symbols
>>> x, y = symbols('x, y')
>>> ask(Q.zero(0))
True
>>> ask(Q.zero(1/oo))
True
>>> ask(Q.zero(0*oo))
False
>>> ask(Q.zero(1))
False
>>> ask(Q.zero(x*y), Q.zero(x) | Q.zero(y))
True
"""
return Predicate('zero')
@predicate_memo
def nonzero(self):
"""
Nonzero real number predicate.
``ask(Q.nonzero(x))`` is true iff ``x`` is real and ``x`` is not zero. Note in
particular that ``Q.nonzero(x)`` is false if ``x`` is not real. Use
``~Q.zero(x)`` if you want the negation of being zero without any real
assumptions.
A few important facts about nonzero numbers:
- ``Q.nonzero`` is logically equivalent to ``Q.positive | Q.negative``.
- See the documentation of ``Q.real`` for more information about
related facts.
Examples
========
>>> from sympy import Q, ask, symbols, I, oo
>>> x = symbols('x')
>>> print(ask(Q.nonzero(x), ~Q.zero(x)))
None
>>> ask(Q.nonzero(x), Q.positive(x))
True
>>> ask(Q.nonzero(x), Q.zero(x))
False
>>> ask(Q.nonzero(0))
False
>>> ask(Q.nonzero(I)) # doctest: +SKIP
False
>>> ask(~Q.zero(I))
True
>>> ask(Q.nonzero(oo)) # doctest: +SKIP
False
"""
return Predicate('nonzero')
@predicate_memo
def nonpositive(self):
"""
Nonpositive real number predicate.
``ask(Q.nonpositive(x))`` is true iff ``x`` belongs to the set of
negative numbers including zero.
- Note that ``Q.nonpositive`` and ``~Q.positive`` are *not* the same
thing. ``~Q.positive(x)`` simply means that ``x`` is not positive,
whereas ``Q.nonpositive(x)`` means that ``x`` is real and not
positive, i.e., ``Q.nonpositive(x)`` is logically equivalent to
``Q.negative(x) | Q.zero(x)``. So for example, ``~Q.positive(I)`` is
true, whereas ``Q.nonpositive(I)`` is false.
Examples
========
>>> from sympy import Q, ask, I
>>> ask(Q.nonpositive(-1))
True
>>> ask(Q.nonpositive(0))
True
>>> ask(Q.nonpositive(1))
False
>>> ask(Q.nonpositive(I))
False
>>> ask(Q.nonpositive(-I))
False
"""
return Predicate('nonpositive')
@predicate_memo
def nonnegative(self):
"""
Nonnegative real number predicate.
``ask(Q.nonnegative(x))`` is true iff ``x`` belongs to the set of
positive numbers including zero.
- Note that ``Q.nonnegative`` and ``~Q.negative`` are *not* the same
thing. ``~Q.negative(x)`` simply means that ``x`` is not negative,
whereas ``Q.nonnegative(x)`` means that ``x`` is real and not
negative, i.e., ``Q.nonnegative(x)`` is logically equivalent to
``Q.zero(x) | Q.positive(x)``. So for example, ``~Q.negative(I)`` is
true, whereas ``Q.nonnegative(I)`` is false.
Examples
========
>>> from sympy import Q, ask, I
>>> ask(Q.nonnegative(1))
True
>>> ask(Q.nonnegative(0))
True
>>> ask(Q.nonnegative(-1))
False
>>> ask(Q.nonnegative(I))
False
>>> ask(Q.nonnegative(-I))
False
"""
return Predicate('nonnegative')
@predicate_memo
def even(self):
"""
Even number predicate.
``ask(Q.even(x))`` is true iff ``x`` belongs to the set of even
integers.
Examples
========
>>> from sympy import Q, ask, pi
>>> ask(Q.even(0))
True
>>> ask(Q.even(2))
True
>>> ask(Q.even(3))
False
>>> ask(Q.even(pi))
False
"""
return Predicate('even')
@predicate_memo
def odd(self):
"""
Odd number predicate.
``ask(Q.odd(x))`` is true iff ``x`` belongs to the set of odd numbers.
Examples
========
>>> from sympy import Q, ask, pi
>>> ask(Q.odd(0))
False
>>> ask(Q.odd(2))
False
>>> ask(Q.odd(3))
True
>>> ask(Q.odd(pi))
False
"""
return Predicate('odd')
@predicate_memo
def prime(self):
"""
Prime number predicate.
``ask(Q.prime(x))`` is true iff ``x`` is a natural number greater
than 1 that has no positive divisors other than ``1`` and the
number itself.
Examples
========
>>> from sympy import Q, ask
>>> ask(Q.prime(0))
False
>>> ask(Q.prime(1))
False
>>> ask(Q.prime(2))
True
>>> ask(Q.prime(20))
False
>>> ask(Q.prime(-3))
False
"""
return Predicate('prime')
@predicate_memo
def composite(self):
"""
Composite number predicate.
``ask(Q.composite(x))`` is true iff ``x`` is a positive integer and has
at least one positive divisor other than ``1`` and the number itself.
Examples
========
>>> from sympy import Q, ask
>>> ask(Q.composite(0))
False
>>> ask(Q.composite(1))
False
>>> ask(Q.composite(2))
False
>>> ask(Q.composite(20))
True
"""
return Predicate('composite')
@predicate_memo
def commutative(self):
"""
Commutative predicate.
``ask(Q.commutative(x))`` is true iff ``x`` commutes with any other
object with respect to multiplication operation.
"""
# TODO: Add examples
return Predicate('commutative')
@predicate_memo
def is_true(self):
"""
Generic predicate.
``ask(Q.is_true(x))`` is true iff ``x`` is true. This only makes
sense if ``x`` is a predicate.
Examples
========
>>> from sympy import ask, Q, symbols
>>> x = symbols('x')
>>> ask(Q.is_true(True))
True
"""
return Predicate('is_true')
@predicate_memo
def symmetric(self):
"""
Symmetric matrix predicate.
``Q.symmetric(x)`` is true iff ``x`` is a square matrix and is equal to
its transpose. Every square diagonal matrix is a symmetric matrix.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('Y', 2, 3)
>>> Z = MatrixSymbol('Z', 2, 2)
>>> ask(Q.symmetric(X*Z), Q.symmetric(X) & Q.symmetric(Z))
True
>>> ask(Q.symmetric(X + Z), Q.symmetric(X) & Q.symmetric(Z))
True
>>> ask(Q.symmetric(Y))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Symmetric_matrix
"""
# TODO: Add handlers to make these keys work with
# actual matrices and add more examples in the docstring.
return Predicate('symmetric')
@predicate_memo
def invertible(self):
"""
Invertible matrix predicate.
``Q.invertible(x)`` is true iff ``x`` is an invertible matrix.
A square matrix is called invertible only if its determinant is nonzero.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('Y', 2, 3)
>>> Z = MatrixSymbol('Z', 2, 2)
>>> ask(Q.invertible(X*Y), Q.invertible(X))
False
>>> ask(Q.invertible(X*Z), Q.invertible(X) & Q.invertible(Z))
True
>>> ask(Q.invertible(X), Q.fullrank(X) & Q.square(X))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Invertible_matrix
"""
return Predicate('invertible')
@predicate_memo
def orthogonal(self):
"""
Orthogonal matrix predicate.
``Q.orthogonal(x)`` is true iff ``x`` is an orthogonal matrix.
A square matrix ``M`` is an orthogonal matrix if it satisfies
``M^TM = MM^T = I`` where ``M^T`` is the transpose matrix of
``M`` and ``I`` is an identity matrix. Note that an orthogonal
matrix is necessarily invertible.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol, Identity
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('Y', 2, 3)
>>> Z = MatrixSymbol('Z', 2, 2)
>>> ask(Q.orthogonal(Y))
False
>>> ask(Q.orthogonal(X*Z*X), Q.orthogonal(X) & Q.orthogonal(Z))
True
>>> ask(Q.orthogonal(Identity(3)))
True
>>> ask(Q.invertible(X), Q.orthogonal(X))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Orthogonal_matrix
"""
return Predicate('orthogonal')
@predicate_memo
def unitary(self):
"""
Unitary matrix predicate.
``Q.unitary(x)`` is true iff ``x`` is a unitary matrix.
Unitary matrix is an analogue to orthogonal matrix. A square
matrix ``M`` with complex elements is unitary if :math:``M^TM = MM^T = I``
where :math:``M^T`` is the conjugate transpose matrix of ``M``.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol, Identity
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('Y', 2, 3)
>>> Z = MatrixSymbol('Z', 2, 2)
>>> ask(Q.unitary(Y))
False
>>> ask(Q.unitary(X*Z*X), Q.unitary(X) & Q.unitary(Z))
True
>>> ask(Q.unitary(Identity(3)))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Unitary_matrix
"""
return Predicate('unitary')
@predicate_memo
def positive_definite(self):
r"""
Positive definite matrix predicate.
If ``M`` is a :math:``n \times n`` symmetric real matrix, it is said
to be positive definite if :math:`Z^TMZ` is positive for
every non-zero column vector ``Z`` of ``n`` real numbers.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol, Identity
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('Y', 2, 3)
>>> Z = MatrixSymbol('Z', 2, 2)
>>> ask(Q.positive_definite(Y))
False
>>> ask(Q.positive_definite(Identity(3)))
True
>>> ask(Q.positive_definite(X + Z), Q.positive_definite(X) &
... Q.positive_definite(Z))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Positive-definite_matrix
"""
return Predicate('positive_definite')
@predicate_memo
def upper_triangular(self):
"""
Upper triangular matrix predicate.
A matrix ``M`` is called an upper triangular matrix if :math:`M_{ij}=0`
for :math:`i>j`.
Examples
========
>>> from sympy import Q, ask, ZeroMatrix, Identity
>>> ask(Q.upper_triangular(Identity(3)))
True
>>> ask(Q.upper_triangular(ZeroMatrix(3, 3)))
True
References
==========
.. [1] http://mathworld.wolfram.com/UpperTriangularMatrix.html
"""
return Predicate('upper_triangular')
@predicate_memo
def lower_triangular(self):
"""
Lower triangular matrix predicate.
A matrix ``M`` is called a lower triangular matrix if :math:`M_{ij}=0`
for :math:`i<j`.
Examples
========
>>> from sympy import Q, ask, ZeroMatrix, Identity
>>> ask(Q.lower_triangular(Identity(3)))
True
>>> ask(Q.lower_triangular(ZeroMatrix(3, 3)))
True
References
==========
.. [1] http://mathworld.wolfram.com/LowerTriangularMatrix.html
"""
return Predicate('lower_triangular')
@predicate_memo
def diagonal(self):
"""
Diagonal matrix predicate.
``Q.diagonal(x)`` is true iff ``x`` is a diagonal matrix. A diagonal
matrix is a matrix in which the entries outside the main diagonal
are all zero.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol, ZeroMatrix
>>> X = MatrixSymbol('X', 2, 2)
>>> ask(Q.diagonal(ZeroMatrix(3, 3)))
True
>>> ask(Q.diagonal(X), Q.lower_triangular(X) &
... Q.upper_triangular(X))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Diagonal_matrix
"""
return Predicate('diagonal')
@predicate_memo
def fullrank(self):
"""
Fullrank matrix predicate.
``Q.fullrank(x)`` is true iff ``x`` is a full rank matrix.
A matrix is full rank if all rows and columns of the matrix
are linearly independent. A square matrix is full rank iff
its determinant is nonzero.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol, ZeroMatrix, Identity
>>> X = MatrixSymbol('X', 2, 2)
>>> ask(Q.fullrank(X.T), Q.fullrank(X))
True
>>> ask(Q.fullrank(ZeroMatrix(3, 3)))
False
>>> ask(Q.fullrank(Identity(3)))
True
"""
return Predicate('fullrank')
@predicate_memo
def square(self):
"""
Square matrix predicate.
``Q.square(x)`` is true iff ``x`` is a square matrix. A square matrix
is a matrix with the same number of rows and columns.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol, ZeroMatrix, Identity
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('Y', 2, 3)
>>> ask(Q.square(X))
True
>>> ask(Q.square(Y))
False
>>> ask(Q.square(ZeroMatrix(3, 3)))
True
>>> ask(Q.square(Identity(3)))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Square_matrix
"""
return Predicate('square')
@predicate_memo
def integer_elements(self):
"""
Integer elements matrix predicate.
``Q.integer_elements(x)`` is true iff all the elements of ``x``
are integers.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.integer(X[1, 2]), Q.integer_elements(X))
True
"""
return Predicate('integer_elements')
@predicate_memo
def real_elements(self):
"""
Real elements matrix predicate.
``Q.real_elements(x)`` is true iff all the elements of ``x``
are real numbers.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.real(X[1, 2]), Q.real_elements(X))
True
"""
return Predicate('real_elements')
@predicate_memo
def complex_elements(self):
"""
Complex elements matrix predicate.
``Q.complex_elements(x)`` is true iff all the elements of ``x``
are complex numbers.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.complex(X[1, 2]), Q.complex_elements(X))
True
>>> ask(Q.complex_elements(X), Q.integer_elements(X))
True
"""
return Predicate('complex_elements')
@predicate_memo
def singular(self):
"""
Singular matrix predicate.
A matrix is singular iff the value of its determinant is 0.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.singular(X), Q.invertible(X))
False
>>> ask(Q.singular(X), ~Q.invertible(X))
True
References
==========
.. [1] http://mathworld.wolfram.com/SingularMatrix.html
"""
return Predicate('singular')
@predicate_memo
def normal(self):
"""
Normal matrix predicate.
A matrix is normal if it commutes with its conjugate transpose.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.normal(X), Q.unitary(X))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Normal_matrix
"""
return Predicate('normal')
@predicate_memo
def triangular(self):
"""
Triangular matrix predicate.
``Q.triangular(X)`` is true if ``X`` is either lower triangular or
upper triangular.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.triangular(X), Q.upper_triangular(X))
True
>>> ask(Q.triangular(X), Q.lower_triangular(X))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Triangular_matrix
"""
return Predicate('triangular')
@predicate_memo
def unit_triangular(self):
"""
Unit triangular matrix predicate.
A unit triangular matrix is a triangular matrix with 1s
on the diagonal.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.triangular(X), Q.unit_triangular(X))
True
"""
return Predicate('unit_triangular')
Q = AssumptionKeys()
def _extract_facts(expr, symbol):
"""
Helper for ask().
Extracts the facts relevant to the symbol from an assumption.
Returns None if there is nothing to extract.
"""
if isinstance(expr, bool):
return
if not expr.has(symbol):
return
if isinstance(expr, AppliedPredicate):
if expr.arg == symbol:
return expr.func
else:
return
if isinstance(expr, Not) and expr.args[0].func in (And, Or):
cls = Or if expr.args[0] == And else And
expr = cls(*[~arg for arg in expr.args[0].args])
args = [_extract_facts(arg, symbol) for arg in expr.args]
if isinstance(expr, And):
args = [x for x in args if x is not None]
if args:
return expr.func(*args)
if args and all(x is not None for x in args):
return expr.func(*args)
def ask(proposition, assumptions=True, context=global_assumptions):
"""
Method for inferring properties about objects.
**Syntax**
* ask(proposition)
* ask(proposition, assumptions)
where ``proposition`` is any boolean expression
Examples
========
>>> from sympy import ask, Q, pi
>>> from sympy.abc import x, y
>>> ask(Q.rational(pi))
False
>>> ask(Q.even(x*y), Q.even(x) & Q.integer(y))
True
>>> ask(Q.prime(x*y), Q.integer(x) & Q.integer(y))
False
**Remarks**
Relations in assumptions are not implemented (yet), so the following
will not give a meaningful result.
>>> ask(Q.positive(x), Q.is_true(x > 0)) # doctest: +SKIP
It is, however, a work in progress.
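As an illustrative sketch, assumptions can also be registered in the
default ``global_assumptions`` context instead of being passed in:
>>> from sympy.assumptions import global_assumptions
>>> global_assumptions.add(Q.integer(x))
>>> ask(Q.rational(x))
True
>>> global_assumptions.clear()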
"""
from sympy.assumptions.satask import satask
if not isinstance(proposition, (BooleanFunction, AppliedPredicate, bool, BooleanAtom)):
raise TypeError("proposition must be a valid logical expression")
if not isinstance(assumptions, (BooleanFunction, AppliedPredicate, bool, BooleanAtom)):
raise TypeError("assumptions must be a valid logical expression")
if isinstance(proposition, AppliedPredicate):
key, expr = proposition.func, sympify(proposition.arg)
else:
key, expr = Q.is_true, sympify(proposition)
assumptions = And(assumptions, And(*context))
assumptions = to_cnf(assumptions)
local_facts = _extract_facts(assumptions, expr)
known_facts_cnf = get_known_facts_cnf()
known_facts_dict = get_known_facts_dict()
if local_facts and satisfiable(And(local_facts, known_facts_cnf)) is False:
raise ValueError("inconsistent assumptions %s" % assumptions)
# direct resolution method, no logic
res = key(expr)._eval_ask(assumptions)
if res is not None:
return bool(res)
if local_facts is None:
return satask(proposition, assumptions=assumptions, context=context)
# See if there's a straight-forward conclusion we can make for the inference
if local_facts.is_Atom:
if key in known_facts_dict[local_facts]:
return True
if Not(key) in known_facts_dict[local_facts]:
return False
elif (local_facts.func is And and
all(k in known_facts_dict for k in local_facts.args)):
for assum in local_facts.args:
if assum.is_Atom:
if key in known_facts_dict[assum]:
return True
if Not(key) in known_facts_dict[assum]:
return False
elif assum.func is Not and assum.args[0].is_Atom:
if key in known_facts_dict[assum]:
return False
if Not(key) in known_facts_dict[assum]:
return True
elif (isinstance(key, Predicate) and
local_facts.func is Not and local_facts.args[0].is_Atom):
if local_facts.args[0] in known_facts_dict[key]:
return False
# Failing all else, we do a full logical inference
res = ask_full_inference(key, local_facts, known_facts_cnf)
if res is None:
return satask(proposition, assumptions=assumptions, context=context)
return res
def ask_full_inference(proposition, assumptions, known_facts_cnf):
"""
Resolve ``proposition`` against ``assumptions`` and the known facts via
satisfiability checks: returns False if the proposition is inconsistent with
them, True if its negation is, and None if neither can be established.
"""
if not satisfiable(And(known_facts_cnf, assumptions, proposition)):
return False
if not satisfiable(And(known_facts_cnf, assumptions, Not(proposition))):
return True
return None
def register_handler(key, handler):
"""
Register a handler in the ask system. key must be a string and handler a
class inheriting from AskHandler::
>>> from sympy.assumptions import register_handler, ask, Q
>>> from sympy.assumptions.handlers import AskHandler
>>> class MersenneHandler(AskHandler):
...     # Mersenne numbers are in the form 2**n - 1, n integer
... @staticmethod
... def Integer(expr, assumptions):
...         from sympy import log
...         return ask(Q.integer(log(expr + 1, 2)))
>>> register_handler('mersenne', MersenneHandler)
>>> ask(Q.mersenne(7))
True
"""
if type(key) is Predicate:
key = key.name
try:
getattr(Q, key).add_handler(handler)
except AttributeError:
setattr(Q, key, Predicate(key, handlers=[handler]))
def remove_handler(key, handler):
"""Removes a handler from the ask system. Same syntax as register_handler"""
if type(key) is Predicate:
key = key.name
getattr(Q, key).remove_handler(handler)
def single_fact_lookup(known_facts_keys, known_facts_cnf):
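"""Map each single fact to the set of facts it implies (including itself),
computed by full inference against the CNF known facts."""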
# Compute the quick lookup for single facts
mapping = {}
for key in known_facts_keys:
mapping[key] = set([key])
for other_key in known_facts_keys:
if other_key != key:
if ask_full_inference(other_key, key, known_facts_cnf):
mapping[key].add(other_key)
return mapping
def compute_known_facts(known_facts, known_facts_keys):
"""Compute the various forms of knowledge compilation used by the
assumptions system.
This function is typically applied to the results of the ``get_known_facts``
and ``get_known_facts_keys`` functions defined at the bottom of
this file.
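Illustrative sketch (the generated module source is long, so execution
is skipped here):
>>> from sympy.assumptions.ask import (compute_known_facts,
...     get_known_facts, get_known_facts_keys)
>>> code = compute_known_facts(get_known_facts(),
...     get_known_facts_keys())  # doctest: +SKIP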
"""
from textwrap import dedent, wrap
fact_string = dedent('''\
"""
The contents of this file are the return value of
``sympy.assumptions.ask.compute_known_facts``.
Do NOT manually edit this file.
Instead, run ./bin/ask_update.py.
"""
from sympy.core.cache import cacheit
from sympy.logic.boolalg import And, Not, Or
from sympy.assumptions.ask import Q
# -{ Known facts in Conjunctive Normal Form }-
@cacheit
def get_known_facts_cnf():
return And(
%s
)
# -{ Known facts in compressed sets }-
@cacheit
def get_known_facts_dict():
return {
%s
}
''')
# Compute the known facts in CNF form for logical inference
LINE = ",\n "
HANG = ' '*8
cnf = to_cnf(known_facts)
c = LINE.join([str(a) for a in cnf.args])
mapping = single_fact_lookup(known_facts_keys, cnf)
items = sorted(mapping.items(), key=str)
keys = [str(i[0]) for i in items]
values = ['set(%s)' % sorted(i[1], key=str) for i in items]
m = LINE.join(['\n'.join(
wrap("%s: %s" % (k, v),
subsequent_indent=HANG,
break_long_words=False))
for k, v in zip(keys, values)]) + ','
return fact_string % (c, m)
# handlers tells us what ask handler we should use
# for a particular key
_val_template = 'sympy.assumptions.handlers.%s'
_handlers = [
("antihermitian", "sets.AskAntiHermitianHandler"),
("finite", "calculus.AskFiniteHandler"),
("commutative", "AskCommutativeHandler"),
("complex", "sets.AskComplexHandler"),
("composite", "ntheory.AskCompositeHandler"),
("even", "ntheory.AskEvenHandler"),
("extended_real", "sets.AskExtendedRealHandler"),
("hermitian", "sets.AskHermitianHandler"),
("imaginary", "sets.AskImaginaryHandler"),
("integer", "sets.AskIntegerHandler"),
("irrational", "sets.AskIrrationalHandler"),
("rational", "sets.AskRationalHandler"),
("negative", "order.AskNegativeHandler"),
("nonzero", "order.AskNonZeroHandler"),
("nonpositive", "order.AskNonPositiveHandler"),
("nonnegative", "order.AskNonNegativeHandler"),
("zero", "order.AskZeroHandler"),
("positive", "order.AskPositiveHandler"),
("prime", "ntheory.AskPrimeHandler"),
("real", "sets.AskRealHandler"),
("odd", "ntheory.AskOddHandler"),
("algebraic", "sets.AskAlgebraicHandler"),
("is_true", "common.TautologicalHandler"),
("symmetric", "matrices.AskSymmetricHandler"),
("invertible", "matrices.AskInvertibleHandler"),
("orthogonal", "matrices.AskOrthogonalHandler"),
("unitary", "matrices.AskUnitaryHandler"),
("positive_definite", "matrices.AskPositiveDefiniteHandler"),
("upper_triangular", "matrices.AskUpperTriangularHandler"),
("lower_triangular", "matrices.AskLowerTriangularHandler"),
("diagonal", "matrices.AskDiagonalHandler"),
("fullrank", "matrices.AskFullRankHandler"),
("square", "matrices.AskSquareHandler"),
("integer_elements", "matrices.AskIntegerElementsHandler"),
("real_elements", "matrices.AskRealElementsHandler"),
("complex_elements", "matrices.AskComplexElementsHandler"),
]
for name, value in _handlers:
register_handler(name, _val_template % value)
@cacheit
def get_known_facts_keys():
return [
getattr(Q, attr)
for attr in Q.__class__.__dict__
if not (attr.startswith('__') or
attr in deprecated_predicates)]
@cacheit
def get_known_facts():
return And(
Implies(Q.infinite, ~Q.finite),
Implies(Q.real, Q.complex),
Implies(Q.real, Q.hermitian),
Equivalent(Q.extended_real, Q.real | Q.infinite),
Equivalent(Q.even | Q.odd, Q.integer),
Implies(Q.even, ~Q.odd),
Equivalent(Q.prime, Q.integer & Q.positive & ~Q.composite),
Implies(Q.integer, Q.rational),
Implies(Q.rational, Q.algebraic),
Implies(Q.algebraic, Q.complex),
Equivalent(Q.transcendental | Q.algebraic, Q.complex),
Implies(Q.transcendental, ~Q.algebraic),
Implies(Q.imaginary, Q.complex & ~Q.real),
Implies(Q.imaginary, Q.antihermitian),
Implies(Q.antihermitian, ~Q.hermitian),
Equivalent(Q.irrational | Q.rational, Q.real),
Implies(Q.irrational, ~Q.rational),
Implies(Q.zero, Q.even),
Equivalent(Q.real, Q.negative | Q.zero | Q.positive),
Implies(Q.zero, ~Q.negative & ~Q.positive),
Implies(Q.negative, ~Q.positive),
Equivalent(Q.nonnegative, Q.zero | Q.positive),
Equivalent(Q.nonpositive, Q.zero | Q.negative),
Equivalent(Q.nonzero, Q.negative | Q.positive),
Implies(Q.orthogonal, Q.positive_definite),
Implies(Q.orthogonal, Q.unitary),
Implies(Q.unitary & Q.real, Q.orthogonal),
Implies(Q.unitary, Q.normal),
Implies(Q.unitary, Q.invertible),
Implies(Q.normal, Q.square),
Implies(Q.diagonal, Q.normal),
Implies(Q.positive_definite, Q.invertible),
Implies(Q.diagonal, Q.upper_triangular),
Implies(Q.diagonal, Q.lower_triangular),
Implies(Q.lower_triangular, Q.triangular),
Implies(Q.upper_triangular, Q.triangular),
Implies(Q.triangular, Q.upper_triangular | Q.lower_triangular),
Implies(Q.upper_triangular & Q.lower_triangular, Q.diagonal),
Implies(Q.diagonal, Q.symmetric),
Implies(Q.unit_triangular, Q.triangular),
Implies(Q.invertible, Q.fullrank),
Implies(Q.invertible, Q.square),
Implies(Q.symmetric, Q.square),
Implies(Q.fullrank & Q.square, Q.invertible),
Equivalent(Q.invertible, ~Q.singular),
Implies(Q.integer_elements, Q.real_elements),
Implies(Q.real_elements, Q.complex_elements),
)
from sympy.assumptions.ask_generated import (
get_known_facts_dict, get_known_facts_cnf)
|
|
#!/usr/bin/env python3
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Exports AppSearch Androidx code to Framework
#
# NOTE: This will remove and replace all files in the
# packages/modules/AppSearch path.
#
# Example usage (from root dir of androidx workspace):
# $ ./frameworks/support/appsearch/exportToFramework.py "$HOME/android/master" "<jetpack git sha>"
# Special directives supported by this script:
#
# Causes the file where it appears to not be copied at all:
# @exportToFramework:skipFile()
#
# Causes the text appearing between startStrip() and endStrip() to be removed during export:
# // @exportToFramework:startStrip() ... // @exportToFramework:endStrip()
#
# Replaced with @hide:
# <!--@exportToFramework:hide-->
#
# Replaced with @CurrentTimeMillisLong:
# /*@exportToFramework:CurrentTimeMillisLong*/
#
# Removes the text appearing between ifJetpack() and else(), and causes the text appearing between
# else() and --> to become uncommented, to support framework-only Javadocs:
# <!--@exportToFramework:ifJetpack()-->
# Jetpack-only Javadoc
# <!--@exportToFramework:else()
# Framework-only Javadoc
# -->
# Note: Using the above pattern, you can hide a method in Jetpack but unhide it in Framework like
# this:
# <!--@exportToFramework:ifJetpack()-->@hide<!--@exportToFramework:else()-->
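#
# At a high level (see ExportToFramework below): the destination trees under
# packages/modules/AppSearch are pruned, each Jetpack source file is copied with the
# directives above applied plus package renames (androidx.appsearch -> android.app.appsearch,
# androidx.appsearch.localstorage -> com.android.server.appsearch.external.localstorage, etc.),
# google-java-format --aosp -i is run over every written file, and the synced Jetpack
# change id is recorded in synced_jetpack_changeid.txt.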
import os
import re
import subprocess
import sys
# Jetpack paths relative to frameworks/support/appsearch
JETPACK_API_ROOT = 'appsearch/src/main/java/androidx/appsearch'
JETPACK_API_TEST_ROOT = 'appsearch/src/androidTest/java/androidx/appsearch'
JETPACK_IMPL_ROOT = 'appsearch-local-storage/src/main/java/androidx/appsearch'
JETPACK_IMPL_TEST_ROOT = 'appsearch-local-storage/src/androidTest/java/androidx/appsearch'
JETPACK_TEST_UTIL_ROOT = 'appsearch-test-util/src/main/java/androidx/appsearch'
JETPACK_TEST_UTIL_TEST_ROOT = 'appsearch-test-util/src/androidTest/java/androidx/appsearch'
# Framework paths relative to packages/modules/AppSearch
FRAMEWORK_API_ROOT = 'framework/java/external/android/app/appsearch'
FRAMEWORK_API_TEST_ROOT = 'testing/coretests/src/android/app/appsearch/external'
FRAMEWORK_IMPL_ROOT = 'service/java/com/android/server/appsearch/external'
FRAMEWORK_IMPL_TEST_ROOT = 'testing/servicestests/src/com/android/server/appsearch/external'
FRAMEWORK_TEST_UTIL_ROOT = 'testing/testutils/src/android/app/appsearch/testutil/external'
FRAMEWORK_TEST_UTIL_TEST_ROOT = 'testing/servicestests/src/android/app/appsearch/testutil/external'
FRAMEWORK_CTS_TEST_ROOT = '../../../cts/tests/appsearch/src/com/android/cts/appsearch/external'
GOOGLE_JAVA_FORMAT = (
'../../../prebuilts/tools/common/google-java-format/google-java-format')
# Miscellaneous constants
CHANGEID_FILE_NAME = 'synced_jetpack_changeid.txt'
class ExportToFramework:
def __init__(self, jetpack_appsearch_root, framework_appsearch_root):
self._jetpack_appsearch_root = jetpack_appsearch_root
self._framework_appsearch_root = framework_appsearch_root
self._written_files = []
def _PruneDir(self, dir_to_prune):
for walk_path, walk_folders, walk_files in os.walk(dir_to_prune):
for walk_filename in walk_files:
abs_path = os.path.join(walk_path, walk_filename)
print('Prune: remove "%s"' % abs_path)
os.remove(abs_path)
def _TransformAndCopyFile(
self, source_path, dest_path, transform_func=None, ignore_skips=False):
with open(source_path, 'r') as fh:
contents = fh.read()
if not ignore_skips and '@exportToFramework:skipFile()' in contents:
print('Skipping: "%s" -> "%s"' % (source_path, dest_path), file=sys.stderr)
return
print('Copy: "%s" -> "%s"' % (source_path, dest_path), file=sys.stderr)
if transform_func:
contents = transform_func(contents)
os.makedirs(os.path.dirname(dest_path), exist_ok=True)
with open(dest_path, 'w') as fh:
fh.write(contents)
# Save file for future formatting
self._written_files.append(dest_path)
def _TransformCommonCode(self, contents):
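"""Applies the export directives (stripping, Javadoc if/else, annotation markers)
and the shared androidx -> framework renames to ``contents`` and returns the result."""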
# Apply stripping
contents = re.sub(
r'\/\/ @exportToFramework:startStrip\(\).*?\/\/ @exportToFramework:endStrip\(\)',
'',
contents,
flags=re.DOTALL)
# Apply if/elses in javadocs
contents = re.sub(
r'<!--@exportToFramework:ifJetpack\(\)-->.*?<!--@exportToFramework:else\(\)(.*?)-->',
r'\1',
contents,
flags=re.DOTALL)
# Add additional imports if required
imports_to_add = []
if '@exportToFramework:CurrentTimeMillisLong' in contents:
imports_to_add.append('android.annotation.CurrentTimeMillisLong')
if '@exportToFramework:UnsupportedAppUsage' in contents:
imports_to_add.append('android.compat.annotation.UnsupportedAppUsage')
for import_to_add in imports_to_add:
contents = re.sub(
r'^(\s*package [^;]+;\s*)$', r'\1\nimport %s;\n' % import_to_add, contents,
flags=re.MULTILINE)
# Apply in-place replacements
return (contents
.replace('androidx.appsearch.app', 'android.app.appsearch')
.replace(
'androidx.appsearch.localstorage.',
'com.android.server.appsearch.external.localstorage.')
.replace('androidx.appsearch', 'android.app.appsearch')
.replace(
'androidx.annotation.GuardedBy',
'com.android.internal.annotations.GuardedBy')
.replace(
'androidx.annotation.VisibleForTesting',
'com.android.internal.annotations.VisibleForTesting')
.replace('androidx.annotation.', 'android.annotation.')
.replace('androidx.collection.ArrayMap', 'android.util.ArrayMap')
.replace('androidx.collection.ArraySet', 'android.util.ArraySet')
.replace(
'androidx.core.util.ObjectsCompat',
'java.util.Objects')
# Preconditions.checkNotNull is replaced with Objects.requireNonNull. We add both
# imports and let google-java-format sort out which one is unused.
.replace(
'import androidx.core.util.Preconditions;',
'import java.util.Objects; import com.android.internal.util.Preconditions;')
.replace('import androidx.annotation.RestrictTo;', '')
.replace('@RestrictTo(RestrictTo.Scope.LIBRARY)', '')
.replace('@RestrictTo(RestrictTo.Scope.LIBRARY_GROUP)', '')
.replace('Preconditions.checkNotNull(', 'Objects.requireNonNull(')
.replace('ObjectsCompat.', 'Objects.')
.replace('/*@exportToFramework:CurrentTimeMillisLong*/', '@CurrentTimeMillisLong')
.replace('/*@exportToFramework:UnsupportedAppUsage*/', '@UnsupportedAppUsage')
.replace('<!--@exportToFramework:hide-->', '@hide')
.replace('// @exportToFramework:skipFile()', '')
)
def _TransformTestCode(self, contents):
contents = (contents
.replace('androidx.appsearch.testutil.', 'android.app.appsearch.testutil.')
.replace(
'package androidx.appsearch.testutil;',
'package android.app.appsearch.testutil;')
.replace(
'androidx.appsearch.localstorage.LocalStorage',
'android.app.appsearch.AppSearchManager')
.replace('LocalStorage.', 'AppSearchManager.')
)
for shim in ['AppSearchSession', 'GlobalSearchSession', 'SearchResults']:
contents = re.sub(r"([^a-zA-Z])(%s)([^a-zA-Z0-9])" % shim, r'\1\2Shim\3', contents)
return self._TransformCommonCode(contents)
def _TransformAndCopyFolder(self, source_dir, dest_dir, transform_func=None):
for currentpath, folders, files in os.walk(source_dir):
dir_rel_to_root = os.path.relpath(currentpath, source_dir)
for filename in files:
source_abs_path = os.path.join(currentpath, filename)
dest_path = os.path.join(dest_dir, dir_rel_to_root, filename)
self._TransformAndCopyFile(source_abs_path, dest_path, transform_func)
def _ExportApiCode(self):
# Prod source
api_source_dir = os.path.join(self._jetpack_appsearch_root, JETPACK_API_ROOT)
api_dest_dir = os.path.join(self._framework_appsearch_root, FRAMEWORK_API_ROOT)
# Unit tests
api_test_source_dir = os.path.join(self._jetpack_appsearch_root, JETPACK_API_TEST_ROOT)
api_test_dest_dir = os.path.join(self._framework_appsearch_root, FRAMEWORK_API_TEST_ROOT)
# CTS tests
cts_test_source_dir = os.path.join(api_test_source_dir, 'cts')
cts_test_dest_dir = os.path.join(self._framework_appsearch_root, FRAMEWORK_CTS_TEST_ROOT)
# Test utils
test_util_source_dir = os.path.join(self._jetpack_appsearch_root, JETPACK_TEST_UTIL_ROOT)
test_util_dest_dir = os.path.join(self._framework_appsearch_root, FRAMEWORK_TEST_UTIL_ROOT)
# Prune existing files
self._PruneDir(api_dest_dir)
self._PruneDir(api_test_dest_dir)
self._PruneDir(cts_test_dest_dir)
self._PruneDir(test_util_dest_dir)
# Copy api classes. We can't use _TransformAndCopyFolder here because we
# need to specially handle the 'app' package.
print('~~~ Copying API classes ~~~')
def _TransformApiCode(contents):
contents = contents.replace(
'package androidx.appsearch.app;',
'package android.app.appsearch;')
return self._TransformCommonCode(contents)
for currentpath, folders, files in os.walk(api_source_dir):
dir_rel_to_root = os.path.relpath(currentpath, api_source_dir)
for filename in files:
# Figure out what folder to place them into
source_abs_path = os.path.join(currentpath, filename)
if dir_rel_to_root == 'app':
# Files in the 'app' folder live in the root of the platform tree
dest_path = os.path.join(api_dest_dir, filename)
else:
dest_path = os.path.join(api_dest_dir, dir_rel_to_root, filename)
self._TransformAndCopyFile(source_abs_path, dest_path, _TransformApiCode)
# Copy api unit tests. We can't use _TransformAndCopyFolder here because we need to skip the
# 'util' and 'cts' subfolders.
print('~~~ Copying API unit tests ~~~')
for currentpath, folders, files in os.walk(api_test_source_dir):
if (currentpath.startswith(cts_test_source_dir) or
currentpath.startswith(test_util_source_dir)):
continue
dir_rel_to_root = os.path.relpath(currentpath, api_test_source_dir)
for filename in files:
source_abs_path = os.path.join(currentpath, filename)
dest_path = os.path.join(api_test_dest_dir, dir_rel_to_root, filename)
self._TransformAndCopyFile(source_abs_path, dest_path, self._TransformTestCode)
# Copy CTS tests
print('~~~ Copying CTS tests ~~~')
self._TransformAndCopyFolder(
cts_test_source_dir, cts_test_dest_dir, transform_func=self._TransformTestCode)
# Copy test utils
print('~~~ Copying test utils ~~~')
self._TransformAndCopyFolder(
test_util_source_dir, test_util_dest_dir, transform_func=self._TransformTestCode)
for iface_file in (
'AppSearchSession.java', 'GlobalSearchSession.java', 'SearchResults.java'):
dest_file_name = os.path.splitext(iface_file)[0] + 'Shim.java'
self._TransformAndCopyFile(
os.path.join(api_source_dir, 'app/' + iface_file),
os.path.join(test_util_dest_dir, dest_file_name),
transform_func=self._TransformTestCode,
ignore_skips=True)
def _ExportImplCode(self):
impl_source_dir = os.path.join(self._jetpack_appsearch_root, JETPACK_IMPL_ROOT)
impl_test_source_dir = os.path.join(self._jetpack_appsearch_root, JETPACK_IMPL_TEST_ROOT)
impl_dest_dir = os.path.join(self._framework_appsearch_root, FRAMEWORK_IMPL_ROOT)
impl_test_dest_dir = os.path.join(self._framework_appsearch_root, FRAMEWORK_IMPL_TEST_ROOT)
test_util_test_source_dir = os.path.join(
self._jetpack_appsearch_root, JETPACK_TEST_UTIL_TEST_ROOT)
test_util_test_dest_dir = os.path.join(
self._framework_appsearch_root, FRAMEWORK_TEST_UTIL_TEST_ROOT)
# Prune
self._PruneDir(impl_dest_dir)
self._PruneDir(impl_test_dest_dir)
self._PruneDir(test_util_test_dest_dir)
# Copy impl classes
def _TransformImplCode(contents):
contents = (contents
.replace('package androidx.appsearch',
'package com.android.server.appsearch.external')
.replace('com.google.android.icing.protobuf.', 'com.google.protobuf.')
)
return self._TransformCommonCode(contents)
self._TransformAndCopyFolder(
impl_source_dir, impl_dest_dir, transform_func=_TransformImplCode)
# Copy servicestests
def _TransformImplTestCode(contents):
contents = (contents
.replace('package androidx.appsearch',
'package com.android.server.appsearch.external')
.replace('com.google.android.icing.proto.',
'com.android.server.appsearch.icing.proto.')
.replace('com.google.android.icing.protobuf.',
'com.android.server.appsearch.protobuf.')
)
return self._TransformTestCode(contents)
self._TransformAndCopyFolder(
impl_test_source_dir, impl_test_dest_dir, transform_func=_TransformImplTestCode)
self._TransformAndCopyFolder(
test_util_test_source_dir,
test_util_test_dest_dir,
transform_func=self._TransformTestCode)
def _FormatWrittenFiles(self):
google_java_format_cmd = [GOOGLE_JAVA_FORMAT, '--aosp', '-i'] + self._written_files
print('$ ' + ' '.join(google_java_format_cmd))
subprocess.check_call(google_java_format_cmd, cwd=self._framework_appsearch_root)
def ExportCode(self):
self._ExportApiCode()
self._ExportImplCode()
self._FormatWrittenFiles()
def WriteChangeIdFile(self, changeid):
"""Copies the changeid of the most recent public CL into a file on the framework side.
This file is used for tracking, to determine which Jetpack change the framework is
synced to.
You must always provide the changeid of an exported, preferably submitted, CL. If you
abandon the CL pointed to by this changeid, the next person syncing the framework will
be unable to determine which CL it is synced to.
"""
file_path = os.path.join(self._framework_appsearch_root, CHANGEID_FILE_NAME)
with open(file_path, 'w') as fh:
print(changeid, file=fh)
print('Wrote "%s"' % file_path)
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage: %s <path/to/framework/checkout> <git sha of head jetpack commit>' % (
sys.argv[0]),
file=sys.stderr)
sys.exit(1)
source_dir = os.path.normpath(os.path.dirname(sys.argv[0]))
dest_dir = os.path.normpath(sys.argv[1])
dest_dir = os.path.join(dest_dir, 'packages/modules/AppSearch')
if not os.path.isdir(dest_dir):
print('Destination path "%s" does not exist or is not a directory' % (
dest_dir),
file=sys.stderr)
sys.exit(1)
exporter = ExportToFramework(source_dir, dest_dir)
exporter.ExportCode()
exporter.WriteChangeIdFile(sys.argv[2])
|
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations.
There are "opportunistic" tests which allow testing against all 3 databases
(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up databases named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
tests will then use that database and user/password combination to run.
For postgres on Ubuntu this can be done with the following commands::
| sudo -u postgres psql
| postgres=# create user openstack_citest with createdb login password
| 'openstack_citest';
| postgres=# create database openstack_citest with owner openstack_citest;
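For MySQL a similar setup is needed (a sketch only; adjust to your local install)::
| sudo mysql
| mysql> CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY 'openstack_citest';
| mysql> CREATE DATABASE openstack_citest;
| mysql> GRANT ALL PRIVILEGES ON openstack_citest.* TO 'openstack_citest'@'localhost';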
"""
import glob
# NOTE(dhellmann): Use stdlib logging instead of oslo.log because we
# need to call methods on the logger that are not exposed through the
# adapter provided by oslo.log.
import logging
import os
from migrate import UniqueConstraint
from migrate.versioning import repository
import mock
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as oslodbutils
import sqlalchemy
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy.sql import null
from nova.db import migration
from nova.db.sqlalchemy import migrate_repo
from nova.db.sqlalchemy import migration as sa_migration
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova import test
from nova.tests import fixtures as nova_fixtures
LOG = logging.getLogger(__name__)
class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
test_migrations.WalkVersionsMixin):
"""Test sqlalchemy-migrate migrations."""
TIMEOUT_SCALING_FACTOR = 2
@property
def INIT_VERSION(self):
return migration.db_initial_version()
@property
def REPOSITORY(self):
return repository.Repository(
os.path.abspath(os.path.dirname(migrate_repo.__file__)))
@property
def migration_api(self):
return sa_migration.versioning_api
@property
def migrate_engine(self):
return self.engine
def setUp(self):
super(NovaMigrationsCheckers, self).setUp()
# NOTE(viktors): We should reduce log output because it causes issues
# when we run tests with testr
migrate_log = logging.getLogger('migrate')
old_level = migrate_log.level
migrate_log.setLevel(logging.WARN)
self.addCleanup(migrate_log.setLevel, old_level)
# NOTE(rpodolyaka): we need to repeat the functionality of the base
# test case a bit here as this gets overridden by the oslotest base test
# case and nova base test case cleanup must be the last one (as it
# deletes attributes of test case instances)
self.useFixture(nova_fixtures.Timeout(
os.environ.get('OS_TEST_TIMEOUT', 0),
self.TIMEOUT_SCALING_FACTOR))
def assertColumnExists(self, engine, table_name, column):
self.assertTrue(oslodbutils.column_exists(engine, table_name, column),
'Column %s.%s does not exist' % (table_name, column))
def assertColumnNotExists(self, engine, table_name, column):
self.assertFalse(oslodbutils.column_exists(engine, table_name, column),
'Column %s.%s should not exist' % (table_name, column))
def assertTableNotExists(self, engine, table):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
oslodbutils.get_table, engine, table)
def assertIndexExists(self, engine, table_name, index):
self.assertTrue(oslodbutils.index_exists(engine, table_name, index),
'Index %s on table %s does not exist' %
(index, table_name))
def assertIndexNotExists(self, engine, table_name, index):
self.assertFalse(oslodbutils.index_exists(engine, table_name, index),
'Index %s on table %s should not exist' %
(index, table_name))
def assertIndexMembers(self, engine, table, index, members):
# NOTE(johannes): Order of columns can matter. Most SQL databases
# can use the leading columns for optimizing queries that don't
# include all of the covered columns.
self.assertIndexExists(engine, table, index)
t = oslodbutils.get_table(engine, table)
index_columns = None
for idx in t.indexes:
if idx.name == index:
index_columns = [c.name for c in idx.columns]
break
self.assertEqual(members, index_columns)
# Implementations for ModelsMigrationsSync
def db_sync(self, engine):
with mock.patch.object(sa_migration, 'get_engine',
return_value=engine):
sa_migration.db_sync()
def get_engine(self):
return self.migrate_engine
def get_metadata(self):
return models.BASE.metadata
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == 'table':
# migrate_version is a sqlalchemy-migrate control table and
# isn't included in the model. shadow_* are generated from
# the model and have their own tests to ensure they don't
# drift.
if name == 'migrate_version' or name.startswith('shadow_'):
return False
return True
def _skippable_migrations(self):
special = [
216, # Havana
272, # NOOP migration due to revert
]
havana_placeholders = range(217, 227)
icehouse_placeholders = range(235, 244)
juno_placeholders = range(255, 265)
kilo_placeholders = range(281, 291)
return (special +
havana_placeholders +
icehouse_placeholders +
juno_placeholders +
kilo_placeholders)
def migrate_up(self, version, with_data=False):
if with_data:
check = getattr(self, "_check_%03d" % version, None)
if version not in self._skippable_migrations():
self.assertIsNotNone(check,
('DB Migration %i does not have a '
'test. Please add one!') % version)
super(NovaMigrationsCheckers, self).migrate_up(version, with_data)
def test_walk_versions(self):
self.walk_versions(snake_walk=False, downgrade=False)
def _check_227(self, engine, data):
table = oslodbutils.get_table(engine, 'project_user_quotas')
# Insert fake_quotas with the longest resource name.
fake_quotas = {'id': 5,
'project_id': 'fake_project',
'user_id': 'fake_user',
'resource': 'injected_file_content_bytes',
'hard_limit': 10}
table.insert().execute(fake_quotas)
# Check we can get the longest resource name.
quota = table.select(table.c.id == 5).execute().first()
self.assertEqual(quota['resource'], 'injected_file_content_bytes')
def _check_228(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'metrics')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.metrics.type,
sqlalchemy.types.Text)
def _check_229(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.extra_resources.type,
sqlalchemy.types.Text)
def _check_230(self, engine, data):
for table_name in ['instance_actions_events',
'shadow_instance_actions_events']:
self.assertColumnExists(engine, table_name, 'host')
self.assertColumnExists(engine, table_name, 'details')
action_events = oslodbutils.get_table(engine,
'instance_actions_events')
self.assertIsInstance(action_events.c.host.type,
sqlalchemy.types.String)
self.assertIsInstance(action_events.c.details.type,
sqlalchemy.types.Text)
def _check_231(self, engine, data):
self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')
instances = oslodbutils.get_table(engine, 'instances')
self.assertIsInstance(instances.c.ephemeral_key_uuid.type,
sqlalchemy.types.String)
self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
def _check_232(self, engine, data):
table_names = ['compute_node_stats', 'compute_nodes',
'instance_actions', 'instance_actions_events',
'instance_faults', 'migrations']
for table_name in table_names:
self.assertTableNotExists(engine, 'dump_' + table_name)
def _check_233(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'stats')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.stats.type,
sqlalchemy.types.Text)
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
oslodbutils.get_table, engine, 'compute_node_stats')
def _check_234(self, engine, data):
self.assertIndexMembers(engine, 'reservations',
'reservations_deleted_expire_idx',
['deleted', 'expire'])
def _check_244(self, engine, data):
volume_usage_cache = oslodbutils.get_table(
engine, 'volume_usage_cache')
self.assertEqual(64, volume_usage_cache.c.user_id.type.length)
def _pre_upgrade_245(self, engine):
# create a fake network
networks = oslodbutils.get_table(engine, 'networks')
fake_network = {'id': 1}
networks.insert().execute(fake_network)
def _check_245(self, engine, data):
networks = oslodbutils.get_table(engine, 'networks')
network = networks.select(networks.c.id == 1).execute().first()
# mtu should default to None
self.assertIsNone(network.mtu)
# dhcp_server should default to None
self.assertIsNone(network.dhcp_server)
# enable dhcp should default to true
self.assertTrue(network.enable_dhcp)
# share address should default to false
self.assertFalse(network.share_address)
def _check_246(self, engine, data):
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys
if fk.parent.name == 'compute_node_id']))
def _check_247(self, engine, data):
quota_usages = oslodbutils.get_table(engine, 'quota_usages')
self.assertFalse(quota_usages.c.resource.nullable)
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
self.assertTrue(pci_devices.c.deleted.nullable)
self.assertFalse(pci_devices.c.product_id.nullable)
self.assertFalse(pci_devices.c.vendor_id.nullable)
self.assertFalse(pci_devices.c.dev_type.nullable)
def _check_248(self, engine, data):
self.assertIndexMembers(engine, 'reservations',
'reservations_deleted_expire_idx',
['deleted', 'expire'])
def _check_249(self, engine, data):
# Assert that only one index exists that covers columns
# instance_uuid and device_name
bdm = oslodbutils.get_table(engine, 'block_device_mapping')
self.assertEqual(1, len([i for i in bdm.indexes
if [c.name for c in i.columns] ==
['instance_uuid', 'device_name']]))
def _check_250(self, engine, data):
self.assertTableNotExists(engine, 'instance_group_metadata')
self.assertTableNotExists(engine, 'shadow_instance_group_metadata')
def _check_251(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
self.assertColumnExists(engine, 'shadow_compute_nodes',
'numa_topology')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
shadow_compute_nodes = oslodbutils.get_table(engine,
'shadow_compute_nodes')
self.assertIsInstance(compute_nodes.c.numa_topology.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type,
sqlalchemy.types.Text)
def _check_252(self, engine, data):
oslodbutils.get_table(engine, 'instance_extra')
oslodbutils.get_table(engine, 'shadow_instance_extra')
self.assertIndexMembers(engine, 'instance_extra',
'instance_extra_idx',
['instance_uuid'])
def _check_253(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'pci_requests')
self.assertColumnExists(
engine, 'shadow_instance_extra', 'pci_requests')
instance_extra = oslodbutils.get_table(engine, 'instance_extra')
shadow_instance_extra = oslodbutils.get_table(engine,
'shadow_instance_extra')
self.assertIsInstance(instance_extra.c.pci_requests.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_instance_extra.c.pci_requests.type,
sqlalchemy.types.Text)
def _check_254(self, engine, data):
self.assertColumnExists(engine, 'pci_devices', 'request_id')
self.assertColumnExists(
engine, 'shadow_pci_devices', 'request_id')
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
shadow_pci_devices = oslodbutils.get_table(
engine, 'shadow_pci_devices')
self.assertIsInstance(pci_devices.c.request_id.type,
sqlalchemy.types.String)
self.assertIsInstance(shadow_pci_devices.c.request_id.type,
sqlalchemy.types.String)
def _check_265(self, engine, data):
# Assert that only one index exists that covers columns
# host and deleted
instances = oslodbutils.get_table(engine, 'instances')
self.assertEqual(1, len([i for i in instances.indexes
if [c.name for c in i.columns][:2] ==
['host', 'deleted']]))
# and only one index covers host column
iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
self.assertEqual(1, len([i for i in iscsi_targets.indexes
if [c.name for c in i.columns][:1] ==
['host']]))
def _check_266(self, engine, data):
self.assertColumnExists(engine, 'tags', 'resource_id')
self.assertColumnExists(engine, 'tags', 'tag')
table = oslodbutils.get_table(engine, 'tags')
self.assertIsInstance(table.c.resource_id.type,
sqlalchemy.types.String)
self.assertIsInstance(table.c.tag.type,
sqlalchemy.types.String)
def _pre_upgrade_267(self, engine):
# Create a fixed_ips row with a null instance_uuid (if not already
# there) to make sure that's not deleted.
fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
fake_fixed_ip = {'id': 1}
fixed_ips.insert().execute(fake_fixed_ip)
# Create an instance record with a valid (non-null) UUID so we make
# sure we don't do something stupid and delete valid records.
instances = oslodbutils.get_table(engine, 'instances')
fake_instance = {'id': 1, 'uuid': 'fake-non-null-uuid'}
instances.insert().execute(fake_instance)
# Add a null instance_uuid entry for the volumes table
# since it doesn't have a foreign key back to the instances table.
volumes = oslodbutils.get_table(engine, 'volumes')
fake_volume = {'id': '9c3c317e-24db-4d57-9a6f-96e6d477c1da'}
volumes.insert().execute(fake_volume)
def _check_267(self, engine, data):
# fixed_ips.instance_uuid should remain nullable; instances.uuid must be
# non-nullable and the unique constraint on it must exist.
fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
self.assertTrue(fixed_ips.c.instance_uuid.nullable)
fixed_ip = fixed_ips.select(fixed_ips.c.id == 1).execute().first()
self.assertIsNone(fixed_ip.instance_uuid)
instances = oslodbutils.get_table(engine, 'instances')
self.assertFalse(instances.c.uuid.nullable)
inspector = reflection.Inspector.from_engine(engine)
constraints = inspector.get_unique_constraints('instances')
constraint_names = [constraint['name'] for constraint in constraints]
self.assertIn('uniq_instances0uuid', constraint_names)
# Make sure the instances record with the valid uuid is still there.
instance = instances.select(instances.c.id == 1).execute().first()
self.assertIsNotNone(instance)
# Check that the null entry in the volumes table is still there since
# we skipped tables that don't have FK's back to the instances table.
volumes = oslodbutils.get_table(engine, 'volumes')
self.assertTrue(volumes.c.instance_uuid.nullable)
volume = volumes.select(
volumes.c.id == '9c3c317e-24db-4d57-9a6f-96e6d477c1da'
).execute().first()
self.assertIsNone(volume.instance_uuid)
def test_migration_267(self):
# This is separate from test_walk_versions so we can test the case
# where there are non-null instance_uuid entries in the database which
# cause the 267 migration to fail.
engine = self.migrate_engine
self.migration_api.version_control(
engine, self.REPOSITORY, self.INIT_VERSION)
self.migration_api.upgrade(engine, self.REPOSITORY, 266)
# Create a consoles record with a null instance_uuid so
# we can test that the upgrade fails if that entry is found.
# NOTE(mriedem): We use the consoles table since that's the only table
# created in the 216 migration with a ForeignKey created on the
# instance_uuid column for sqlite.
consoles = oslodbutils.get_table(engine, 'consoles')
fake_console = {'id': 1}
consoles.insert().execute(fake_console)
# NOTE(mriedem): We expect the 267 migration to hit a ValidationError
# because the consoles table has a null instance_uuid entry.
ex = self.assertRaises(exception.ValidationError,
self.migration_api.upgrade,
engine, self.REPOSITORY, 267)
self.assertIn("There are 1 records in the "
"'consoles' table where the uuid or "
"instance_uuid column is NULL.",
ex.kwargs['detail'])
# Remove the consoles entry with the null instance_uuid column.
rows = consoles.delete().where(
consoles.c['instance_uuid'] == null()).execute().rowcount
self.assertEqual(1, rows)
# Now run the 267 upgrade again.
self.migration_api.upgrade(engine, self.REPOSITORY, 267)
# Make sure the consoles entry with the null instance_uuid
# was deleted.
console = consoles.select(consoles.c.id == 1).execute().first()
self.assertIsNone(console)
def _check_268(self, engine, data):
# We can only assert that the col exists, not the unique constraint
# as the engine is running sqlite
self.assertColumnExists(engine, 'compute_nodes', 'host')
self.assertColumnExists(engine, 'shadow_compute_nodes', 'host')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
shadow_compute_nodes = oslodbutils.get_table(
engine, 'shadow_compute_nodes')
self.assertIsInstance(compute_nodes.c.host.type,
sqlalchemy.types.String)
self.assertIsInstance(shadow_compute_nodes.c.host.type,
sqlalchemy.types.String)
def _check_269(self, engine, data):
self.assertColumnExists(engine, 'pci_devices', 'numa_node')
self.assertColumnExists(engine, 'shadow_pci_devices', 'numa_node')
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
shadow_pci_devices = oslodbutils.get_table(
engine, 'shadow_pci_devices')
self.assertIsInstance(pci_devices.c.numa_node.type,
sqlalchemy.types.Integer)
self.assertTrue(pci_devices.c.numa_node.nullable)
self.assertIsInstance(shadow_pci_devices.c.numa_node.type,
sqlalchemy.types.Integer)
self.assertTrue(shadow_pci_devices.c.numa_node.nullable)
def _check_270(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'flavor')
self.assertColumnExists(engine, 'shadow_instance_extra', 'flavor')
instance_extra = oslodbutils.get_table(engine, 'instance_extra')
shadow_instance_extra = oslodbutils.get_table(
engine, 'shadow_instance_extra')
self.assertIsInstance(instance_extra.c.flavor.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_instance_extra.c.flavor.type,
sqlalchemy.types.Text)
def _check_271(self, engine, data):
self.assertIndexMembers(engine, 'block_device_mapping',
'snapshot_id', ['snapshot_id'])
self.assertIndexMembers(engine, 'block_device_mapping',
'volume_id', ['volume_id'])
self.assertIndexMembers(engine, 'dns_domains',
'dns_domains_project_id_idx',
['project_id'])
self.assertIndexMembers(engine, 'fixed_ips',
'network_id', ['network_id'])
self.assertIndexMembers(engine, 'fixed_ips',
'fixed_ips_instance_uuid_fkey',
['instance_uuid'])
self.assertIndexMembers(engine, 'fixed_ips',
'fixed_ips_virtual_interface_id_fkey',
['virtual_interface_id'])
self.assertIndexMembers(engine, 'floating_ips',
'fixed_ip_id', ['fixed_ip_id'])
self.assertIndexMembers(engine, 'iscsi_targets',
'iscsi_targets_volume_id_fkey', ['volume_id'])
self.assertIndexMembers(engine, 'virtual_interfaces',
'virtual_interfaces_network_id_idx',
['network_id'])
self.assertIndexMembers(engine, 'virtual_interfaces',
'virtual_interfaces_instance_uuid_fkey',
['instance_uuid'])
# Removed on MySQL, never existed on other databases
self.assertIndexNotExists(engine, 'dns_domains', 'project_id')
self.assertIndexNotExists(engine, 'virtual_interfaces', 'network_id')
def _pre_upgrade_273(self, engine):
if engine.name != 'sqlite':
return
# Drop a variety of unique constraints to ensure that the script
# properly re-adds them
for table_name, constraint_name in [
('compute_nodes', 'uniq_compute_nodes0'
'host0hypervisor_hostname'),
('fixed_ips', 'uniq_fixed_ips0address0deleted'),
('instance_info_caches', 'uniq_instance_info_caches0'
'instance_uuid'),
('instance_type_projects', 'uniq_instance_type_projects0'
'instance_type_id0project_id0'
'deleted'),
('pci_devices', 'uniq_pci_devices0compute_node_id0'
'address0deleted'),
('virtual_interfaces', 'uniq_virtual_interfaces0'
'address0deleted')]:
table = oslodbutils.get_table(engine, table_name)
constraints = [c for c in table.constraints
if c.name == constraint_name]
for cons in constraints:
# Need to use sqlalchemy-migrate UniqueConstraint
cons = UniqueConstraint(*[c.name for c in cons.columns],
name=cons.name,
table=table)
cons.drop()
def _check_273(self, engine, data):
for src_table, src_column, dst_table, dst_column in [
('fixed_ips', 'instance_uuid', 'instances', 'uuid'),
('block_device_mapping', 'instance_uuid', 'instances', 'uuid'),
('instance_info_caches', 'instance_uuid', 'instances', 'uuid'),
('instance_metadata', 'instance_uuid', 'instances', 'uuid'),
('instance_system_metadata', 'instance_uuid',
'instances', 'uuid'),
('instance_type_projects', 'instance_type_id',
'instance_types', 'id'),
('iscsi_targets', 'volume_id', 'volumes', 'id'),
('reservations', 'usage_id', 'quota_usages', 'id'),
('security_group_instance_association', 'instance_uuid',
'instances', 'uuid'),
('security_group_instance_association', 'security_group_id',
'security_groups', 'id'),
('virtual_interfaces', 'instance_uuid', 'instances', 'uuid'),
('compute_nodes', 'service_id', 'services', 'id'),
('instance_actions', 'instance_uuid', 'instances', 'uuid'),
('instance_faults', 'instance_uuid', 'instances', 'uuid'),
('migrations', 'instance_uuid', 'instances', 'uuid')]:
src_table = oslodbutils.get_table(engine, src_table)
fkeys = {fk.parent.name: fk.column
for fk in src_table.foreign_keys}
self.assertIn(src_column, fkeys)
self.assertEqual(fkeys[src_column].table.name, dst_table)
self.assertEqual(fkeys[src_column].name, dst_column)
def _check_274(self, engine, data):
self.assertIndexMembers(engine, 'instances',
'instances_project_id_deleted_idx',
['project_id', 'deleted'])
self.assertIndexNotExists(engine, 'instances', 'project_id')
def _pre_upgrade_275(self, engine):
# Create a keypair record so we can test that the upgrade will set
# 'ssh' as default value in the new column for the previous keypair
# entries.
key_pairs = oslodbutils.get_table(engine, 'key_pairs')
fake_keypair = {'name': 'test-migr'}
key_pairs.insert().execute(fake_keypair)
def _check_275(self, engine, data):
self.assertColumnExists(engine, 'key_pairs', 'type')
self.assertColumnExists(engine, 'shadow_key_pairs', 'type')
key_pairs = oslodbutils.get_table(engine, 'key_pairs')
shadow_key_pairs = oslodbutils.get_table(engine, 'shadow_key_pairs')
self.assertIsInstance(key_pairs.c.type.type,
sqlalchemy.types.String)
self.assertIsInstance(shadow_key_pairs.c.type.type,
sqlalchemy.types.String)
# Make sure the keypair entry will have the type 'ssh'
key_pairs = oslodbutils.get_table(engine, 'key_pairs')
keypair = key_pairs.select(
key_pairs.c.name == 'test-migr').execute().first()
self.assertEqual('ssh', keypair.type)
def _check_276(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'vcpu_model')
self.assertColumnExists(engine, 'shadow_instance_extra', 'vcpu_model')
instance_extra = oslodbutils.get_table(engine, 'instance_extra')
shadow_instance_extra = oslodbutils.get_table(
engine, 'shadow_instance_extra')
self.assertIsInstance(instance_extra.c.vcpu_model.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_instance_extra.c.vcpu_model.type,
sqlalchemy.types.Text)
def _check_277(self, engine, data):
self.assertIndexMembers(engine, 'fixed_ips',
'fixed_ips_deleted_allocated_updated_at_idx',
['deleted', 'allocated', 'updated_at'])
def _check_278(self, engine, data):
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertEqual(0, len([fk for fk in compute_nodes.foreign_keys
if fk.parent.name == 'service_id']))
self.assertTrue(compute_nodes.c.service_id.nullable)
def _check_279(self, engine, data):
inspector = reflection.Inspector.from_engine(engine)
constraints = inspector.get_unique_constraints('compute_nodes')
constraint_names = [constraint['name'] for constraint in constraints]
self.assertNotIn('uniq_compute_nodes0host0hypervisor_hostname',
constraint_names)
self.assertIn('uniq_compute_nodes0host0hypervisor_hostname0deleted',
constraint_names)
def _check_280(self, engine, data):
key_pairs = oslodbutils.get_table(engine, 'key_pairs')
self.assertFalse(key_pairs.c.name.nullable)
def _check_291(self, engine, data):
# NOTE(danms): This is a dummy migration that just does a consistency
# check
pass
def _check_292(self, engine, data):
self.assertTableNotExists(engine, 'iscsi_targets')
self.assertTableNotExists(engine, 'volumes')
self.assertTableNotExists(engine, 'shadow_iscsi_targets')
self.assertTableNotExists(engine, 'shadow_volumes')
class TestNovaMigrationsSQLite(NovaMigrationsCheckers,
test_base.DbTestCase,
test.NoDBTestCase):
pass
class TestNovaMigrationsMySQL(NovaMigrationsCheckers,
test_base.MySQLOpportunisticTestCase,
test.NoDBTestCase):
def test_innodb_tables(self):
with mock.patch.object(sa_migration, 'get_engine',
return_value=self.migrate_engine):
sa_migration.db_sync()
total = self.migrate_engine.execute(
"SELECT count(*) "
"FROM information_schema.TABLES "
"WHERE TABLE_SCHEMA = '%(database)s'" %
{'database': self.migrate_engine.url.database})
self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
noninnodb = self.migrate_engine.execute(
"SELECT count(*) "
"FROM information_schema.TABLES "
"WHERE TABLE_SCHEMA='%(database)s' "
"AND ENGINE != 'InnoDB' "
"AND TABLE_NAME != 'migrate_version'" %
{'database': self.migrate_engine.url.database})
count = noninnodb.scalar()
self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
class TestNovaMigrationsPostgreSQL(NovaMigrationsCheckers,
test_base.PostgreSQLOpportunisticTestCase,
test.NoDBTestCase):
pass
class ProjectTestCase(test.NoDBTestCase):
def test_no_migrations_have_downgrade(self):
topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
py_glob = os.path.join(topdir, "nova", "db", "sqlalchemy",
"migrate_repo", "versions", "*.py")
includes_downgrade = []
for path in glob.iglob(py_glob):
has_upgrade = False
has_downgrade = False
with open(path, "r") as f:
for line in f:
if 'def upgrade(' in line:
has_upgrade = True
if 'def downgrade(' in line:
has_downgrade = True
if has_upgrade and has_downgrade:
fname = os.path.basename(path)
includes_downgrade.append(fname)
helpful_msg = ("The following migrations have a downgrade "
"which is not supported:"
"\n\t%s" % '\n\t'.join(sorted(includes_downgrade)))
self.assertFalse(includes_downgrade, helpful_msg)
|
|
import tensorflow as tf
import numpy as np
import time
import h5py
# import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
from copy import deepcopy
import os
import os.path
from collections import OrderedDict
import pickle
# import cPickle as pickle
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
class cnnMNIST(object):
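"""Binary classifier over 15x1024 spectral sequences (descriptive summary of the
graph built below): two 1-D conv + max-pool stages feed a 2-layer GRU; the last GRU
output goes through a fully connected layer to 2 logits, trained with a summed
softmax cross-entropy loss and the Adam optimizer (lr=1e-3)."""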
def __init__(self):
self.lr = 1e-3
self.epochs = 1000
self.build_graph()
def onehot_labels(self, labels):
out = np.zeros((labels.shape[0], 2))
for i in range(labels.shape[0]):
out[i, :] = np.eye(2, dtype=int)[int(labels[i])]
return out
def onenothot_labels(self, labels):
out = np.zeros((labels.shape[0],))
for i in range(labels.shape[0]):
out[i] = np.argmax(labels[i, :])
return out
def get_data(self):
# data_norm = True
# data_augmentation = False
f = h5py.File('./sequential_dataset.h5', 'r')
X = f['train']
X_test = f['test']
self.x_train = X
self.x_test = X_test
# NOTE: always use the keylist to get data
self.data_keylist = list(X.keys())
return
def batch(self, iterable, n=1, shuffle=True, small_test=True, usethesekeys=None, shortset=False):
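"""Yields (spectra, one-hot labels, sample weights) per HDF5 group key: weights are
1.0 except 1e5 where the raw label is >= 0.5. Shuffles the key list when ``shuffle``
is True; ``shortset`` truncates the key list to its first 100 entries."""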
if shuffle:
self.shuffle()
if usethesekeys is None:
keylist = self.data_keylist
else:
keylist = usethesekeys
if shortset:
keylist = keylist[:100]
# l = len(iterable)
for i in range(len(keylist)):
x = np.array(iterable[keylist[i]]['measured_spectra'])
y = np.array(iterable[keylist[i]]['labels'])
mask = y >= 0.5
z = np.ones((y.shape[0],))
z[mask] = 100000.0
y = self.onehot_labels(y)
yield x, y, z
def validation_batcher(self):
f = h5py.File('./sequential_dataset_validation.h5', 'r')
# f = h5py.File('/home/holiestcow/Documents/2017_fall/ne697_hayward/lecture/datacompetition/sequential_dataset_validation.h5', 'r')
samplelist = list(f.keys())
# samplelist = samplelist[:10]
for i in range(len(samplelist)):
data = f[samplelist[i]]
yield data
def build_graph(self):
self.x = tf.placeholder(tf.float32, shape=[None, 15, 1024])
# [batch, height, width, channels]
self.y_ = tf.placeholder(tf.float32, shape=[None, 2])
# self.weights = tf.placeholder(tf.float32, shape=[None])
# x_image = self.hack_1dreshape(self.x)
x_image = tf.reshape(self.x, [-1, 1, 1024, 15])
# define conv-layer variables
# [5, 5, 1, 32]
print(x_image.shape)
W_conv1 = self.weight_variable([1, 5, 15, 15])  # first conv layer: 15 kernels of size 1x5 over 15 input channels
b_conv1 = self.bias_variable([15])
W_conv2 = self.weight_variable([1, 5, 15, 15])
b_conv2 = self.bias_variable([15])
# x_image = tf.reshape(self.x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(self.conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = self.max_pool_2x2(h_conv1)
h_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = self.max_pool_2x2(h_conv2)
print(h_pool2.shape)
new_features = tf.reshape(h_pool2, [-1, 15, 256])
print(new_features.shape)
num_units = 128
num_layers = 2
# dropout = tf.placeholder(tf.float32)
cells = []
for _ in range(num_layers):
cell = tf.contrib.rnn.GRUCell(num_units) # Or LSTMCell(num_units)
# cell = tf.contrib.rnn.DropoutWrapper(
# cell, output_keep_prob=1.0)
cells.append(cell)
cell = tf.contrib.rnn.MultiRNNCell(cells)
output, state = tf.nn.dynamic_rnn(cell, new_features, dtype=tf.float32)
output = tf.transpose(output, [1, 0, 2])
last = tf.gather(output, int(output.get_shape()[0]) - 1)
out_size = self.y_.get_shape()[1].value
self.y_conv = tf.contrib.layers.fully_connected(
last, out_size, activation_fn=None)
# self.y_conv = tf.nn.softmax(logit) # probably a mistake here
# ratio = 1.0 / 1000000.0
# ratio = 1.0 / ratio
# class_weight = tf.constant([ratio, 1.0 - ratio])
# weighted_logits = tf.multiply(self.y_conv, class_weight) # shape [batch_size, 2]
# self.loss = tf.nn.softmax_cross_entropy_with_logits(
# logits=weighted_logits, labels=self.y_, name="xent_raw")
# NOTE: Normal gru
# self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
# NOTE Normal gru with summing instead of mean
self.loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
# NOTE: Weighted gru
# self.loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=self.y_, logits=self.y_conv, pos_weight=200.0))
# NOTE: Weighted gru with summing instead of mean
# self.loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
# self.loss = tf.losses.sparse_softmax_cross_entropy(self.y_, self.y_conv, weights=self.weights)
self.train_step = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
def shuffle(self):
np.random.shuffle(self.data_keylist)
return
def train(self):
self.sess = tf.Session()
init = tf.global_variables_initializer()
self.sess.run(init)
self.eval() # creating evaluation
a = time.time()
for i in range(self.epochs):
# batch = mnist.train.next_batch(50)
x_generator = self.batch(self.x_train, shuffle=True)
if i % 10 == 0 and i != 0:
counter = 0
sum_acc = 0
sum_loss = 0
hits = 0
x_generator_test = self.batch(self.x_test,
usethesekeys=list(self.x_test.keys()), shortset=True)
for j, k, z in x_generator_test:
train_loss, prediction = self.sess.run([self.loss, self.prediction],feed_dict={self.x: j,
self.y_: k})
sum_loss += np.sum(train_loss)
hits += np.sum(prediction)
counter += 1
b = time.time()
print('step {}:\navg loss {}\ntotalhits {}\ntime elapsed: {} s'.format(i, sum_loss / counter, hits, b-a))
x, y, z = next(x_generator)
self.sess.run([self.train_step], feed_dict={
self.x: x,
self.y_: y})
# self.shuffle()
def eval(self):
# self.time_index = np.arange(self.y_conv.get_shape()[0])
self.prediction = tf.argmax(self.y_conv, 1)
truth = tf.argmax(self.y_, 1)
correct_prediction = tf.equal(self.prediction, truth)
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# def test_eval(self):
# self.eval()
# x_generator = self.batch(self.x_test, n=100, shuffle=False)
# y_generator = self.batch(self.y_test, n=100, shuffle=False)
# test_acc = []
# counter = 0
# for data in x_generator:
# test_acc += [self.sess.run(self.accuracy, feed_dict={
# self.x: data, self.y_: next(y_generator), self.keep_prob: 1.0})]
# total_test_acc = sum(test_acc) / float(len(test_acc))
# print('test accuracy %g' % total_test_acc)
def weight_variable(self, shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self, shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# def hack_1dreshape(self, x):
# # expand its dimensionality to fit into conv2d
# tensor_expand = tf.expand_dims(x, 1)
# tensor_expand = tf.expand_dims(tensor_expand, -1)
# return tensor_expand
def hack_1dreshape(self, x):
# expand its dimensionality to fit into conv2d
# tensor_expand = tf.expand_dims(x, 1)
tensor_expand = tf.transpose(x, [0, 2, 1])
return tensor_expand
def conv2d(self, x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(self, x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def get_label_predictions(self):
x_batcher = self.batch(self.x_test, n=1000, shuffle=False,
usethesekeys=list(self.x_test.keys()))
# y_batcher = self.batch(self.y_test, n=1000, shuffle=False)
predictions = []
correct_predictions = np.zeros((0, 2))
for x, y, z in x_batcher:
temp_predictions = self.sess.run(
self.prediction,
feed_dict={self.x: x})
predictions += temp_predictions.tolist()
correct_predictions = np.vstack((correct_predictions, y))
return predictions, correct_predictions
# def plot_confusion_matrix(cm, classes,
# normalize=False,
# title='Confusion matrix',
# cmap=plt.cm.Blues):
# """
# This function prints and plots the confusion matrix.
# Normalization can be applied by setting `normalize=True`.
# """
# if normalize:
# cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print("Normalized confusion matrix")
# else:
# print('Confusion matrix, without normalization')
#
# print(cm)
#
# plt.imshow(cm, interpolation='nearest', cmap=cmap)
# plt.title(title)
# plt.colorbar()
# tick_marks = np.arange(len(classes))
# plt.xticks(tick_marks, classes, rotation=45)
# plt.yticks(tick_marks, classes)
#
# fmt = '.2f' if normalize else 'd'
# thresh = cm.max() / 2.
# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, format(cm[i, j], fmt),
# horizontalalignment="center",
# color="white" if cm[i, j] > thresh else "black")
#
# plt.tight_layout()
# plt.ylabel('True label')
# plt.xlabel('Predicted label')
def save_obj(obj, name):
with open('obj/'+ name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open('obj/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
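# Illustrative usage (hypothetical object/name): save_obj({'answer': 42}, 'results')
# writes obj/results.pkl with pickle, and load_obj('results') reads it back.
# Note that both helpers assume the obj/ directory already exists.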
def main():
cnn = cnnMNIST()
a = time.time()
print('Retrieving data')
cnn.get_data()
b = time.time()
print('Built the data in {} s'.format(b-a))
a = time.time()
cnn.train()
b = time.time()
print('Training time: {} s'.format(b-a))
# cnn.test_eval()
predictions, y = cnn.get_label_predictions()
predictions_decode = predictions
labels_decode = cnn.onenothot_labels(y)
#
np.save('grudetweightedsum_predictions.npy', predictions_decode)
np.save('grudetweightedsum_ground_truth.npy', labels_decode)
# Validation time
validation_data = cnn.validation_batcher()
answers = OrderedDict()
for sample in validation_data:
x = np.array(sample)
predictions = cnn.sess.run(
cnn.prediction,
feed_dict = {cnn.x: x})
time_index = np.arange(predictions.shape[0])
mask = predictions >= 0.5
runname = sample.name.split('/')[-1]
if np.sum(mask) != 0:
counts = np.sum(np.squeeze(x[:, -1, :]), axis=-1)
t = time_index[mask]
t = [int(i) for i in t]
index_guess = np.argmax(counts[t])
current_spectra = np.squeeze(x[t[index_guess], -1, :])
current_time = t[index_guess] + 15
answers[runname] = {'time': current_time,
'spectra': current_spectra}
else:
answers[runname] = {'time': 0,
'spectra': 0}
save_obj(answers, 'weightedsumgru_hits')
return
main()
|
|
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import random
import os
import tempfile
import shutil
import subprocess
import time
import argparse
import logging
import sys
import re
from datetime import datetime
try:
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError
from elasticsearch.exceptions import TransportError
except ImportError as e:
print('Can\'t import elasticsearch; please install it with `sudo pip install elasticsearch`')
raise e
'''This file executes a basic upgrade test by running a full cluster restart.
The upgrade test starts 2 or more nodes of an old elasticsearch version, indexes
a random number of documents into the running nodes and executes a full cluster restart.
After the nodes are recovered, a small set of basic checks is executed to ensure all
documents are still searchable and field data can be loaded etc.
NOTE: This script requires the elasticsearch python client `elasticsearch-py`; run the following command to install it:
`sudo pip install elasticsearch`
if you are running python3 you need to install the client using pip3. On OSX `pip3` will be included in the Python 3.4
release available on `https://www.python.org/download/`:
`sudo pip3 install elasticsearch`
See `https://github.com/elasticsearch/elasticsearch-py` for details
In order to run this test two different versions of elasticsearch are required. Both need to be unpacked into
the same directory:
```
$ cd /path/to/elasticsearch/clone
$ mkdir backwards && cd backwards
$ wget https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.3.1.tar.gz
$ wget https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.90.13.tar.gz
$ tar -zxvf elasticsearch-1.3.1.tar.gz && tar -zxvf elasticsearch-0.90.13.tar.gz
$ cd ..
$ python dev-tools/upgrade-tests.py --version.backwards 0.90.13 --version.current 1.3.1
```
'''
BLACK_LIST = {'1.2.0' : { 'reason': 'Contains a major bug where routing hashes are not consistent with previous version',
'issue': 'https://github.com/elasticsearch/elasticsearch/pull/6393'},
'1.3.0' : { 'reason': 'Lucene Related bug prevents upgrades from 0.90.7 and some earlier versions ',
'issue' : 'https://github.com/elasticsearch/elasticsearch/pull/7055'}}
# returns True roughly one time in eleven (randint(0, 10) == 0)
def rarely():
return random.randint(0, 10) == 0
# returns True whenever rarely() does not
def frequently():
return not rarely()
# asserts that the given hits are sorted in ascending order
def assert_sort(hits):
values = [hit['sort'] for hit in hits['hits']['hits']]
assert len(values) > 0, 'expected non empty result'
val = min(values)
for x in values:
assert x >= val, '%s >= %s' % (x, val)
val = x
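# e.g. (illustrative) a response shaped like
#   {'hits': {'hits': [{'sort': [1]}, {'sort': [3]}, {'sort': [7]}]}}
# passes, while any out-of-order 'sort' values trigger the assertion above.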
# asserts that the cluster health didn't timeout etc.
def assert_health(cluster_health, num_shards, num_replicas):
assert cluster_health['timed_out'] == False, 'cluster health timed out %s' % cluster_health
# Starts a new elasticsearch node from a released & untarred version.
# This node uses unicast discovery with the provided unicast host list and starts
# the nodes with the given data directory. This allows shutting down and starting up
# nodes on the same data dir simulating a full cluster restart.
def start_node(version, data_dir, node_dir, unicast_host_list, tcp_port, http_port):
es_run_path = os.path.join(node_dir, 'elasticsearch-%s' % (version), 'bin/elasticsearch')
if version.startswith('0.90.'):
foreground = '-f' # 0.90.x daemonizes by default, -f keeps it in the foreground
else:
foreground = ''
return subprocess.Popen([es_run_path,
'-Des.path.data=%s' % data_dir, '-Des.cluster.name=upgrade_test',
'-Des.discovery.zen.ping.unicast.hosts=%s' % unicast_host_list,
'-Des.discovery.zen.ping.multicast.enabled=false',
'-Des.script.disable_dynamic=true',
'-Des.transport.tcp.port=%s' % tcp_port,
'-Des.http.port=%s' % http_port,
foreground], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Indexes the given number of documents into the given index
# and randomly runs refresh, optimize and flush commands
def index_documents(es, index_name, type, num_docs):
logging.info('Indexing %s docs' % num_docs)
for id in range(0, num_docs):
es.index(index=index_name, doc_type=type, id=id, body={'string': str(random.randint(0, 100)),
'long_sort': random.randint(0, 100),
'double_sort' : float(random.randint(0, 100))})
if rarely():
es.indices.refresh(index=index_name)
if rarely():
es.indices.flush(index=index_name, force=frequently())
if rarely():
es.indices.optimize(index=index_name)
es.indices.refresh(index=index_name)
# Runs a basic number of assertions including:
# - document counts
# - match all search with sort on double / long
# - Realtime GET operations
# TODO(simonw): we should add stuff like:
# - dates including sorting
# - string sorting
# - docvalues if available
# - global ordinal if available
def run_basic_asserts(es, index_name, type, num_docs):
count = es.count(index=index_name)['count']
assert count == num_docs, 'Expected %r but got %r documents' % (num_docs, count)
for _ in range(0, num_docs):
random_doc_id = random.randint(0, num_docs-1)
doc = es.get(index=index_name, doc_type=type, id=random_doc_id)
assert doc, 'Expected document for id %s but got %s' % (random_doc_id, doc)
assert_sort(es.search(index=index_name,
body={
'sort': [
{'double_sort': {'order': 'asc'}}
]
}))
assert_sort(es.search(index=index_name,
body={
'sort': [
{'long_sort': {'order': 'asc'}}
]
}))
# picks a random version or an entire random version tuple from the directory
# to run the backwards tests against.
def pick_random_upgrade_version(directory, lower_version=None, upper_version=None):
if lower_version and upper_version:
return lower_version, upper_version
assert os.path.isdir(directory), 'No such directory %s' % directory
versions = []
for version in map(lambda x : x[len('elasticsearch-'):], filter(lambda x : re.match(r'^elasticsearch-\d+[.]\d+[.]\d+$', x), os.listdir(directory))):
if not version in BLACK_LIST:
versions.append(build_tuple(version))
versions.sort()
if lower_version: # lower version is set - picking a higher one
versions = list(filter(lambda x : x > build_tuple(lower_version), versions)) # list() for python3 compatibility
assert len(versions) >= 1, 'Expected at least 1 version higher than %s in %s' % (lower_version, directory)
random.shuffle(versions)
return lower_version, build_version(versions[0])
if upper_version:
versions = list(filter(lambda x : x < build_tuple(upper_version), versions))
assert len(versions) >= 1, 'Expected at least 1 version lower than %s in %s' % (upper_version, directory)
random.shuffle(versions)
return build_version(versions[0]), upper_version
assert len(versions) >= 2, 'Expected at least 2 different versions in %s but found %s' % (directory, len(versions))
random.shuffle(versions)
versions = versions[0:2]
versions.sort()
return build_version(versions[0]), build_version(versions[1])
def build_version(version_tuple):
return '.'.join([str(x) for x in version_tuple])
def build_tuple(version_string):
return [int(x) for x in version_string.split('.')]
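# e.g. (illustrative) build_tuple('1.3.1') == [1, 3, 1] and build_version([1, 3, 1]) == '1.3.1',
# so version strings can be compared numerically and converted back for logging.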
# returns a new elasticsearch client and ensures that all nodes have joined the cluster
# this method waits at most 30 seconds for all nodes to join
def new_es_instance(num_nodes, http_port, timeout = 30):
logging.info('Waiting for %s nodes to join the cluster' % num_nodes)
for _ in range(0, timeout):
# TODO(simonw): ask Honza if there is a better way to do this?
try:
es = Elasticsearch([
{'host': '127.0.0.1', 'port': http_port + x}
for x in range(0, num_nodes)])
es.cluster.health(wait_for_nodes=num_nodes)
es.count() # can we actually search or do we get a 503? -- anyway retry
return es
except (ConnectionError, TransportError):
pass
time.sleep(1)
assert False, 'Timed out waiting for %s nodes for %s seconds' % (num_nodes, timeout)
def assert_versions(bwc_version, current_version, node_dir):
assert [int(x) for x in bwc_version.split('.')] < [int(x) for x in current_version.split('.')],\
'[%s] must be < than [%s]' % (bwc_version, current_version)
for version in [bwc_version, current_version]:
assert not version in BLACK_LIST, 'Version %s is blacklisted - %s, see %s' \
% (version, BLACK_LIST[version]['reason'],
BLACK_LIST[version]['issue'])
dir = os.path.join(node_dir, 'elasticsearch-%s' % version)
assert os.path.isdir(dir), 'Expected elasticsearch-%s install directory does not exist: %s' % (version, dir)
def full_cluster_restart(node_dir, current_version, bwc_version, tcp_port, http_port):
assert_versions(bwc_version, current_version, node_dir)
num_nodes = random.randint(2, 3)
nodes = []
data_dir = tempfile.mkdtemp()
logging.info('Running upgrade test from [%s] to [%s] seed: [%s] es.path.data: [%s] es.http.port [%s] es.tcp.port [%s]'
% (bwc_version, current_version, seed, data_dir, http_port, tcp_port))
try:
logging.info('Starting %s BWC nodes of version %s' % (num_nodes, bwc_version))
unicast_addresses = ','.join(['127.0.0.1:%s' % (tcp_port+x) for x in range(0, num_nodes)])
for id in range(0, num_nodes):
nodes.append(start_node(bwc_version, data_dir, node_dir, unicast_addresses, tcp_port+id, http_port+id))
es = new_es_instance(num_nodes, http_port)
es.indices.delete(index='test_index', ignore=404)
num_shards = random.randint(1, 10)
num_replicas = random.randint(0, 1)
logging.info('Create index with [%s] shards and [%s] replicas' % (num_shards, num_replicas))
es.indices.create(index='test_index', body={
# TODO(simonw): can we do more here in terms of randomization - seems hard due to all the different versions
'settings': {
'number_of_shards': num_shards,
'number_of_replicas': num_replicas
}
})
logging.info('Nodes joined, waiting for green status')
health = es.cluster.health(wait_for_status='green', wait_for_relocating_shards=0)
assert_health(health, num_shards, num_replicas)
num_docs = random.randint(10, 100)
index_documents(es, 'test_index', 'test_type', num_docs)
logging.info('Run basic asserts before full cluster restart')
run_basic_asserts(es, 'test_index', 'test_type', num_docs)
logging.info('kill bwc nodes -- prepare upgrade')
for node in nodes:
node.terminate()
# now upgrade the nodes and rerun the checks
tcp_port = tcp_port + len(nodes) # bump up port to make sure we can claim them
http_port = http_port + len(nodes)
logging.info('Full Cluster restart starts upgrading to version [elasticsearch-%s] es.http.port [%s] es.tcp.port [%s]'
% (current_version, http_port, tcp_port))
nodes = []
unicast_addresses = ','.join(['127.0.0.1:%s' % (tcp_port+x) for x in range(0, num_nodes)])
for id in range(0, num_nodes+1): # one more to trigger relocation
nodes.append(start_node(current_version, data_dir, node_dir, unicast_addresses, tcp_port+id, http_port+id))
es = new_es_instance(num_nodes+1, http_port)
logging.info('Nodes joined, waiting for green status')
health = es.cluster.health(wait_for_status='green', wait_for_relocating_shards=0)
assert_health(health, num_shards, num_replicas)
run_basic_asserts(es, 'test_index', 'test_type', num_docs)
# by running the indexing again we try to catch possible mapping problems after the upgrade
index_documents(es, 'test_index', 'test_type', num_docs)
run_basic_asserts(es, 'test_index', 'test_type', num_docs)
logging.info("[SUCCESS] - all test passed upgrading from version [%s] to version [%s]" % (bwc_version, current_version))
finally:
for node in nodes:
node.terminate()
time.sleep(1) # wait a second until removing the data dirs to give the nodes a chance to shutdown
shutil.rmtree(data_dir) # remove the temp data dir
if __name__ == '__main__':
logging.basicConfig(format='[%(levelname)s] [%(asctime)s] %(message)s', level=logging.INFO,
datefmt='%Y-%m-%d %I:%M:%S %p')
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.WARN)
parser = argparse.ArgumentParser(description='Tests Full Cluster Restarts across major versions')
parser.add_argument('--version.backwards', '-b', dest='backwards_version', metavar='V',
help='The elasticsearch version to upgrade from')
parser.add_argument('--version.current', '-c', dest='current_version', metavar='V',
help='The elasticsearch version to upgrade to')
parser.add_argument('--seed', '-s', dest='seed', metavar='N', type=int,
help='The random seed to use')
parser.add_argument('--backwards.dir', '-d', dest='bwc_directory', default='backwards', metavar='dir',
help='The directory to the backwards compatibility sources')
parser.add_argument('--tcp.port', '-p', dest='tcp_port', default=9300, metavar='port', type=int,
help='The port to use as the minimum port for TCP communication')
parser.add_argument('--http.port', '-t', dest='http_port', default=9200, metavar='port', type=int,
help='The port to use as the minimum port for HTTP communication')
parser.set_defaults(bwc_directory='backwards')
parser.set_defaults(seed=int(time.time()))
args = parser.parse_args()
node_dir = args.bwc_directory
current_version = args.current_version
bwc_version = args.backwards_version
seed = args.seed
random.seed(seed)
bwc_version, current_version = pick_random_upgrade_version(node_dir, bwc_version, current_version)
tcp_port = args.tcp_port
http_port = args.http_port
try:
full_cluster_restart(node_dir, current_version, bwc_version, tcp_port, http_port)
except:
logging.warn('REPRODUCE WITH: \n\t`python %s --version.backwards %s --version.current %s --seed %s --tcp.port %s --http.port %s`'
% (sys.argv[0], bwc_version, current_version, seed, tcp_port, http_port))
raise
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "cameo/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
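# Illustrative call (mirrors how it is used below); the first runnable command wins:
#   out, rc = run_command(["git"], ["rev-parse", "HEAD"], cwd=root, hide_stderr=True)
# returns (stdout_text, 0) on success, (None, returncode) on a failed run,
# and (None, None) when no command could be executed at all.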
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
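# Illustrative result (not from the versioneer docs): with tag_prefix "" and
# `git describe` printing "1.2-3-gabc1234-dirty", pieces comes out roughly as
#   {"closest-tag": "1.2", "distance": 3, "short": "abc1234", "dirty": True,
#    "long": "<full sha>", "error": None, "date": "<ISO-8601-ish date>"}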
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
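# Worked examples for render_pep440 (illustrative):
#   {"closest-tag": "1.2", "distance": 3, "short": "abc1234", "dirty": True}
#       -> "1.2+3.gabc1234.dirty"
#   {"closest-tag": None, "distance": 7, "short": "abc1234", "dirty": False}
#       -> "0+untagged.7.gabc1234"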
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
|
"""Numpy pickle compatibility functions."""
import pickle
import os
import zlib
import inspect
from io import BytesIO
from .numpy_pickle_utils import _ZFILE_PREFIX
from .numpy_pickle_utils import Unpickler
from .numpy_pickle_utils import _ensure_native_byte_order
def hex_str(an_int):
"""Convert an int to an hexadecimal string."""
return '{:#x}'.format(an_int)
def asbytes(s):
if isinstance(s, bytes):
return s
return s.encode('latin1')
_MAX_LEN = len(hex_str(2 ** 64))
_CHUNK_SIZE = 64 * 1024
def read_zfile(file_handle):
"""Read the z-file and return the content as a string.
Z-files are raw data compressed with zlib used internally by joblib
for persistence. Backward compatibility is not guaranteed. Do not
use for external purposes.
"""
file_handle.seek(0)
header_length = len(_ZFILE_PREFIX) + _MAX_LEN
length = file_handle.read(header_length)
length = length[len(_ZFILE_PREFIX):]
length = int(length, 16)
# With python2 and joblib version <= 0.8.4 compressed pickle header is one
# character wider so we need to ignore an additional space if present.
# Note: the first byte of the zlib data is guaranteed not to be a
# space according to
# https://tools.ietf.org/html/rfc6713#section-2.1
next_byte = file_handle.read(1)
if next_byte != b' ':
# The zlib compressed data has started and we need to go back
# one byte
file_handle.seek(header_length)
# We use the known length of the data to tell Zlib the size of the
# buffer to allocate.
data = zlib.decompress(file_handle.read(), 15, length)
assert len(data) == length, (
"Incorrect data length while decompressing %s. "
"The file could be corrupted." % file_handle)
return data
def write_zfile(file_handle, data, compress=1):
"""Write the data in the given file as a Z-file.
Z-files are raw data compressed with zlib used internally by joblib
for persistence. Backward compatibility is not guaranteed. Do not
use for external purposes.
"""
file_handle.write(_ZFILE_PREFIX)
length = hex_str(len(data))
# Store the length of the data
file_handle.write(asbytes(length.ljust(_MAX_LEN)))
file_handle.write(zlib.compress(asbytes(data), compress))
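# Illustrative on-disk layout of a z-file, as written above:
#   _ZFILE_PREFIX | hex(len(data)) left-justified to _MAX_LEN bytes | zlib-compressed data
# read_zfile() relies on exactly this layout to recover the payload length.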
###############################################################################
# Utility objects for persistence.
class NDArrayWrapper(object):
"""An object to be persisted instead of numpy arrays.
The only thing this object does, is to carry the filename in which
the array has been persisted, and the array subclass.
"""
def __init__(self, filename, subclass, allow_mmap=True):
"""Constructor. Store the useful information for later."""
self.filename = filename
self.subclass = subclass
self.allow_mmap = allow_mmap
def read(self, unpickler):
"""Reconstruct the array."""
filename = os.path.join(unpickler._dirname, self.filename)
# Load the array from the disk
# use getattr instead of self.allow_mmap to ensure backward compat
# with NDArrayWrapper instances pickled with joblib < 0.9.0
allow_mmap = getattr(self, 'allow_mmap', True)
kwargs = {}
if allow_mmap:
kwargs['mmap_mode'] = unpickler.mmap_mode
if "allow_pickle" in inspect.signature(unpickler.np.load).parameters:
# Required in numpy 1.16.3 and later to acknowledge the security
# risk.
kwargs["allow_pickle"] = True
array = unpickler.np.load(filename, **kwargs)
# Detect byte order mismatch and swap as needed.
array = _ensure_native_byte_order(array)
# Reconstruct subclasses. This does not work with old
# versions of numpy
if (hasattr(array, '__array_prepare__') and
self.subclass not in (unpickler.np.ndarray,
unpickler.np.memmap)):
# We need to reconstruct another subclass
new_array = unpickler.np.core.multiarray._reconstruct(
self.subclass, (0,), 'b')
return new_array.__array_prepare__(array)
else:
return array
class ZNDArrayWrapper(NDArrayWrapper):
"""An object to be persisted instead of numpy arrays.
This object stores the Zfile filename in which
the data array has been persisted, and the meta information to
retrieve it.
The reason we store the raw buffer data of the array and
the meta information, rather than an array representation routine
(tobytes), is that it lets us make full use of the strided
model and avoid memory copies (a and a.T store equally fast). In
addition, saving the heavy data separately avoids
creating large temporary buffers when unpickling data with
large arrays.
"""
def __init__(self, filename, init_args, state):
"""Constructor. Store the useful information for later."""
self.filename = filename
self.state = state
self.init_args = init_args
def read(self, unpickler):
"""Reconstruct the array from the meta-information and the z-file."""
# Here we are simply reproducing the unpickling mechanism for numpy
# arrays
filename = os.path.join(unpickler._dirname, self.filename)
array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
with open(filename, 'rb') as f:
data = read_zfile(f)
state = self.state + (data,)
array.__setstate__(state)
return array
class ZipNumpyUnpickler(Unpickler):
"""A subclass of the Unpickler to unpickle our numpy pickles."""
dispatch = Unpickler.dispatch.copy()
def __init__(self, filename, file_handle, mmap_mode=None):
"""Constructor."""
self._filename = os.path.basename(filename)
self._dirname = os.path.dirname(filename)
self.mmap_mode = mmap_mode
self.file_handle = self._open_pickle(file_handle)
Unpickler.__init__(self, self.file_handle)
try:
import numpy as np
except ImportError:
np = None
self.np = np
def _open_pickle(self, file_handle):
return BytesIO(read_zfile(file_handle))
def load_build(self):
"""Set the state of a newly created object.
We capture it to replace our place-holder objects,
NDArrayWrapper, with the array we are interested in. We
replace them directly in the unpickler's stack.
"""
Unpickler.load_build(self)
if isinstance(self.stack[-1], NDArrayWrapper):
if self.np is None:
raise ImportError("Trying to unpickle an ndarray, "
"but numpy didn't import correctly")
nd_array_wrapper = self.stack.pop()
array = nd_array_wrapper.read(self)
self.stack.append(array)
dispatch[pickle.BUILD[0]] = load_build
def load_compatibility(filename):
"""Reconstruct a Python object from a file persisted with joblib.dump.
This function ensures the compatibility with joblib old persistence format
(<= 0.9.3).
Parameters
-----------
filename: string
The name of the file from which to load the object
Returns
-------
result: any Python object
The object stored in the file.
See Also
--------
joblib.dump : function to save an object
Notes
-----
This function can load numpy array files saved separately during the
dump.
"""
with open(filename, 'rb') as file_handle:
# We are careful to open the file handle early and keep it open to
# avoid race-conditions on renames. That said, if data is stored in
# companion files, moving the directory will create a race when
# joblib tries to access the companion files.
unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)
try:
obj = unpickler.load()
except UnicodeDecodeError as exc:
# More user-friendly error message
new_exc = ValueError(
'You may be trying to read with '
'python 3 a joblib pickle generated with python 2. '
'This feature is not supported by joblib.')
new_exc.__cause__ = exc
raise new_exc
finally:
if hasattr(unpickler, 'file_handle'):
unpickler.file_handle.close()
return obj
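# Illustrative usage (hypothetical filename): for a pickle produced with
# joblib <= 0.9.3, obj = load_compatibility('old_estimator.pkl') returns the
# stored object, resolving any companion .npy / z-file arrays along the way.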
|
|
from sqlalchemy.testing import assert_raises_message, eq_, \
AssertsCompiledSQL, is_
from sqlalchemy.testing import fixtures
from sqlalchemy.orm import relationships, foreign, remote
from sqlalchemy import MetaData, Table, Column, ForeignKey, Integer, \
select, ForeignKeyConstraint, exc, func, and_, String
from sqlalchemy.orm.interfaces import ONETOMANY, MANYTOONE, MANYTOMANY
class _JoinFixtures(object):
@classmethod
def setup_class(cls):
m = MetaData()
cls.left = Table('lft', m,
Column('id', Integer, primary_key=True),
Column('x', Integer),
Column('y', Integer),
)
cls.right = Table('rgt', m,
Column('id', Integer, primary_key=True),
Column('lid', Integer, ForeignKey('lft.id')),
Column('x', Integer),
Column('y', Integer),
)
cls.right_multi_fk = Table('rgt_multi_fk', m,
Column('id', Integer, primary_key=True),
Column('lid1', Integer, ForeignKey('lft.id')),
Column('lid2', Integer, ForeignKey('lft.id')),
)
cls.selfref = Table('selfref', m,
Column('id', Integer, primary_key=True),
Column('sid', Integer, ForeignKey('selfref.id'))
)
cls.composite_selfref = Table('composite_selfref', m,
Column('id', Integer, primary_key=True),
Column('group_id', Integer, primary_key=True),
Column('parent_id', Integer),
ForeignKeyConstraint(
['parent_id', 'group_id'],
['composite_selfref.id', 'composite_selfref.group_id']
)
)
cls.m2mleft = Table('m2mlft', m,
Column('id', Integer, primary_key=True),
)
cls.m2mright = Table('m2mrgt', m,
Column('id', Integer, primary_key=True),
)
cls.m2msecondary = Table('m2msecondary', m,
Column('lid', Integer, ForeignKey('m2mlft.id'), primary_key=True),
Column('rid', Integer, ForeignKey('m2mrgt.id'), primary_key=True),
)
cls.m2msecondary_no_fks = Table('m2msecondary_no_fks', m,
Column('lid', Integer, primary_key=True),
Column('rid', Integer, primary_key=True),
)
cls.m2msecondary_ambig_fks = Table('m2msecondary_ambig_fks', m,
Column('lid1', Integer, ForeignKey('m2mlft.id'), primary_key=True),
Column('rid1', Integer, ForeignKey('m2mrgt.id'), primary_key=True),
Column('lid2', Integer, ForeignKey('m2mlft.id'), primary_key=True),
Column('rid2', Integer, ForeignKey('m2mrgt.id'), primary_key=True),
)
cls.base_w_sub_rel = Table('base_w_sub_rel', m,
Column('id', Integer, primary_key=True),
Column('sub_id', Integer, ForeignKey('rel_sub.id'))
)
cls.rel_sub = Table('rel_sub', m,
Column('id', Integer, ForeignKey('base_w_sub_rel.id'),
primary_key=True)
)
cls.base = Table('base', m,
Column('id', Integer, primary_key=True),
)
cls.sub = Table('sub', m,
Column('id', Integer, ForeignKey('base.id'),
primary_key=True),
)
cls.sub_w_base_rel = Table('sub_w_base_rel', m,
Column('id', Integer, ForeignKey('base.id'),
primary_key=True),
Column('base_id', Integer, ForeignKey('base.id'))
)
cls.sub_w_sub_rel = Table('sub_w_sub_rel', m,
Column('id', Integer, ForeignKey('base.id'),
primary_key=True),
Column('sub_id', Integer, ForeignKey('sub.id'))
)
cls.right_w_base_rel = Table('right_w_base_rel', m,
Column('id', Integer, primary_key=True),
Column('base_id', Integer, ForeignKey('base.id'))
)
cls.three_tab_a = Table('three_tab_a', m,
Column('id', Integer, primary_key=True),
)
cls.three_tab_b = Table('three_tab_b', m,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('three_tab_a.id'))
)
cls.three_tab_c = Table('three_tab_c', m,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('three_tab_a.id')),
Column('bid', Integer, ForeignKey('three_tab_b.id'))
)
cls.composite_target = Table('composite_target', m,
Column('uid', Integer, primary_key=True),
Column('oid', Integer, primary_key=True),
)
cls.composite_multi_ref = Table('composite_multi_ref', m,
Column('uid1', Integer),
Column('uid2', Integer),
Column('oid', Integer),
ForeignKeyConstraint(("uid1", "oid"),
("composite_target.uid", "composite_target.oid")),
ForeignKeyConstraint(("uid2", "oid"),
("composite_target.uid", "composite_target.oid")),
)
cls.purely_single_col = Table('purely_single_col', m,
Column('path', String)
)
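# Quick orientation (descriptive note, not part of the original tests): the tables above
# cover plain o2m/m2o (lft/rgt), multi-FK (rgt_multi_fk), self-referential and composite
# self-referential joins, m2m via m2msecondary (plus no-FK and ambiguous-FK variants),
# joined-inheritance style base/sub tables, the three_tab_* overlap case, composite FK
# targets (composite_target/composite_multi_ref), and a purely string-based "path" table
# used for LIKE-style join conditions.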
def _join_fixture_overlapping_three_tables(self, **kw):
def _can_sync(*cols):
for c in cols:
if self.three_tab_c.c.contains_column(c):
return False
else:
return True
return relationships.JoinCondition(
self.three_tab_a,
self.three_tab_b,
self.three_tab_a,
self.three_tab_b,
support_sync=False,
can_be_synced_fn=_can_sync,
primaryjoin=and_(
self.three_tab_a.c.id == self.three_tab_b.c.aid,
self.three_tab_c.c.bid == self.three_tab_b.c.id,
self.three_tab_c.c.aid == self.three_tab_a.c.id
)
)
def _join_fixture_m2m(self, **kw):
return relationships.JoinCondition(
self.m2mleft,
self.m2mright,
self.m2mleft,
self.m2mright,
secondary=self.m2msecondary,
**kw
)
def _join_fixture_m2m_backref(self, **kw):
"""return JoinCondition in the same way RelationshipProperty
calls it for a backref on an m2m.
"""
j1 = self._join_fixture_m2m()
return j1, relationships.JoinCondition(
self.m2mright,
self.m2mleft,
self.m2mright,
self.m2mleft,
secondary=self.m2msecondary,
primaryjoin=j1.secondaryjoin_minus_local,
secondaryjoin=j1.primaryjoin_minus_local
)
def _join_fixture_o2m(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
**kw
)
def _join_fixture_m2o(self, **kw):
return relationships.JoinCondition(
self.right,
self.left,
self.right,
self.left,
**kw
)
def _join_fixture_o2m_selfref(self, **kw):
return relationships.JoinCondition(
self.selfref,
self.selfref,
self.selfref,
self.selfref,
**kw
)
def _join_fixture_m2o_selfref(self, **kw):
return relationships.JoinCondition(
self.selfref,
self.selfref,
self.selfref,
self.selfref,
remote_side=set([self.selfref.c.id]),
**kw
)
def _join_fixture_o2m_composite_selfref(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
**kw
)
def _join_fixture_m2o_composite_selfref(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
remote_side=set([self.composite_selfref.c.id,
self.composite_selfref.c.group_id]),
**kw
)
def _join_fixture_o2m_composite_selfref_func(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
primaryjoin=and_(
self.composite_selfref.c.group_id ==
func.foo(self.composite_selfref.c.group_id),
self.composite_selfref.c.parent_id ==
self.composite_selfref.c.id
),
**kw
)
def _join_fixture_o2m_composite_selfref_func_remote_side(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
primaryjoin=and_(
self.composite_selfref.c.group_id ==
func.foo(self.composite_selfref.c.group_id),
self.composite_selfref.c.parent_id ==
self.composite_selfref.c.id
),
remote_side=set([self.composite_selfref.c.parent_id]),
**kw
)
def _join_fixture_o2m_composite_selfref_func_annotated(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
primaryjoin=and_(
remote(self.composite_selfref.c.group_id) ==
func.foo(self.composite_selfref.c.group_id),
remote(self.composite_selfref.c.parent_id) ==
self.composite_selfref.c.id
),
**kw
)
def _join_fixture_compound_expression_1(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=(self.left.c.x + self.left.c.y) == \
relationships.remote(relationships.foreign(
self.right.c.x * self.right.c.y
)),
**kw
)
def _join_fixture_compound_expression_2(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=(self.left.c.x + self.left.c.y) == \
relationships.foreign(
self.right.c.x * self.right.c.y
),
**kw
)
def _join_fixture_compound_expression_1_non_annotated(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=(self.left.c.x + self.left.c.y) == \
(
self.right.c.x * self.right.c.y
),
**kw
)
def _join_fixture_base_to_joined_sub(self, **kw):
# see test/orm/inheritance/test_abc_inheritance:TestaTobM2O
# and others there
right = self.base_w_sub_rel.join(self.rel_sub,
self.base_w_sub_rel.c.id == self.rel_sub.c.id
)
return relationships.JoinCondition(
self.base_w_sub_rel,
right,
self.base_w_sub_rel,
self.rel_sub,
primaryjoin=self.base_w_sub_rel.c.sub_id == \
self.rel_sub.c.id,
**kw
)
def _join_fixture_o2m_joined_sub_to_base(self, **kw):
left = self.base.join(self.sub_w_base_rel,
self.base.c.id == self.sub_w_base_rel.c.id)
return relationships.JoinCondition(
left,
self.base,
self.sub_w_base_rel,
self.base,
primaryjoin=self.sub_w_base_rel.c.base_id == self.base.c.id
)
def _join_fixture_m2o_joined_sub_to_sub_on_base(self, **kw):
# this is a late add - a variant of the test case
# in #2491 where we join on the base cols instead. only
# m2o has a problem at the time of this test.
left = self.base.join(self.sub, self.base.c.id == self.sub.c.id)
right = self.base.join(self.sub_w_base_rel,
self.base.c.id == self.sub_w_base_rel.c.id)
return relationships.JoinCondition(
left,
right,
self.sub,
self.sub_w_base_rel,
primaryjoin=self.sub_w_base_rel.c.base_id == self.base.c.id,
)
def _join_fixture_o2m_joined_sub_to_sub(self, **kw):
left = self.base.join(self.sub, self.base.c.id == self.sub.c.id)
right = self.base.join(self.sub_w_sub_rel,
self.base.c.id == self.sub_w_sub_rel.c.id)
return relationships.JoinCondition(
left,
right,
self.sub,
self.sub_w_sub_rel,
primaryjoin=self.sub.c.id == self.sub_w_sub_rel.c.sub_id
)
def _join_fixture_m2o_sub_to_joined_sub(self, **kw):
# see test.orm.test_mapper:MapperTest.test_add_column_prop_deannotate,
right = self.base.join(self.right_w_base_rel,
self.base.c.id == self.right_w_base_rel.c.id)
return relationships.JoinCondition(
self.right_w_base_rel,
right,
self.right_w_base_rel,
self.right_w_base_rel,
)
def _join_fixture_m2o_sub_to_joined_sub_func(self, **kw):
# see test.orm.test_mapper:MapperTest.test_add_column_prop_deannotate,
right = self.base.join(self.right_w_base_rel,
self.base.c.id == self.right_w_base_rel.c.id)
return relationships.JoinCondition(
self.right_w_base_rel,
right,
self.right_w_base_rel,
self.right_w_base_rel,
primaryjoin=self.right_w_base_rel.c.base_id == \
func.foo(self.base.c.id)
)
def _join_fixture_o2o_joined_sub_to_base(self, **kw):
left = self.base.join(self.sub,
self.base.c.id == self.sub.c.id)
# see test_relationships->AmbiguousJoinInterpretedAsSelfRef
return relationships.JoinCondition(
left,
self.sub,
left,
self.sub,
)
def _join_fixture_o2m_to_annotated_func(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=self.left.c.id ==
foreign(func.foo(self.right.c.lid)),
**kw
)
def _join_fixture_o2m_to_oldstyle_func(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=self.left.c.id ==
func.foo(self.right.c.lid),
consider_as_foreign_keys=[self.right.c.lid],
**kw
)
def _join_fixture_overlapping_composite_fks(self, **kw):
return relationships.JoinCondition(
self.composite_target,
self.composite_multi_ref,
self.composite_target,
self.composite_multi_ref,
consider_as_foreign_keys=[self.composite_multi_ref.c.uid2,
self.composite_multi_ref.c.oid],
**kw
)
def _join_fixture_o2m_o_side_none(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=and_(self.left.c.id == self.right.c.lid,
self.left.c.x == 5),
**kw
)
def _join_fixture_purely_single_o2m(self, **kw):
return relationships.JoinCondition(
self.purely_single_col,
self.purely_single_col,
self.purely_single_col,
self.purely_single_col,
support_sync=False,
primaryjoin=
self.purely_single_col.c.path.like(
remote(
foreign(
self.purely_single_col.c.path.concat('%')
)
)
)
)
def _join_fixture_purely_single_m2o(self, **kw):
return relationships.JoinCondition(
self.purely_single_col,
self.purely_single_col,
self.purely_single_col,
self.purely_single_col,
support_sync=False,
primaryjoin=
remote(self.purely_single_col.c.path).like(
foreign(self.purely_single_col.c.path.concat('%'))
)
)
def _join_fixture_remote_local_multiple_ref(self, **kw):
fn = lambda a, b: ((a == b) | (b == a))
return relationships.JoinCondition(
self.selfref, self.selfref,
self.selfref, self.selfref,
support_sync=False,
primaryjoin=fn(
# we're putting a do-nothing annotation on
# "a" so that the left/right is preserved;
# annotation vs. non seems to affect __eq__ behavior
self.selfref.c.sid._annotate({"foo": "bar"}),
foreign(remote(self.selfref.c.sid)))
)
def _assert_non_simple_warning(self, fn):
assert_raises_message(
exc.SAWarning,
"Non-simple column elements in "
"primary join condition for property "
r"None - consider using remote\(\) "
"annotations to mark the remote side.",
fn
)
def _assert_raises_no_relevant_fks(self, fn, expr, relname,
primary, *arg, **kw):
assert_raises_message(
exc.ArgumentError,
r"Could not locate any relevant foreign key columns "
r"for %s join condition '%s' on relationship %s. "
r"Ensure that referencing columns are associated with "
r"a ForeignKey or ForeignKeyConstraint, or are annotated "
r"in the join condition with the foreign\(\) annotation."
% (
primary, expr, relname
),
fn, *arg, **kw
)
def _assert_raises_no_equality(self, fn, expr, relname,
primary, *arg, **kw):
assert_raises_message(
exc.ArgumentError,
"Could not locate any simple equality expressions "
"involving locally mapped foreign key columns for %s join "
"condition '%s' on relationship %s. "
"Ensure that referencing columns are associated with a "
"ForeignKey or ForeignKeyConstraint, or are annotated in "
r"the join condition with the foreign\(\) annotation. "
"To allow comparison operators other than '==', "
"the relationship can be marked as viewonly=True." % (
primary, expr, relname
),
fn, *arg, **kw
)
def _assert_raises_ambig_join(self, fn, relname, secondary_arg,
*arg, **kw):
if secondary_arg is not None:
assert_raises_message(
exc.AmbiguousForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are multiple foreign key paths linking the "
"tables via secondary table '%s'. "
"Specify the 'foreign_keys' argument, providing a list "
"of those columns which should be counted as "
"containing a foreign key reference from the "
"secondary table to each of the parent and child tables."
% (relname, secondary_arg),
fn, *arg, **kw)
else:
assert_raises_message(
exc.AmbiguousForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables. "
% (relname,),
fn, *arg, **kw)
def _assert_raises_no_join(self, fn, relname, secondary_arg,
*arg, **kw):
if secondary_arg is not None:
assert_raises_message(
exc.NoForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables "
"via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey "
"or ForeignKeyConstraint, or specify 'primaryjoin' and "
"'secondaryjoin' expressions"
% (relname, secondary_arg),
fn, *arg, **kw)
else:
assert_raises_message(
exc.NoForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey "
"or ForeignKeyConstraint, or specify a 'primaryjoin' "
"expression."
% (relname,),
fn, *arg, **kw)
class ColumnCollectionsTest(_JoinFixtures, fixtures.TestBase,
AssertsCompiledSQL):
def test_determine_local_remote_pairs_o2o_joined_sub_to_base(self):
joincond = self._join_fixture_o2o_joined_sub_to_base()
eq_(
joincond.local_remote_pairs,
[(self.base.c.id, self.sub.c.id)]
)
def test_determine_synchronize_pairs_o2m_to_annotated_func(self):
joincond = self._join_fixture_o2m_to_annotated_func()
eq_(
joincond.synchronize_pairs,
[(self.left.c.id, self.right.c.lid)]
)
def test_determine_synchronize_pairs_o2m_to_oldstyle_func(self):
joincond = self._join_fixture_o2m_to_oldstyle_func()
eq_(
joincond.synchronize_pairs,
[(self.left.c.id, self.right.c.lid)]
)
    def test_determine_local_remote_m2o_joined_sub_to_sub_on_base(self):
joincond = self._join_fixture_m2o_joined_sub_to_sub_on_base()
eq_(
joincond.local_remote_pairs,
[(self.base.c.id, self.sub_w_base_rel.c.base_id)]
)
def test_determine_local_remote_base_to_joined_sub(self):
joincond = self._join_fixture_base_to_joined_sub()
eq_(
joincond.local_remote_pairs,
[
(self.base_w_sub_rel.c.sub_id, self.rel_sub.c.id)
]
)
def test_determine_local_remote_o2m_joined_sub_to_base(self):
joincond = self._join_fixture_o2m_joined_sub_to_base()
eq_(
joincond.local_remote_pairs,
[
(self.sub_w_base_rel.c.base_id, self.base.c.id)
]
)
def test_determine_local_remote_m2o_sub_to_joined_sub(self):
joincond = self._join_fixture_m2o_sub_to_joined_sub()
eq_(
joincond.local_remote_pairs,
[
(self.right_w_base_rel.c.base_id, self.base.c.id)
]
)
def test_determine_remote_columns_o2m_joined_sub_to_sub(self):
joincond = self._join_fixture_o2m_joined_sub_to_sub()
eq_(
joincond.local_remote_pairs,
[
(self.sub.c.id, self.sub_w_sub_rel.c.sub_id)
]
)
def test_determine_remote_columns_compound_1(self):
joincond = self._join_fixture_compound_expression_1(
support_sync=False)
eq_(
joincond.remote_columns,
set([self.right.c.x, self.right.c.y])
)
def test_determine_local_remote_compound_1(self):
joincond = self._join_fixture_compound_expression_1(
support_sync=False)
eq_(
joincond.local_remote_pairs,
[
(self.left.c.x, self.right.c.x),
(self.left.c.x, self.right.c.y),
(self.left.c.y, self.right.c.x),
(self.left.c.y, self.right.c.y)
]
)
def test_determine_local_remote_compound_2(self):
joincond = self._join_fixture_compound_expression_2(
support_sync=False)
eq_(
joincond.local_remote_pairs,
[
(self.left.c.x, self.right.c.x),
(self.left.c.x, self.right.c.y),
(self.left.c.y, self.right.c.x),
(self.left.c.y, self.right.c.y)
]
)
def test_determine_local_remote_compound_3(self):
joincond = self._join_fixture_compound_expression_1()
eq_(
joincond.local_remote_pairs,
[
(self.left.c.x, self.right.c.x),
(self.left.c.x, self.right.c.y),
(self.left.c.y, self.right.c.x),
(self.left.c.y, self.right.c.y),
]
)
def test_err_local_remote_compound_1(self):
self._assert_raises_no_relevant_fks(
self._join_fixture_compound_expression_1_non_annotated,
r'lft.x \+ lft.y = rgt.x \* rgt.y',
"None", "primary"
)
def test_determine_remote_columns_compound_2(self):
joincond = self._join_fixture_compound_expression_2(
support_sync=False)
eq_(
joincond.remote_columns,
set([self.right.c.x, self.right.c.y])
)
def test_determine_remote_columns_o2m(self):
joincond = self._join_fixture_o2m()
eq_(
joincond.remote_columns,
set([self.right.c.lid])
)
def test_determine_remote_columns_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
eq_(
joincond.remote_columns,
set([self.selfref.c.sid])
)
def test_determine_local_remote_pairs_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
eq_(
joincond.local_remote_pairs,
[
(self.composite_selfref.c.group_id,
self.composite_selfref.c.group_id),
(self.composite_selfref.c.id,
self.composite_selfref.c.parent_id),
]
)
def test_determine_local_remote_pairs_o2m_composite_selfref_func_warning(self):
self._assert_non_simple_warning(
self._join_fixture_o2m_composite_selfref_func
)
def test_determine_local_remote_pairs_o2m_composite_selfref_func_rs(self):
# no warning
self._join_fixture_o2m_composite_selfref_func_remote_side()
def test_determine_local_remote_pairs_o2m_overlap_func_warning(self):
self._assert_non_simple_warning(
self._join_fixture_m2o_sub_to_joined_sub_func
)
def test_determine_local_remote_pairs_o2m_composite_selfref_func_annotated(self):
joincond = self._join_fixture_o2m_composite_selfref_func_annotated()
eq_(
joincond.local_remote_pairs,
[
(self.composite_selfref.c.group_id,
self.composite_selfref.c.group_id),
(self.composite_selfref.c.id,
self.composite_selfref.c.parent_id),
]
)
def test_determine_remote_columns_m2o_composite_selfref(self):
joincond = self._join_fixture_m2o_composite_selfref()
eq_(
joincond.remote_columns,
set([self.composite_selfref.c.id,
self.composite_selfref.c.group_id])
)
def test_determine_remote_columns_m2o(self):
joincond = self._join_fixture_m2o()
eq_(
joincond.remote_columns,
set([self.left.c.id])
)
def test_determine_local_remote_pairs_o2m(self):
joincond = self._join_fixture_o2m()
eq_(
joincond.local_remote_pairs,
[(self.left.c.id, self.right.c.lid)]
)
def test_determine_synchronize_pairs_m2m(self):
joincond = self._join_fixture_m2m()
eq_(
joincond.synchronize_pairs,
[(self.m2mleft.c.id, self.m2msecondary.c.lid)]
)
eq_(
joincond.secondary_synchronize_pairs,
[(self.m2mright.c.id, self.m2msecondary.c.rid)]
)
def test_determine_local_remote_pairs_o2m_backref(self):
joincond = self._join_fixture_o2m()
joincond2 = self._join_fixture_m2o(
primaryjoin=joincond.primaryjoin_reverse_remote,
)
eq_(
joincond2.local_remote_pairs,
[(self.right.c.lid, self.left.c.id)]
)
def test_determine_local_remote_pairs_m2m(self):
joincond = self._join_fixture_m2m()
eq_(
joincond.local_remote_pairs,
[(self.m2mleft.c.id, self.m2msecondary.c.lid),
(self.m2mright.c.id, self.m2msecondary.c.rid)]
)
def test_determine_local_remote_pairs_m2m_backref(self):
j1, j2 = self._join_fixture_m2m_backref()
eq_(
j1.local_remote_pairs,
[(self.m2mleft.c.id, self.m2msecondary.c.lid),
(self.m2mright.c.id, self.m2msecondary.c.rid)]
)
eq_(
j2.local_remote_pairs,
[
(self.m2mright.c.id, self.m2msecondary.c.rid),
(self.m2mleft.c.id, self.m2msecondary.c.lid),
]
)
def test_determine_local_columns_m2m_backref(self):
j1, j2 = self._join_fixture_m2m_backref()
eq_(
j1.local_columns,
set([self.m2mleft.c.id])
)
eq_(
j2.local_columns,
set([self.m2mright.c.id])
)
def test_determine_remote_columns_m2m_backref(self):
j1, j2 = self._join_fixture_m2m_backref()
eq_(
j1.remote_columns,
set([self.m2msecondary.c.lid, self.m2msecondary.c.rid])
)
eq_(
j2.remote_columns,
set([self.m2msecondary.c.lid, self.m2msecondary.c.rid])
)
def test_determine_remote_columns_m2o_selfref(self):
joincond = self._join_fixture_m2o_selfref()
eq_(
joincond.remote_columns,
set([self.selfref.c.id])
)
def test_determine_local_remote_cols_three_tab_viewonly(self):
joincond = self._join_fixture_overlapping_three_tables()
eq_(
joincond.local_remote_pairs,
[(self.three_tab_a.c.id, self.three_tab_b.c.aid)]
)
eq_(
joincond.remote_columns,
set([self.three_tab_b.c.id, self.three_tab_b.c.aid])
)
def test_determine_local_remote_overlapping_composite_fks(self):
joincond = self._join_fixture_overlapping_composite_fks()
eq_(
joincond.local_remote_pairs,
[
(self.composite_target.c.uid, self.composite_multi_ref.c.uid2,),
(self.composite_target.c.oid, self.composite_multi_ref.c.oid,)
]
)
def test_determine_local_remote_pairs_purely_single_col_o2m(self):
joincond = self._join_fixture_purely_single_o2m()
eq_(
joincond.local_remote_pairs,
[(self.purely_single_col.c.path, self.purely_single_col.c.path)]
)
class DirectionTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
def test_determine_direction_compound_2(self):
joincond = self._join_fixture_compound_expression_2(
support_sync=False)
is_(
joincond.direction,
ONETOMANY
)
def test_determine_direction_o2m(self):
joincond = self._join_fixture_o2m()
is_(joincond.direction, ONETOMANY)
def test_determine_direction_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
is_(joincond.direction, ONETOMANY)
def test_determine_direction_m2o_selfref(self):
joincond = self._join_fixture_m2o_selfref()
is_(joincond.direction, MANYTOONE)
def test_determine_direction_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
is_(joincond.direction, ONETOMANY)
def test_determine_direction_m2o_composite_selfref(self):
joincond = self._join_fixture_m2o_composite_selfref()
is_(joincond.direction, MANYTOONE)
def test_determine_direction_m2o(self):
joincond = self._join_fixture_m2o()
is_(joincond.direction, MANYTOONE)
def test_determine_direction_purely_single_o2m(self):
joincond = self._join_fixture_purely_single_o2m()
is_(joincond.direction, ONETOMANY)
def test_determine_direction_purely_single_m2o(self):
joincond = self._join_fixture_purely_single_m2o()
is_(joincond.direction, MANYTOONE)
class DetermineJoinTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_determine_join_o2m(self):
joincond = self._join_fixture_o2m()
self.assert_compile(
joincond.primaryjoin,
"lft.id = rgt.lid"
)
def test_determine_join_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
self.assert_compile(
joincond.primaryjoin,
"selfref.id = selfref.sid"
)
def test_determine_join_m2o_selfref(self):
joincond = self._join_fixture_m2o_selfref()
self.assert_compile(
joincond.primaryjoin,
"selfref.id = selfref.sid"
)
def test_determine_join_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
self.assert_compile(
joincond.primaryjoin,
"composite_selfref.group_id = composite_selfref.group_id "
"AND composite_selfref.id = composite_selfref.parent_id"
)
def test_determine_join_m2o_composite_selfref(self):
joincond = self._join_fixture_m2o_composite_selfref()
self.assert_compile(
joincond.primaryjoin,
"composite_selfref.group_id = composite_selfref.group_id "
"AND composite_selfref.id = composite_selfref.parent_id"
)
def test_determine_join_m2o(self):
joincond = self._join_fixture_m2o()
self.assert_compile(
joincond.primaryjoin,
"lft.id = rgt.lid"
)
def test_determine_join_ambiguous_fks_o2m(self):
assert_raises_message(
exc.AmbiguousForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship None - "
"there are multiple foreign key paths linking "
"the tables. Specify the 'foreign_keys' argument, "
"providing a list of those columns which "
"should be counted as containing a foreign "
"key reference to the parent table.",
relationships.JoinCondition,
self.left,
self.right_multi_fk,
self.left,
self.right_multi_fk,
)
def test_determine_join_no_fks_o2m(self):
self._assert_raises_no_join(
relationships.JoinCondition,
"None", None,
self.left,
self.selfref,
self.left,
self.selfref,
)
def test_determine_join_ambiguous_fks_m2m(self):
self._assert_raises_ambig_join(
relationships.JoinCondition,
"None", self.m2msecondary_ambig_fks,
self.m2mleft,
self.m2mright,
self.m2mleft,
self.m2mright,
secondary=self.m2msecondary_ambig_fks
)
def test_determine_join_no_fks_m2m(self):
self._assert_raises_no_join(
relationships.JoinCondition,
"None", self.m2msecondary_no_fks,
self.m2mleft,
self.m2mright,
self.m2mleft,
self.m2mright,
secondary=self.m2msecondary_no_fks
)
def _join_fixture_fks_ambig_m2m(self):
return relationships.JoinCondition(
self.m2mleft,
self.m2mright,
self.m2mleft,
self.m2mright,
secondary=self.m2msecondary_ambig_fks,
consider_as_foreign_keys=[
self.m2msecondary_ambig_fks.c.lid1,
self.m2msecondary_ambig_fks.c.rid1]
)
def test_determine_join_w_fks_ambig_m2m(self):
joincond = self._join_fixture_fks_ambig_m2m()
self.assert_compile(
joincond.primaryjoin,
"m2mlft.id = m2msecondary_ambig_fks.lid1"
)
self.assert_compile(
joincond.secondaryjoin,
"m2mrgt.id = m2msecondary_ambig_fks.rid1"
)
class AdaptedJoinTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_join_targets_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
left = select([joincond.parent_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
left,
joincond.child_selectable,
True)
self.assert_compile(
pj, "pj.id = selfref.sid"
)
right = select([joincond.child_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
right,
True)
self.assert_compile(
pj, "selfref.id = pj.sid"
)
def test_join_targets_o2m_plain(self):
joincond = self._join_fixture_o2m()
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
joincond.child_selectable,
False)
self.assert_compile(
pj, "lft.id = rgt.lid"
)
def test_join_targets_o2m_left_aliased(self):
joincond = self._join_fixture_o2m()
left = select([joincond.parent_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
left,
joincond.child_selectable,
True)
self.assert_compile(
pj, "pj.id = rgt.lid"
)
def test_join_targets_o2m_right_aliased(self):
joincond = self._join_fixture_o2m()
right = select([joincond.child_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
right,
True)
self.assert_compile(
pj, "lft.id = pj.lid"
)
def test_join_targets_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
right = select([joincond.child_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
right,
True)
self.assert_compile(
pj,
"pj.group_id = composite_selfref.group_id "
"AND composite_selfref.id = pj.parent_id"
)
def test_join_targets_m2o_composite_selfref(self):
joincond = self._join_fixture_m2o_composite_selfref()
right = select([joincond.child_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
right,
True)
self.assert_compile(
pj,
"pj.group_id = composite_selfref.group_id "
"AND pj.id = composite_selfref.parent_id"
)
class LazyClauseTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_lazy_clause_o2m(self):
joincond = self._join_fixture_o2m()
lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause()
self.assert_compile(
lazywhere,
":param_1 = rgt.lid"
)
def test_lazy_clause_o2m_reverse(self):
joincond = self._join_fixture_o2m()
lazywhere, bind_to_col, equated_columns =\
joincond.create_lazy_clause(reverse_direction=True)
self.assert_compile(
lazywhere,
"lft.id = :param_1"
)
def test_lazy_clause_o2m_o_side_none(self):
# test for #2948. When the join is "o.id == m.oid AND o.something == something",
# we don't want 'o' brought into the lazy load for 'm'
joincond = self._join_fixture_o2m_o_side_none()
lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause()
self.assert_compile(
lazywhere,
":param_1 = rgt.lid AND :param_2 = :x_1",
checkparams={'param_1': None, 'param_2': None, 'x_1': 5}
)
def test_lazy_clause_o2m_o_side_none_reverse(self):
# continued test for #2948.
joincond = self._join_fixture_o2m_o_side_none()
lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause(reverse_direction=True)
self.assert_compile(
lazywhere,
"lft.id = :param_1 AND lft.x = :x_1",
            checkparams={'param_1': None, 'x_1': 5}
)
def test_lazy_clause_remote_local_multiple_ref(self):
joincond = self._join_fixture_remote_local_multiple_ref()
lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause()
self.assert_compile(
lazywhere,
":param_1 = selfref.sid OR selfref.sid = :param_1",
checkparams={'param_1': None}
)
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registrations for LinearOperator.solve."""
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_block_diag
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.linalg import linear_operator_composition
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_inversion
from tensorflow.python.ops.linalg import linear_operator_lower_triangular
from tensorflow.python.ops.linalg import registrations_util
# By default, use a LinearOperatorComposition to delay the computation.
@linear_operator_algebra.RegisterSolve(
linear_operator.LinearOperator, linear_operator.LinearOperator)
def _solve_linear_operator(linop_a, linop_b):
"""Generic solve of two `LinearOperator`s."""
is_square = registrations_util.is_square(linop_a, linop_b)
is_non_singular = None
is_self_adjoint = None
is_positive_definite = None
if is_square:
is_non_singular = registrations_util.combined_non_singular_hint(
linop_a, linop_b)
elif is_square is False: # pylint:disable=g-bool-id-comparison
is_non_singular = False
is_self_adjoint = False
is_positive_definite = False
return linear_operator_composition.LinearOperatorComposition(
operators=[
linear_operator_inversion.LinearOperatorInversion(linop_a),
linop_b
],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
)
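# Hedged usage sketch (comments only, not an additional registration; assumes
# the public tf.linalg API and that LinearOperator.solve dispatches through
# these registrations when handed another LinearOperator):
#
#   import tensorflow as tf
#   a = tf.linalg.LinearOperatorFullMatrix([[2., 0.], [0., 4.]])
#   b = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
#   ab = a.solve(b)  # expected: a lazy composition [inverse(a), b] rather
#                    # than an eagerly materialized A^-1 B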
@linear_operator_algebra.RegisterSolve(
linear_operator_inversion.LinearOperatorInversion,
linear_operator.LinearOperator)
def _solve_inverse_linear_operator(linop_a, linop_b):
"""Solve inverse of generic `LinearOperator`s."""
return linop_a.operator.matmul(linop_b)
# Identity
@linear_operator_algebra.RegisterSolve(
linear_operator_identity.LinearOperatorIdentity,
linear_operator.LinearOperator)
def _solve_linear_operator_identity_left(identity, linop):
del identity
return linop
@linear_operator_algebra.RegisterSolve(
linear_operator.LinearOperator,
linear_operator_identity.LinearOperatorIdentity)
def _solve_linear_operator_identity_right(linop, identity):
del identity
return linop.inverse()
@linear_operator_algebra.RegisterSolve(
linear_operator_identity.LinearOperatorScaledIdentity,
linear_operator_identity.LinearOperatorScaledIdentity)
def _solve_linear_operator_scaled_identity(linop_a, linop_b):
"""Solve of two ScaledIdentity `LinearOperators`."""
return linear_operator_identity.LinearOperatorScaledIdentity(
num_rows=linop_a.domain_dimension_tensor(),
multiplier=linop_b.multiplier / linop_a.multiplier,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_a, linop_b),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_a, linop_b),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_a, linop_b)),
is_square=True)
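# Worked example of the multiplier arithmetic above (hedged, illustrative
# values only): solving (2 * I) X = (6 * I) yields X = 3 * I, i.e. a scaled
# identity whose multiplier is 6. / 2. = 3.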
# Diag.
@linear_operator_algebra.RegisterSolve(
linear_operator_diag.LinearOperatorDiag,
linear_operator_diag.LinearOperatorDiag)
def _solve_linear_operator_diag(linop_a, linop_b):
return linear_operator_diag.LinearOperatorDiag(
diag=linop_b.diag / linop_a.diag,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_a, linop_b),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_a, linop_b),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_a, linop_b)),
is_square=True)
@linear_operator_algebra.RegisterSolve(
linear_operator_diag.LinearOperatorDiag,
linear_operator_identity.LinearOperatorScaledIdentity)
def _solve_linear_operator_diag_scaled_identity_right(
linop_diag, linop_scaled_identity):
return linear_operator_diag.LinearOperatorDiag(
diag=linop_scaled_identity.multiplier / linop_diag.diag,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_diag, linop_scaled_identity),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_diag, linop_scaled_identity),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_diag, linop_scaled_identity)),
is_square=True)
@linear_operator_algebra.RegisterSolve(
linear_operator_identity.LinearOperatorScaledIdentity,
linear_operator_diag.LinearOperatorDiag)
def _solve_linear_operator_diag_scaled_identity_left(
linop_scaled_identity, linop_diag):
return linear_operator_diag.LinearOperatorDiag(
diag=linop_diag.diag / linop_scaled_identity.multiplier,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_diag, linop_scaled_identity),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_diag, linop_scaled_identity),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_diag, linop_scaled_identity)),
is_square=True)
@linear_operator_algebra.RegisterSolve(
linear_operator_diag.LinearOperatorDiag,
linear_operator_lower_triangular.LinearOperatorLowerTriangular)
def _solve_linear_operator_diag_tril(linop_diag, linop_triangular):
return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
tril=linop_triangular.to_dense() / linop_diag.diag[..., None],
is_non_singular=registrations_util.combined_non_singular_hint(
linop_diag, linop_triangular),
# This is safe to do since the Triangular matrix is only self-adjoint
# when it is a diagonal matrix, and hence commutes.
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_diag, linop_triangular),
is_positive_definite=None,
is_square=True)
# Circulant.
@linear_operator_algebra.RegisterSolve(
linear_operator_circulant.LinearOperatorCirculant,
linear_operator_circulant.LinearOperatorCirculant)
def _solve_linear_operator_circulant_circulant(linop_a, linop_b):
return linear_operator_circulant.LinearOperatorCirculant(
spectrum=linop_b.spectrum / linop_a.spectrum,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_a, linop_b),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_a, linop_b),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_a, linop_b)),
is_square=True)
# Block Diag
@linear_operator_algebra.RegisterSolve(
linear_operator_block_diag.LinearOperatorBlockDiag,
linear_operator_block_diag.LinearOperatorBlockDiag)
def _solve_linear_operator_block_diag_block_diag(linop_a, linop_b):
return linear_operator_block_diag.LinearOperatorBlockDiag(
operators=[
o1.solve(o2) for o1, o2 in zip(
linop_a.operators, linop_b.operators)],
is_non_singular=registrations_util.combined_non_singular_hint(
linop_a, linop_b),
# In general, a solve of self-adjoint positive-definite block diagonal
      # matrices is not self-adjoint.
is_self_adjoint=None,
# In general, a solve of positive-definite block diagonal matrices is
# not positive-definite.
is_positive_definite=None,
is_square=True)
|
|
import copy
from django import forms
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.utils.translation import gettext_lazy as _
from django.contrib.gis.db.models.fields import GeometryField
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Button, HTML, Submit
from crispy_forms.bootstrap import FormActions
from tinymce.widgets import TinyMCE
from paperclip.forms import AttachmentForm as BaseAttachmentForm
from .settings import app_settings
from .widgets import MapWidget
from .models import ENTITY_PERMISSION_UPDATE_GEOM
if 'modeltranslation' in settings.INSTALLED_APPS:
from modeltranslation.translator import translator, NotRegistered
class TranslatedModelForm(forms.ModelForm):
"""
Auto-expand translatable fields.
    "Expand" means each native field (e.g. `name`) is replaced by its translated counterparts (e.g. `name_fr`, `name_en`)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Track translated fields
self.orig_fields = list(self.fields.keys())
self._translated = {}
if 'modeltranslation' in settings.INSTALLED_APPS:
self.replace_orig_fields()
self.populate_fields()
def replace_orig_fields(self):
# Expand i18n fields
try:
# Obtain model translation options
mto = translator.get_options_for_model(self._meta.model)
except NotRegistered:
# No translation field on this model, nothing to do
return
# For each translated model field
for modelfield in mto.fields:
if modelfield not in self.fields:
continue
# Remove form native field (e.g. `name`)
native = self.fields.pop(modelfield)
# Add translated fields (e.g. `name_fr`, `name_en`...)
for translated_language in app_settings['TRANSLATED_LANGUAGES']:
lang = translated_language[0]
name = '{0}_{1}'.format(modelfield, lang)
# Add to form.fields{}
translated = copy.deepcopy(native)
translated.required = native.required and (lang == settings.MODELTRANSLATION_DEFAULT_LANGUAGE)
translated.label = u"{0} [{1}]".format(translated.label, lang)
self.fields[name] = translated
# Keep track of replacements
self._translated.setdefault(modelfield, []).append(name)
def save(self, *args, **kwargs):
""" Manually saves translated fields on instance.
"""
# Save translated fields
for fields in self._translated.values():
for field in fields:
value = self.cleaned_data.get(field)
setattr(self.instance, field, value)
return super().save(*args, **kwargs)
def populate_fields(self):
""" Manually loads translated fields from instance.
"""
if self.instance:
for fields in self._translated.values():
for field in fields:
self.fields[field].initial = getattr(self.instance, field)
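# Hedged usage sketch (illustrative only; ``Place`` is a hypothetical model
# whose ``name`` field is registered with django-modeltranslation): a subclass
# of TranslatedModelForm sees its single ``name`` field replaced by one field
# per entry in TRANSLATED_LANGUAGES, e.g. ``name_en`` and ``name_fr``.
#
#   class PlaceForm(TranslatedModelForm):
#       class Meta:
#           model = Place
#           fields = ['name']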
class SubmitButton(HTML):
def __init__(self, div_id, label):
content = ("""
<a id="{0}" class="btn btn-success"
onclick="javascript:$(this).parents('form').submit();">
<i class="bi bi-check-circle-fill"></i> {1}
</a>""".format(div_id, label))
super().__init__(content)
class MapEntityForm(TranslatedModelForm):
fieldslayout = None
geomfields = []
leftpanel_scrollable = True
hidden_fields = []
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
self.can_delete = kwargs.pop('can_delete', True)
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
# Default widgets
for fieldname, formfield in self.fields.items():
# Custom code because formfield_callback does not work with inherited forms
if formfield:
# Assign map widget to all geometry fields
try:
formmodel = self._meta.model
modelfield = formmodel._meta.get_field(fieldname)
needs_replace_widget = (isinstance(modelfield, GeometryField)
and not isinstance(formfield.widget, MapWidget))
if needs_replace_widget:
formfield.widget = MapWidget()
if self.instance.pk and self.user:
if not self.user.has_perm(self.instance.get_permission_codename(
ENTITY_PERMISSION_UPDATE_GEOM)):
formfield.widget.modifiable = False
formfield.widget.attrs['geom_type'] = formfield.geom_type
except FieldDoesNotExist:
pass
                # Replace plain Textarea widgets with TinyMCE; the exact class
                # check bypasses widgets that merely subclass Textarea, such as
                # geometry fields
if formfield.widget.__class__ == forms.widgets.Textarea:
formfield.widget = TinyMCE()
if self.instance.pk and self.user:
if not self.user.has_perm(self.instance.get_permission_codename(
ENTITY_PERMISSION_UPDATE_GEOM)):
for field in self.geomfields:
self.fields.get(field).widget.modifiable = False
self._init_layout()
def _init_layout(self):
""" Setup form buttons, submit URL, layout
"""
is_creation = self.instance.pk is None
actions = [
Button('cancel', _('Cancel'), css_class="btn btn-light ml-auto mr-2"),
SubmitButton('save_changes', _('Create') if is_creation else _('Save changes')),
]
# Generic behaviour
if not is_creation:
self.helper.form_action = self.instance.get_update_url()
# Put delete url in Delete button
actions.insert(0, HTML(
"""<a class="btn {0} delete" href="{1}"><i class="bi bi-trash"></i> {2}</a>""".format(
'btn-danger' if self.can_delete else 'disabled',
self.instance.get_delete_url() if self.can_delete else '#',
_("Delete")
)))
else:
self.helper.form_action = self.instance.get_add_url()
# Check if fieldslayout is defined, otherwise use Meta.fields
fieldslayout = self.fieldslayout
if not fieldslayout:
# Remove geomfields from left part
fieldslayout = [fl for fl in self.orig_fields if fl not in self.geomfields]
# Replace native fields in Crispy layout by translated fields
fieldslayout = self.__replace_translatable_fields(fieldslayout)
has_geomfield = len(self.geomfields) > 0
leftpanel_css = "col-12"
if has_geomfield:
leftpanel_css = "col-12 col-sm-6 col-lg-5"
if self.leftpanel_scrollable:
leftpanel_css += " scrollable"
leftpanel = Div(
*fieldslayout,
css_class=leftpanel_css,
css_id="modelfields",
)
rightpanel = tuple()
if has_geomfield:
rightpanel = (Div(
*self.geomfields,
css_class="col-12 col-sm-6 col-lg-7",
css_id="geomfield"
),)
# Create form actions
        # the crispy_forms bootstrap4 template is overridden because it adds
        # label and field classes that are not wanted here
formactions = FormActions(
*actions,
css_class="form-actions",
template='mapentity/crispy_forms/bootstrap4/layout/formactions.html'
)
# Main form layout
self.helper.help_text_inline = True
self.helper.form_class = 'form-horizontal'
self.helper.form_style = "default"
self.helper.label_class = 'col-md-3'
self.helper.field_class = 'controls col-md-9'
self.helper.layout = Layout(
Div(
Div(
leftpanel,
*rightpanel,
css_class="row"
),
css_class="container-fluid"
),
formactions,
)
def __replace_translatable_fields(self, fieldslayout):
newlayout = []
for field in fieldslayout:
# Layout fields can be nested (e.g. Div('f1', 'f2', Div('f3')))
if hasattr(field, 'fields'):
field.fields = self.__replace_translatable_fields(field.fields)
newlayout.append(field)
else:
# Add translated fields to layout
if field in self._translated:
field_is_required = self.fields[f"{field}_{settings.MODELTRANSLATION_DEFAULT_LANGUAGE}"].required
# Only if they are required or not hidden
if field_is_required or field not in self.hidden_fields:
newlayout.append(self.__tabbed_layout_for_field(field))
else:
newlayout.append(field)
return newlayout
def __tabbed_layout_for_field(self, field):
fields = []
for replacement in self._translated[field]:
active = "active" if replacement.endswith('_{0}'.format(settings.MODELTRANSLATION_DEFAULT_LANGUAGE)) else ""
fields.append(Div(replacement,
css_class="tab-pane " + active,
css_id=replacement))
layout = Div(
HTML("""
<ul class="nav nav-pills offset-md-3">
{{% for lang in TRANSLATED_LANGUAGES %}}
<li class="nav-item">
<a class="nav-link{{% if lang.0 == '{lang_code}'""" """ %}} active{{% endif %}}" href="#{field}_{{{{ lang.0 }}}}"
data-toggle="tab">{{{{ lang.0 }}}}
</a>
</li>
{{% endfor %}}
</ul>
""".format(lang_code=settings.MODELTRANSLATION_DEFAULT_LANGUAGE, field=field)),
Div(
*fields,
css_class="tab-content"
),
css_class="translatable tabbable"
)
return layout
class AttachmentForm(BaseAttachmentForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper(form=self)
self.helper.form_tag = True
self.helper.form_class = 'attachment form-horizontal'
self.helper.help_text_inline = True
self.helper.form_style = "default"
self.helper.label_class = 'col-md-3'
self.helper.field_class = 'col-md-9'
if self.is_creation:
form_actions = [
Submit('submit_attachment',
_('Submit attachment'),
css_class="btn-primary")
]
else:
form_actions = [
Button('cancel', _('Cancel'), css_class=""),
Submit('submit_attachment',
_('Update attachment'),
css_class="btn-primary")
]
self.helper.form_action = self.form_url
self.helper.layout.fields.append(
FormActions(*form_actions, css_class="form-actions"))
|
|
# coding: utf-8
import functools
import re
from flask.ext import login
from google.appengine.api import mail
from flask.ext.oauthlib import client as oauth
from google.appengine.api import users
from google.appengine.ext import ndb
import flask
import unidecode
import config
import model
import task
import util
from main import app
_signals = flask.signals.Namespace()
###############################################################################
# Flask Login
###############################################################################
login_manager = login.LoginManager()
class AnonymousUser(login.AnonymousUserMixin):
id = 0
admin = False
name = 'Anonymous'
user_db = None
def key(self):
return None
def has_permission(self, permission):
return False
login_manager.anonymous_user = AnonymousUser
class FlaskUser(AnonymousUser):
def __init__(self, user_db):
self.user_db = user_db
self.id = user_db.key.id()
self.name = user_db.name
self.admin = user_db.admin
def key(self):
return self.user_db.key.urlsafe()
def get_id(self):
return self.user_db.key.urlsafe()
def is_authenticated(self):
return True
def is_active(self):
return self.user_db.active
def is_anonymous(self):
return False
def has_permission(self, permission):
return self.user_db.has_permission(permission)
@login_manager.user_loader
def load_user(key):
user_db = ndb.Key(urlsafe=key).get()
if user_db:
return FlaskUser(user_db)
return None
login_manager.init_app(app)
def current_user_id():
return login.current_user.id
def current_user_key():
return login.current_user.user_db.key if login.current_user.user_db else None
def current_user_db():
return login.current_user.user_db
def is_logged_in():
return login.current_user.id != 0
###############################################################################
# Decorators
###############################################################################
def login_required(f):
decorator_order_guard(f, 'auth.login_required')
@functools.wraps(f)
def decorated_function(*args, **kws):
if is_logged_in():
return f(*args, **kws)
if flask.request.path.startswith('/_s/'):
return flask.abort(401)
return flask.redirect(flask.url_for('signin', next=flask.request.url))
return decorated_function
def admin_required(f):
decorator_order_guard(f, 'auth.admin_required')
@functools.wraps(f)
def decorated_function(*args, **kws):
if is_logged_in() and current_user_db().admin:
return f(*args, **kws)
if not is_logged_in() and flask.request.path.startswith('/_s/'):
return flask.abort(401)
if not is_logged_in():
return flask.redirect(flask.url_for('signin', next=flask.request.url))
return flask.abort(403)
return decorated_function
permission_registered = _signals.signal('permission-registered')
def permission_required(permission=None, methods=None):
def permission_decorator(f):
decorator_order_guard(f, 'auth.permission_required')
# default to decorated function name as permission
perm = permission or f.func_name
meths = [m.upper() for m in methods] if methods else None
permission_registered.send(f, permission=perm)
@functools.wraps(f)
def decorated_function(*args, **kws):
if meths and flask.request.method.upper() not in meths:
return f(*args, **kws)
if is_logged_in() and current_user_db().has_permission(perm):
return f(*args, **kws)
if not is_logged_in():
if flask.request.path.startswith('/_s/'):
return flask.abort(401)
return flask.redirect(flask.url_for('signin', next=flask.request.url))
return flask.abort(403)
return decorated_function
return permission_decorator
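# Hedged usage sketch (illustrative view, not part of this module): the auth
# decorator must sit *below* the app.route decorator (see decorator_order_guard
# further down), and with methods=['POST'] only POST requests are checked.
#
#   @app.route('/things/', methods=['GET', 'POST'])
#   @permission_required('manage_things', methods=['POST'])
#   def manage_things():
#     ...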
###############################################################################
# Sign in stuff
###############################################################################
@app.route('/login/')
@app.route('/signin/')
def signin():
next_url = util.get_next_url()
google_signin_url = flask.url_for('signin_google', next=next_url)
twitter_signin_url = flask.url_for('signin_twitter', next=next_url)
facebook_signin_url = flask.url_for('signin_facebook', next=next_url)
return flask.render_template(
'signin.html',
title='Please sign in',
html_class='signin',
google_signin_url=google_signin_url,
twitter_signin_url=twitter_signin_url,
facebook_signin_url=facebook_signin_url,
next_url=next_url,
)
@app.route('/signout/')
def signout():
login.logout_user()
flask.flash(u'You have been signed out.', category='success')
return flask.redirect(util.param('next') or flask.url_for('signin'))
###############################################################################
# Google
###############################################################################
@app.route('/signin/google/')
def signin_google():
save_request_params()
google_url = users.create_login_url(flask.url_for('google_authorized'))
return flask.redirect(google_url)
@app.route('/_s/callback/google/authorized/')
def google_authorized():
google_user = users.get_current_user()
if google_user is None:
flask.flash(u'You denied the request to sign in.')
return flask.redirect(util.get_next_url())
user_db = retrieve_user_from_google(google_user)
return signin_user_db(user_db)
def retrieve_user_from_google(google_user):
auth_id = 'federated_%s' % google_user.user_id()
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
if not user_db.admin and users.is_current_user_admin():
user_db.admin = True
user_db.put()
return user_db
return create_user_db(
auth_id,
util.create_name_from_email(google_user.email()),
google_user.email(),
google_user.email(),
verified=True,
admin=users.is_current_user_admin(),
)
###############################################################################
# Twitter
###############################################################################
twitter_oauth = oauth.OAuth()
app.config['TWITTER'] = dict(
base_url='https://api.twitter.com/1.1/',
request_token_url='https://api.twitter.com/oauth/request_token',
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authorize',
consumer_key=config.CONFIG_DB.twitter_consumer_key,
consumer_secret=config.CONFIG_DB.twitter_consumer_secret,
)
twitter = twitter_oauth.remote_app('twitter', app_key='TWITTER')
twitter_oauth.init_app(app)
@app.route('/_s/callback/twitter/oauth-authorized/')
def twitter_authorized():
resp = twitter.authorized_response()
if resp is None:
flask.flash(u'You denied the request to sign in.')
return flask.redirect(util.get_next_url())
flask.session['oauth_token'] = (
resp['oauth_token'],
resp['oauth_token_secret'],
)
user_db = retrieve_user_from_twitter(resp)
return signin_user_db(user_db)
@twitter.tokengetter
def get_twitter_token():
return flask.session.get('oauth_token')
@app.route('/signin/twitter/')
def signin_twitter():
flask.session.pop('oauth_token', None)
save_request_params()
try:
return twitter.authorize(callback=flask.url_for('twitter_authorized'))
except:
flask.flash(
'Something went wrong with Twitter sign in. Please try again.',
category='danger',
)
return flask.redirect(flask.url_for('signin', next=util.get_next_url()))
def retrieve_user_from_twitter(response):
auth_id = 'twitter_%s' % response['user_id']
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
return user_db
return create_user_db(
auth_id,
response['screen_name'],
response['screen_name'],
)
###############################################################################
# Facebook
###############################################################################
facebook_oauth = oauth.OAuth()
app.config['FACEBOOK'] = dict(
base_url='https://graph.facebook.com/',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth',
consumer_key=config.CONFIG_DB.facebook_app_id,
consumer_secret=config.CONFIG_DB.facebook_app_secret,
request_token_params={'scope': 'email'},
)
facebook = facebook_oauth.remote_app('facebook', app_key='FACEBOOK')
facebook_oauth.init_app(app)
@app.route('/_s/callback/facebook/oauth-authorized/')
def facebook_authorized():
resp = facebook.authorized_response()
if resp is None:
flask.flash(u'You denied the request to sign in.')
return flask.redirect(util.get_next_url())
flask.session['oauth_token'] = (resp['access_token'], '')
me = facebook.get('/me')
user_db = retrieve_user_from_facebook(me.data)
return signin_user_db(user_db)
@facebook.tokengetter
def get_facebook_oauth_token():
return flask.session.get('oauth_token')
@app.route('/signin/facebook/')
def signin_facebook():
save_request_params()
return facebook.authorize(callback=flask.url_for(
'facebook_authorized', _external=True
))
def retrieve_user_from_facebook(response):
auth_id = 'facebook_%s' % response['id']
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
return user_db
return create_user_db(
auth_id,
response['name'],
response.get('username', response['name']),
response.get('email', ''),
verified=bool(response.get('email', '')),
)
###############################################################################
# Helpers
###############################################################################
def decorator_order_guard(f, decorator_name):
if f in app.view_functions.values():
raise SyntaxError(
'Do not use %s above app.route decorators as it would not be checked. '
'Instead move the line below the app.route lines.' % decorator_name
)
def create_user_db(auth_id, name, username, email='', verified=False, **props):
email = email.lower()
if verified and email:
user_dbs, user_cr = model.User.get_dbs(email=email, verified=True, limit=2)
if len(user_dbs) == 1:
user_db = user_dbs[0]
user_db.auth_ids.append(auth_id)
user_db.put()
task.new_user_notification(user_db)
return user_db
if isinstance(username, str):
username = username.decode('utf-8')
username = unidecode.unidecode(username.split('@')[0].lower()).strip()
username = re.sub(r'[\W_]+', '.', username)
new_username = username
n = 1
while not model.User.is_username_available(new_username):
new_username = '%s%d' % (username, n)
n += 1
user_db = model.User(
name=name,
email=email,
username=new_username,
auth_ids=[auth_id] if auth_id else [],
verified=verified,
token=util.uuid(),
**props
)
user_db.put()
task.new_user_notification(user_db)
return user_db
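# Hedged example of the username de-duplication above (illustrative values):
# 'john doe@example.com' becomes 'john.doe'; if that name is taken, the loop
# tries 'john.doe1', 'john.doe2', ... until is_username_available() passes.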
def save_request_params():
flask.session['auth-params'] = {
'next': util.get_next_url(),
'remember': util.param('remember', bool),
}
@ndb.toplevel
def signin_user_db(user_db):
if not user_db:
return flask.redirect(flask.url_for('signin'))
flask_user_db = FlaskUser(user_db)
auth_params = flask.session.get('auth-params', {
'next': flask.url_for('welcome'),
'remember': False,
})
flask.session.pop('auth-params', None)
if login.login_user(flask_user_db, remember=auth_params['remember']):
user_db.put_async()
flask.flash('Hello %s, welcome to %s.' % (
user_db.name, config.CONFIG_DB.brand_name,
), category='success')
return flask.redirect(util.get_next_url(auth_params['next']))
flask.flash('Sorry, but you could not sign in.', category='danger')
return flask.redirect(flask.url_for('signin'))
|
|
# $Id: ShowFeats.py 537 2007-08-20 14:54:35Z landrgr1 $
#
# Created by Greg Landrum Aug 2006
#
#
from __future__ import print_function
_version = "0.3.2"
_usage="""
ShowFeats [optional args] <filenames>
if "-" is provided as a filename, data will be read from stdin (the console)
"""
_welcomeMessage="This is ShowFeats version %s"%(_version)
import math
#set up the logger:
from rdkit import RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
from rdkit import Geometry
from rdkit.Chem.Features import FeatDirUtilsRD as FeatDirUtils
_featColors = {
'Donor':(0,1,1),
'Acceptor':(1,0,1),
'NegIonizable':(1,0,0),
'PosIonizable':(0,0,1),
'ZnBinder':(1,.5,.5),
'Aromatic':(1,.8,.2),
'LumpedHydrophobe':(.5,.25,0),
'Hydrophobe':(.5,.25,0),
}
def _getVectNormal(v,tol=1e-4):
if math.fabs(v.x)>tol:
res = Geometry.Point3D(v.y,-v.x,0)
elif math.fabs(v.y)>tol:
res = Geometry.Point3D(-v.y,v.x,0)
elif math.fabs(v.z)>tol:
res = Geometry.Point3D(1,0,0)
else:
raise ValueError('cannot find normal to the null vector')
res.Normalize()
return res
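# Hedged example (illustrative, not part of the original script): for
# v = (1, 0, 0) the first branch above returns (0, -1, 0), which is unit
# length and has zero dot product with v, i.e. a normal to v in the xy plane.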
_canonArrowhead=None
def _buildCanonArrowhead(headFrac,nSteps,aspect):
  global _canonArrowhead
  startP = Geometry.Point3D(0,0,headFrac)
  _canonArrowhead=[startP]
  scale = headFrac*aspect
  baseV = Geometry.Point3D(scale,0,0)
  _canonArrowhead.append(baseV)
  twopi = 2*math.pi
  for i in range(1,nSteps):
    # evenly spaced points around the base circle of the arrowhead
    theta = i*twopi/nSteps
    v = Geometry.Point3D(scale*math.cos(theta),scale*math.sin(theta),0)
    _canonArrowhead.append(v)
_globalArrowCGO=[]
_globalSphereCGO=[]
# taken from pymol's cgo.py
BEGIN=2
END=3
TRIANGLE_FAN=6
COLOR=6
VERTEX=4
NORMAL=5
SPHERE=7
CYLINDER=9
ALPHA=25
def _cgoArrowhead(viewer,tail,head,radius,color,label,headFrac=0.3,nSteps=10,aspect=.5):
global _globalArrowCGO
delta = head-tail
normal = _getVectNormal(delta)
delta.Normalize()
dv = head-tail
dv.Normalize()
dv *= headFrac
startP = head
normal*=headFrac*aspect
cgo = [BEGIN,TRIANGLE_FAN,
COLOR,color[0],color[1],color[2],
NORMAL,dv.x,dv.y,dv.z,
VERTEX,head.x+dv.x,head.y+dv.y,head.z+dv.z]
base = [BEGIN,TRIANGLE_FAN,
COLOR,color[0],color[1],color[2],
NORMAL,-dv.x,-dv.y,-dv.z,
VERTEX,head.x,head.y,head.z]
v = startP+normal
cgo.extend([NORMAL,normal.x,normal.y,normal.z])
cgo.extend([VERTEX,v.x,v.y,v.z])
base.extend([VERTEX,v.x,v.y,v.z])
for i in range(1,nSteps):
v = FeatDirUtils.ArbAxisRotation(360./nSteps*i,delta,normal)
cgo.extend([NORMAL,v.x,v.y,v.z])
v += startP
cgo.extend([VERTEX,v.x,v.y,v.z])
base.extend([VERTEX,v.x,v.y,v.z])
cgo.extend([NORMAL,normal.x,normal.y,normal.z])
cgo.extend([VERTEX,startP.x+normal.x,startP.y+normal.y,startP.z+normal.z])
base.extend([VERTEX,startP.x+normal.x,startP.y+normal.y,startP.z+normal.z])
cgo.append(END)
base.append(END)
cgo.extend(base)
#viewer.server.renderCGO(cgo,label)
_globalArrowCGO.extend(cgo)
def ShowArrow(viewer,tail,head,radius,color,label,transparency=0,includeArrowhead=True):
global _globalArrowCGO
if transparency:
_globalArrowCGO.extend([ALPHA,1-transparency])
else:
_globalArrowCGO.extend([ALPHA,1])
_globalArrowCGO.extend([CYLINDER,tail.x,tail.y,tail.z,
head.x,head.y,head.z,
radius*.10,
color[0],color[1],color[2],
color[0],color[1],color[2],
])
if includeArrowhead:
_cgoArrowhead(viewer,tail,head,radius,color,label)
def ShowMolFeats(mol,factory,viewer,radius=0.5,confId=-1,showOnly=True,
name='',transparency=0.0,colors=None,excludeTypes=[],
useFeatDirs=True,featLabel=None,dirLabel=None,includeArrowheads=True,
writeFeats=False,showMol=True,featMapFile=False):
global _globalSphereCGO
if not name:
if mol.HasProp('_Name'):
name = mol.GetProp('_Name')
else:
name = 'molecule'
if not colors:
colors = _featColors
if showMol:
viewer.ShowMol(mol,name=name,showOnly=showOnly,confId=confId)
molFeats=factory.GetFeaturesForMol(mol)
if not featLabel:
featLabel='%s-feats'%name
viewer.server.resetCGO(featLabel)
if not dirLabel:
dirLabel=featLabel+"-dirs"
viewer.server.resetCGO(dirLabel)
for i,feat in enumerate(molFeats):
family=feat.GetFamily()
if family in excludeTypes:
continue
pos = feat.GetPos(confId)
color = colors.get(family,(.5,.5,.5))
nm = '%s(%d)'%(family,i+1)
if transparency:
_globalSphereCGO.extend([ALPHA,1-transparency])
else:
_globalSphereCGO.extend([ALPHA,1])
_globalSphereCGO.extend([COLOR,color[0],color[1],color[2],
SPHERE,pos.x,pos.y,pos.z,
radius])
if writeFeats:
aidText = ' '.join([str(x+1) for x in feat.GetAtomIds()])
print('%s\t%.3f\t%.3f\t%.3f\t1.0\t# %s'%(family,pos.x,pos.y,pos.z,aidText))
if featMapFile:
print(" family=%s pos=(%.3f,%.3f,%.3f) weight=1.0"%(family,pos.x,pos.y,pos.z),end='',file=featMapFile)
if useFeatDirs:
ps = []
if family=='Aromatic':
ps,fType = FeatDirUtils.GetAromaticFeatVects(mol.GetConformer(confId),
feat.GetAtomIds(),pos,
scale=1.0)
elif family=='Donor':
aids = feat.GetAtomIds()
if len(aids)==1:
featAtom=mol.GetAtomWithIdx(aids[0])
hvyNbrs=[x for x in featAtom.GetNeighbors() if x.GetAtomicNum()!=1]
if len(hvyNbrs)==1:
ps,fType = FeatDirUtils.GetDonor1FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==2:
ps,fType = FeatDirUtils.GetDonor2FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==3:
ps,fType = FeatDirUtils.GetDonor3FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif family=='Acceptor':
aids = feat.GetAtomIds()
if len(aids)==1:
featAtom=mol.GetAtomWithIdx(aids[0])
hvyNbrs=[x for x in featAtom.GetNeighbors() if x.GetAtomicNum()!=1]
if len(hvyNbrs)==1:
ps,fType = FeatDirUtils.GetAcceptor1FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==2:
ps,fType = FeatDirUtils.GetAcceptor2FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==3:
ps,fType = FeatDirUtils.GetAcceptor3FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
for tail,head in ps:
ShowArrow(viewer,tail,head,radius,color,dirLabel,
transparency=transparency,includeArrowhead=includeArrowheads)
if featMapFile:
vect = head-tail
print('dir=(%.3f,%.3f,%.3f)'%(vect.x,vect.y,vect.z),end='',file=featMapFile)
if featMapFile:
aidText = ' '.join([str(x+1) for x in feat.GetAtomIds()])
print('# %s'%(aidText),file=featMapFile)
# --- ---- --- ---- --- ---- --- ---- --- ---- --- ----
import sys,os,getopt
from rdkit import RDConfig
from optparse import OptionParser
parser=OptionParser(_usage,version='%prog '+_version)
parser.add_option('-x','--exclude',default='',
help='provide a list of feature names that should be excluded')
parser.add_option('-f','--fdef',default=os.path.join(RDConfig.RDDataDir,'BaseFeatures.fdef'),
help='provide the name of the feature definition (fdef) file.')
parser.add_option('--noDirs','--nodirs',dest='useDirs',default=True,action='store_false',
help='do not draw feature direction indicators')
parser.add_option('--noHeads',dest='includeArrowheads',default=True,action='store_false',
help='do not draw arrowheads on the feature direction indicators')
parser.add_option('--noClear','--noclear',dest='clearAll',default=False,action='store_true',
help='do not clear PyMol on startup')
parser.add_option('--noMols','--nomols',default=False,action='store_true',
help='do not draw the molecules')
parser.add_option('--writeFeats','--write',default=False,action='store_true',
help='print the feature information to the console')
parser.add_option('--featMapFile','--mapFile',default='',
help='save a feature map definition to the specified file')
parser.add_option('--verbose',default=False,action='store_true',
help='be verbose')
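# Hedged command-line sketch (assumes PyMol is running with the RDKit XML-RPC
# server so that MolViewer() below can connect; the sd file name is purely
# illustrative):
#
#   python ShowFeats.py --writeFeats --noDirs my_confs.sdf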
if __name__=='__main__':
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.PyMol import MolViewer
options,args = parser.parse_args()
if len(args)<1:
    parser.error('please provide at least one sd or mol file')
try:
v = MolViewer()
except:
logger.error('Unable to connect to PyMol server.\nPlease run ~landrgr1/extern/PyMol/launch.sh to start it.')
sys.exit(1)
if options.clearAll:
v.DeleteAll()
try:
fdef = open(options.fdef,'r').read()
except IOError:
logger.error('ERROR: Could not open fdef file %s'%options.fdef)
sys.exit(1)
factory = AllChem.BuildFeatureFactoryFromString(fdef)
if options.writeFeats:
print('# Family \tX \tY \tZ \tRadius\t # Atom_ids')
if options.featMapFile:
if options.featMapFile=='-':
options.featMapFile=sys.stdout
else:
      options.featMapFile=open(options.featMapFile,'w+')
print('# Feature map generated by ShowFeats v%s'%_version, file=options.featMapFile)
print("ScoreMode=All", file=options.featMapFile)
print("DirScoreMode=Ignore", file=options.featMapFile)
print("BeginParams", file=options.featMapFile)
for family in factory.GetFeatureFamilies():
print(" family=%s width=1.0 radius=3.0"%family, file=options.featMapFile)
print("EndParams", file=options.featMapFile)
print("BeginPoints", file=options.featMapFile)
i = 1
for midx,molN in enumerate(args):
if molN!='-':
featLabel='%s_Feats'%molN
else:
featLabel='Mol%d_Feats'%(midx+1)
v.server.resetCGO(featLabel)
    # this is a bit of kludgery to work around what seems to be a pymol cgo bug:
v.server.sphere((0,0,0),.01,(1,0,1),featLabel)
dirLabel=featLabel+"-dirs"
v.server.resetCGO(dirLabel)
    # this is a bit of kludgery to work around what seems to be a pymol cgo bug:
v.server.cylinder((0,0,0),(.01,.01,.01),.01,(1,0,1),dirLabel)
if molN != '-':
try:
ms = Chem.SDMolSupplier(molN)
except:
logger.error('Problems reading input file: %s'%molN)
ms = []
else:
ms = Chem.SDMolSupplier()
ms.SetData(sys.stdin.read())
for m in ms:
nm = 'Mol_%d'%(i)
if m.HasProp('_Name'):
nm += '_'+m.GetProp('_Name')
if options.verbose:
if m.HasProp('_Name'):
print("#Molecule: %s"%m.GetProp('_Name'))
else:
print("#Molecule: %s"%nm)
      # split the comma-separated exclude list so membership tests match whole
      # family names instead of substrings
      ShowMolFeats(m,factory,v,transparency=0.25,
                   excludeTypes=options.exclude.split(',') if options.exclude else [],
                   name=nm,
showOnly=False,
useFeatDirs=options.useDirs,
featLabel=featLabel,dirLabel=dirLabel,
includeArrowheads=options.includeArrowheads,
writeFeats=options.writeFeats,showMol=not options.noMols,
featMapFile=options.featMapFile)
i += 1
if not i%100:
logger.info("Done %d poses"%i)
if ms:
v.server.renderCGO(_globalSphereCGO,featLabel,1)
if options.useDirs:
v.server.renderCGO(_globalArrowCGO,dirLabel,1)
if options.featMapFile:
print("EndPoints",file=options.featMapFile)
sys.exit(0)
|
|
import datetime
import Queue
from threading import Thread
import time
from django.test import TestCase
from haystack import connections, connection_router
from haystack.exceptions import SearchFieldError
from haystack import indexes
from haystack.utils.loading import UnifiedIndex
from core.models import MockModel, AThirdMockModel, AFifthMockModel
class BadSearchIndex1(indexes.SearchIndex, indexes.Indexable):
author = indexes.CharField(model_attr='author')
pub_date = indexes.DateTimeField(model_attr='pub_date')
def get_model(self):
return MockModel
class BadSearchIndex2(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
content2 = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='author')
pub_date = indexes.DateTimeField(model_attr='pub_date')
def get_model(self):
return MockModel
class GoodMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='author')
pub_date = indexes.DateTimeField(model_attr='pub_date')
extra = indexes.CharField(indexed=False, use_template=True)
def get_model(self):
return MockModel
# For testing inheritance...
class AltGoodMockSearchIndex(GoodMockSearchIndex, indexes.Indexable):
additional = indexes.CharField(model_attr='author')
def get_model(self):
return MockModel
class GoodCustomMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='author', faceted=True)
pub_date = indexes.DateTimeField(model_attr='pub_date', faceted=True)
extra = indexes.CharField(indexed=False, use_template=True)
hello = indexes.CharField(model_attr='hello')
def prepare(self, obj):
super(GoodCustomMockSearchIndex, self).prepare(obj)
self.prepared_data['whee'] = 'Custom preparation.'
return self.prepared_data
def prepare_author(self, obj):
return "Hi, I'm %s" % self.prepared_data['author']
def load_all_queryset(self):
return self.get_model()._default_manager.filter(id__gt=1)
def get_model(self):
return MockModel
def index_queryset(self):
return MockModel.objects.all()
def read_queryset(self):
return MockModel.objects.filter(author__in=['daniel1', 'daniel3'])
def build_queryset(self, start_date=None, end_date=None):
return MockModel.objects.filter(author__in=['daniel1', 'daniel3'])
class GoodNullableMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='author', null=True, faceted=True)
def get_model(self):
return MockModel
class GoodOverriddenFieldNameMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True, index_fieldname='more_content')
author = indexes.CharField(model_attr='author', index_fieldname='name_s')
hello = indexes.CharField(model_attr='hello')
def get_model(self):
return MockModel
class GoodFacetedMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='author')
author_foo = indexes.FacetCharField(facet_for='author')
pub_date = indexes.DateTimeField(model_attr='pub_date')
pub_date_exact = indexes.FacetDateTimeField(facet_for='pub_date')
def get_model(self):
return MockModel
def prepare_author(self, obj):
return "Hi, I'm %s" % self.prepared_data['author']
def prepare_pub_date_exact(self, obj):
return "2010-10-26T01:54:32"
class MROFieldsSearchIndexA(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr='test_a')
def get_model(self):
return MockModel
class MROFieldsSearchIndexB(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr='test_b')
def get_model(self):
return MockModel
class MROFieldsSearchChild(MROFieldsSearchIndexA, MROFieldsSearchIndexB):
pass
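# Hedged note on the MRO classes above: because MROFieldsSearchIndexA precedes
# MROFieldsSearchIndexB in the bases of MROFieldsSearchChild, the child ends up
# with a single `text` field taken from A (model_attr='test_a');
# test_proper_field_resolution below relies on exactly that.
#
#     MROFieldsSearchChild.__mro__[1] is MROFieldsSearchIndexA   # -> True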
class SearchIndexTestCase(TestCase):
def setUp(self):
super(SearchIndexTestCase, self).setUp()
self.sb = connections['default'].get_backend()
self.mi = GoodMockSearchIndex()
self.cmi = GoodCustomMockSearchIndex()
self.cnmi = GoodNullableMockSearchIndex()
self.gfmsi = GoodFacetedMockSearchIndex()
# Fake the unified index.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.ui.build(indexes=[self.mi])
connections['default']._index = self.ui
self.sample_docs = {
u'core.mockmodel.1': {
'text': u'Indexed!\n1',
'django_id': u'1',
'django_ct': u'core.mockmodel',
'extra': u'Stored!\n1',
'author': u'daniel1',
'pub_date': datetime.datetime(2009, 3, 17, 6, 0),
'id': u'core.mockmodel.1'
},
u'core.mockmodel.2': {
'text': u'Indexed!\n2',
'django_id': u'2',
'django_ct': u'core.mockmodel',
'extra': u'Stored!\n2',
'author': u'daniel2',
'pub_date': datetime.datetime(2009, 3, 17, 7, 0),
'id': u'core.mockmodel.2'
},
u'core.mockmodel.3': {
'text': u'Indexed!\n3',
'django_id': u'3',
'django_ct': u'core.mockmodel',
'extra': u'Stored!\n3',
'author': u'daniel3',
'pub_date': datetime.datetime(2009, 3, 17, 8, 0),
'id': u'core.mockmodel.3'
}
}
def tearDown(self):
connections['default']._index = self.old_unified_index
super(SearchIndexTestCase, self).tearDown()
def test_no_contentfield_present(self):
self.assertRaises(SearchFieldError, BadSearchIndex1)
def test_too_many_contentfields_present(self):
self.assertRaises(SearchFieldError, BadSearchIndex2)
def test_contentfield_present(self):
try:
mi = GoodMockSearchIndex()
except:
self.fail()
def test_proper_fields(self):
self.assertEqual(len(self.mi.fields), 4)
self.assertTrue('text' in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields['text'], indexes.CharField))
self.assertTrue('author' in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields['author'], indexes.CharField))
self.assertTrue('pub_date' in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue('extra' in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields['extra'], indexes.CharField))
self.assertEqual(len(self.cmi.fields), 7)
self.assertTrue('text' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['text'], indexes.CharField))
self.assertTrue('author' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['author'], indexes.CharField))
self.assertTrue('author_exact' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['author_exact'], indexes.FacetCharField))
self.assertTrue('pub_date' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue('pub_date_exact' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['pub_date_exact'], indexes.FacetDateTimeField))
self.assertTrue('extra' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['extra'], indexes.CharField))
self.assertTrue('hello' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['extra'], indexes.CharField))
def test_index_queryset(self):
self.assertEqual(len(self.cmi.index_queryset()), 3)
def test_read_queryset(self):
self.assertEqual(len(self.cmi.read_queryset()), 2)
def test_build_queryset(self):
# The custom SearchIndex.build_queryset returns the same records as
# the read_queryset
self.assertEqual(len(self.cmi.build_queryset()), 2)
# Store a reference to the original method
old_guf = self.mi.__class__.get_updated_field
self.mi.__class__.get_updated_field = lambda self: 'pub_date'
# With an updated field, we should get filtered results
sd = datetime.datetime(2009, 3, 17, 7, 0)
self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 2)
ed = datetime.datetime(2009, 3, 17, 7, 59)
self.assertEqual(len(self.mi.build_queryset(end_date=ed)), 2)
sd = datetime.datetime(2009, 3, 17, 6, 0)
ed = datetime.datetime(2009, 3, 17, 6, 59)
self.assertEqual(len(self.mi.build_queryset(start_date=sd,
end_date=ed)), 1)
# Remove the updated field for the next test
del self.mi.__class__.get_updated_field
# The default should return all 3 even if we specify a start date
# because there is no updated field specified
self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 3)
# Restore the original attribute
self.mi.__class__.get_updated_field = old_guf
def test_prepare(self):
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.mi.prepare(mock)), 7)
self.assertEqual(sorted(self.mi.prepare(mock).keys()), ['author', 'django_ct', 'django_id', 'extra', 'id', 'pub_date', 'text'])
def test_custom_prepare(self):
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(sorted(self.cmi.prepare(mock).keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'extra', 'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(sorted(self.cmi.full_prepare(mock).keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'extra', 'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
def test_thread_safety(self):
# This is a regression test. ``SearchIndex`` used to write to
# ``self.prepared_data``, which would leak between threads if things
# went too fast.
exceptions = []
def threaded_prepare(queue, index, model):
try:
index.queue = queue
prepped = index.prepare(model)
except Exception as e:
exceptions.append(e)
raise
class ThreadedSearchIndex(GoodMockSearchIndex):
def prepare_author(self, obj):
if obj.pk == 20:
time.sleep(0.1)
else:
time.sleep(0.5)
queue.put(self.prepared_data['author'])
return self.prepared_data['author']
tmi = ThreadedSearchIndex()
queue = Queue.Queue()
mock_1 = MockModel()
mock_1.pk = 20
mock_1.author = 'foo'
mock_1.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
mock_2 = MockModel()
mock_2.pk = 21
mock_2.author = 'daniel%s' % mock_2.id
mock_2.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
th1 = Thread(target=threaded_prepare, args=(queue, tmi, mock_1))
th2 = Thread(target=threaded_prepare, args=(queue, tmi, mock_2))
th1.start()
th2.start()
th1.join()
th2.join()
mock_1_result = queue.get()
mock_2_result = queue.get()
self.assertEqual(mock_1_result, u'foo')
self.assertEqual(mock_2_result, u'daniel21')
def test_custom_prepare_author(self):
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(sorted(self.cmi.prepare(mock).keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'extra', 'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(sorted(self.cmi.full_prepare(mock).keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'extra', 'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
self.assertEqual(self.cmi.prepared_data['author'], "Hi, I'm daniel20")
self.assertEqual(self.cmi.prepared_data['author_exact'], "Hi, I'm daniel20")
def test_custom_model_attr(self):
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(sorted(self.cmi.prepare(mock).keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'extra', 'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(sorted(self.cmi.full_prepare(mock).keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'extra', 'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
self.assertEqual(self.cmi.prepared_data['hello'], u'World!')
def test_custom_index_fieldname(self):
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
cofnmi = GoodOverriddenFieldNameMockSearchIndex()
self.assertEqual(len(cofnmi.prepare(mock)), 6)
self.assertEqual(sorted(cofnmi.prepare(mock).keys()), ['django_ct', 'django_id', 'hello', 'id', 'more_content', 'name_s'])
self.assertEqual(cofnmi.prepared_data['name_s'], u'daniel20')
self.assertEqual(cofnmi.get_content_field(), 'more_content')
def test_get_content_field(self):
self.assertEqual(self.mi.get_content_field(), 'text')
def test_update(self):
self.sb.clear()
self.assertEqual(self.sb.search('*')['hits'], 0)
self.mi.update()
self.assertEqual(self.sb.search('*')['hits'], 3)
self.sb.clear()
def test_update_object(self):
self.sb.clear()
self.assertEqual(self.sb.search('*')['hits'], 0)
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual([(res.content_type(), res.pk) for res in self.sb.search('*')['results']], [(u'core.mockmodel', u'20')])
self.sb.clear()
def test_remove_object(self):
self.mi.update()
self.assertEqual(self.sb.search('*')['hits'], 3)
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual(self.sb.search('*')['hits'], 4)
self.mi.remove_object(mock)
self.assertEqual([(res.content_type(), res.pk) for res in self.sb.search('*')['results']], [(u'core.mockmodel', u'1'), (u'core.mockmodel', u'2'), (u'core.mockmodel', u'3')])
self.sb.clear()
def test_clear(self):
self.mi.update()
self.assertEqual(self.sb.search('*')['hits'], 3)
self.mi.clear()
self.assertEqual(self.sb.search('*')['hits'], 0)
def test_reindex(self):
self.mi.reindex()
self.assertEqual([(res.content_type(), res.pk) for res in self.sb.search('*')['results']], [(u'core.mockmodel', u'1'), (u'core.mockmodel', u'2'), (u'core.mockmodel', u'3')])
self.sb.clear()
def test_inheritance(self):
try:
agmi = AltGoodMockSearchIndex()
except:
self.fail()
self.assertEqual(len(agmi.fields), 5)
self.assertTrue('text' in agmi.fields)
self.assertTrue(isinstance(agmi.fields['text'], indexes.CharField))
self.assertTrue('author' in agmi.fields)
self.assertTrue(isinstance(agmi.fields['author'], indexes.CharField))
self.assertTrue('pub_date' in agmi.fields)
self.assertTrue(isinstance(agmi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue('extra' in agmi.fields)
self.assertTrue(isinstance(agmi.fields['extra'], indexes.CharField))
self.assertTrue('additional' in agmi.fields)
self.assertTrue(isinstance(agmi.fields['additional'], indexes.CharField))
def test_proper_field_resolution(self):
mrofsc = MROFieldsSearchChild()
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
mock.test_a = 'This is A'
mock.test_b = 'This is B'
self.assertEqual(len(mrofsc.fields), 1)
prepped_data = mrofsc.prepare(mock)
self.assertEqual(len(prepped_data), 4)
self.assertEqual(prepped_data['text'], 'This is A')
def test_load_all_queryset(self):
self.assertEqual([obj.id for obj in self.cmi.load_all_queryset()], [2, 3])
def test_nullable(self):
mock = MockModel()
mock.pk = 20
mock.author = None
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
prepared_data = self.cnmi.prepare(mock)
self.assertEqual(len(prepared_data), 6)
self.assertEqual(sorted(prepared_data.keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'id', 'text'])
prepared_data = self.cnmi.full_prepare(mock)
self.assertEqual(len(prepared_data), 4)
self.assertEqual(sorted(prepared_data.keys()), ['django_ct', 'django_id', 'id', 'text'])
def test_custom_facet_fields(self):
mock = MockModel()
mock.pk = 20
mock.author = 'daniel'
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
prepared_data = self.gfmsi.prepare(mock)
self.assertEqual(len(prepared_data), 8)
self.assertEqual(sorted(prepared_data.keys()), ['author', 'author_foo', 'django_ct', 'django_id', 'id', 'pub_date', 'pub_date_exact', 'text'])
prepared_data = self.gfmsi.full_prepare(mock)
self.assertEqual(len(prepared_data), 8)
self.assertEqual(sorted(prepared_data.keys()), ['author', 'author_foo', 'django_ct', 'django_id', 'id', 'pub_date', 'pub_date_exact', 'text'])
self.assertEqual(prepared_data['author_foo'], u"Hi, I'm daniel")
self.assertEqual(prepared_data['pub_date_exact'], '2010-10-26T01:54:32')
class BasicModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
class FieldsModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
fields = ['author', 'pub_date']
class ExcludesModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
excludes = ['author', 'foo']
class FieldsWithOverrideModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
foo = indexes.IntegerField(model_attr='foo')
class Meta:
model = MockModel
fields = ['author', 'foo']
def get_index_fieldname(self, f):
if f.name == 'author':
return 'author_bar'
else:
return f.name
class YetAnotherBasicModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
class Meta:
model = AThirdMockModel
class GhettoAFifthMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
def get_model(self):
return AFifthMockModel
def index_queryset(self):
# Index everything.
return self.get_model().objects.complete_set()
def read_queryset(self):
return self.get_model().objects.all()
class ReadQuerySetTestSearchIndex(indexes.SearchIndex, indexes.Indexable):
author = indexes.CharField(model_attr='author', document=True)
def get_model(self):
return AFifthMockModel
def read_queryset(self):
return self.get_model().objects.complete_set()
class TextReadQuerySetTestSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='author', document=True)
def get_model(self):
return AFifthMockModel
def read_queryset(self):
return self.get_model().objects.complete_set()
class ModelSearchIndexTestCase(TestCase):
def setUp(self):
super(ModelSearchIndexTestCase, self).setUp()
self.sb = connections['default'].get_backend()
self.bmsi = BasicModelSearchIndex()
self.fmsi = FieldsModelSearchIndex()
self.emsi = ExcludesModelSearchIndex()
self.fwomsi = FieldsWithOverrideModelSearchIndex()
self.yabmsi = YetAnotherBasicModelSearchIndex()
def test_basic(self):
self.assertEqual(len(self.bmsi.fields), 4)
self.assertTrue('foo' in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields['foo'], indexes.CharField))
self.assertEqual(self.bmsi.fields['foo'].null, False)
self.assertEqual(self.bmsi.fields['foo'].index_fieldname, 'foo')
self.assertTrue('author' in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields['author'], indexes.CharField))
self.assertEqual(self.bmsi.fields['author'].null, False)
self.assertTrue('pub_date' in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue(isinstance(self.bmsi.fields['pub_date'].default, datetime.datetime))
self.assertTrue('text' in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields['text'], indexes.CharField))
self.assertEqual(self.bmsi.fields['text'].document, True)
self.assertEqual(self.bmsi.fields['text'].use_template, True)
def test_fields(self):
self.assertEqual(len(self.fmsi.fields), 3)
self.assertTrue('author' in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields['author'], indexes.CharField))
self.assertTrue('pub_date' in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue('text' in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields['text'], indexes.CharField))
def test_excludes(self):
self.assertEqual(len(self.emsi.fields), 2)
self.assertTrue('pub_date' in self.emsi.fields)
self.assertTrue(isinstance(self.emsi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue('text' in self.emsi.fields)
self.assertTrue(isinstance(self.emsi.fields['text'], indexes.CharField))
def test_fields_with_override(self):
self.assertEqual(len(self.fwomsi.fields), 3)
self.assertTrue('author' in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields['author'], indexes.CharField))
self.assertTrue('foo' in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields['foo'], indexes.IntegerField))
self.assertTrue('text' in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields['text'], indexes.CharField))
def test_overriding_field_name_with_get_index_fieldname(self):
self.assertEqual(self.fwomsi.fields['foo'].index_fieldname, 'foo')
self.assertEqual(self.fwomsi.fields['author'].index_fieldname, 'author_bar')
def test_float_integer_fields(self):
self.assertEqual(len(self.yabmsi.fields), 5)
self.assertEqual(sorted(self.yabmsi.fields.keys()), ['author', 'average_delay', 'pub_date', 'text', 'view_count'])
self.assertTrue('author' in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields['author'], indexes.CharField))
self.assertEqual(self.yabmsi.fields['author'].null, False)
self.assertTrue('pub_date' in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue(isinstance(self.yabmsi.fields['pub_date'].default, datetime.datetime))
self.assertTrue('text' in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields['text'], indexes.CharField))
self.assertEqual(self.yabmsi.fields['text'].document, True)
self.assertEqual(self.yabmsi.fields['text'].use_template, False)
self.assertTrue('view_count' in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields['view_count'], indexes.IntegerField))
self.assertEqual(self.yabmsi.fields['view_count'].null, False)
self.assertEqual(self.yabmsi.fields['view_count'].index_fieldname, 'view_count')
self.assertTrue('average_delay' in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields['average_delay'], indexes.FloatField))
self.assertEqual(self.yabmsi.fields['average_delay'].null, False)
self.assertEqual(self.yabmsi.fields['average_delay'].index_fieldname, 'average_delay')
|
|
"""
This file is the declaration of benchmarks for HTTPie. It
is also used to run them with the current environment.
Each instance of BaseRunner class will be an individual
benchmark. And if run without any arguments, this file
will execute every benchmark instance and report the
timings.
The benchmarks are run through 'pyperf', which allows to
do get very precise results. For micro-benchmarks like startup,
please run `pyperf system tune` to get even more acurrate results.
Examples:
# Run everything as usual, the default is that we do 3 warmup runs
# and 5 actual runs.
$ python extras/profiling/benchmarks.py
# For retrieving results faster, pass --fast
$ python extras/profiling/benchmarks.py --fast
# For verify everything works as expected, pass --debug-single-value.
# It will only run everything once, so the resuls are not realiable. But
# very useful when iterating on a benchmark
$ python extras/profiling/benchmarks.py --debug-single-value
# If you want to run with a custom HTTPie command (for example with
# and HTTPie instance installed in another virtual environment),
# pass HTTPIE_COMMAND variable.
$ HTTPIE_COMMAND="/my/python /my/httpie" python extras/profiling/benchmarks.py
"""
from __future__ import annotations
import os
import shlex
import subprocess
import sys
import threading
from contextlib import ExitStack, contextmanager
from dataclasses import dataclass, field
from functools import cached_property, partial
from http.server import HTTPServer, SimpleHTTPRequestHandler
from tempfile import TemporaryDirectory
from typing import ClassVar, Final, List
import pyperf
# For download benchmarks, define a set of files.
# file: (block_size, count) => total_size = block_size * count
PREDEFINED_FILES: Final = {'3G': (3 * 1024 ** 2, 1024)}
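# Worked example of the arithmetic above: '3G' uses a 3 MiB block size
# (3 * 1024 ** 2 bytes) written 1024 times, i.e. 3 * 1024 ** 3 bytes = 3 GiB.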
class QuietSimpleHTTPServer(SimpleHTTPRequestHandler):
def log_message(self, *args, **kwargs):
pass
@contextmanager
def start_server():
"""Create a server to serve local files. It will create the
PREDEFINED_FILES through dd."""
with TemporaryDirectory() as directory:
for file_name, (block_size, count) in PREDEFINED_FILES.items():
subprocess.check_call(
[
'dd',
'if=/dev/zero',
f'of={file_name}',
f'bs={block_size}',
f'count={count}',
],
cwd=directory,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
handler = partial(QuietSimpleHTTPServer, directory=directory)
server = HTTPServer(('localhost', 0), handler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
yield '{}:{}'.format(*server.socket.getsockname())
server.shutdown()
thread.join(timeout=0.5)
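# Hedged usage sketch (not part of the suite itself): start_server() yields a
# "host:port" string while the PREDEFINED_FILES are being served, e.g.
#
#     with start_server() as address:
#         subprocess.check_call(['curl', '-sO', f'http://{address}/3G'])
#
# The curl call is purely illustrative; the benchmarks below reach the server
# through DownloadRunner instead.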
@dataclass
class Context:
benchmarks: ClassVar[List[BaseRunner]] = []
stack: ExitStack = field(default_factory=ExitStack)
runner: pyperf.Runner = field(default_factory=pyperf.Runner)
def run(self) -> pyperf.BenchmarkSuite:
results = [benchmark.run(self) for benchmark in self.benchmarks]
return pyperf.BenchmarkSuite(results)
@property
def cmd(self) -> List[str]:
if cmd := os.getenv('HTTPIE_COMMAND'):
return shlex.split(cmd)
http = os.path.join(os.path.dirname(sys.executable), 'http')
assert os.path.exists(http)
return [sys.executable, http]
@cached_property
def server(self) -> str:
return self.stack.enter_context(start_server())
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stack.close()
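# Hedged sketch of the overall flow: instantiating the runner classes defined
# below registers each one in Context.benchmarks (see BaseRunner.__post_init__),
# and main() then drives them all through pyperf:
#
#     with Context() as context:
#         context.run()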
@dataclass
class BaseRunner:
"""
An individual benchmark case. By default it has the category
(e.g like startup or download) and a name.
"""
category: str
title: str
def __post_init__(self):
Context.benchmarks.append(self)
def run(self, context: Context) -> pyperf.Benchmark:
raise NotImplementedError
@property
def name(self) -> str:
return f'{self.title} ({self.category})'
@dataclass
class CommandRunner(BaseRunner):
"""
Run a single command, and benchmark it.
"""
args: List[str]
def run(self, context: Context) -> pyperf.Benchmark:
return context.runner.bench_command(self.name, [*context.cmd, *self.args])
@dataclass
class DownloadRunner(BaseRunner):
"""
Benchmark downloading a single file from the
remote server.
"""
file_name: str
def run(self, context: Context) -> pyperf.Benchmark:
return context.runner.bench_command(
self.name,
[
*context.cmd,
'--download',
'GET',
f'{context.server}/{self.file_name}',
],
)
CommandRunner('startup', '`http --version`', ['--version'])
CommandRunner('startup', '`http --offline pie.dev/get`', ['--offline', 'pie.dev/get'])
for pretty in ['all', 'none']:
CommandRunner(
'startup',
f'`http --pretty={pretty} pie.dev/stream/1000`',
[
'--print=HBhb',
f'--pretty={pretty}',
'httpbin.org/stream/1000'
]
)
DownloadRunner('download', '`http --download :/big_file.txt` (3GB)', '3G')
def main() -> None:
# pyperf brings its own argument parser, so configure the script through
# sys.argv. The configuration below is reasonably fast while still precise
# enough: 3 warmup runs (especially important for the download benchmark)
# followed by 5 recorded runs.
sys.argv.extend(
['--worker', '--loops=1', '--warmup=3', '--values=5', '--processes=2']
)
with Context() as context:
context.run()
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Algorithmic data generators for symbolic math tasks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import random
import six
from six.moves import range # pylint: disable=redefined-builtin
import sympy
class ExprOp(object):
"""Represents an algebraic operation, such as '+', '-', etc."""
def __init__(self, symbol, precedence, associative=False):
"""Constructor.
Args:
symbol: The character which represents this operation, such as '+' for
addition.
precedence: Operator precedence. This will determine where parentheses
are used.
associative: If true, the order of the operands does not matter.
"""
self.symbol = symbol
self.precedence = precedence
self.associative = associative
def __str__(self):
return self.symbol
def __eq__(self, other):
return isinstance(other, ExprOp) and self.symbol == other.symbol
class ExprNode(object):
"""A node in an expression tree.
ExprNode always holds an operator. Leaves are strings.
"""
def __init__(self, left, right, op):
self.left = left
self.right = right
self.op = op
left_depth = left.depth if isinstance(left, ExprNode) else 0
right_depth = right.depth if isinstance(right, ExprNode) else 0
self.depth = max(left_depth, right_depth) + 1
def __str__(self):
left_str = str(self.left)
right_str = str(self.right)
left_use_parens = (isinstance(self.left, ExprNode) and
self.left.op.precedence < self.op.precedence)
right_use_parens = (isinstance(self.right, ExprNode) and
self.right.op.precedence <= self.op.precedence and
not (self.op.associative and self.right.op == self.op))
left_final = "(" + left_str + ")" if left_use_parens else left_str
right_final = "(" + right_str + ")" if right_use_parens else right_str
return left_final + str(self.op) + right_final
def is_in(self, expr):
"""Returns True if `expr` is a subtree."""
if expr == self:
return True
is_in_left = is_in_expr(self.left, expr)
is_in_right = is_in_expr(self.right, expr)
return is_in_left or is_in_right
def is_in_expr(expr, find):
"""Returns True if `find` is a subtree of `expr`."""
return expr == find or (isinstance(expr, ExprNode) and expr.is_in(find))
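# Hedged illustration (names are ad hoc, not from the module): building small
# trees by hand shows how __str__ places parentheses based on precedence and
# how is_in_expr finds leaves.
#
#     add = ExprOp("+", 0, True)
#     mul = ExprOp("*", 1, True)
#     str(ExprNode("a", ExprNode("b", "c", mul), add))    # -> "a+b*c"
#     str(ExprNode(ExprNode("a", "b", add), "c", mul))    # -> "(a+b)*c"
#     is_in_expr(ExprNode("a", "b", add), "a")            # -> True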
def random_expr_with_required_var(depth, required_var, optional_list, ops):
"""Generate a random expression tree with a required variable.
The required variable appears exactly once in the expression.
Args:
depth: At least one leaf will be this many levels down from the top.
required_var: A char. This char is guaranteed to be placed exactly once at
a leaf somewhere in the tree. This is the var to solve for.
optional_list: A list of chars. These chars are randomly selected as leaf
values. These are constant vars.
ops: A list of ExprOp instances.
Returns:
An ExprNode instance which is the root of the generated expression tree.
"""
if not depth:
if required_var:
return required_var
return str(optional_list[random.randrange(len(optional_list))])
max_depth_side = random.randrange(2)
other_side_depth = random.randrange(depth)
required_var_side = random.randrange(2)
left = random_expr_with_required_var(
depth - 1 if max_depth_side else other_side_depth, required_var
if required_var_side else None, optional_list, ops)
right = random_expr_with_required_var(
depth - 1 if not max_depth_side else other_side_depth, required_var
if not required_var_side else None, optional_list, ops)
op = ops[random.randrange(len(ops))]
return ExprNode(left, right, op)
def random_expr(depth, vlist, ops):
"""Generate a random expression tree.
Args:
depth: At least one leaf will be this many levels down from the top.
vlist: A list of chars. These chars are randomly selected as leaf values.
ops: A list of ExprOp instances.
Returns:
An ExprNode instance which is the root of the generated expression tree.
"""
if not depth:
return str(vlist[random.randrange(len(vlist))])
max_depth_side = random.randrange(2)
other_side_depth = random.randrange(depth)
left = random_expr(depth - 1
if max_depth_side else other_side_depth, vlist, ops)
right = random_expr(depth - 1
if not max_depth_side else other_side_depth, vlist, ops)
op = ops[random.randrange(len(ops))]
return ExprNode(left, right, op)
def algebra_inverse_solve(left, right, var, solve_ops):
"""Solves for the value of the given var in an expression.
Args:
left: The root of the ExprNode tree on the left side of the equals sign.
right: The root of the ExprNode tree on the right side of the equals sign.
var: A char. The variable to solve for.
solve_ops: A dictionary with the following properties.
* For each operator in the expression, there is a rule that determines
how to cancel out a value either to the left or the right of that
operator.
* For each rule, there is an entry in the dictionary. The key is two
chars: the op char followed by either 'l' or 'r', meaning the rule for
canceling out the left or right side. For example, '+l', '+r', '-l', '-r'.
* The value of each entry is a function with the following signature:
(left, right, to_tree) -> (new_from_tree, new_to_tree)
left- Expression on left side of the op.
right- Expression on the right side of the op.
to_tree- The tree on the other side of the equal sign. The canceled
out expression will be moved here.
new_from_tree- The resulting from_tree after the algebraic
manipulation.
new_to_tree- The resulting to_tree after the algebraic manipulation.
Returns:
The root of an ExprNode tree which holds the value of `var` after solving.
Raises:
ValueError: If `var` does not appear exactly once in the equation (which
includes the left and right sides).
"""
is_in_left = is_in_expr(left, var)
is_in_right = is_in_expr(right, var)
if is_in_left == is_in_right:
if is_in_left:
raise ValueError("Solve-variable '%s' is on both sides of the equation. "
"Only equations where the solve variable-appears once "
"are supported by this solver. Left: '%s', right: '%s'" %
(var, str(left), str(right)))
else:
raise ValueError("Solve-variable '%s' is not present in the equation. It "
"must appear once. Left: '%s', right: '%s'" %
(var, str(left), str(right)))
from_tree = left if is_in_left else right
to_tree = left if not is_in_left else right
while from_tree != var:
is_in_left = is_in_expr(from_tree.left, var)
is_in_right = is_in_expr(from_tree.right, var)
from_tree, to_tree = (solve_ops[str(from_tree.op)
+ ("l" if is_in_left else "r")](
from_tree.left, from_tree.right,
to_tree))
return to_tree
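# Hedged worked example (uses the config built by math_dataset_init further
# below): solving x+a=b for x cancels the '+' by moving `a` to the other side.
#
#     cfg = math_dataset_init()
#     left = ExprNode("x", "a", cfg.ops["+"])                    # x+a
#     str(algebra_inverse_solve(left, "b", "x", cfg.solve_ops))  # -> "b-a"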
def format_sympy_expr(sympy_expr, functions=None):
"""Convert sympy expression into a string which can be encoded.
Args:
sympy_expr: Any sympy expression tree or string.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
Returns:
A string representation of the expression suitable for encoding as a
sequence input.
"""
if functions is None:
functions = {}
str_expr = str(sympy_expr)
result = str_expr.replace(" ", "")
for fn_name, char in six.iteritems(functions):
result = result.replace(fn_name, char)
return result
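# Hedged example: spaces are stripped and any configured function names are
# replaced by their single-char tokens.
#
#     format_sympy_expr("x + 2*log(y)", functions={"log": "L"})  # -> "x+2*L(y)"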
def generate_algebra_inverse_sample(vlist, ops, solve_ops, min_depth,
max_depth):
"""Randomly generate an algebra inverse dataset sample.
Given an input equation and variable, produce the expression equal to the
variable.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
solve_ops: See `solve_ops` documentation in `algebra_inverse_solve`.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
Returns:
sample: String representation of the input. Will be of the form
'solve_var:left_side=right_side'.
target: String representation of the solution.
"""
side = random.randrange(2)
left_depth = random.randrange(min_depth if side else 0, max_depth + 1)
right_depth = random.randrange(min_depth if not side else 0, max_depth + 1)
var_index = random.randrange(len(vlist))
var = vlist[var_index]
consts = vlist[:var_index] + vlist[var_index + 1:]
left = random_expr_with_required_var(left_depth, var
if side else None, consts, ops)
right = random_expr_with_required_var(right_depth, var
if not side else None, consts, ops)
left_str = str(left)
right_str = str(right)
target = str(algebra_inverse_solve(left, right, var, solve_ops))
sample = "%s:%s=%s" % (var, left_str, right_str)
return sample, target
def generate_algebra_simplify_sample(vlist, ops, min_depth, max_depth):
"""Randomly generate an algebra simplify dataset sample.
Given an input expression, produce the simplified expression.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
Returns:
sample: String representation of the input.
target: String representation of the solution.
"""
depth = random.randrange(min_depth, max_depth + 1)
expr = random_expr(depth, vlist, ops)
sample = str(expr)
target = format_sympy_expr(sympy.simplify(sample))
return sample, target
def generate_calculus_integrate_sample(vlist, ops, min_depth, max_depth,
functions):
"""Randomly generate a symbolic integral dataset sample.
Given an input expression, produce the indefinite integral.
Args:
vlist: Variable list. List of chars that can be used in the expression.
ops: List of ExprOp instances. The allowed operators for the expression.
min_depth: Expression trees will not have a smaller depth than this. 0 means
there is just a variable. 1 means there is one operation.
max_depth: Expression trees will not have a larger depth than this. To make
all trees have the same depth, set this equal to `min_depth`.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
Returns:
sample: String representation of the input. Will be of the form
'var:expression'.
target: String representation of the solution.
"""
var_index = random.randrange(len(vlist))
var = vlist[var_index]
consts = vlist[:var_index] + vlist[var_index + 1:]
depth = random.randrange(min_depth, max_depth + 1)
expr = random_expr_with_required_var(depth, var, consts, ops)
expr_str = str(expr)
sample = var + ":" + expr_str
target = format_sympy_expr(
sympy.integrate(expr_str, sympy.Symbol(var)), functions=functions)
return sample, target
# AlgebraConfig holds objects required to generate the algebra inverse
# dataset.
# vlist: Variable list. A list of chars.
# dlist: Numerical digit list. A list of chars.
# flist: List of special function names. A list of chars.
# functions: Dict of special function names. Maps human readable string names to
# single char names used in flist.
# ops: Dict mapping op symbols (chars) to ExprOp instances.
# solve_ops: Encodes rules for how to algebraically cancel out each operation.
# See doc-string for `algebra_inverse_solve`.
# int_encoder: Function that maps a string to a list of tokens. Use this to
# encode an expression to feed into a model.
# int_decoder: Function that maps a list of tokens to a string. Use this to
# convert model input or output into a human readable string.
AlgebraConfig = namedtuple("AlgebraConfig", [
"vlist", "dlist", "flist", "functions", "ops", "solve_ops", "int_encoder",
"int_decoder"
])
def math_dataset_init(alphabet_size=26, digits=None, functions=None):
"""Initializes required objects to generate symbolic math datasets.
Produces token set, ExprOp instances, solve_op dictionary, encoders, and
decoders needed to generate the algebra inverse dataset.
Args:
alphabet_size: How many possible variables there are. Max 52.
digits: How many numerical digits to encode as tokens, "0" through
str(digits-1), or None to encode no digits.
functions: Defines special functions. A dict mapping human readable string
names, like "log", "exp", "sin", "cos", etc., to single chars. Each
function gets a unique token, like "L" for "log".
WARNING, Make sure these tokens do not conflict with the list of
possible variable names.
Returns:
AlgebraConfig instance holding all the objects listed above.
Raises:
ValueError: If `alphabet_size` is not in range [2, 52].
"""
ops_list = ["+", "-", "*", "/"]
ops = {
"+": ExprOp("+", 0, True),
"-": ExprOp("-", 0, False),
"*": ExprOp("*", 1, True),
"/": ExprOp("/", 1, False)
}
solve_ops = {
"+l": lambda l, r, to: (l, ExprNode(to, r, ops["-"])),
"+r": lambda l, r, to: (r, ExprNode(to, l, ops["-"])),
"-l": lambda l, r, to: (l, ExprNode(to, r, ops["+"])),
"-r": lambda l, r, to: (r, ExprNode(l, to, ops["-"])),
"*l": lambda l, r, to: (l, ExprNode(to, r, ops["/"])),
"*r": lambda l, r, to: (r, ExprNode(to, l, ops["/"])),
"/l": lambda l, r, to: (l, ExprNode(to, r, ops["*"])),
"/r": lambda l, r, to: (r, ExprNode(l, to, ops["/"])),
}
alphabet = (
[six.int2byte(ord("a") + c).decode("utf-8") for c in range(26)] +
[six.int2byte(ord("A") + c).decode("utf-8") for c in range(26)])
if alphabet_size > 52:
raise ValueError(
"alphabet_size cannot be greater than 52. Got %s." % alphabet_size)
if alphabet_size < 2:
raise ValueError(
"alphabet_size cannot be less than 2. Got %s." % alphabet_size)
if digits is not None and not 1 <= digits <= 10:
raise ValueError("digits cannot must be between 1 and 10. Got %s." % digits)
vlist = alphabet[:alphabet_size]
if digits is not None:
dlist = [str(d) for d in range(digits)]
else:
dlist = []
if functions is None:
functions = {}
flist = sorted(functions.values())
pad = "_"
tokens = [pad] + [":", "(", ")", "="] + ops_list + vlist + dlist + flist
if len(tokens) != len(set(tokens)):
raise ValueError("Duplicate token. Tokens: %s" % tokens)
token_map = dict([(t, i) for i, t in enumerate(tokens)])
def int_encoder(sequence):
return [token_map[s] for s in sequence]
def int_decoder(tensor_1d):
return "".join([tokens[i] for i in tensor_1d])
return AlgebraConfig(
vlist=vlist,
dlist=dlist,
flist=flist,
functions=functions,
ops=ops,
solve_ops=solve_ops,
int_encoder=int_encoder,
int_decoder=int_decoder)
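# Hedged usage sketch: the returned encoder/decoder pair round-trips any string
# made of the configured tokens.
#
#     cfg = math_dataset_init(alphabet_size=26)
#     cfg.int_decoder(cfg.int_encoder("a:a+b=c"))   # -> "a:a+b=c"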
def algebra_inverse(alphabet_size=26, min_depth=0, max_depth=2,
nbr_cases=10000):
"""Generate the algebra inverse dataset.
Each sample is a symbolic math equation involving unknown variables. The
task is to solve for the given variable. The target is the resulting
expression.
Args:
alphabet_size: How many possible variables there are. Max 52.
min_depth: Minimum depth of the expression trees on both sides of the
equals sign in the equation.
max_depth: Maximum depth of the expression trees on both sides of the
equals sign in the equation.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the variable to solve for and the math
equation, and target-list is a list of tokens encoding the resulting math
expression after solving for the variable.
Raises:
ValueError: If `max_depth` < `min_depth`.
"""
if max_depth < min_depth:
raise ValueError("max_depth must be greater than or equal to min_depth. "
"Got max_depth=%s, min_depth=%s" % (max_depth, min_depth))
alg_cfg = math_dataset_init(alphabet_size)
for _ in range(nbr_cases):
sample, target = generate_algebra_inverse_sample(
alg_cfg.vlist,
list(alg_cfg.ops.values()), alg_cfg.solve_ops, min_depth, max_depth)
yield {
"inputs": alg_cfg.int_encoder(sample),
"targets": alg_cfg.int_encoder(target)
}
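# Hedged usage sketch: algebra_inverse is a generator of already-encoded cases.
#
#     case = next(algebra_inverse(nbr_cases=1))
#     sorted(case.keys())   # -> ['inputs', 'targets']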
def algebra_simplify(alphabet_size=26,
min_depth=0,
max_depth=2,
nbr_cases=10000):
"""Generate the algebra simplify dataset.
Each sample is a symbolic math expression involving unknown variables. The
task is to simplify the expression. The target is the resulting expression.
Args:
alphabet_size: How many possible variables there are. Max 52.
min_depth: Minimum depth of the expression trees on both sides of the
equals sign in the equation.
max_depth: Maximum depth of the expression trees on both sides of the
equals sign in the equation.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the expression to simplify, and
target-list is a list of tokens encoding the resulting math expression after
simplifying.
Raises:
ValueError: If `max_depth` < `min_depth`.
"""
if max_depth < min_depth:
raise ValueError("max_depth must be greater than or equal to min_depth. "
"Got max_depth=%s, min_depth=%s" % (max_depth, min_depth))
alg_cfg = math_dataset_init(alphabet_size, digits=5)
for _ in range(nbr_cases):
sample, target = generate_algebra_simplify_sample(
alg_cfg.vlist, list(alg_cfg.ops.values()), min_depth, max_depth)
yield {
"inputs": alg_cfg.int_encoder(sample),
"targets": alg_cfg.int_encoder(target)
}
def calculus_integrate(alphabet_size=26,
min_depth=0,
max_depth=2,
nbr_cases=10000):
"""Generate the calculus integrate dataset.
Each sample is a symbolic math expression involving unknown variables. The
task is to take the indefinite integral of the expression. The target is the
resulting expression.
Args:
alphabet_size: How many possible variables there are. Max 26.
min_depth: Minimum depth of the expression trees on both sides of the
equals sign in the equation.
max_depth: Maximum depth of the expression trees on both sides of the
equals sign in the equation.
nbr_cases: The number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the tokens encoding the variable to integrate with respect
to and the expression to integrate, and target-list is a list of tokens
encoding the resulting math expression after integrating.
Raises:
ValueError: If `max_depth` < `min_depth`, or if alphabet_size > 26.
"""
if max_depth < min_depth:
raise ValueError("max_depth must be greater than or equal to min_depth. "
"Got max_depth=%s, min_depth=%s" % (max_depth, min_depth))
# Don't allow alphabet to use capital letters. Those are reserved for function
# names.
if alphabet_size > 26:
raise ValueError(
"alphabet_size must not be greater than 26. Got %s." % alphabet_size)
functions = {"log": "L"}
alg_cfg = math_dataset_init(alphabet_size, digits=5, functions=functions)
nbr_case = 0
while nbr_case < nbr_cases:
try:
sample, target = generate_calculus_integrate_sample(
alg_cfg.vlist,
list(alg_cfg.ops.values()), min_depth, max_depth, alg_cfg.functions)
yield {
"inputs": alg_cfg.int_encoder(sample),
"targets": alg_cfg.int_encoder(target)
}
except: # pylint:disable=bare-except
continue
if nbr_case % 10000 == 0:
print(" calculus_integrate: generating case %d." % nbr_case)
nbr_case += 1
|
|
from django.contrib.postgres.fields import ArrayField
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from simple_history.models import HistoricalRecords
from . import User, Organization, Story
from .discussion import Discussion
#-----------------------------------------------------------------------#
# FACET
#-----------------------------------------------------------------------#
@python_2_unicode_compatible
class FacetTemplate(models.Model):
"""Template for facets.
A template is a collection of fields so that when adding/editing a facet,
only appropriate fields are shown.
"""
name = models.CharField(
max_length=50,
)
# A template without an organization is a "site-wide" template;
# when listing templates for an organization, list the site-wide ones
# and those that match the organization.
organization = models.ForeignKey(
"Organization",
blank=True,
null=True,
)
owner = models.ForeignKey(
"User",
blank=True,
null=True,
)
description = models.CharField(
max_length=100,
blank=True,
)
fields_used = ArrayField(
models.CharField(max_length=50),
default=list,
help_text='Fields used by this template.',
blank=True,
)
creation_date = models.DateTimeField(
auto_now_add=True,
help_text='When template was created.',
blank=True,
)
is_active = models.BooleanField(
default=True,
)
class Meta:
ordering = ['id']
unique_together = ['name', 'organization']
def __str__(self):
return self.name
@property
def search_title(self):
return self.name
@property
def type(self):
return "Facet Template"
def copy(self):
""" Create a copy of a facet template for a partner organization in a network."""
print "in facet template copy"
self.id = None
self.is_active = False
print "pre copy save"
self.save()
print "saved"
print "new id", self.id
return self
def get_absolute_url(self):
if self.organization:
return reverse('facet_template_edit', kwargs={'template': self.id, 'org': self.organization.id})
else:
return reverse('facet_template_detail', kwargs={'template': self.id})
# Fields which will appear on all facet-editing forms -- and therefore do not
# need to be in the "fields_used" for a template.
COMMON_FIELDS = {
"name",
"headline",
"description",
"editor",
"credit",
# "team",
"content",
"status",
"due_edit",
"run_date",
"keywords",
# "template",
# "story",
}
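# Hedged sketch (the form-building code lives elsewhere): an editing form would
# typically show the union of the always-present fields and the ones a chosen
# template opts into, where `facet` is an assumed Facet instance:
#
#     visible_fields = COMMON_FIELDS | set(facet.template.fields_used)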
class Facet(models.Model):
"""A version of a story.
A facet must belong to a story and can only belong to one story. A facet is a version
of the story.
Ex. A story about wildfires could have:
- a web story facet that is a text article with photos and video
- a host-wrap facet that is the radio script of a story about the fire
- a video facet that is a video segment about the fire for distribution via social media.
"""
# ------------------------#
# required fields
# ------------------------#
# populated during save
owner = models.ForeignKey(
User,
related_name='facetowner'
)
organization = models.ForeignKey(
Organization,
help_text='Organization that owns this facet.'
)
template = models.ForeignKey(
FacetTemplate,
)
story = models.ForeignKey(
Story,
)
original = models.BooleanField(
default=True,
help_text='Was this facet originally created by a user from this organization?',
# If facet is not original, set to false and use FacetCopyDetail for additional info.
)
creation_date = models.DateTimeField(
auto_now_add=True,
help_text='Day facet was created.',
blank=True,
)
discussion = models.ForeignKey(
'Discussion',
help_text='Id of discussion for the facet.',
blank=True,
null=True,
)
# populated by user
name = models.TextField(
# displayed on form as label
help_text='Internal name for facet.'
)
headline = models.TextField(
help_text='Headline of the facet',
)
description = models.TextField(
help_text='Description of the facet.',
blank=True,
)
editor = models.ManyToManyField(
User,
related_name='faceteditor',
help_text='The full user name(s) to be listed as the editor(s) for the facet.',
blank=True,
)
credit = models.ManyToManyField(
User,
related_name='facetcredit',
help_text='The full user name(s) to be listed as the credit for the facet.',
blank=True,
)
team = models.ManyToManyField(
User,
through='FacetContributor',
help_text='Users that contributed to a facet. Used to associate multiple users to a facet.',
blank=True,
)
content = models.TextField(
help_text='Content of the facet.',
blank=True,
)
# Choices for facet status.
DRAFT = 'Draft'
PITCH = 'Pitch'
IN_PROGRESS = 'In Progress'
EDIT = 'Edit'
REVISION = 'Revision'
NEEDS_REVIEW = 'Needs Review'
READY = 'Ready'
FACET_STATUS_CHOICES = (
(DRAFT, 'Draft'),
(PITCH, 'Pitch'),
(IN_PROGRESS, 'In Progress'),
(EDIT, 'Edit'),
(REVISION, 'Revision'),
(NEEDS_REVIEW, 'Needs Review'),
(READY, 'Ready'),
)
status = models.CharField(
max_length=25,
choices=FACET_STATUS_CHOICES,
help_text='Facet status choice.'
)
due_edit = models.DateTimeField(
help_text='Due for edit.',
blank=True,
null=True,
)
run_date = models.DateTimeField(
help_text='Planned run date.',
blank=True,
null=True,
)
keywords = ArrayField(
models.CharField(max_length=100),
default=list,
help_text='List of keywords for search.',
blank=True,
)
# assets
image_assets = models.ManyToManyField(
'ImageAsset',
blank=True,
)
document_assets = models.ManyToManyField(
'DocumentAsset',
blank=True,
)
audio_assets = models.ManyToManyField(
'AudioAsset',
blank=True,
)
video_assets = models.ManyToManyField(
'VideoAsset',
blank=True,
)
# history
edit_history = HistoricalRecords()
# ------------------------#
# optional fields
# ------------------------#
update_note = models.TextField(
help_text='Text commenting regarding any updates or corrections made to the facet.',
blank=True,
)
excerpt = models.TextField(
help_text='Excerpt from the facet.',
blank=True,
)
dateline = models.CharField(
max_length=150,
help_text='Where and when the facet was created.',
blank=True,
)
share_note = models.TextField(
help_text='Information for organizations making a copy of the facet.',
blank=True,
)
topic_code = models.CharField(
max_length=75,
help_text='Unique code as needed to designate topic or coverage.',
blank=True,
)
internal_code = models.CharField(
max_length=75,
help_text='Unique code as needed for ingest systems or internal use.',
blank=True,
)
length = models.CharField(
max_length=75,
help_text='Length of facet for audio or video.',
blank=True,
)
wordcount = models.CharField(
max_length=75,
help_text='Wordcount for text-based facets.',
blank=True,
)
content_license = models.ForeignKey(
'ContentLicense',
related_name='facetlicense',
blank=True,
null=True,
)
related_links = models.TextField(
help_text='Relevant links that can be included with the facet.',
blank=True,
)
github_link = models.URLField(
max_length=300,
help_text='Link to code for any custom feature.',
blank=True,
)
sources = models.TextField(
help_text='List of sources in the facet.',
blank=True,
)
edit_note = models.TextField(
help_text='Information regarding allowable extent of editing and suggestions for specific kinds of edits.',
blank=True,
)
pronounciations = models.TextField(
help_text='Information about pronouncing names or potentially difficult words.',
blank=True,
)
sponsors = models.TextField(
help_text='Sponsors or underwriters if need to indicate any.',
blank=True,
)
# ------------------------#
# web specific fields
# ------------------------#
# also relevant for print
pull_quotes = models.TextField(
help_text='List of quotes and attributions to be used as pull quotes.',
blank=True,
)
embeds = models.TextField(
help_text='The necessary information to embed something like a Tweet, FB post, map or video.',
blank=True,
)
# push to CMS history
# pushed_to_wp = models.BooleanField(
# default=False,
# help_text='Whether the facet has been pushed to the organization WordPress site.',
# )
# ------------------------#
# print specific fields
# ------------------------#
sidebar_content = models.TextField(
help_text='Content separate from body text meant for sidebar or inset presentation.',
blank=True,
)
# ------------------------#
# audio specific fields
# ------------------------#
# relevant for video
producer = models.ForeignKey(
User,
related_name='facetproducer',
blank=True,
null=True,
)
# ------------------------#
# tv and video specific
# ------------------------#
series_title = models.TextField(
help_text='Title of the video series.',
blank=True,
)
episode_number = models.CharField(
max_length=75,
help_text='If the video is part of a series, the episode number.',
blank=True,
)
usage_rights = models.TextField(
help_text='Information regarding the usage of the video if shared.',
blank=True,
)
tape_datetime = models.DateTimeField(
help_text='Tape date.',
blank=True,
null=True,
)
locations = models.TextField(
help_text='Shoot locations.',
blank=True,
)
# ------------------------#
# user defined fields
# ------------------------#
custom_one = models.TextField(
help_text='User-defined field.',
blank=True,
)
custom_two = models.TextField(
help_text='User-defined field.',
blank=True,
)
custom_three = models.TextField(
help_text='User-defined field.',
blank=True,
)
custom_four = models.TextField(
help_text='User-defined field.',
blank=True,
)
custom_five = models.TextField(
help_text='User-defined field.',
blank=True,
)
class Meta:
verbose_name='Facet'
verbose_name_plural='Facets'
# ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('facet_edit', kwargs={'pk': self.id, 'story': self.story_id})
def copy(self):
""" Create a copy of a facet for a partner organization in a network."""
self.id = None
self.original = False
self.code = ''
self.status = 'NR'
self.due_edit = None
self.run_date = None
self.discussion = Discussion.objects.create_discussion("F")
# self.edit_history = HistoricalRecords()
print "pre copy save"
self.save()
print "saved"
print "new id", self.id
return self
def get_facet_images(self):
"""Retrieve all images objects associated with a facet."""
return self.image_assets.all()
def get_facet_documents(self):
"""Retrieve all documents objects associated with a facet."""
return self.document_assets.all()
def get_facet_audio(self):
"""Retrieve all audio objects associated with a facet."""
return self.audio_assets.all()
def get_facet_video(self):
"""Retrieve all video objects associated with a facet."""
return self.video_assets.all()
def get_facet_download(self):
""" Return rst formatted string for downloading facet and its meta."""
# loop over m2m and get the values as string
credits = self.credit.all()
credits = [ user.credit_name for user in credits]
credits = ", ".join(credits)
editors = self.editor.all()
editors = [ user.credit_name for user in editors]
editors = ", ".join(editors)
# loop over m2m and get the values as string
images = self.image_assets.all()
images = [image.title for image in images]
images = ", ".join(images)
# loop over m2m and get the values as string
documents = self.document_assets.all()
documents = [document.title for document in documents]
documents = ", ".join(documents)
# loop over m2m and get the values as string
audiofiles = self.audio_assets.all()
audiofiles = [audiofile.title for audiofile in audiofiles]
audiofiles = ", ".join(audiofiles)
# Loop over m2m and get the values as string
videos = self.video_assets.all()
videos = [video.title for video in videos]
videos = ", ".join(videos)
# verify the text area fields have correct encoding
name = self.name.encode('utf-8')
description = self.description.encode('utf-8')
excerpt = self.excerpt.encode('utf-8')
share_note = self.share_note.encode('utf-8')
content = self.content.encode('utf-8')
facet_download = """
Facet\r\n
========\r\n
{name}\r\n
--------------\r\n
Description: {desc}\r\n
Story: {story}\r\n
Owner: {owner}\r\n
Organization: {organization}\r\n
Original: {original}\r\n
Editor: {editor}\r\n
Credit: {credit}\r\n
Code: {code}\r\n
Excerpt: {excerpt}\r\n
Keywords: {keywords}\r\n
Status: {status}\r\n
Due Edit: {dueedit}\r\n
Run Date: {rundate}\r\n
Share Note: {sharenote}\r\n
Images: {images}\r\n
Documents: {documents}\r\n
AudioFiles: {audiofiles}\r\n
Videos: {videos}\r\n
\r\n
Content\r\n
-------\r\n
{content}
""".format(name=name, desc=description, story=self.story, owner=self.owner,
organization=self.organization.name, original=self.original, editor=editors,
credit=credits, code=self.internal_code, excerpt=excerpt,
keywords=self.keywords, status=self.status, dueedit=self.due_edit, rundate=self.run_date,
sharenote=share_note, images=images, documents=documents, audiofiles=audiofiles, videos=videos, content=content)
return facet_download
@property
def search_title(self):
return self.name
@property
def type(self):
return "Facet"
def is_editable_by_org(self, org):
"""Can this facet be edited by this org?"""
# FIXME: add contractor access?
story = self.story
return (org == story.organization or
(story.collaborate and org in story.collaborate_with.all()))
@receiver(post_save, sender=Facet)
def add_discussion(sender, instance, **kwargs):
if not instance.discussion:
instance.discussion = Discussion.objects.create_discussion("F")
instance.save()
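# Note on the receiver above: calling instance.save() inside a post_save
# handler fires post_save again, but the `if not instance.discussion` guard
# makes that second pass a no-op, so the extra save does not recurse further.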
#-----------------------------------------------------------------------#
# FacetContributor
#-----------------------------------------------------------------------#
@python_2_unicode_compatible
class FacetContributor(models.Model):
""" Which users are participating in creating the facet. """
facet = models.ForeignKey(
Facet,
)
user = models.ForeignKey(
User,
)
user_role = models.CharField(
max_length=255,
help_text='What did the user do?',
)
def __str__(self):
return "{facet}, {contributor}".format(
facet=self.facet.name,
contributor=self.user.credit_name,
)
#-----------------------------------------------------------------------#
# CONTENT LICENSE
#-----------------------------------------------------------------------#
@python_2_unicode_compatible
class ContentLicense(models.Model):
"""Content License for facets.
Facets can have a related content license. The data for this model
includes the 7 established variations of the Creative Commons license;
these have a blank Organization field.
Organizations can also create their own content licenses/reuse terms and
upload documents for the custom license.
"""
name = models.TextField(
help_text='Name for the license.',
)
organization = models.ForeignKey(
Organization,
null=True,
blank=True,
help_text='Organization that owns this license.',
)
terms = models.TextField(
help_text='Content of the terms.',
blank=True,
)
upload = models.FileField(
upload_to="license/%Y/%m/%d/",
null=True,
blank=True,
)
class Meta:
verbose_name = 'Content License'
verbose_name_plural = 'Content Licenses'
ordering = ['name']
def __str__(self):
return self.name
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator as ea
class _EventGenerator(object):
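  """Fake event source for tests: events added via the Add* helpers are
  yielded back in FIFO order by Load()."""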
def __init__(self):
self.items = []
def Load(self):
while self.items:
yield self.items.pop(0)
def AddScalar(self, tag, wall_time=0, step=0, value=0):
event = tf.Event(
wall_time=wall_time,
step=step,
summary=tf.Summary(value=[tf.Summary.Value(tag=tag,
simple_value=value)]))
self.AddEvent(event)
def AddHistogram(self,
tag,
wall_time=0,
step=0,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=None,
hbucket=None):
histo = tf.HistogramProto(min=hmin,
max=hmax,
num=hnum,
sum=hsum,
sum_squares=hsum_squares,
bucket_limit=hbucket_limit,
bucket=hbucket)
event = tf.Event(wall_time=wall_time,
step=step,
summary=tf.Summary(value=[tf.Summary.Value(tag=tag,
histo=histo)]))
self.AddEvent(event)
def AddImage(self,
tag,
wall_time=0,
step=0,
encoded_image_string=b'imgstr',
width=150,
height=100):
image = tf.Summary.Image(encoded_image_string=encoded_image_string,
width=width,
height=height)
event = tf.Event(wall_time=wall_time,
step=step,
summary=tf.Summary(value=[tf.Summary.Value(tag=tag,
image=image)]))
self.AddEvent(event)
def AddAudio(self,
tag,
wall_time=0,
step=0,
encoded_audio_string=b'sndstr',
content_type='audio/wav',
sample_rate=44100,
length_frames=22050):
audio = tf.Summary.Audio(encoded_audio_string=encoded_audio_string,
content_type=content_type,
sample_rate=sample_rate,
length_frames=length_frames)
event = tf.Event(wall_time=wall_time,
step=step,
summary=tf.Summary(value=[tf.Summary.Value(tag=tag,
audio=audio)]))
self.AddEvent(event)
def AddEvent(self, event):
self.items.append(event)
class EventAccumulatorTest(tf.test.TestCase):
def assertTagsEqual(self, tags1, tags2):
# Make sure the two dictionaries have the same keys.
self.assertItemsEqual(tags1, tags2)
# Additionally, make sure each key in the dictionary maps to the same value.
for key in tags1:
if isinstance(tags1[key], list):
        # We don't care about the order of values in lists, so just assert
        # that the same items are present.
self.assertItemsEqual(tags1[key], tags2[key])
else:
# Make sure the values are equal.
self.assertEqual(tags1[key], tags2[key])
class MockingEventAccumulatorTest(EventAccumulatorTest):
def setUp(self):
super(MockingEventAccumulatorTest, self).setUp()
self.stubs = googletest.StubOutForTesting()
self.empty = {ea.IMAGES: [],
ea.AUDIO: [],
ea.SCALARS: [],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: False,
ea.RUN_METADATA: []}
self._real_constructor = ea.EventAccumulator
self._real_generator = ea._GeneratorFromPath
def _FakeAccumulatorConstructor(generator, *args, **kwargs):
ea._GeneratorFromPath = lambda x: generator
return self._real_constructor(generator, *args, **kwargs)
ea.EventAccumulator = _FakeAccumulatorConstructor
def tearDown(self):
self.stubs.CleanUp()
ea.EventAccumulator = self._real_constructor
ea._GeneratorFromPath = self._real_generator
def testEmptyAccumulator(self):
gen = _EventGenerator()
x = ea.EventAccumulator(gen)
x.Reload()
self.assertEqual(x.Tags(), self.empty)
def testTags(self):
gen = _EventGenerator()
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
gen.AddAudio('snd1')
gen.AddAudio('snd2')
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.AUDIO: ['snd1', 'snd2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
ea.GRAPH: False,
ea.RUN_METADATA: []
})
def testReload(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertEqual(acc.Tags(), self.empty)
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
gen.AddAudio('snd1')
gen.AddAudio('snd2')
self.assertEqual(acc.Tags(), self.empty)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.AUDIO: ['snd1', 'snd2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
ea.GRAPH: False,
ea.RUN_METADATA: []
})
def testScalars(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
s1 = ea.ScalarEvent(wall_time=1, step=10, value=32)
s2 = ea.ScalarEvent(wall_time=2, step=12, value=64)
gen.AddScalar('s1', wall_time=1, step=10, value=32)
gen.AddScalar('s2', wall_time=2, step=12, value=64)
acc.Reload()
self.assertEqual(acc.Scalars('s1'), [s1])
self.assertEqual(acc.Scalars('s2'), [s2])
def testHistograms(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
val1 = ea.HistogramValue(min=1,
max=2,
num=3,
sum=4,
sum_squares=5,
bucket_limit=[1, 2, 3],
bucket=[0, 3, 0])
val2 = ea.HistogramValue(min=-2,
max=3,
num=4,
sum=5,
sum_squares=6,
bucket_limit=[2, 3, 4],
bucket=[1, 3, 0])
hst1 = ea.HistogramEvent(wall_time=1, step=10, histogram_value=val1)
hst2 = ea.HistogramEvent(wall_time=2, step=12, histogram_value=val2)
gen.AddHistogram('hst1',
wall_time=1,
step=10,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram('hst2',
wall_time=2,
step=12,
hmin=-2,
hmax=3,
hnum=4,
hsum=5,
hsum_squares=6,
hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
self.assertEqual(acc.Histograms('hst1'), [hst1])
self.assertEqual(acc.Histograms('hst2'), [hst2])
def testCompressedHistograms(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen, compression_bps=(0, 2500, 5000, 7500, 10000))
gen.AddHistogram('hst1',
wall_time=1,
step=10,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram('hst2',
wall_time=2,
step=12,
hmin=-2,
hmax=3,
hnum=4,
hsum=5,
hsum_squares=6,
hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
# Create the expected values after compressing hst1
expected_vals1 = [ea.CompressedHistogramValue(bp, val)
for bp, val in [(0, 1.0), (2500, 1.25), (5000, 1.5), (
7500, 1.75), (10000, 2.0)]]
expected_cmphst1 = ea.CompressedHistogramEvent(
wall_time=1,
step=10,
compressed_histogram_values=expected_vals1)
self.assertEqual(acc.CompressedHistograms('hst1'), [expected_cmphst1])
# Create the expected values after compressing hst2
expected_vals2 = [
ea.CompressedHistogramValue(bp, val)
for bp, val in [(0, -2), (2500, 2), (5000, 2 + 1 / 3), (7500, 2 + 2 / 3
), (10000, 3)]
]
expected_cmphst2 = ea.CompressedHistogramEvent(
wall_time=2,
step=12,
compressed_histogram_values=expected_vals2)
self.assertEqual(acc.CompressedHistograms('hst2'), [expected_cmphst2])
def testPercentile(self):
def AssertExpectedForBps(bps, expected):
output = acc._Percentile(bps, bucket_limit, cumsum_weights, histo_min,
histo_max, histo_num)
self.assertAlmostEqual(expected, output)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
bucket_limit = [1, 2, 3, 4]
histo_num = 100
## All weights in the first bucket
cumsum_weights = [10000, 10000, 10000, 10000]
histo_min = -1
histo_max = .9
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(5000, ea._Remap(5000, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(7500, ea._Remap(7500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(10000, histo_max)
## All weights in second bucket
cumsum_weights = [0, 10000, 10000, 10000]
histo_min = 1.1
histo_max = 1.8
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(5000, ea._Remap(5000, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(7500, ea._Remap(7500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(10000, histo_max)
## All weights in the last bucket
cumsum_weights = [0, 0, 0, 10000]
histo_min = 3.1
histo_max = 3.6
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(5000, ea._Remap(5000, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(7500, ea._Remap(7500, 0, 10000, histo_min, histo_max))
AssertExpectedForBps(10000, histo_max)
## Weights distributed between two buckets
cumsum_weights = [0, 4000, 10000, 10000]
histo_min = 1.1
histo_max = 2.9
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 4000, histo_min,
bucket_limit[1]))
AssertExpectedForBps(5000, ea._Remap(5000, 4000, 10000, bucket_limit[1],
histo_max))
AssertExpectedForBps(7500, ea._Remap(7500, 4000, 10000, bucket_limit[1],
histo_max))
AssertExpectedForBps(10000, histo_max)
## Weights distributed between all buckets
cumsum_weights = [1000, 4000, 8000, 10000]
histo_min = -1
histo_max = 3.9
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 1000, 4000, bucket_limit[0],
bucket_limit[1]))
AssertExpectedForBps(5000, ea._Remap(5000, 4000, 8000, bucket_limit[1],
bucket_limit[2]))
AssertExpectedForBps(7500, ea._Remap(7500, 4000, 8000, bucket_limit[1],
bucket_limit[2]))
AssertExpectedForBps(9000, ea._Remap(9000, 8000, 10000, bucket_limit[2],
histo_max))
AssertExpectedForBps(10000, histo_max)
## Most weight in first bucket
cumsum_weights = [9000, 10000, 10000, 10000]
histo_min = -1
histo_max = 1.1
AssertExpectedForBps(0, histo_min)
AssertExpectedForBps(2500, ea._Remap(2500, 0, 9000, histo_min,
bucket_limit[0]))
AssertExpectedForBps(5000, ea._Remap(5000, 0, 9000, histo_min,
bucket_limit[0]))
AssertExpectedForBps(7500, ea._Remap(7500, 0, 9000, histo_min,
bucket_limit[0]))
AssertExpectedForBps(9500, ea._Remap(9500, 9000, 10000, bucket_limit[0],
histo_max))
AssertExpectedForBps(10000, histo_max)
def testImages(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
im1 = ea.ImageEvent(wall_time=1,
step=10,
encoded_image_string=b'big',
width=400,
height=300)
im2 = ea.ImageEvent(wall_time=2,
step=12,
encoded_image_string=b'small',
width=40,
height=30)
gen.AddImage('im1',
wall_time=1,
step=10,
encoded_image_string=b'big',
width=400,
height=300)
gen.AddImage('im2',
wall_time=2,
step=12,
encoded_image_string=b'small',
width=40,
height=30)
acc.Reload()
self.assertEqual(acc.Images('im1'), [im1])
self.assertEqual(acc.Images('im2'), [im2])
def testAudio(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
snd1 = ea.AudioEvent(wall_time=1,
step=10,
encoded_audio_string=b'big',
content_type='audio/wav',
sample_rate=44100,
length_frames=441000)
snd2 = ea.AudioEvent(wall_time=2,
step=12,
encoded_audio_string=b'small',
content_type='audio/wav',
sample_rate=44100,
length_frames=44100)
gen.AddAudio('snd1',
wall_time=1,
step=10,
encoded_audio_string=b'big',
content_type='audio/wav',
sample_rate=44100,
length_frames=441000)
gen.AddAudio('snd2',
wall_time=2,
step=12,
encoded_audio_string=b'small',
content_type='audio/wav',
sample_rate=44100,
length_frames=44100)
acc.Reload()
self.assertEqual(acc.Audio('snd1'), [snd1])
self.assertEqual(acc.Audio('snd2'), [snd2])
def testActivation(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
self.assertFalse(acc._activated)
with self.assertRaises(RuntimeError):
acc.Tags()
with self.assertRaises(RuntimeError):
acc.Scalars('s1')
acc.Reload()
self.assertTrue(acc._activated)
acc._activated = False
def testKeyError(self):
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
acc.Reload()
with self.assertRaises(KeyError):
acc.Scalars('s1')
with self.assertRaises(KeyError):
acc.Scalars('hst1')
with self.assertRaises(KeyError):
acc.Scalars('im1')
with self.assertRaises(KeyError):
acc.Histograms('s1')
with self.assertRaises(KeyError):
acc.Histograms('im1')
with self.assertRaises(KeyError):
acc.Images('s1')
with self.assertRaises(KeyError):
acc.Images('hst1')
with self.assertRaises(KeyError):
acc.Audio('s1')
with self.assertRaises(KeyError):
acc.Audio('hst1')
def testNonValueEvents(self):
"""Tests that non-value events in the generator don't cause early exits."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=10, value=20)
gen.AddEvent(tf.Event(wall_time=2, step=20, file_version='nots2'))
gen.AddScalar('s3', wall_time=3, step=100, value=1)
gen.AddHistogram('hst1')
gen.AddImage('im1')
gen.AddAudio('snd1')
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1'],
ea.AUDIO: ['snd1'],
ea.SCALARS: ['s1', 's3'],
ea.HISTOGRAMS: ['hst1'],
ea.COMPRESSED_HISTOGRAMS: ['hst1'],
ea.GRAPH: False,
ea.RUN_METADATA: []
})
def testExpiredDataDiscardedAfterRestartForFileVersionLessThan2(self):
"""Tests that events are discarded after a restart is detected.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items with the same tag
that are outdated.
Only file versions < 2 use this out-of-order discard logic. Later versions
discard events based on the step value of SessionLog.START.
"""
warnings = []
self.stubs.Set(logging, 'warn', warnings.append)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
acc.Reload()
    ## Check that the number of items is what it should be
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300 from s1
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
def testOrphanedDataNotDiscardedIfFlagUnset(self):
"""Tests that events are not discarded if purge_orphaned_data is false.
"""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen, purge_orphaned_data=False)
gen.AddEvent(tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
acc.Reload()
    ## Check that the number of items is what it should be
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
acc.Reload()
    ## Check that nothing was discarded, since purge_orphaned_data=False
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300, 101,
201, 301])
def testEventsDiscardedPerTagAfterRestartForFileVersionLessThan2(self):
"""Tests that event discards after restart, only affect the misordered tag.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items that are outdated, but
only for the out of order tag. Other tags should remain unaffected.
Only file versions < 2 use this out-of-order discard logic. Later versions
discard events based on the step value of SessionLog.START.
"""
warnings = []
self.stubs.Set(logging, 'warn', warnings.append)
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
gen.AddScalar('s2', wall_time=1, step=101, value=20)
gen.AddScalar('s2', wall_time=1, step=201, value=20)
gen.AddScalar('s2', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
## Check that s1 discards do not affect s2
## i.e. check that only events from the out of order tag are discarded
self.assertEqual([x.step for x in acc.Scalars('s2')], [101, 201, 301])
def testOnlySummaryEventsTriggerDiscards(self):
"""Test that file version event does not trigger data purge."""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=100, value=20)
ev1 = tf.Event(wall_time=2, step=0, file_version='brain.Event:1')
graph_bytes = graph_pb2.GraphDef().SerializeToString()
ev2 = tf.Event(wall_time=3, step=0, graph_def=graph_bytes)
gen.AddEvent(ev1)
gen.AddEvent(ev2)
acc.Reload()
self.assertEqual([x.step for x in acc.Scalars('s1')], [100])
def testSessionLogStartMessageDiscardsExpiredEvents(self):
"""Test that SessionLog.START message discards expired events.
This discard logic is preferred over the out-of-order step discard logic,
but this logic can only be used for event protos which have the SessionLog
enum, which was introduced to event.proto for file_version >= brain.Event:2.
"""
gen = _EventGenerator()
acc = ea.EventAccumulator(gen)
gen.AddEvent(tf.Event(wall_time=0, step=1, file_version='brain.Event:2'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
gen.AddScalar('s1', wall_time=1, step=400, value=20)
gen.AddScalar('s2', wall_time=1, step=202, value=20)
gen.AddScalar('s2', wall_time=1, step=203, value=20)
slog = SessionLog(status=SessionLog.START)
gen.AddEvent(tf.Event(wall_time=2, step=201, session_log=slog))
acc.Reload()
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200])
self.assertEqual([x.step for x in acc.Scalars('s2')], [])
class RealisticEventAccumulatorTest(EventAccumulatorTest):
def setUp(self):
super(RealisticEventAccumulatorTest, self).setUp()
def testScalarsRealistically(self):
"""Test accumulator by writing values and then reading them."""
def FakeScalarSummary(tag, value):
value = tf.Summary.Value(tag=tag, simple_value=value)
summary = tf.Summary(value=[value])
return summary
directory = os.path.join(self.get_temp_dir(), 'values_dir')
if gfile.IsDirectory(directory):
gfile.DeleteRecursively(directory)
gfile.MkDir(directory)
writer = tf.train.SummaryWriter(directory, max_queue=100)
with tf.Graph().as_default() as graph:
_ = tf.constant([2.0, 1.0])
# Add a graph to the summary writer.
writer.add_graph(graph)
run_metadata = tf.RunMetadata()
device_stats = run_metadata.step_stats.dev_stats.add()
device_stats.device = 'test device'
writer.add_run_metadata(run_metadata, 'test run')
# Write a bunch of events using the writer
for i in xrange(30):
summ_id = FakeScalarSummary('id', i)
summ_sq = FakeScalarSummary('sq', i * i)
writer.add_summary(summ_id, i * 5)
writer.add_summary(summ_sq, i * 5)
writer.flush()
# Verify that we can load those events properly
acc = ea.EventAccumulator(directory)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: [],
ea.AUDIO: [],
ea.SCALARS: ['id', 'sq'],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: True,
ea.RUN_METADATA: ['test run']
})
id_events = acc.Scalars('id')
sq_events = acc.Scalars('sq')
self.assertEqual(30, len(id_events))
self.assertEqual(30, len(sq_events))
for i in xrange(30):
self.assertEqual(i * 5, id_events[i].step)
self.assertEqual(i * 5, sq_events[i].step)
self.assertEqual(i, id_events[i].value)
self.assertEqual(i * i, sq_events[i].value)
# Write a few more events to test incremental reloading
for i in xrange(30, 40):
summ_id = FakeScalarSummary('id', i)
summ_sq = FakeScalarSummary('sq', i * i)
writer.add_summary(summ_id, i * 5)
writer.add_summary(summ_sq, i * 5)
writer.flush()
# Verify we can now see all of the data
acc.Reload()
self.assertEqual(40, len(id_events))
self.assertEqual(40, len(sq_events))
for i in xrange(40):
self.assertEqual(i * 5, id_events[i].step)
self.assertEqual(i * 5, sq_events[i].step)
self.assertEqual(i, id_events[i].value)
self.assertEqual(i * i, sq_events[i].value)
self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
if __name__ == '__main__':
tf.test.main()
|
|
"""Description of YANG & YIN syntax."""
import re
### Regular expressions - constraints on arguments
# keywords and identifiers
identifier = r"[_A-Za-z][._\-A-Za-z0-9]*"
prefix = identifier
keyword = '((' + prefix + '):)?(' + identifier + ')'
comment = r'(/\*([^*]|[\r\n\s]|(\*+([^*/]|[\r\n\s])))*\*+/)|(//.*)|(/\*.*)'
# no group version of keyword
keyword_ng = '(?:(' + prefix + '):)?(?:' + identifier + ')'
re_keyword = re.compile(keyword)
re_keyword_start = re.compile('^' + keyword)
re_comment = re.compile(comment)
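# Example: re_keyword_start.match("ietf-yang-types:date-and-time").groups()
# yields ('ietf-yang-types:', 'ietf-yang-types', 'date-and-time'); for an
# unprefixed keyword the first two groups are None.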
pos_integer = r"[1-9][0-9]*"
nonneg_integer = r"(0|[1-9])[0-9]*"
integer_ = r"[-+]?" + nonneg_integer
decimal_ = r"(\+|\-)?[0-9]+(\.[0-9]+)?"
length_str = r'((min|max|[0-9]+)\s*' \
             r'(\.\.\s*' \
             r'(min|max|[0-9]+)\s*)?)'
length_expr = length_str + r'(\|\s*' + length_str + ')*'
re_length_part = re.compile(length_str)
range_str = r'((\-INF|min|max|((\+|\-)?[0-9]+(\.[0-9]+)?))\s*' \
            r'(\.\.\s*' \
            r'(INF|min|max|(\+|\-)?[0-9]+(\.[0-9]+)?)\s*)?)'
range_expr = range_str + r'(\|\s*' + range_str + ')*'
re_range_part = re.compile(range_str)
re_identifier = re.compile("^" + identifier + "$")
# path and unique
node_id = keyword_ng
rel_path_keyexpr = r"(\.\./)+(" + node_id + "/)*" + node_id
path_key_expr = r"(current\s*\(\s*\)/" + rel_path_keyexpr + ")"
path_equality_expr = node_id + r"\s*=\s*" + path_key_expr
path_predicate = r"\s*\[\s*" + path_equality_expr + r"\s*\]\s*"
absolute_path_arg = "(?:/" + node_id + "(" + path_predicate + ")*)+"
descendant_path_arg = node_id + "(" + path_predicate + ")*" + \
"(?:" + absolute_path_arg + ")?"
relative_path_arg = r"(\.\./)*" + descendant_path_arg
deref_path_arg = r"deref\s*\(\s*(?:" + relative_path_arg + \
                 r")\s*\)/\.\./" + relative_path_arg
path_arg = "(" + absolute_path_arg + "|" + relative_path_arg + "|" + \
deref_path_arg + ")"
absolute_schema_nodeid = "(/" + node_id + ")+"
descendant_schema_nodeid = node_id + "(" + absolute_schema_nodeid + ")?"
schema_nodeid = "("+absolute_schema_nodeid+"|"+descendant_schema_nodeid+")"
unique_arg = descendant_schema_nodeid + r"(\s+" + descendant_schema_nodeid + ")*"
key_arg = node_id + r"(\s+" + node_id + ")*"
re_schema_node_id_part = re.compile('/' + keyword)
# URI - RFC 3986, Appendix A
scheme = "[A-Za-z][-+.A-Za-z0-9]*"
unreserved = "[-._~A-Za-z0-9]"
pct_encoded = "%[0-9A-F]{2}"
sub_delims = "[!$&'()*+,;=]"
pchar = ("(" + unreserved + "|" + pct_encoded + "|" +
sub_delims + "|[:@])")
segment = pchar + "*"
segment_nz = pchar + "+"
userinfo = ("(" + unreserved + "|" + pct_encoded + "|" +
sub_delims + "|:)*")
dec_octet = "([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
ipv4address = "(" + dec_octet + r"\.){3}" + dec_octet
h16 = "[0-9A-F]{1,4}"
ls32 = "(" + h16 + ":" + h16 + "|" + ipv4address + ")"
ipv6address = (
"((" + h16 + ":){6}" + ls32 +
"|::(" + h16 + ":){5}" + ls32 +
"|(" + h16 + ")?::(" + h16 + ":){4}" + ls32 +
"|((" + h16 + ":)?" + h16 + ")?::(" + h16 + ":){3}" + ls32 +
"|((" + h16 + ":){,2}" + h16 + ")?::(" + h16 + ":){2}" + ls32 +
"|((" + h16 + ":){,3}" + h16 + ")?::" + h16 + ":" + ls32 +
"|((" + h16 + ":){,4}" + h16 + ")?::" + ls32 +
"|((" + h16 + ":){,5}" + h16 + ")?::" + h16 +
"|((" + h16 + ":){,6}" + h16 + ")?::)")
ipvfuture = r"v[0-9A-F]+\.(" + unreserved + "|" + sub_delims + "|:)+"
ip_literal = r"\[(" + ipv6address + "|" + ipvfuture + r")\]"
reg_name = "(" + unreserved + "|" + pct_encoded + "|" + sub_delims + ")*"
host = "(" + ip_literal + "|" + ipv4address + "|" + reg_name + ")"
port = "[0-9]*"
authority = "(" + userinfo + "@)?" + host + "(:" + port + ")?"
path_abempty = "(/" + segment + ")*"
path_absolute = "/(" + segment_nz + "(/" + segment + ")*)?"
path_rootless = segment_nz + "(/" + segment + ")*"
path_empty = pchar + "{0}"
hier_part = ("(" + "//" + authority + path_abempty + "|" +
path_absolute + "|" + path_rootless + "|" + path_empty + ")")
query = "(" + pchar + "|[/?])*"
fragment = query
uri = (scheme + ":" + hier_part + r"(\?" + query + ")?" +
"(#" + fragment + ")?")
# Date
date = r"[1-2][0-9]{3}-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])"
re_nonneg_integer = re.compile("^" + nonneg_integer + "$")
re_integer = re.compile("^" + integer_ + "$")
re_decimal = re.compile("^" + decimal_ + "$")
re_uri = re.compile("^" + uri + "$")
re_boolean = re.compile("^(true|false)$")
re_version = re.compile("^1$")
re_date = re.compile("^" + date +"$")
re_status = re.compile("^(current|obsolete|deprecated)$")
re_key = re.compile("^" + key_arg + "$")
re_length = re.compile("^" + length_expr + "$")
re_range = re.compile("^" + range_expr + "$")
re_pos_integer = re.compile(r"^(unbounded|" + pos_integer + r")$")
re_ordered_by = re.compile(r"^(user|system)$")
re_node_id = re.compile("^" + node_id + "$")
re_path = re.compile("^" + path_arg + "$")
re_absolute_path = re.compile("^" + absolute_path_arg + "$")
re_unique = re.compile("^" + unique_arg + "$")
re_schema_nodeid = re.compile("^" + schema_nodeid + "$")
re_absolute_schema_nodeid = re.compile("^" + absolute_schema_nodeid + "$")
re_descendant_schema_nodeid = re.compile("^" + descendant_schema_nodeid + "$")
re_deviate = re.compile("^(add|delete|replace|not-supported)$")
arg_type_map = {
"identifier": lambda s: re_identifier.search(s) is not None,
"non-negative-integer": lambda s: re_nonneg_integer.search(s) is not None,
"integer": lambda s: re_integer.search(s) is not None,
"uri": lambda s: re_uri.search(s) is not None,
"boolean": lambda s: re_boolean.search(s) is not None,
"version": lambda s: re_version.search(s) is not None,
"date": lambda s: re_date.search(s) is not None,
"status-arg": lambda s: re_status.search(s) is not None,
"key-arg": lambda s: re_key.search(s) is not None,
"length-arg": lambda s: re_length.search(s) is not None,
"range-arg": lambda s: re_range.search(s) is not None,
"max-value": lambda s: re_pos_integer.search(s) is not None,
"ordered-by-arg": lambda s: re_ordered_by.search(s) is not None,
"identifier-ref": lambda s: re_node_id.search(s) is not None,
"path-arg": lambda s: re_path.search(s) is not None,
"absolute-path-arg": lambda s: re_absolute_path.search(s) is not None,
"unique-arg": lambda s: re_unique.search(s) is not None,
"absolute-schema-nodeid": lambda s: \
re_absolute_schema_nodeid.search(s) is not None,
"descendant-schema-nodeid": lambda s: \
re_descendant_schema_nodeid.search(s) is not None,
"schema-nodeid": lambda s: \
re_schema_nodeid.search(s) is not None,
"enum-arg": lambda s: chk_enum_arg(s),
"fraction-digits-arg": lambda s: chk_fraction_digits_arg(s),
"deviate-arg": lambda s: re_deviate.search(s) is not None,
"_comment": lambda s: re_comment.search(s) is not None,
}
"""Argument type definitions.
Regular expressions and helper predicates for all argument types
except plain string; the parser uses these to check arguments directly.
"""
def chk_enum_arg(s):
"""Checks if the string `s` is a valid enum string.
Return True or False."""
if len(s) == 0 or s[0].isspace() or s[-1].isspace():
return False
else:
return True
def chk_fraction_digits_arg(s):
"""Checks if the string `s` is a valid fraction-digits argument.
Return True or False."""
try:
v = int(s)
if v >= 1 and v <= 18:
return True
else:
return False
except ValueError:
return False
def add_arg_type(arg_type, regexp):
"""Add a new arg_type to the map.
Used by extension plugins to register their own argument types."""
arg_type_map[arg_type] = regexp
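# Hypothetical plugin usage (the "hex-string" type name is made up):
#   add_arg_type("hex-string",
#                lambda s: re.match(r"^[0-9a-fA-F]+$", s) is not None)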
# keyword argument-name yin-element
yin_map = \
{'anyxml': ('name', False),
'argument': ('name', False),
'augment': ('target-node', False),
'base': ('name', False),
'belongs-to': ('module', False),
'bit': ('name', False),
'case': ('name', False),
'choice': ('name', False),
'config': ('value', False),
'contact': ('text', True),
'container': ('name', False),
'default': ('value', False),
'description': ('text', True),
'deviate': ('value', False),
'deviation': ('target-node', False),
'enum': ('name', False),
'error-app-tag': ('value', False),
'error-message': ('value', True),
'extension': ('name', False),
'feature': ('name', False),
'fraction-digits': ('value', False),
'grouping': ('name', False),
'identity': ('name', False),
'if-feature': ('name', False),
'import': ('module', False),
'include': ('module', False),
'input': (None, None),
'key': ('value', False),
'leaf': ('name', False),
'leaf-list': ('name', False),
'length': ('value', False),
'list': ('name', False),
'mandatory': ('value', False),
'max-elements': ('value', False),
'min-elements': ('value', False),
'module': ('name', False),
'must': ('condition', False),
'namespace': ('uri', False),
'notification': ('name', False),
'ordered-by': ('value', False),
'organization': ('text', True),
'output': (None, None),
'path': ('value', False),
'pattern': ('value', False),
'position': ('value', False),
'presence': ('value', False),
'prefix': ('value', False),
'range': ('value', False),
'reference': ('text', True),
'refine': ('target-node', False),
'require-instance': ('value', False),
'revision': ('date', False),
'revision-date': ('date', False),
'rpc': ('name', False),
'status': ('value', False),
'submodule': ('name', False),
'type': ('name', False),
'typedef': ('name', False),
'unique': ('tag', False),
'units': ('name', False),
'uses': ('name', False),
'value': ('value', False),
'when': ('condition', False),
'yang-version': ('value', False),
'yin-element': ('value', False),
}
"""Mapping of statements to the YIN representation of their arguments.
The values are pairs whose first component is the name of the
attribute or subelement carrying the argument, and whose second
component specifies whether the argument is stored in a subelement
(True) or in an attribute (False). See the YANG specification.
"""
|
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2019, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import contextlib
import subprocess
import inspect
import itertools
import functools
import re
import types
import abc
import warnings
from collections.abc import Mapping
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
from operator import itemgetter
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.directives import flag
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx.util.nodes import nested_parse_with_titles
from sphinx.ext.autodoc import exclude_members_option
import lisa
import lisa.analysis
from lisa.analysis.base import AnalysisHelpers, TraceAnalysisBase
from lisa.utils import get_subclasses, import_all_submodules, DEPRECATED_MAP, get_sphinx_name, groupby, get_short_doc, order_as
from lisa.trace import TraceEventCheckerBase
from lisa.conf import KeyDesc, SimpleMultiSrcConf, TopLevelKeyDesc
from lisa.version import format_version
import lisa._git
class RecursiveDirective(Directive):
"""
Base class helping nested parsing.
Options:
* ``literal``: If set, a literal block will be used, otherwise the text
will be interpreted as reStructuredText.
"""
option_spec = {
'literal': flag,
}
def parse_nested(self, txt, source=None):
"""
Parse text as reStructuredText if the ``literal`` option is not set.
Otherwise, interpret the text as a line block.
"""
if 'literal' in self.options:
node = nodes.literal_block(txt, txt, classes=[])
# Avoid syntax highlight
node['language'] = 'text'
return [node]
else:
txt = ViewList(txt.splitlines(), source)
node = nodes.Element()
nested_parse_with_titles(self.state, txt, node)
return node.children
class ExecDirective(RecursiveDirective):
"""
reStructuredText directive to execute the specified python code and insert
the output into the document::
.. exec::
import sys
print(sys.version)
Options:
* ``literal``: If set, a literal block will be used, otherwise the text
will be interpreted as reStructuredText.
"""
has_content = True
def run(self):
stdout = io.StringIO()
code = '\n'.join(self.content)
with contextlib.redirect_stdout(stdout):
exec(code, {})
out = stdout.getvalue()
return self.parse_nested(out)
directives.register_directive('exec', ExecDirective)
class RunCommandDirective(RecursiveDirective):
"""
reStructuredText directive to execute the specified command and insert
the output into the document::
.. run-command::
:capture-stderr:
:ignore-error:
:literal:
exekall --help
Options:
* ``literal``: If set, a literal block will be used, otherwise the text
will be interpreted as reStructuredText.
* ``capture-stderr``: If set, stderr will be captured in addition to stdout.
* ``ignore-error``: The return status of the command will be ignored.
Otherwise, it will raise an exception and building the
documentation will fail.
"""
has_content = True
option_spec = {
'ignore-error': flag,
'capture-stderr': flag,
'literal': flag,
}
def run(self):
options = self.options
if 'capture-stderr' in options:
stderr = subprocess.STDOUT
else:
stderr = None
check = False if 'ignore-error' in options else True
cmd = '\n'.join(self.content)
out = subprocess.run(
cmd, shell=True, check=check,
stdout=subprocess.PIPE, stderr=stderr,
).stdout.decode('utf-8')
return self.parse_nested(out, cmd)
directives.register_directive('run-command', RunCommandDirective)
def is_test(method):
"""
Check if a method is a test method.
"""
if not callable(method):
return False
with contextlib.suppress(AttributeError):
if method.__name__.startswith('test_'):
return True
# Tests are methods with an annotated return type, with at least
# one base class with a name containing 'result'
try:
ret_type = inspect.signature(method).return_annotation
base_cls_list = inspect.getmro(ret_type)
except (ValueError, AttributeError, KeyError):
return False
else:
return any(
'result' in cls.__qualname__.casefold()
for cls in base_cls_list
)
def autodoc_process_test_method(app, what, name, obj, options, lines):
# Append the list of available test methods for all classes that appear to
# have some.
if what == 'class':
test_list = [
member
for member_name, member in inspect.getmembers(obj, is_test)
]
if test_list:
test_list_doc = '\n:Test methods:\n\n{}\n\n'.format('\n'.join(
' * :meth:`~{}`'.format(
method.__module__ + '.' + method.__qualname__
)
for method in test_list
))
lines.extend(test_list_doc.splitlines())
def autodoc_process_analysis_events(app, what, name, obj, options, lines):
"""
Append the list of required trace events
"""
# We look for events in the getter method of properties
if what == 'property':
obj = obj.fget
try:
used_events = obj.used_events
except AttributeError:
return
else:
if not isinstance(used_events, TraceEventCheckerBase):
return
events_doc = f"\n:Required trace events:\n\n{used_events.doc_str()}\n\n"
lines.extend(events_doc.splitlines())
def autodoc_skip_member_handler(app, what, name, obj, skip, options):
"""
Enforce the "exclude-members" option, even in cases where it seems to be
ignored by Sphinx.
"""
excluded = options.get('exclude-members', set())
if excluded:
# Either it's a one-item set with the string passed in conf.py
try:
excluded, = excluded
# Or it's an already-processed set
except ValueError:
pass
else:
excluded = exclude_members_option(excluded)
    # Import the conf.py Sphinx configuration, since the "exclude-members" option
    # can be overridden by the user in ReST directives.
import conf
default_excluded = exclude_members_option(
conf.autodoc_default_options.get('exclude-members', '')
)
excluded = excluded | default_excluded
name = name.split('.')[-1]
unwrapped = inspect.unwrap(obj)
# Get rid of the default implementation of dunder names, since it adds no
# value in the documentation
if any(
hasattr(cls, name) and getattr(cls, name) in (obj, unwrapped)
# providers of "uninteresting" methods that are useless in our
# documentation
for cls in (
object,
type,
abc.ABC,
abc.ABCMeta,
)
):
return True
# Some classes like ABCMeta are more sneaky so also ban things that are
# just builtin functions
elif any(
type_ in map(type, (obj, unwrapped))
for type_ in (
# Work with multiple Python versions
getattr(types, type_name)
for type_name in (
'BuiltinFunctionType',
'BuiltinMethodType',
'WrapperDescriptorType',
'MethodWrapperType',
'MethodDescriptorType',
'ClassMethodDescriptorType',
'GetSetDescriptorType',
'MemberDescriptorType',
)
if hasattr(types, type_name)
)
):
return True
# Dunder names without any doc are of no interest, they are probably just
# implementation details
elif name.startswith('__') and name.endswith('__') and not inspect.getdoc(obj):
return True
elif name in excluded:
return True
else:
return skip
class DocPlotConf(SimpleMultiSrcConf):
"""
Analysis plot method arguments configuration for the documentation.
{generated_help}
{yaml_example}
"""
STRUCTURE = TopLevelKeyDesc('doc-plot-conf', 'Plot methods configuration', (
# Avoid deepcopy of the value, since it contains a Trace object that we
# don't want to duplicate for speed reasons
KeyDesc('plots', 'Mapping of function qualnames to their settings', [Mapping], deepcopy_val=False),
))
def autodoc_process_analysis_plots(app, what, name, obj, options, lines, plot_conf):
if what != 'method':
return
plot_methods = set(itertools.chain.from_iterable(
subclass.get_plot_methods()
for subclass in get_subclasses(TraceAnalysisBase)
))
if obj not in plot_methods:
return
plot_conf = plot_conf['plots']
default_spec = plot_conf.get('default', {})
spec = plot_conf.get(obj.__qualname__, {})
spec = {**default_spec, **spec}
kwargs = spec.get('kwargs', {})
trace = spec['trace']
if spec.get('hide'):
return
print(f'Generating plot for {obj.__qualname__}')
# Suppress deprecation warnings so we can still have them in the doc
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
rst_figure = TraceAnalysisBase.call_on_trace(obj, trace, {
'backend': 'bokeh',
'output': 'sphinx-rst',
'interactive': False,
**kwargs
})
rst_figure = f'\n:Example plot:\n\n{rst_figure}'
lines.extend(rst_figure.splitlines())
def autodoc_process_analysis_methods(app, what, name, obj, options, lines):
"""
Append the list of required trace events
"""
methods = {
func: subclass
for subclass in get_subclasses(TraceAnalysisBase)
for name, func in inspect.getmembers(subclass, callable)
}
try:
cls = methods[obj]
except (KeyError, TypeError):
return
else:
on_trace_name = f'trace.ana.{cls.name}.{obj.__name__}'
extra_doc = f"\n*Called on* :class:`~lisa.trace.Trace` *instances as* ``{on_trace_name}()``\n\n"
# prepend
lines[:0] = extra_doc.splitlines()
def get_analysis_list(meth_type):
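    """
    Return a reStructuredText bullet list of the analysis methods of the given
    kind (``'plot'`` or ``'df'``), with deprecated methods filtered out.
    """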
rst_list = []
deprecated = {
entry['obj']
for entry in get_deprecated_map().values()
}
for subclass in get_subclasses(AnalysisHelpers):
class_path = f"{subclass.__module__}.{subclass.__qualname__}"
if meth_type == 'plot':
meth_list = subclass.get_plot_methods()
elif meth_type == 'df':
meth_list = [
member
for name, member in inspect.getmembers(subclass, callable)
if name.startswith('df_')
]
else:
raise ValueError()
meth_list = [
f.__name__
for f in meth_list
if f not in deprecated
]
rst_list += [
f":class:`{subclass.name}<{class_path}>`::meth:`~{class_path}.{meth}`"
for meth in meth_list
]
joiner = '\n* '
return joiner + joiner.join(sorted(rst_list))
def find_dead_links(content):
"""
Look for HTTP URLs in ``content`` and return a dict of URL to errors when
trying to open them.
"""
regex = r"https?://[^\s]+"
links = re.findall(regex, content)
@functools.lru_cache(maxsize=None)
def check(url):
# Some HTTP servers (including ReadTheDocs) will return 403 Forbidden
# if no User-Agent is given
headers={
'User-Agent': 'Wget/1.13.4 (linux-gnu)',
}
request = Request(url, headers=headers)
try:
urlopen(request)
except (HTTPError, URLError) as e:
return e.reason
else:
return None
errors = {
link: check(link)
for link in links
if check(link) is not None
}
return errors
def check_dead_links(filename):
"""
Check ``filename`` for broken links, and raise an exception if there is any.
"""
with open(filename) as f:
dead_links = find_dead_links(f.read())
if dead_links:
raise RuntimeError('Found dead links in {}:\n {}'.format(
filename,
'\n '.join(
f'{url}: {error}'
for url, error in dead_links.items()
)))
def get_deprecated_map():
"""
Get the mapping of deprecated names with some metadata.
"""
# Import everything there is to import, so the map is fully populated
import_all_submodules(lisa)
return DEPRECATED_MAP
def get_deprecated_table():
"""
Get a reStructuredText tables with titles for all the deprecated names in
:mod:`lisa`.
"""
def indent(string, level=1):
idt = ' ' * 4
return string.replace('\n', '\n' + idt * level)
def make_entry(entry):
msg = entry.get('msg') or ''
removed_in = entry.get('removed_in')
if removed_in is None:
removed_in = ''
else:
removed_in = f'*Removed in: {format_version(removed_in)}*\n\n'
name = get_sphinx_name(entry['obj'], style='rst')
replaced_by = entry.get('replaced_by')
if replaced_by is None:
replaced_by = ''
else:
replaced_by = f"*Replaced by:* {get_sphinx_name(replaced_by, style='rst')}\n\n"
return "* - {name}{msg}{replaced_by}{removed_in}".format(
name=indent(name + '\n\n'),
msg=indent(msg + '\n\n' if msg else ''),
replaced_by=indent(replaced_by),
removed_in=indent(removed_in),
)
def make_table(entries, removed_in):
if entries:
entries = '\n'.join(
make_entry(entry)
for entry in sorted(entries, key=itemgetter('name'))
)
if removed_in:
if removed_in > lisa.version.version_tuple:
remove = 'to be removed'
else:
remove = 'removed'
removed_in = f' {remove} in {format_version(removed_in)}'
else:
removed_in = ''
table = ".. list-table:: Deprecated names{removed_in}\n :align: left{entries}".format(
entries=indent('\n\n' + entries),
removed_in=removed_in,
)
header = f'Deprecated names{removed_in}'
header += '\n' + '+' * len(header)
return header + '\n\n' + table
else:
return ''
entries = [
{'name': name, **info}
for name, info in get_deprecated_map().items()
]
unspecified_removal = [
entry
for entry in entries
if not entry['removed_in']
]
other_entries = [
entry
for entry in entries
if entry not in unspecified_removal
]
tables = []
tables.append(make_table(unspecified_removal, removed_in=None))
tables.extend(
make_table(entries, removed_in=removed_in)
for removed_in, entries in groupby(other_entries, itemgetter('removed_in'), reverse=True)
)
return '\n\n'.join(tables)
def get_xref_type(obj):
"""
Infer the Sphinx type a cross reference to ``obj`` should have.
    For example, ``:py:class:`FooBar``` has the type ``py:class``.
"""
if isinstance(obj, type):
if issubclass(obj, BaseException):
t = 'exc'
else:
t = 'class'
elif isinstance(obj, types.ModuleType):
t = 'mod'
elif callable(obj):
try:
qualname = obj.__qualname__
except AttributeError:
t = 'func'
else:
if len(qualname.split('.')) > 1:
t = 'meth'
else:
t = 'func'
else:
raise ValueError(f'Cannot infer the xref type of {obj}')
return f'py:{t}'
def get_subclasses_bullets(cls, abbrev=True, style=None, only_leaves=False):
"""
Return a formatted bullet list of the subclasses of the given class,
including a short description for each.
"""
return '\n'.join(
f'* {subcls}: {doc}'
for subcls, doc in sorted(
(
get_sphinx_name(subcls, style=style, abbrev=abbrev),
get_short_doc(subcls)
)
for subcls in get_subclasses(cls, only_leaves=only_leaves)
)
)
def make_changelog(repo):
"""
Generate a reStructuredText changelog to be included in the documentation.
.. note:: The git repository cannot be a shallow clone, as the changelog is
extracted from the git history.
"""
release_refs = ['HEAD'] + lisa._git.find_tags(repo, 'v*')
def update_release_name(name):
if name == 'HEAD':
return 'Next release'
else:
return name
MARKERS = ['FEATURE', 'FIX', 'BREAKING CHANGE']
# Filtering on the patterns we look for provides a considerable speedup
commit_pattern = '(' + '|'.join(map(re.escape, MARKERS)) + ')'
release_sha1s = {
update_release_name(y): lisa._git.find_commits(
repo=repo,
ref=f'{x}..{y}',
grep=commit_pattern,
regex=True,
)
for x, y in zip(release_refs[1:], release_refs)
}
release_msgs = {
release: [
lisa._git.get_commit_message(
repo=repo,
ref=ref,
format='%B',
).strip()
for ref in refs
]
for release, refs in release_sha1s.items()
}
def parse_msg(msg):
selected = tuple(sorted({
marker
for marker in MARKERS
if marker in msg
}))
for marker in selected:
            pattern = rf'^\s*{re.escape(marker)}\s*$'
msg = re.sub(pattern, '', msg, flags=re.MULTILINE)
return (msg, selected)
def expand(msg, markers):
for marker in markers:
yield (marker, msg)
release_msgs = {
release: dict(
map(
lambda x: (x[0], list(map(itemgetter(1), x[1]))),
groupby(
(
entry
for msg in msgs
for entry in expand(*parse_msg(msg))
),
key=itemgetter(0)
)
)
)
for release, msgs in release_msgs.items()
}
def indent(level, content):
idt = level * ' '
return idt + content.replace('\n', f'\n{idt}')
def format_release(name, sections):
title = f'{name}\n{len(name) * "="}\n'
body = '\n\n'.join(
format_section(marker, _msgs)
for marker, _msgs in order_as(
sections.items(),
order_as=MARKERS,
key=itemgetter(0),
)
)
return f'{title}\n{body}'
def format_section(name, msgs):
title = f'{name.capitalize()}\n{len(name) * "+"}\n'
body = '\n\n'.join(map(format_msg, sorted(msgs)))
body = indent(4, body)
return f'{title}\n{body}'
def format_msg(msg):
subject = msg.splitlines()[0]
return f'- {subject.strip()}'
rst = '\n\n'.join(
format_release(name, sections)
for name, sections in release_msgs.items()
)
return rst
class PlaceHolderRef:
"""
    If you were redirected here, it means that the reference points to
    something private and undocumented, or to something that is not expected
    to be documentable at all.
"""
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet50 model definition compatible with TensorFlow's eager execution.
Reference [Deep Residual Learning for Image
Recognition](https://arxiv.org/abs/1512.03385)
Adapted from tf.keras.applications.ResNet50. A notable difference is that the
model here outputs logits while the Keras model outputs probability.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
layers = tf.keras.layers
class _IdentityBlock(tf.keras.Model):
"""_IdentityBlock is the block that has no conv layer at shortcut.
Args:
kernel_size: the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
data_format: data_format for the input ('channels_first' or
'channels_last').
"""
def __init__(self, kernel_size, filters, stage, block, data_format):
super(_IdentityBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
bn_axis = 1 if data_format == 'channels_first' else 3
self.conv2a = layers.Conv2D(
filters1, (1, 1), name=conv_name_base + '2a', data_format=data_format)
self.bn2a = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2a')
self.conv2b = layers.Conv2D(
filters2,
kernel_size,
padding='same',
data_format=data_format,
name=conv_name_base + '2b')
self.bn2b = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2b')
self.conv2c = layers.Conv2D(
filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format)
self.bn2c = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2c')
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
x += input_tensor
return tf.nn.relu(x)
class _ConvBlock(tf.keras.Model):
"""_ConvBlock is the block that has a conv layer at shortcut.
Args:
kernel_size: the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
data_format: data_format for the input ('channels_first' or
'channels_last').
strides: strides for the convolution. Note that from stage 3, the first
conv layer at main path is with strides=(2,2), and the shortcut should
have strides=(2,2) as well.
"""
def __init__(self,
kernel_size,
filters,
stage,
block,
data_format,
strides=(2, 2)):
super(_ConvBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
bn_axis = 1 if data_format == 'channels_first' else 3
self.conv2a = layers.Conv2D(
filters1, (1, 1),
strides=strides,
name=conv_name_base + '2a',
data_format=data_format)
self.bn2a = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2a')
self.conv2b = layers.Conv2D(
filters2,
kernel_size,
padding='same',
name=conv_name_base + '2b',
data_format=data_format)
self.bn2b = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2b')
self.conv2c = layers.Conv2D(
filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format)
self.bn2c = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '2c')
self.conv_shortcut = layers.Conv2D(
filters3, (1, 1),
strides=strides,
name=conv_name_base + '1',
data_format=data_format)
self.bn_shortcut = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '1')
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
shortcut = self.conv_shortcut(input_tensor)
shortcut = self.bn_shortcut(shortcut, training=training)
x += shortcut
return tf.nn.relu(x)
# pylint: disable=not-callable
class ResNet50(tf.keras.Model):
"""Instantiates the ResNet50 architecture.
Args:
data_format: format for the image. Either 'channels_first' or
'channels_last'. 'channels_first' is typically faster on GPUs while
'channels_last' is typically faster on CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
name: Prefix applied to names of variables created in the model.
trainable: Is the model trainable? If true, performs backward
and optimization after call() method.
include_top: whether to include the fully-connected layer at the top of the
network.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional layer.
- `avg` means that global average pooling will be applied to the output of
the last convolutional layer, and thus the output of the model will be
a 2D tensor.
- `max` means that global max pooling will be applied.
classes: optional number of classes to classify images into, only to be
specified if `include_top` is True.
Raises:
ValueError: in case of invalid argument for data_format.
"""
def __init__(self,
data_format,
name='',
trainable=True,
include_top=True,
pooling=None,
classes=1000):
super(ResNet50, self).__init__(name=name)
valid_channel_values = ('channels_first', 'channels_last')
if data_format not in valid_channel_values:
raise ValueError('Unknown data_format: %s. Valid values: %s' %
(data_format, valid_channel_values))
self.include_top = include_top
def conv_block(filters, stage, block, strides=(2, 2)):
return _ConvBlock(
3,
filters,
stage=stage,
block=block,
data_format=data_format,
strides=strides)
def id_block(filters, stage, block):
return _IdentityBlock(
3, filters, stage=stage, block=block, data_format=data_format)
self.conv1 = layers.Conv2D(
64, (7, 7),
strides=(2, 2),
data_format=data_format,
padding='same',
name='conv1')
bn_axis = 1 if data_format == 'channels_first' else 3
self.bn_conv1 = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')
self.max_pool = layers.MaxPooling2D(
(3, 3), strides=(2, 2), data_format=data_format)
self.l2a = conv_block([64, 64, 256], stage=2, block='a', strides=(1, 1))
self.l2b = id_block([64, 64, 256], stage=2, block='b')
self.l2c = id_block([64, 64, 256], stage=2, block='c')
self.l3a = conv_block([128, 128, 512], stage=3, block='a')
self.l3b = id_block([128, 128, 512], stage=3, block='b')
self.l3c = id_block([128, 128, 512], stage=3, block='c')
self.l3d = id_block([128, 128, 512], stage=3, block='d')
self.l4a = conv_block([256, 256, 1024], stage=4, block='a')
self.l4b = id_block([256, 256, 1024], stage=4, block='b')
self.l4c = id_block([256, 256, 1024], stage=4, block='c')
self.l4d = id_block([256, 256, 1024], stage=4, block='d')
self.l4e = id_block([256, 256, 1024], stage=4, block='e')
self.l4f = id_block([256, 256, 1024], stage=4, block='f')
self.l5a = conv_block([512, 512, 2048], stage=5, block='a')
self.l5b = id_block([512, 512, 2048], stage=5, block='b')
self.l5c = id_block([512, 512, 2048], stage=5, block='c')
self.avg_pool = layers.AveragePooling2D(
(7, 7), strides=(7, 7), data_format=data_format)
if self.include_top:
self.flatten = layers.Flatten()
self.fc1000 = layers.Dense(classes, name='fc1000')
else:
reduction_indices = [1, 2] if data_format == 'channels_last' else [2, 3]
reduction_indices = tf.constant(reduction_indices)
if pooling == 'avg':
self.global_pooling = functools.partial(
tf.reduce_mean,
reduction_indices=reduction_indices,
keep_dims=False)
elif pooling == 'max':
self.global_pooling = functools.partial(
tf.reduce_max, reduction_indices=reduction_indices, keep_dims=False)
else:
self.global_pooling = None
def call(self, inputs, training=True):
x = self.conv1(inputs)
x = self.bn_conv1(x, training=training)
x = tf.nn.relu(x)
x = self.max_pool(x)
x = self.l2a(x, training=training)
x = self.l2b(x, training=training)
x = self.l2c(x, training=training)
x = self.l3a(x, training=training)
x = self.l3b(x, training=training)
x = self.l3c(x, training=training)
x = self.l3d(x, training=training)
x = self.l4a(x, training=training)
x = self.l4b(x, training=training)
x = self.l4c(x, training=training)
x = self.l4d(x, training=training)
x = self.l4e(x, training=training)
x = self.l4f(x, training=training)
x = self.l5a(x, training=training)
x = self.l5b(x, training=training)
x = self.l5c(x, training=training)
x = self.avg_pool(x)
if self.include_top:
return self.fc1000(self.flatten(x))
elif self.global_pooling:
return self.global_pooling(x)
else:
return x
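# Minimal usage sketch (illustrative only; shapes assume standard 224x224
# ImageNet inputs and an eager execution context, as in the examples this
# model is written for):
#   model = ResNet50(data_format='channels_last')
#   logits = model(tf.zeros((2, 224, 224, 3)), training=False)      # (2, 1000)
#   encoder = ResNet50('channels_last', include_top=False, pooling='avg')
#   features = encoder(tf.zeros((2, 224, 224, 3)), training=False)  # (2, 2048)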
|
|
__source__ = 'https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iii/#/description'
# https://github.com/kamyu104/LeetCode/blob/master/Python/best-time-to-buy-and-sell-stock-iii.py
# Time: O(n)
# Space: O(1)
# Dynamic Programming
#
# Description: Leetcode # 123. Best Time to Buy and Sell Stock III
#
# Say you have an array for which the ith element
# is the price of a given stock on day i.
#
# Design an algorithm to find the maximum profit.
# You may complete at most two transactions.
#
# Note:
# You may not engage in multiple transactions at the same time
# (ie, you must sell the stock before you buy again).
#
# Related Topics
# Array Dynamic Programming
# Similar Questions
# Best Time to Buy and Sell Stock Best Time to Buy and Sell Stock II Best Time to Buy and Sell Stock IV
#
# Thought:
# Compared to I and II, III limits the number of transactions to 2.
# This can be solved by "divide and conquer".
# We use left[i] to track the maximum profit for transactions before i,
# and use right[i] to track the maximum profit for transactions after i.
# You can use the following example to understand the Java solution:
#
# Prices: 1 4 5 7 6 3 2 9
# left = [0, 3, 4, 6, 6, 6, 6, 8]
# right= [8, 7, 7, 7, 7, 7, 7, 0]
#
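# A small self-contained sketch (not one of the original solutions) that
# materialises the left/right arrays described above, so the numbers in the
# example can be reproduced directly.
def _left_right_profit_sketch(prices):
    if not prices:
        return 0
    n = len(prices)
    left, right = [0] * n, [0] * n
    low = prices[0]
    for i in range(1, n):             # best single transaction within prices[:i+1]
        low = min(low, prices[i])
        left[i] = max(left[i - 1], prices[i] - low)
    high = prices[-1]
    for i in range(n - 2, -1, -1):    # best single transaction within prices[i:]
        high = max(high, prices[i])
        right[i] = max(right[i + 1], high - prices[i])
    # For [1, 4, 5, 7, 6, 3, 2, 9] this returns 13 (buy 1/sell 7, buy 2/sell 9).
    return max(l + r for l, r in zip(left, right))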
import unittest
# Time: O(n)
# Space: O(1)
class Solution:
# @param prices, a list of integer
# @return an integer
def maxProfit(self, prices):
hold1, hold2 = float("-inf"), float("-inf")
release1, release2 = 0, 0
for i in prices:
release2 = max(release2, hold2 + i)
hold2 = max(hold2, release1 - i)
release1 = max(release1, hold1 + i)
hold1 = max(hold1, -i)
# debug: print i, release2, hold2, release1, hold1
return release2
# Time: O(k * n)
# Space: O(k)
class Solution_GitHub2:
# @param prices, a list of integer
# @return an integer
def maxProfit(self, prices):
return self.maxAtMostKPairsProfit(prices, 2)
def maxAtMostKPairsProfit(self,prices,k):
max_buy = [float("-inf") for _ in xrange(k + 1)]
max_sell = [0 for _ in xrange(k + 1)]
for i in xrange(len(prices)):
for j in xrange(1, min(k, i/2+1) + 1):
max_buy[j] = max(max_buy[j], max_sell[j-1] - prices[i])
max_sell[j] = max(max_sell[j], max_buy[j] + prices[i])
# debug: print i, j, max_buy, max_sell
return max_sell[k]
# Time: O(n)
# Space: O(n)
class Solution_GitHub3:
# @param prices, a list of integer
# @return an integer
def maxProfit(self, prices):
min_price, max_profit_from_left, max_profits_from_left = float("inf"), 0, []
for price in prices:
min_price = min(min_price, price)
max_profit_from_left = max(max_profit_from_left, price - min_price)
max_profits_from_left.append(max_profit_from_left)
max_price, max_profit_from_right, max_profits_from_right = 0, 0, []
for i in reversed(range(len(prices))):
max_price = max(max_price, prices[i])
max_profit_from_right = max(max_profit_from_right, max_price - prices[i])
max_profits_from_right.insert(0, max_profit_from_right)
max_profit = 0
for i in range(len(prices)):
max_profit = max(max_profit, max_profits_from_left[i] + max_profits_from_right[i])
return max_profit
# explanation in JAVA http://www.programcreek.com/2014/02/leetcode-best-time-to-buy-and-sell-stock-iii-java/
class Solution2:
# @param prices, a list of integer
# @return an integer
# http://chaoren.is-programmer.com/posts/43727.html
def maxProfit(self, prices):
length = len(prices)
if length == 0:
return 0
maxProfitForward = []
minPrice = prices[0]
maxProfit = -1
for currPrice in prices:
minPrice = min(minPrice, currPrice)
maxProfit = max(maxProfit, currPrice - minPrice)
maxProfitForward.append(maxProfit)
maxProfitBackward = []
maxPrice = prices[-1]
maxProfit = -1
for currPrice in reversed(prices):
maxPrice = max(maxPrice, currPrice)
maxProfit = max(maxProfit, maxPrice - currPrice)
maxProfitBackward.insert(0, maxProfit)
# for 0 or 1 transaction
maxProfit = maxProfitForward[-1]
# >= 2 transactions
for i in xrange(length -1):
maxProfit = max(maxProfit, maxProfitForward[i] + maxProfitBackward[i+1])
return maxProfit
#test
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
test = Solution2()
prices = [3,3,5,0,0,3,1,4]
print test.maxProfit(prices)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
# 69.96% 3ms
class Solution {
public int maxProfit(int[] prices) {
if ( prices == null || prices.length == 0) return 0;
int[] left = new int[prices.length];
left[0] = 0;
int min = prices[0];
for(int i = 1; i < left.length ; i++) {
left[i] = Math.max(left[i-1], prices[i] - min);
min = Math.min(min, prices[i]);
}
int[] right = new int[prices.length];
right[prices.length-1] = 0;
int max = prices[prices.length-1];
for(int i = prices.length - 2; i >= 0; i--){
right[i] = Math.max(right[i], max - prices[i]);
max = Math.max(max, prices[i]);
}
int res = 0;
for (int i = 0; i < prices.length; i++) {
res = Math.max(res, left[i] + right[i]);
}
return res;
}
}
# 69.96% 3ms
class Solution {
public int maxProfit(int[] prices) {
int hold1 = Integer.MIN_VALUE, hold2 = Integer.MIN_VALUE;
int release1 = 0, release2 = 0;
for(int i:prices){ // Assume we only have 0 money at first
release2 = Math.max(release2, hold2+i); // The maximum if we've just sold 2nd stock so far.
hold2 = Math.max(hold2, release1-i); // The maximum if we've just bought the 2nd stock so far.
release1 = Math.max(release1, hold1+i); // The maximum if we've just sold the 1st stock so far.
hold1 = Math.max(hold1, -i); // The maximum if we've just bought the 1st stock so far.
}
return release2; // Since release1 is initialised to 0, release2 will always be at least release1.
}
}
# 100% 1ms
class Solution {
public int maxProfit(int[] prices) {
int buy1 = Integer.MIN_VALUE, sell1 = 0, buy2 = Integer.MIN_VALUE, sell2 = 0;
for(int price : prices) {
if(-price > buy1) {
buy1 = -price;
}
if(buy1 + price > sell1) {
sell1 = buy1 + price;
}
if(sell1 - price > buy2) {
buy2 = sell1 - price;
}
if(buy2 + price > sell2) {
sell2 = buy2 + price;
}
}
return sell2;
}
}
'''
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'CreditLine.product_rate'
db.delete_column(u'accounting_creditline', 'product_rate_id')
# Deleting field 'CreditLine.feature_rate'
db.delete_column(u'accounting_creditline', 'feature_rate_id')
def backwards(self, orm):
# Adding field 'CreditLine.product_rate'
db.add_column(u'accounting_creditline', 'product_rate', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounting.SoftwareProductRate'], null=True, blank=True), keep_default=False)
# Adding field 'CreditLine.feature_rate'
db.add_column(u'accounting_creditline', 'feature_rate', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounting.FeatureRate'], null=True, blank=True), keep_default=False)
models = {
u'accounting.billingaccount': {
'Meta': {'object_name': 'BillingAccount'},
'account_type': ('django.db.models.fields.CharField', [], {'default': "'CONTRACT'", 'max_length': '25'}),
'billing_admins': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.BillingAccountAdmin']", 'null': 'True', 'symmetrical': 'False'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'created_by_domain': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Currency']"}),
'date_confirmed_extra_charges': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auto_invoiceable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'salesforce_account_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'})
},
u'accounting.billingaccountadmin': {
'Meta': {'object_name': 'BillingAccountAdmin'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'web_user': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'})
},
u'accounting.billingcontactinfo': {
'Meta': {'object_name': 'BillingContactInfo'},
'account': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['accounting.BillingAccount']", 'unique': 'True', 'primary_key': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'emails': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'first_line': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'second_line': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'state_province_region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'accounting.billingrecord': {
'Meta': {'object_name': 'BillingRecord'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'emailed_to': ('django.db.models.fields.CharField', [], {'max_length': '254', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']"}),
'pdf_data_id': ('django.db.models.fields.CharField', [], {'max_length': '48'}),
'skipped_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'accounting.creditadjustment': {
'Meta': {'object_name': 'CreditAdjustment'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'credit_line': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.CreditLine']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']", 'null': 'True'}),
'line_item': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.LineItem']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'MANUAL'", 'max_length': '25'}),
'web_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'})
},
u'accounting.creditline': {
'Meta': {'object_name': 'CreditLine'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.BillingAccount']"}),
'balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']", 'null': 'True', 'blank': 'True'})
},
u'accounting.currency': {
'Meta': {'object_name': 'Currency'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'rate_to_default': ('django.db.models.fields.DecimalField', [], {'default': "'1.0'", 'max_digits': '20', 'decimal_places': '9'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'accounting.defaultproductplan': {
'Meta': {'object_name': 'DefaultProductPlan'},
'edition': ('django.db.models.fields.CharField', [], {'default': "'Community'", 'max_length': '25'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlan']"}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'accounting.feature': {
'Meta': {'object_name': 'Feature'},
'feature_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
u'accounting.featurerate': {
'Meta': {'object_name': 'FeatureRate'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Feature']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'monthly_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'}),
'monthly_limit': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'per_excess_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'})
},
u'accounting.invoice': {
'Meta': {'object_name': 'Invoice'},
'balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_due': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {}),
'date_paid': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_received': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']"}),
'tax_rate': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'})
},
u'accounting.lineitem': {
'Meta': {'object_name': 'LineItem'},
'base_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'base_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feature_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.FeatureRate']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']"}),
'product_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProductRate']", 'null': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'unit_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'accounting.softwareplan': {
'Meta': {'object_name': 'SoftwarePlan'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'edition': ('django.db.models.fields.CharField', [], {'default': "'Enterprise'", 'max_length': '25'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'INTERNAL'", 'max_length': '10'})
},
u'accounting.softwareplanversion': {
'Meta': {'object_name': 'SoftwarePlanVersion'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature_rates': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.FeatureRate']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlan']"}),
'product_rates': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.SoftwareProductRate']", 'symmetrical': 'False', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_prbac.Role']"})
},
u'accounting.softwareproduct': {
'Meta': {'object_name': 'SoftwareProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'})
},
u'accounting.softwareproductrate': {
'Meta': {'object_name': 'SoftwareProductRate'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'monthly_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProduct']"})
},
u'accounting.subscriber': {
'Meta': {'object_name': 'Subscriber'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'})
},
u'accounting.subscription': {
'Meta': {'object_name': 'Subscription'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.BillingAccount']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_delay_invoicing': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
'do_not_invoice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan_version': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlanVersion']"}),
'salesforce_contract_id': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscriber']"})
},
u'accounting.subscriptionadjustment': {
'Meta': {'object_name': 'SubscriptionAdjustment'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']", 'null': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'default': "'INTERNAL'", 'max_length': '50'}),
'new_date_delay_invoicing': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'new_date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'new_date_start': ('django.db.models.fields.DateField', [], {}),
'new_salesforce_contract_id': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'CREATE'", 'max_length': '50'}),
'related_subscription': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptionadjustment_related'", 'null': 'True', 'to': u"orm['accounting.Subscription']"}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']"}),
'web_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'})
},
u'django_prbac.role': {
'Meta': {'object_name': 'Role'},
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parameters': ('django_prbac.fields.StringSetField', [], {'default': '[]', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'})
}
}
complete_apps = ['accounting']
|
|
# -*- coding: utf-8 -*-
'''
Management of MySQL grants (user permissions)
=============================================
:depends: - MySQLdb Python module
:configuration: See :py:mod:`salt.modules.mysql` for setup instructions.
The mysql_grants module is used to grant and revoke MySQL permissions.
The ``name`` you pass in is purely symbolic and has nothing to do
with the grant itself.
The ``database`` parameter must specify a 'priv_level' in the same
format as defined in the MySQL documentation:
* \\*
* \\*.\\*
* db_name.\\*
* db_name.tbl_name
* etc...
This state is not able to set a password for the grant's user at the
specified host. See :py:mod:`salt.states.mysql_user` for further
instructions.
.. code-block:: yaml
frank_exampledb:
mysql_grants.present:
- grant: select,insert,update
- database: exampledb.*
- user: frank
- host: localhost
frank_otherdb:
mysql_grants.present:
- grant: all privileges
- database: otherdb.*
- user: frank
restricted_singletable:
mysql_grants.present:
- grant: select
- database: somedb.sometable
- user: joe
'''
from __future__ import absolute_import
import sys
def __virtual__():
'''
Only load if the mysql module is available
'''
return 'mysql.grant_exists' in __salt__
def _get_mysql_error():
'''
Look in module context for a MySQL error. Eventually we should make a less
ugly way of doing this.
'''
return sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('mysql.error', None)
def present(name,
grant=None,
database=None,
user=None,
host='localhost',
grant_option=False,
escape=True,
revoke_first=False,
ssl_option=False,
**connection_args):
'''
Ensure that the grant is present with the specified properties
name
The name (key) of the grant to add
grant
The grant priv_type (i.e. select,insert,update OR all privileges)
database
The database priv_level (i.e. db.tbl OR db.*)
user
The user to apply the grant to
host
The network/host that the grant should apply to
grant_option
Adds the WITH GRANT OPTION to the defined grant. Default is ``False``
escape
Defines if the database value gets escaped or not. Default is ``True``
revoke_first
By default, MySQL will not do anything if you issue a command to grant
privileges that are more restrictive than what's already in place. This
effectively means that you cannot downgrade permissions without first
revoking the permissions applied to a db.table/user pair.
To have Salt forcibly revoke permissions before applying a new grant, enable
the ``revoke_first`` option.
WARNING: This will *remove* permissions for a database before attempting
to apply new permissions. There is no guarantee that new permissions
will be applied correctly which can leave your database security in an
unknown and potentially dangerous state.
Use with caution!
Default is ``False``
ssl_option
Adds the specified ssl options for the connecting user as requirements for
this grant. Value is a list of single-element dicts corresponding to the
list of ssl options to use.
Possible key/value pairings for the dicts in the value:
.. code-block:: text
- SSL: True
- X509: True
- SUBJECT: <subject>
- ISSUER: <issuer>
- CIPHER: <cipher>
The non-boolean ssl options take a string as their values, which should
be an appropriate value as specified by the MySQL documentation for these
options.
Default is ``False`` (no ssl options will be used)
'''
comment = 'Grant {0} on {1} to {2}@{3} is already present'
ret = {'name': name,
'changes': {},
'result': True,
'comment': comment.format(grant, database, user, host)
}
# check if grant exists
if __salt__['mysql.grant_exists'](
grant, database, user, host, grant_option, escape, **connection_args
):
return ret
else:
err = _get_mysql_error()
if err is not None:
ret['comment'] = err
ret['result'] = False
return ret
if revoke_first and not __opts__['test']:
# For each existing grant, break it into tokens and see if it's on the same
# user/db/table as ours (there is probably only one).
user_grants = __salt__['mysql.user_grants'](user, host, **connection_args)
if not user_grants:
user_grants = []
for user_grant in user_grants:
token_grants = __salt__['mysql.tokenize_grant'](user_grant)
db_part = database.rpartition('.')
my_db = db_part[0]
my_table = db_part[2]
my_db = __salt__['mysql.quote_identifier'](my_db, (my_table == '*'))
my_table = __salt__['mysql.quote_identifier'](my_table)
# Remove per-table grants in the case of a database-level grant.
if token_grants['database'] == my_db:
grant_to_revoke = ','.join(token_grants['grant']).rstrip(',')
__salt__['mysql.grant_revoke'](
grant=grant_to_revoke,
database=database,
user=user,
host=host,
grant_option=grant_option,
escape=escape,
**connection_args)
# The grant is not present, make it!
if __opts__['test']:
# there are probably better things to do in test mode
ret['result'] = None
ret['comment'] = ('MySQL grant {0} is set to be created').format(name)
return ret
if __salt__['mysql.grant_add'](
grant, database, user, host, grant_option, escape, ssl_option, **connection_args
):
ret['comment'] = 'Grant {0} on {1} to {2}@{3} has been added'
ret['comment'] = ret['comment'].format(grant, database, user, host)
ret['changes'][name] = 'Present'
else:
ret['comment'] = 'Failed to execute: "GRANT {0} ON {1} TO {2}@{3}"'
ret['comment'] = ret['comment'].format(grant, database, user, host)
err = _get_mysql_error()
if err is not None:
ret['comment'] += ' ({0})'.format(err)
ret['result'] = False
return ret
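# --- Hedged illustration (not part of the original state module) -------------
# The ``ssl_option`` argument documented above is a list of single-element
# dicts. The value below only illustrates the expected shape (its contents are
# made up) and is not used anywhere in this module.
_SSL_OPTION_EXAMPLE = [
    {'SSL': True},
    {'SUBJECT': '/CN=example-client'},
]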
def absent(name,
grant=None,
database=None,
user=None,
host='localhost',
grant_option=False,
escape=True,
**connection_args):
'''
Ensure that the grant is absent
name
The name (key) of the grant to remove
grant
The grant priv_type (i.e. select,insert,update OR all privileges)
database
The database priv_level (i.e. db.tbl OR db.*)
user
The user to apply the grant to
host
The network/host that the grant should apply to
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
# Check if grant exists, and if so, remove it
if __salt__['mysql.grant_exists'](
grant,
database,
user, host,
grant_option,
escape,
**connection_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'MySQL grant {0} is set to be ' \
'revoked'.format(name)
return ret
if __salt__['mysql.grant_revoke'](
grant,
database,
user,
host,
grant_option,
**connection_args):
ret['comment'] = 'Grant {0} on {1} for {2}@{3} has been ' \
'revoked'.format(grant, database, user, host)
ret['changes'][name] = 'Absent'
return ret
else:
err = _get_mysql_error()
if err is not None:
ret['comment'] = 'Unable to revoke grant {0} on {1} for ' \
'{2}@{3} ({4})'.format(grant, database,
user, host, err)
ret['result'] = False
return ret
else:
err = _get_mysql_error()
if err is not None:
ret['comment'] = 'Unable to determine if grant {0} on {1} for ' \
'{2}@{3} exists ({4})'.format(grant, database,
user, host, err)
ret['result'] = False
return ret
# fallback
ret['comment'] = ('Grant {0} on {1} to {2}@{3} is not present, so it'
' cannot be revoked').format(
grant,
database,
user,
host
)
return ret
|
|
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import IECore
import IECoreHoudini
import unittest
import os
import shutil
class TestProceduralHolder( IECoreHoudini.TestCase ):
def testProceduralHolder(self):
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
proc = geo.createNode( "ieProceduralHolder" )
self.assert_( proc )
fn = IECoreHoudini.FnProceduralHolder( proc )
self.assert_( fn )
return fn
def testLoadProcedural(self):
fn = self.testProceduralHolder()
cl = IECore.ClassLoader.defaultProceduralLoader().load( "sphereProcedural", 0 )()
fn.setParameterised( cl )
self.assertNotEqual( fn.getParameterised(), None )
self.assertEqual( fn.getParameterised(), cl )
return fn
# tests creation within contexts (simulating from UIs)
def testContextCreator( self ) :
# test generic creation
n = IECoreHoudini.FnProceduralHolder.create( "test", "parameterTypes" )
self.assertEqual( n.path(), "/obj/test/test" )
# test that contextArgs outside UI mode fall back to generic behaviour
contextArgs = { "toolname" : "ieProceduralHolder" }
n2 = IECoreHoudini.FnProceduralHolder.create( "test", "parameterTypes", contextArgs=contextArgs )
self.assertEqual( n2.path(), "/obj/test1/test" )
# test parent arg
geo = hou.node( "/obj" ).createNode( "geo", run_init_scripts=False )
n3 = IECoreHoudini.FnProceduralHolder.create( "test", "parameterTypes", parent=geo, contextArgs=contextArgs )
self.assertEqual( n3.path(), "/obj/geo1/test" )
# test automatic conversion
contextArgs["shiftclick"] = True
n4 = IECoreHoudini.FnProceduralHolder.create( "test", "parameterTypes", parent=geo, contextArgs=contextArgs )
self.assertEqual( n4.path(), "/obj/geo1/test1" )
self.assertEqual( len(n4.outputConnectors()[0]), 1 )
self.assertEqual( n4.outputConnectors()[0][0].outputNode().type().name(), "ieCortexConverter" )
# test automatic conversion and output connections
mountain = geo.createNode( "mountain" )
contextArgs["outputnodename"] = mountain.path()
n5 = IECoreHoudini.FnOpHolder.create( "test", "parameterTypes", parent=geo, contextArgs=contextArgs )
self.assertEqual( n5.path(), "/obj/geo1/test2" )
self.assertEqual( len(n5.outputConnectors()[0]), 1 )
converter = n5.outputConnectors()[0][0].outputNode()
self.assertEqual( converter.type().name(), "ieCortexConverter" )
self.assertEqual( len(converter.outputConnectors()[0]), 1 )
outputNode = converter.outputConnectors()[0][0].outputNode()
self.assertEqual( outputNode.type().name(), "mountain" )
self.assertEqual( outputNode, mountain )
def testProceduralParameters(self):
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
proc = geo.createNode( "ieProceduralHolder" )
fn = IECoreHoudini.FnProceduralHolder( proc )
fn.setProcedural( "parameterTypes", 1 )
# set a lot of parameters via houdini
proc.parmTuple("parm_a").set( [123] )
proc.parmTuple("parm_d").set( ["hello"] )
proc.parmTuple("parm_g").set( (2,4) )
proc.parmTuple("parm_h").set( (1,4,8) )
proc.parmTuple("parm_i").set( (2,4) )
proc.parmTuple("parm_i_3").set( (1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16 ) )
proc.parmTuple("parm_i_4").set( (1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16 ) )
proc.parmTuple("parm_compound_j").set( (1,4,8) )
proc.parmTuple("parm_compound_k").set( (1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16 ) )
proc.parmTuple("parm_l").set( (1,0,0) )
proc.parmTuple("parm_m").set( (1,1,0,1) )
proc.parmTuple("parm_o").set( ["myFile.tif"] )
proc.parmTuple("parm_p").set( [os.getcwd()] )
proc.parmTuple("parm_q").set( [True] )
proc.parmTuple("parm_r").set( ["mySequence.####.tif"] )
proc.parmTuple("parm_s").set( [-1, -2, 10, 20] )
proc.parmTuple("parm_s_1").set( [-1, -2, 10, 20] )
proc.parmTuple("parm_s_2").set( [-1, -2, -3, 10, 20, 30] )
proc.parmTuple("parm_t").set( [-1, -2, -3, 10, 20, 30] )
proc.parmTuple("parm_u").set( (64, 128) )
proc.parmTuple("parm_v").set( (25,26,27) )
# flush our parameters through to our parameterised procedural
proc.cook(force=True)
# generate our bounds
parameterised = fn.getParameterised()
self.failUnless( parameterised.isInstanceOf( IECore.TypeId.RunTimeTyped ) )
box = parameterised.bound()
self.assertEqual( box, IECore.Box3f( IECore.V3f(0,0,0), IECore.V3f(1,1,1) ) )
return ( proc, parameterised )
def testLotsQuickly(self):
n = []
for i in range(1000):
n.append( IECoreHoudini.FnProceduralHolder.create( "cortex_sphere", "sphereProcedural", 1 ) )
for _n in n:
_n.destroy()
def testSaveAndLoad(self):
save_file = "test/proceduralHolder_testData/proceduralSave_test.hip"
# create a few procedurals
n = []
for i in range( 10 ):
n.append( IECoreHoudini.FnProceduralHolder.create( "cortex_sphere", "sphereProcedural", 1 ) )
for i in range( 10 ):
n.append( IECoreHoudini.FnProceduralHolder.create( "cortex_params", "parameterTypes", 1 ) )
# set some values
path1 = n[0].path()
n[0].parm("parm_radius").set(10)
n[0].parm("parm_theta").set(90)
path2 = n[9].path()
n[9].parm("parm_radius").set(5)
n[9].parm("parm_theta").set(45)
# save scene
hou.hipFile.save(save_file)
# new scene
hou.hipFile.clear(suppress_save_prompt=True)
# open scene
hou.hipFile.load(save_file)
# check parameters
proc = hou.node(path1)
self.failUnless( proc )
self.assertEqual( proc.evalParm( "__className" ), "sphereProcedural" )
self.assertEqual( proc.evalParm( "__classVersion" ), "1" )
self.assertEqual( proc.evalParm("parm_radius"), 10 )
self.assertEqual( proc.evalParm("parm_theta"), 90 )
proc = hou.node(path2)
self.failUnless( proc )
self.assertEqual( proc.evalParm( "__className" ), "sphereProcedural" )
self.assertEqual( proc.evalParm( "__classVersion" ), "1" )
self.assertEqual( proc.evalParm("parm_radius"), 5 )
self.assertEqual( proc.evalParm("parm_theta"), 45 )
def testObjectWasDeleted(self):
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
proc = geo.createNode( "ieProceduralHolder" )
fn = IECoreHoudini.FnProceduralHolder( proc )
cl = IECore.ClassLoader.defaultProceduralLoader().load( "sphereProcedural", 1 )()
proc.destroy()
self.assertEqual( fn.hasParameterised(), False )
fn.setParameterised(cl)
def testProceduralReloadParameters(self):
sphere = IECoreHoudini.FnProceduralHolder.create( "cortex_sphere", "sphereProcedural", 1 )
# check the reload button doesn't clear expressions
sphere.parm("parm_radius").setExpression("sin($FF)")
hou.setFrame(0)
rad = sphere.evalParm("parm_radius")
self.assert_( rad > 0 )
hou.setFrame(100)
rad = sphere.evalParm("parm_radius")
self.assert_( rad > 0.984 )
self.assert_( rad < 0.985 )
sphere.parm( "__classReloadButton" ).pressButton()
rad = sphere.evalParm("parm_radius")
self.assert_( rad > 0.984 )
self.assert_( rad < 0.985 )
self.assertEqual( sphere.parm("parm_radius").expression(), "sin($FF)" )
hou.setFrame(0)
rad = sphere.evalParm("parm_radius")
self.assert_( rad > 0 )
# now change the version to v2 and check things are still ok
sphere.parm( "__classVersion" ).set( "2" )
# if we're changing the menu programmatically then we need to call pressButton()!!
sphere.parm( "__classVersion" ).pressButton()
self.assert_( not sphere.evalParm("parm_extra") )
sphere.parm("parm_extra").set(True)
self.failUnless( sphere.evalParm("parm_extra") )
rad = sphere.evalParm("parm_radius")
self.assert_( rad < 0.015 )
hou.setFrame(100)
rad = sphere.evalParm("parm_radius")
self.assert_( rad > 0.984 )
self.assert_( rad < 0.985 )
def testHiddenParameters( self ):
( proc, cl ) = self.testProceduralParameters()
# check the hidden userData works
self.assertEqual( proc.parmTuple("parm_a").parmTemplate().isHidden(), True )
self.assertEqual( proc.parmTuple("parm_b").parmTemplate().isHidden(), False )
self.assertEqual( proc.parmTuple("parm_c").parmTemplate().isHidden(), True )
self.assertEqual( proc.parmTuple("parm_d").parmTemplate().isHidden(), False )
# check setting the parameter still works
proc.parmTuple("parm_a").set( [123] )
proc.cook(force=True)
self.assertEqual( cl['a'].getValue().value, 123 )
def testParameterLabels( self ):
( proc, cl ) = self.testProceduralParameters()
# check the parameter label userData works
self.assertEqual( proc.parmTuple("parm_a").parmTemplate().label(), "Int" )
self.assertEqual( proc.parmTuple("parm_b").parmTemplate().label(), "B" )
self.assertEqual( proc.parmTuple("parm_c").parmTemplate().label(), "Double" )
self.assertEqual( proc.parmTuple("parm_d").parmTemplate().label(), "D" )
def testMatchString(self):
(op,fn)=self.testProceduralParameters()
fn = IECoreHoudini.FnProceduralHolder(op)
self.assertEqual( op.parm( "__classMatchString" ).eval(), "*" )
op.parm( "__className" ).set( "sphereProcedural" )
op.parm( "__className" ).pressButton()
cl = fn.getParameterised()
self.assertEqual( cl.typeName(), "sphereProcedural" )
op.parm( "__classMatchString" ).set( "nestedChild" )
results = fn.classNames()
self.assertEqual( len(fn.classNames()), 1 )
op.parm( "__className" ).set( "sphereProcedural" ) # this still works, should it be invalid?
op.parm( "__className" ).pressButton()
cl = fn.getParameterised()
self.assertEqual( cl.typeName(), "sphereProcedural" )
op.parm( "__classMatchString" ).set( "*" )
self.assert_( len(fn.classNames()) > 1 )
def createProcedural( self, path="primitiveParameters/multiple", version=1 ) :
obj = hou.node( "/obj" )
geo = obj.createNode( "geo", run_init_scripts=False )
proc = geo.createNode( "ieProceduralHolder" )
fn = IECoreHoudini.FnProceduralHolder( proc )
fn.setProcedural( path, version )
return ( proc, fn )
def testObjectParameterConversion( self ) :
( proc, fn ) = self.createProcedural()
torus = proc.createInputNode( 2, "torus" )
proc.cook()
self.assertEqual( proc.errors(), "" )
self.assertEqual( len(proc.geometry().points()), 1 )
converterSop = proc.createOutputNode( "ieCortexConverter" )
self.assertEqual( len(converterSop.geometry().points()), 100 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.Group )
self.assertEqual( result.children()[0].typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.children()[0].numFaces(), 100 )
self.assertEqual( result.children()[1].typeId(), IECore.TypeId.PointsPrimitive )
self.assertEqual( result.children()[1].numPoints, 0 )
torus.parm( "type" ).set( 1 )
proc.cook()
self.assertEqual( proc.errors(), "" )
self.assertEqual( len(proc.geometry().points()), 1 )
self.assertEqual( len(converterSop.geometry().points()), 100 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.Group )
self.assertEqual( result.children()[0].typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.children()[0].numFaces(), 100 )
self.assertEqual( result.children()[1].typeId(), IECore.TypeId.PointsPrimitive )
self.assertEqual( result.children()[1].numPoints, 0 )
def testObjectParameterWithMultipleTypesConversion( self ) :
( proc, fn ) = self.createProcedural()
torus = proc.createInputNode( 3, "torus" )
proc.cook()
self.assertEqual( proc.errors(), "" )
self.assertEqual( len(proc.geometry().points()), 1 )
converterSop = proc.createOutputNode( "ieCortexConverter" )
self.assertEqual( len(converterSop.geometry().points()), 100 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.Group )
self.assertEqual( result.children()[0].typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.children()[0].numFaces(), 100 )
self.assertEqual( result.children()[1].typeId(), IECore.TypeId.PointsPrimitive )
self.assertEqual( result.children()[1].numPoints, 0 )
torus.parm( "type" ).set( 1 )
proc.cook()
self.assertEqual( proc.errors(), "" )
self.assertEqual( len(proc.geometry().points()), 1 )
self.assertEqual( len(converterSop.geometry().points()), 100 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.PointsPrimitive )
self.assertEqual( result.numPoints, 100 )
def testPointsParameterConversion( self ) :
( proc, fn ) = self.createProcedural()
torus = proc.createInputNode( 1, "torus" )
proc.cook()
self.assertEqual( proc.errors(), "" )
self.assertEqual( len(proc.geometry().points()), 1 )
converterSop = proc.createOutputNode( "ieCortexConverter" )
self.assertEqual( len(converterSop.geometry().points()), 100 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.PointsPrimitive )
self.assertEqual( result.numPoints, 100 )
torus.parm( "type" ).set( 1 )
proc.cook()
self.assertEqual( proc.errors(), "" )
self.assertEqual( len(proc.geometry().points()), 1 )
self.assertEqual( len(converterSop.geometry().points()), 100 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.PointsPrimitive )
self.assertEqual( result.numPoints, 100 )
def testMeshParameterConversion( self ) :
( proc, fn ) = self.createProcedural( "primitiveParameters/meshRender" )
torus = proc.createInputNode( 0, "torus" )
proc.cook()
self.assertEqual( proc.errors(), "" )
self.assertEqual( len(proc.geometry().points()), 1 )
converterSop = proc.createOutputNode( "ieCortexConverter" )
self.assertEqual( len(converterSop.geometry().points()), 100 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.numFaces(), 100 )
torus.parm( "type" ).set( 1 )
proc.cook()
self.assertEqual( proc.errors(), "" )
self.assertEqual( len(proc.geometry().points()), 1 )
self.assertEqual( len(converterSop.geometry().points()), 100 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.numFaces(), 100 )
# test a proceduralHolder with multiple inputs
def testMultipleInputs( self ) :
( proc, fn ) = self.createProcedural()
torus = proc.createInputNode( 0, "torus" )
box = proc.createInputNode( 2, "box" )
torus2 = proc.createInputNode( 3, "torus" )
proc.cook()
self.assertEqual( proc.errors(), "" )
self.assertEqual( len(proc.geometry().points()), 1 )
converterSop = proc.createOutputNode( "ieCortexConverter" )
self.assertEqual( len(converterSop.geometry().points()), 208 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.Group )
self.assertEqual( result.children()[0].typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.children()[0].numFaces(), 206 )
self.assertEqual( result.children()[1].typeId(), IECore.TypeId.PointsPrimitive )
self.assertEqual( result.children()[1].numPoints, 0 )
torus2.parm( "type" ).set( 1 )
proc.cook()
self.assertEqual( proc.errors(), "" )
self.assertEqual( len(proc.geometry().points()), 1 )
self.assertEqual( len(converterSop.geometry().points()), 208 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.Group )
self.assertEqual( result.children()[0].typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.children()[0].numFaces(), 106 )
self.assertEqual( result.children()[0]["P"].data.size(), 108 )
self.assertEqual( result.children()[1].typeId(), IECore.TypeId.PointsPrimitive )
self.assertEqual( result.children()[1].numPoints, 100 )
# test using op holders and procedural holders as inputs
def testCortexInputs( self ) :
( proc, fn ) = self.createProcedural()
torus = proc.parent().createNode( "torus" )
op = torus.createOutputNode( "ieOpHolder" )
IECoreHoudini.FnOpHolder( op ).setOp( "objectDebug", 1 )
op.parm( "parm_quiet" ).set( True )
proc.setInput( 0, op )
box = proc.createInputNode( 2, "box" )
proc2 = proc.createInputNode( 3, "ieProceduralHolder" )
fn2 = IECoreHoudini.FnProceduralHolder( proc2 )
fn2.setProcedural( "primitiveParameters/meshRender", 1 )
torus2 = proc2.createInputNode( 0, "torus" )
proc.cook()
self.assertEqual( proc.errors(), "" )
self.assertEqual( proc2.errors(), "" )
self.assertEqual( op.errors(), "" )
self.assertEqual( len(proc.geometry().points()), 1 )
self.assertEqual( len(proc2.geometry().points()), 1 )
self.assertEqual( len(op.geometry().points()), 1 )
converterSop = op.createOutputNode( "ieCortexConverter" )
self.assertEqual( len(converterSop.geometry().points()), 100 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.numFaces(), 100 )
converterSop = proc2.createOutputNode( "ieCortexConverter" )
self.assertEqual( len(converterSop.geometry().points()), 100 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.numFaces(), 100 )
converterSop = proc.createOutputNode( "ieCortexConverter" )
self.assertEqual( len(converterSop.geometry().points()), 208 )
result = IECoreHoudini.FromHoudiniGeometryConverter.create( converterSop ).convert()
self.assertEqual( result.typeId(), IECore.TypeId.Group )
self.assertEqual( result.children()[0].typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.children()[0].numFaces(), 206 )
self.assertEqual( result.children()[1].typeId(), IECore.TypeId.PointsPrimitive )
self.assertEqual( result.children()[1].numPoints, 0 )
def testAnimatedValues( self ) :
sphere = IECoreHoudini.FnProceduralHolder.create( "test", "sphereProcedural", 1 )
fn = IECoreHoudini.FnProceduralHolder( sphere )
sphere.parm( "parm_radius" ).setExpression( "$FF" )
hou.setFrame( 1 )
self.assertEqual( sphere.evalParm( "parm_radius" ), 1 )
self.assertEqual( fn.getProcedural().parameters()["radius"].getTypedValue(), 1 )
hou.setFrame( 12.25 )
self.assertEqual( sphere.evalParm( "parm_radius" ), 12.25 )
# values haven't been flushed yet
self.assertAlmostEqual( fn.getProcedural().parameters()["radius"].getTypedValue(), 1 )
# so we flush them
fn.setParameterisedValues()
self.assertAlmostEqual( fn.getProcedural().parameters()["radius"].getTypedValue(), 12.25 )
def testNameFilter( self ) :
meshRender = IECoreHoudini.FnProceduralHolder.create( "meshRender", "primitiveParameters/meshRender", 1 )
boxA = meshRender.parent().createNode( "box" )
nameA = boxA.createOutputNode( "name" )
nameA.parm( "name1" ).set( "boxA" )
boxB = meshRender.parent().createNode( "box" )
transformB = boxB.createOutputNode( "xform" )
transformB.parm( "tx" ).set( 5 )
nameB = transformB.createOutputNode( "name" )
nameB.parm( "name1" ).set( "boxB" )
boxC = meshRender.parent().createNode( "box" )
transformC = boxC.createOutputNode( "xform" )
transformC.parm( "tx" ).set( 10 )
nameC = transformC.createOutputNode( "name" )
nameC.parm( "name1" ).set( "boxC" )
merge = meshRender.parent().createNode( "merge" )
merge.setInput( 0, nameA )
merge.setInput( 1, nameB )
merge.setInput( 2, nameC )
meshRender.setInput( 0, merge )
# converts all 3 meshes as one (because the parameter type forces it)
geo = meshRender.geometry()
self.assertEqual( len(geo.prims()), 1 )
self.assertEqual( geo.prims()[0].type(), hou.primType.Custom )
self.assertEqual( meshRender.errors(), "" )
self.assertEqual( meshRender.warnings(), "" )
proc = IECoreHoudini.FromHoudiniGeometryConverter.create( meshRender ).convert()
self.assertTrue( proc.isInstanceOf( IECore.TypeId.ParameterisedProcedural ) )
self.assertEqual( proc.bound(), IECore.Box3f( IECore.V3f( -0.5, -0.5, -0.5 ), IECore.V3f( 10.5, 0.5, 0.5 ) ) )
# setting to one name limits the bounds
meshRender.parm( "parm_mesh_nameFilter" ).set( "boxB" )
self.assertEqual( len(geo.prims()), 1 )
self.assertEqual( geo.prims()[0].type(), hou.primType.Custom )
self.assertEqual( meshRender.errors(), "" )
self.assertEqual( meshRender.warnings(), "" )
proc = IECoreHoudini.FromHoudiniGeometryConverter.create( meshRender ).convert()
self.assertTrue( proc.isInstanceOf( IECore.TypeId.ParameterisedProcedural ) )
self.assertEqual( proc.bound(), IECore.Box3f( IECore.V3f( 4.5, -0.5, -0.5 ), IECore.V3f( 5.5, 0.5, 0.5 ) ) )
# setting to multiple names expands the bounds, but not all the way
meshRender.parm( "parm_mesh_nameFilter" ).set( "* ^boxA" )
self.assertEqual( len(geo.prims()), 1 )
self.assertEqual( geo.prims()[0].type(), hou.primType.Custom )
self.assertEqual( meshRender.errors(), "" )
self.assertEqual( meshRender.warnings(), "" )
proc = IECoreHoudini.FromHoudiniGeometryConverter.create( meshRender ).convert()
self.assertTrue( proc.isInstanceOf( IECore.TypeId.ParameterisedProcedural ) )
self.assertEqual( proc.bound(), IECore.Box3f( IECore.V3f( 4.5, -0.5, -0.5 ), IECore.V3f( 10.5, 0.5, 0.5 ) ) )
# multiple CortexObjects cause warnings (because the parameter wants one mesh only)
converter = merge.createOutputNode( "ieCortexConverter" )
converter.parm( "resultType" ).set( 0 ) # Cortex
meshRender.setInput( 0, converter )
meshRender.parm( "__classReloadButton" ).pressButton() # clear the procedural parm values
self.assertEqual( len(geo.prims()), 1 )
self.assertEqual( geo.prims()[0].type(), hou.primType.Custom )
self.assertEqual( meshRender.errors(), "" )
self.assertNotEqual( meshRender.warnings(), "" )
proc = IECoreHoudini.FromHoudiniGeometryConverter.create( meshRender ).convert()
self.assertTrue( proc.isInstanceOf( IECore.TypeId.ParameterisedProcedural ) )
self.assertEqual( proc.bound(), IECore.MeshPrimitive().bound() )
# a single CortexObject will work fine
meshRender.parm( "parm_mesh_nameFilter" ).set( "boxB" )
self.assertEqual( len(geo.prims()), 1 )
self.assertEqual( geo.prims()[0].type(), hou.primType.Custom )
self.assertEqual( meshRender.errors(), "" )
self.assertEqual( meshRender.warnings(), "" )
proc = IECoreHoudini.FromHoudiniGeometryConverter.create( meshRender ).convert()
self.assertTrue( proc.isInstanceOf( IECore.TypeId.ParameterisedProcedural ) )
self.assertEqual( proc.bound(), IECore.Box3f( IECore.V3f( 4.5, -0.5, -0.5 ), IECore.V3f( 5.5, 0.5, 0.5 ) ) )
# disabling the nameFilter brings the warnings back
meshRender.setInput( 0, converter )
meshRender.parm( "parm_mesh_useNameFilter" ).set( False )
meshRender.parm( "__classReloadButton" ).pressButton() # clear the procedural parm values
self.assertEqual( len(geo.prims()), 1 )
self.assertEqual( geo.prims()[0].type(), hou.primType.Custom )
self.assertEqual( meshRender.errors(), "" )
self.assertNotEqual( meshRender.warnings(), "" )
proc = IECoreHoudini.FromHoudiniGeometryConverter.create( meshRender ).convert()
self.assertTrue( proc.isInstanceOf( IECore.TypeId.ParameterisedProcedural ) )
self.assertEqual( proc.bound(), IECore.MeshPrimitive().bound() )
def setUp( self ) :
IECoreHoudini.TestCase.setUp( self )
if not os.path.exists( "test/proceduralHolder_testData" ):
os.mkdir( "test/proceduralHolder_testData" )
def tearDown( self ) :
if os.path.exists( "test/proceduralHolder_testData" ):
shutil.rmtree( "test/proceduralHolder_testData" )
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
#Python Serial Port Extension for Win32, Linux, BSD, Jython
#module for serial IO for POSIX compatible systems, like Linux
#
#(C) 2001-2003 Chris Liechti <[email protected]>
# this is distributed under a free software license, the CNRI Python License
# see http://www.opensource.org/licenses/pythonpl.php
PARITY_NONE, PARITY_EVEN, PARITY_ODD = 'N', 'E', 'O'
STOPBITS_ONE, STOPBITS_TWO = (1, 2)
FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5,6,7,8)
PARITY_NAMES = {
PARITY_NONE: 'None',
PARITY_EVEN: 'Even',
PARITY_ODD: 'Odd',
}
XON = chr(17)
XOFF = chr(19)
#Python < 2.2.3 compatibility
try:
True
except:
True = 1
False = not True
class SerialException(Exception):
"""Base class for serial port related exceptions."""
portNotOpenError = SerialException('Port not open')
class SerialTimeoutException(SerialException):
"""Write timeouts give an exception"""
writeTimeoutError = SerialTimeoutException("Write timeout")
class FileLike(object):
"""An abstract file like class.
This class implements readline and readlines based on read and
writelines based on write.
This class is used to provide the above functions for Serial
port objects.
Note that when the serial port is opened with _NO_ timeout,
readline blocks until it sees a newline (or until the specified
size is reached), and readlines would never return; it therefore
refuses to work and raises an exception in this case!
"""
def read(self, size): raise NotImplementedError
def write(self, s): raise NotImplementedError
def readline(self, size=None, eol='\n'):
"""read a line which is terminated with end-of-line (eol) character
('\n' by default) or until timeout"""
line = ''
while 1:
c = self.read(1)
if c:
line += c #not very efficient but lines are usually not that long
if c == eol:
break
if size is not None and len(line) >= size:
break
else:
break
return line
def readlines(self, sizehint=None, eol='\n'):
"""read a list of lines, until timeout
sizehint is ignored"""
if self.timeout is None:
raise ValueError, "Serial port MUST have enabled timeout for this function!"
lines = []
while 1:
line = self.readline(eol=eol)
if line:
lines.append(line)
if line[-1] != eol: #was the line received with a timeout?
break
else:
break
return lines
def xreadlines(self, sizehint=None):
"""just call readlines - here for compatibility"""
return self.readlines()
def writelines(self, sequence):
for line in sequence:
self.write(line)
def flush(self):
"""flush of file like objects"""
pass
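# --- Hedged usage sketch (not part of the original module) -------------------
# A minimal in-memory subclass, relying only on the FileLike interface above,
# showing how readline()/readlines() are driven by the read() primitive. The
# class name and sample data are illustrative only.
class _BufferFileLike(FileLike):
    def __init__(self, data):
        self.timeout = 0           # readlines() insists on a non-None timeout
        self._data = data
    def read(self, size):
        chunk, self._data = self._data[:size], self._data[size:]
        return chunk
    def write(self, s):
        pass                       # writes are simply discarded in this sketch
# _BufferFileLike('one\ntwo\n').readlines() == ['one\n', 'two\n']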
class SerialBase(FileLike):
"""Serial port base class. Provides __init__ function and properties to
get/set port settings."""
#default values, may be overridden in subclasses that do not support all values
BAUDRATES = (50,75,110,134,150,200,300,600,1200,1800,2400,4800,9600,
19200,38400,57600,115200,230400,460800,500000,576000,921600,
1000000,1152000,1500000,2000000,2500000,3000000,3500000,4000000)
BYTESIZES = (FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS)
PARITIES = (PARITY_NONE, PARITY_EVEN, PARITY_ODD)
STOPBITS = (STOPBITS_ONE, STOPBITS_TWO)
def __init__(self,
port = None, #number of device, numbering starts at
#zero. if everything fails, the user
#can specify a device string, note
#that this isn't portable anymore
#port will be opened if one is specified
baudrate=9600, #baudrate
bytesize=EIGHTBITS, #number of databits
parity=PARITY_NONE, #enable parity checking
stopbits=STOPBITS_ONE, #number of stopbits
timeout=None, #set a timeout value, None to wait forever
xonxoff=0, #enable software flow control
rtscts=0, #enable RTS/CTS flow control
writeTimeout=None, #set a timeout for writes
dsrdtr=None, #None: use rtscts setting, dsrdtr override if true or false
):
"""Initialize comm port object. If a port is given, then the port will be
opened immediately. Otherwise a Serial port object in closed state
is returned."""
self._isOpen = False
        self._port = None #correct value is assigned below through properties
        self._baudrate = None #correct value is assigned below through properties
        self._bytesize = None #correct value is assigned below through properties
        self._parity = None #correct value is assigned below through properties
        self._stopbits = None #correct value is assigned below through properties
        self._timeout = None #correct value is assigned below through properties
        self._writeTimeout = None #correct value is assigned below through properties
        self._xonxoff = None #correct value is assigned below through properties
        self._rtscts = None #correct value is assigned below through properties
        self._dsrdtr = None #correct value is assigned below through properties
#assign values using get/set methods using the properties feature
self.port = port
self.baudrate = baudrate
self.bytesize = bytesize
self.parity = parity
self.stopbits = stopbits
self.timeout = timeout
self.writeTimeout = writeTimeout
self.xonxoff = xonxoff
self.rtscts = rtscts
self.dsrdtr = dsrdtr
if port is not None:
self.open()
def isOpen(self):
"""Check if the port is opened."""
return self._isOpen
# - - - - - - - - - - - - - - - - - - - - - - - -
    #TODO: these are not really needed as there is the BAUDRATES etc. attribute...
    #maybe I'll remove them before the final release...
def getSupportedBaudrates(self):
return [(str(b), b) for b in self.BAUDRATES]
def getSupportedByteSizes(self):
return [(str(b), b) for b in self.BYTESIZES]
def getSupportedStopbits(self):
return [(str(b), b) for b in self.STOPBITS]
def getSupportedParities(self):
return [(PARITY_NAMES[b], b) for b in self.PARITIES]
# - - - - - - - - - - - - - - - - - - - - - - - -
def setPort(self, port):
"""Change the port. The attribute portstr is set to a string that
contains the name of the port."""
was_open = self._isOpen
if was_open: self.close()
if port is not None:
if type(port) in [type(''), type(u'')]: #strings are taken directly
self.portstr = port
else:
self.portstr = self.makeDeviceName(port)
else:
self.portstr = None
self._port = port
if was_open: self.open()
def getPort(self):
"""Get the current port setting. The value that was passed on init or using
setPort() is passed back. See also the attribute portstr which contains
the name of the port as a string."""
return self._port
port = property(getPort, setPort, doc="Port setting")
def setBaudrate(self, baudrate):
"""Change baudrate. It raises a ValueError if the port is open and the
        baudrate is not possible. If the port is closed, then the value is
accepted and the exception is raised when the port is opened."""
#~ if baudrate not in self.BAUDRATES: raise ValueError("Not a valid baudrate: %r" % baudrate)
try:
self._baudrate = int(baudrate)
except TypeError:
raise ValueError("Not a valid baudrate: %r" % baudrate)
else:
if self._isOpen: self._reconfigurePort()
def getBaudrate(self):
"""Get the current baudrate setting."""
return self._baudrate
baudrate = property(getBaudrate, setBaudrate, doc="Baudrate setting")
def setByteSize(self, bytesize):
"""Change byte size."""
if bytesize not in self.BYTESIZES: raise ValueError("Not a valid byte size: %r" % bytesize)
self._bytesize = bytesize
if self._isOpen: self._reconfigurePort()
def getByteSize(self):
"""Get the current byte size setting."""
return self._bytesize
bytesize = property(getByteSize, setByteSize, doc="Byte size setting")
def setParity(self, parity):
"""Change parity setting."""
if parity not in self.PARITIES: raise ValueError("Not a valid parity: %r" % parity)
self._parity = parity
if self._isOpen: self._reconfigurePort()
def getParity(self):
"""Get the current parity setting."""
return self._parity
parity = property(getParity, setParity, doc="Parity setting")
def setStopbits(self, stopbits):
"""Change stopbits size."""
if stopbits not in self.STOPBITS: raise ValueError("Not a valid stopbit size: %r" % stopbits)
self._stopbits = stopbits
if self._isOpen: self._reconfigurePort()
def getStopbits(self):
"""Get the current stopbits setting."""
return self._stopbits
stopbits = property(getStopbits, setStopbits, doc="Stopbits setting")
def setTimeout(self, timeout):
"""Change timeout setting."""
if timeout is not None:
if timeout < 0: raise ValueError("Not a valid timeout: %r" % timeout)
try:
timeout + 1 #test if it's a number, will throw a TypeError if not...
except TypeError:
raise ValueError("Not a valid timeout: %r" % timeout)
self._timeout = timeout
if self._isOpen: self._reconfigurePort()
def getTimeout(self):
"""Get the current timeout setting."""
return self._timeout
timeout = property(getTimeout, setTimeout, doc="Timeout setting for read()")
def setWriteTimeout(self, timeout):
"""Change timeout setting."""
if timeout is not None:
if timeout < 0: raise ValueError("Not a valid timeout: %r" % timeout)
try:
timeout + 1 #test if it's a number, will throw a TypeError if not...
except TypeError:
raise ValueError("Not a valid timeout: %r" % timeout)
self._writeTimeout = timeout
if self._isOpen: self._reconfigurePort()
def getWriteTimeout(self):
"""Get the current timeout setting."""
return self._writeTimeout
writeTimeout = property(getWriteTimeout, setWriteTimeout, doc="Timeout setting for write()")
def setXonXoff(self, xonxoff):
"""Change XonXoff setting."""
self._xonxoff = xonxoff
if self._isOpen: self._reconfigurePort()
def getXonXoff(self):
"""Get the current XonXoff setting."""
return self._xonxoff
xonxoff = property(getXonXoff, setXonXoff, doc="Xon/Xoff setting")
def setRtsCts(self, rtscts):
"""Change RtsCts flow control setting."""
self._rtscts = rtscts
if self._isOpen: self._reconfigurePort()
def getRtsCts(self):
"""Get the current RtsCts flow control setting."""
return self._rtscts
rtscts = property(getRtsCts, setRtsCts, doc="RTS/CTS flow control setting")
def setDsrDtr(self, dsrdtr=None):
"""Change DsrDtr flow control setting."""
if dsrdtr is None:
#if not set, keep backwards compatibility and follow rtscts setting
self._dsrdtr = self._rtscts
else:
#if defined independently, follow its value
self._dsrdtr = dsrdtr
if self._isOpen: self._reconfigurePort()
def getDsrDtr(self):
"""Get the current DsrDtr flow control setting."""
return self._dsrdtr
dsrdtr = property(getDsrDtr, setDsrDtr, "DSR/DTR flow control setting")
# - - - - - - - - - - - - - - - - - - - - - - - -
def __repr__(self):
"""String representation of the current port settings and its state."""
return "%s<id=0x%x, open=%s>(port=%r, baudrate=%r, bytesize=%r, parity=%r, stopbits=%r, timeout=%r, xonxoff=%r, rtscts=%r, dsrdtr=%r)" % (
self.__class__.__name__,
id(self),
self._isOpen,
self.portstr,
self.baudrate,
self.bytesize,
self.parity,
self.stopbits,
self.timeout,
self.xonxoff,
self.rtscts,
self.dsrdtr,
)
if __name__ == '__main__':
s = SerialBase()
print s.portstr
print s.getSupportedBaudrates()
print s.getSupportedByteSizes()
print s.getSupportedParities()
print s.getSupportedStopbits()
print s
|
|
import cattle
import os
import pytest
import random
import time
from datetime import datetime, timedelta
NOT_NONE = object()
DEFAULT_TIMEOUT = 90
DEFAULT_AGENT_URI = 'ssh://root@localhost:22'
DEFAULT_AGENT_UUID = 'test-agent'
SLEEP_DELAY = 0.5
@pytest.fixture(scope='session')
def cattle_url():
default_url = 'http://localhost:8080/v1/schemas'
return os.environ.get('CATTLE_URL', default_url)
def _admin_client():
return cattle.from_env(url=cattle_url(),
cache=False,
access_key='admin',
                           secret_key='adminpass')
def _client_for_user(name, accounts):
return cattle.from_env(url=cattle_url(),
cache=False,
access_key=accounts[name][0],
secret_key=accounts[name][1])
def create_user(admin_client, user_name, kind=None):
if kind is None:
kind = user_name
password = user_name + 'pass'
account = create_type_by_uuid(admin_client, 'account', user_name,
                                  kind=kind,
name=user_name)
active_cred = None
for cred in account.credentials():
if cred.kind == 'apiKey' and cred.publicValue == user_name \
and cred.secretValue == password:
active_cred = cred
break
if active_cred is None:
active_cred = admin_client.create_credential({
'accountId': account.id,
'kind': 'apiKey',
'publicValue': user_name,
'secretValue': password
})
active_cred = wait_success(admin_client, active_cred)
if active_cred.state != 'active':
wait_success(admin_client, active_cred.activate())
return [user_name, password, account]
@pytest.fixture(scope='session')
def accounts():
result = {}
admin_client = _admin_client()
for user_name in ['admin', 'agent', 'user', 'agentRegister', 'test',
'readAdmin', 'token']:
result[user_name] = create_user(admin_client,
user_name,
kind=user_name)
result['admin'] = create_user(admin_client, 'admin')
system_account = admin_client.list_account(kind='system', uuid='system')[0]
result['system'] = [None, None, system_account]
return result
@pytest.fixture(scope='session')
def system_account(accounts):
return accounts['system'][2]
@pytest.fixture(scope='session')
def admin_account(accounts):
return accounts['admin'][2]
@pytest.fixture(scope='session')
def client(accounts):
return _client_for_user('user', accounts)
@pytest.fixture(scope='session')
def admin_client(accounts):
return _client_for_user('admin', accounts)
@pytest.fixture(scope='session')
def token_client(accounts):
return _client_for_user('token', accounts)
@pytest.fixture(scope='session')
def sim_context(request, admin_client):
context = kind_context(admin_client, 'sim', external_pool=True,
uri='sim://', uuid='simagent1', host_public=True)
context['imageUuid'] = 'sim:{}'.format(random_num())
host = context['host']
if len(host.ipAddresses()) == 0:
ip = create_and_activate(admin_client, 'ipAddress',
address='192.168.10.10',
isPublic=True)
map = admin_client.create_host_ip_address_map(hostId=host.id,
ipAddressId=ip.id)
map = admin_client.wait_success(map)
assert map.state == 'active'
context['hostIp'] = host.ipAddresses()[0]
request.addfinalizer(
lambda: stop_running_sim_instances(admin_client))
return context
@pytest.fixture(scope='session')
def sim_context2(admin_client):
context = kind_context(admin_client, 'sim', external_pool=True,
uri='sim://2', uuid='simagent2', host_public=True)
context['imageUuid'] = 'sim:{}'.format(random_num())
return context
@pytest.fixture(scope='session')
def sim_context3(admin_client):
context = kind_context(admin_client, 'sim', external_pool=True,
uri='sim://3', uuid='simagent3', host_public=True)
context['imageUuid'] = 'sim:{}'.format(random_num())
return context
def activate_resource(admin_client, obj):
if obj.state == 'inactive':
obj = wait_success(admin_client, obj.activate())
return obj
def find_by_uuid(admin_client, type, uuid, activate=True, **kw):
objs = admin_client.list(type, uuid=uuid)
assert len(objs) == 1
obj = wait_success(admin_client, objs[0])
if activate:
return activate_resource(admin_client, obj)
return obj
def create_type_by_uuid(admin_client, type, uuid, activate=True, validate=True,
**kw):
opts = dict(kw)
opts['uuid'] = uuid
objs = admin_client.list(type, uuid=uuid)
obj = None
if len(objs) == 0:
obj = admin_client.create(type, **opts)
else:
obj = objs[0]
obj = wait_success(admin_client, obj)
if activate and obj.state == 'inactive':
obj.activate()
obj = wait_success(admin_client, obj)
if validate:
for k, v in opts.items():
assert getattr(obj, k) == v
return obj
def random_num():
return random.randint(0, 1000000)
def random_str():
return 'random-{0}'.format(random_num())
def wait_all_success(client, objs, timeout=DEFAULT_TIMEOUT):
ret = []
for obj in objs:
obj = wait_success(client, obj, timeout)
ret.append(obj)
return ret
def wait_success(client, obj, timeout=DEFAULT_TIMEOUT):
return client.wait_success(obj, timeout=timeout)
def wait_transitioning(client, obj, timeout=DEFAULT_TIMEOUT):
return client.wait_transitioning(obj, timeout=timeout)
def assert_fields(obj, fields):
assert obj is not None
for k, v in fields.items():
assert k in obj
if v is None:
assert obj[k] is None
elif v is NOT_NONE:
assert obj[k] is not None
else:
assert obj[k] == v
def assert_removed_fields(obj):
assert obj.removed is not None
assert obj.removeTime is not None
assert obj.removeTimeTS > obj.removedTS
def assert_restored_fields(obj):
assert obj.removed is None
assert obj.removeTime is None
def now():
return datetime.utcnow()
def format_time(time):
return (time - timedelta(microseconds=time.microsecond)).isoformat() + 'Z'
def get_agent(admin_client, name, default_uri=DEFAULT_AGENT_URI,
default_agent_uuid=DEFAULT_AGENT_UUID):
    name = name.upper()
    uri_name = '{0}_URI'.format(name)
    uuid_name = '{0}_AGENT_UUID'.format(name)
uri = os.getenv(uri_name, default_uri)
uuid = os.getenv(uuid_name, default_agent_uuid)
agent = create_type_by_uuid(admin_client, 'agent', uuid, validate=False,
uri=uri)
while len(agent.hosts()) == 0:
time.sleep(SLEEP_DELAY)
return agent
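#kind_context() (below) resolves the agent, host and storage pool for a host kind
#(e.g. 'sim') and returns them as a dict with keys 'host', 'pool', 'agent' and,
#when external_pool=True, 'external_pool'.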
def kind_context(admin_client, kind, external_pool=False,
uri=DEFAULT_AGENT_URI,
uuid=DEFAULT_AGENT_UUID,
host_public=False,
agent=None):
if agent is None:
kind_agent = get_agent(admin_client, kind, default_agent_uuid=uuid,
default_uri=uri)
else:
kind_agent = agent
hosts = filter(lambda x: x.kind == kind and x.removed is None,
kind_agent.hosts())
assert len(hosts) == 1
kind_host = activate_resource(admin_client, hosts[0])
if kind_host.isPublic != host_public:
kind_host = admin_client.update(kind_host, isPublic=host_public)
assert kind_host.isPublic == host_public
assert kind_host.accountId == kind_agent.accountId or \
get_plain_id(admin_client, kind_host.account()) == \
str(kind_agent.data.agentResourcesAccountId)
pools = kind_host.storagePools()
assert len(pools) == 1
kind_pool = activate_resource(admin_client, pools[0])
assert kind_pool.accountId == kind_agent.accountId or \
get_plain_id(admin_client, kind_pool.account()) == \
str(kind_agent.data.agentResourcesAccountId)
context = {
'host': kind_host,
'pool': kind_pool,
'agent': kind_agent
}
if external_pool:
pools = admin_client.list_storagePool(kind=kind, external=True)
assert len(pools) == 1
context['external_pool'] = activate_resource(admin_client, pools[0])
assert pools[0].accountId is not None
return context
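#assert_required_fields() (below) calls ``method`` once with all kwargs, then once
#per kwarg with that kwarg removed, expecting a 'MissingRequired' ApiError naming
#the dropped field; the ``assert k == ''`` line only fires if the call wrongly
#succeeds, so the failure message shows which field was not enforced.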
def assert_required_fields(method, **kw):
method(**kw)
for k in kw.keys():
args = dict(kw)
del args[k]
try:
method(**args)
# This is supposed to fail
assert k == ''
except cattle.ApiError as e:
assert e.error.code == 'MissingRequired'
assert e.error.fieldName == k
def get_plain_id(admin_client, obj):
ret = admin_client.list(obj.type, uuid=obj.uuid, _plainId='true')
assert len(ret) == 1
return ret[0].id
def get_by_plain_id(admin_client, type, id):
obj = admin_client.by_id(type, id, _plainId='true')
if obj is None:
return None
objs = admin_client.list(type, uuid=obj.uuid)
if len(objs) == 0:
return None
return objs[0]
def create_and_activate(client, type, **kw):
obj = client.create(type, **kw)
obj = client.wait_success(obj)
if obj.state == 'inactive':
obj = client.wait_success(obj.activate())
assert obj.state == 'active'
return obj
def stop_running_sim_instances(admin_client):
to_stop = []
to_stop.extend(admin_client.list_instance(state='running', limit=1000))
to_stop.extend(admin_client.list_instance(state='starting', limit=1000))
for c in to_stop:
if c.hosts()[0].kind == 'sim':
nsps = c.networkServiceProviders()
if len(nsps) > 0 and nsps[0].uuid == 'nsp-test-nsp':
continue
try:
c.stop()
except:
pass
for state in ['active', 'reconnecting']:
for a in admin_client.list_agent(state=state, include='instances',
uri_like='delegate%'):
if not callable(a.instances):
for i in a.instances:
if i.state != 'running':
a.deactivate()
def one(method, *args, **kw):
ret = method(*args, **kw)
assert len(ret) == 1
return ret[0]
def process_instances(admin_client, obj, id=None, type=None):
if id is None:
id = get_plain_id(admin_client, obj)
if type is None:
type = obj.type
return admin_client.list_process_instance(resourceType=type, resourceId=id,
sort='startTime')
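#auth_check() (below) asserts that a schema type exposes exactly the expected CRUD
#access set ('r', 'c', 'u', 'd') and, when ``props`` is given, that every resource
#field carries exactly the expected per-field permissions.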
def auth_check(schema, id, access, props=None):
type = schema.types[id]
access_actual = set()
try:
if 'GET' in type.collectionMethods:
access_actual.add('r')
except AttributeError:
pass
try:
if 'GET' in type.resourceMethods:
access_actual.add('r')
except AttributeError:
pass
try:
if 'POST' in type.collectionMethods:
access_actual.add('c')
except AttributeError:
pass
try:
if 'DELETE' in type.resourceMethods:
access_actual.add('d')
except AttributeError:
pass
try:
if 'PUT' in type.resourceMethods:
access_actual.add('u')
except AttributeError:
pass
assert access_actual == set(access)
if props is None:
return 1
for i in ['name', 'description']:
if i not in props and i in type.resourceFields:
acl = set('r')
if 'c' in access_actual:
acl.add('c')
if 'u' in access_actual:
acl.add('u')
props[i] = ''.join(acl)
for i in ['created', 'removed', 'transitioning', 'transitioningProgress',
'transitioningMessage', 'id', 'uuid', 'kind', 'state']:
if i not in props and i in type.resourceFields:
props[i] = 'r'
prop = set(props.keys())
prop_actual = set(type.resourceFields.keys())
assert prop_actual == prop
for name, field in type.resourceFields.items():
assert name in props
prop = set(props[name])
prop_actual = set('r')
prop.add(name)
prop_actual.add(name)
if field.create and 'c' in access_actual:
prop_actual.add('c')
if field.update and 'u' in access_actual:
prop_actual.add('u')
assert prop_actual == prop
return 1
def wait_for(callback, timeout=DEFAULT_TIMEOUT):
start = time.time()
ret = callback()
while ret is None:
time.sleep(.5)
if time.time() - start > timeout:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
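#Hypothetical usage sketch of wait_for() (the callback name is illustrative, not
#part of the Cattle API): poll until some condition returns a non-None value,
#re-checking every 0.5s and raising after DEFAULT_TIMEOUT seconds:
#    instance = wait_for(lambda: fetch_running_instance_or_none())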
def find_one(method, *args, **kw):
return find_count(1, method, *args, **kw)[0]
def find_count(count, method, *args, **kw):
ret = method(*args, **kw)
assert len(ret) == count
return ret
def create_sim_container(admin_client, sim_context, *args, **kw):
c = admin_client.create_container(*args,
imageUuid=sim_context['imageUuid'],
**kw)
c = admin_client.wait_success(c)
assert c.state == 'running'
return c
def create_agent_instance_nsp(admin_client, sim_context):
network = create_and_activate(admin_client, 'hostOnlyNetwork',
hostVnetUri='test:///',
dynamicCreateVnet=True)
create_and_activate(admin_client, 'subnet',
networkAddress='192.168.0.0',
networkId=network.id)
return create_and_activate(admin_client, 'agentInstanceProvider',
networkId=network.id,
agentInstanceImageUuid=sim_context['imageUuid'])
@pytest.fixture(scope='session')
def test_network(admin_client, sim_context):
network = create_type_by_uuid(admin_client, 'hostOnlyNetwork',
'nsp-test-network',
hostVnetUri='test:///',
dynamicCreateVnet=True)
create_type_by_uuid(admin_client, 'subnet',
'nsp-test-subnet',
networkAddress='192.168.0.0',
networkId=network.id)
nsp = create_type_by_uuid(admin_client, 'agentInstanceProvider',
'nsp-test-nsp',
networkId=network.id,
agentInstanceImageUuid='sim:test-nsp')
create_type_by_uuid(admin_client, 'portService',
'nsp-test-port-service',
networkId=network.id,
networkServiceProviderId=nsp.id)
for i in nsp.instances():
i = admin_client.wait_success(i)
if i.state != 'running':
admin_client.wait_success(i.start())
agent = admin_client.wait_success(i.agent())
if agent.state != 'active':
admin_client.wait_success(agent.activate())
return network
def resource_pool_items(admin_client, obj, type=None, qualifier=None):
id = get_plain_id(admin_client, obj)
if type is None:
type = obj.type
if qualifier is None:
return admin_client.list_resource_pool(ownerType=type,
ownerId=id)
else:
return admin_client.list_resource_pool(ownerType=type,
ownerId=id,
qualifier=qualifier)
@pytest.fixture(scope='session')
def network(admin_client):
network = create_type_by_uuid(admin_client, 'network', 'test_vm_network',
isPublic=True)
subnet = create_type_by_uuid(admin_client, 'subnet', 'test_vm_subnet',
isPublic=True,
networkId=network.id,
networkAddress='192.168.0.0',
cidrSize=24)
vnet = create_type_by_uuid(admin_client, 'vnet', 'test_vm_vnet',
networkId=network.id,
uri='fake://')
create_type_by_uuid(admin_client, 'subnetVnetMap', 'test_vm_vnet_map',
subnetId=subnet.id,
vnetId=vnet.id)
return network
@pytest.fixture(scope='session')
def subnet(admin_client, network):
subnets = network.subnets()
assert len(subnets) == 1
return subnets[0]
@pytest.fixture(scope='session')
def vnet(admin_client, subnet):
vnets = subnet.vnets()
assert len(vnets) == 1
return vnets[0]
|
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define statistical functions of a tensor
import numpy as np
from ..fluid.framework import Variable
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import core, in_dygraph_mode
from ..fluid import layers
from .search import where
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
import paddle
from paddle import _C_ops
__all__ = []
def mean(x, axis=None, keepdim=False, name=None):
"""
Computes the mean of the input tensor's elements along ``axis``.
Args:
x (Tensor): The input Tensor with data type float32, float64.
axis (int|list|tuple, optional): The axis along which to perform mean
calculations. ``axis`` should be int, list(int) or tuple(int). If
``axis`` is a list/tuple of dimension(s), mean is calculated along
all element(s) of ``axis`` . ``axis`` or element(s) of ``axis``
should be in range [-D, D), where D is the dimensions of ``x`` . If
``axis`` or element(s) of ``axis`` is less than 0, it works the
same way as :math:`axis + D` . If ``axis`` is None, mean is
calculated over all elements of ``x``. Default is None.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of average along ``axis`` of ``x``, with the same data
type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.]],
[[13., 14., 15., 16.],
[17., 18., 19., 20.],
[21., 22., 23., 24.]]])
out1 = paddle.mean(x)
# [12.5]
out2 = paddle.mean(x, axis=-1)
# [[ 2.5 6.5 10.5]
# [14.5 18.5 22.5]]
out3 = paddle.mean(x, axis=-1, keepdim=True)
# [[[ 2.5]
# [ 6.5]
# [10.5]]
# [[14.5]
# [18.5]
# [22.5]]]
out4 = paddle.mean(x, axis=[0, 2])
# [ 8.5 12.5 16.5]
"""
if isinstance(axis, int):
axis = [axis]
reduce_all = True if axis is None \
or len(axis)==0 \
or len(axis) == len(x.shape) else False
if axis is None or len(axis) == 0:
axis = [0]
if in_dygraph_mode():
return _C_ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
check_variable_and_dtype(x, 'x/input',
['uint16', 'float16', 'float32', 'float64'],
'mean/reduce_mean')
check_type(axis, 'axis/dim', (int, list, tuple), 'mean/reduce_mean')
if isinstance(axis, (list, tuple)):
for item in axis:
check_type(item, 'elements of axis/dim', (int), 'mean/reduce_mean')
helper = LayerHelper('mean', **locals())
attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='reduce_mean', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
return out
def var(x, axis=None, unbiased=True, keepdim=False, name=None):
"""
Computes the variance of ``x`` along ``axis`` .
Args:
x (Tensor): The input Tensor with data type float32, float64.
axis (int|list|tuple, optional): The axis along which to perform
variance calculations. ``axis`` should be int, list(int) or
tuple(int). If ``axis`` is a list/tuple of dimension(s), variance
is calculated along all element(s) of ``axis`` . ``axis`` or
element(s) of ``axis`` should be in range [-D, D), where D is the
dimensions of ``x`` . If ``axis`` or element(s) of ``axis`` is less
than 0, it works the same way as :math:`axis + D` . If ``axis`` is
None, variance is calculated over all elements of ``x``. Default
is None.
unbiased (bool, optional): Whether to use the unbiased estimation. If
``unbiased`` is True, the divisor used in the computation is
:math:`N - 1`, where :math:`N` represents the number of elements
along ``axis`` , otherwise the divisor is :math:`N`. Default is True.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of variance along ``axis`` of ``x``, with the same data
type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
out1 = paddle.var(x)
# [2.66666667]
out2 = paddle.var(x, axis=1)
# [1. 4.33333333]
"""
if not in_dygraph_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'var')
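    # Variance is computed as sum((x - mean)^2) / n, where n is the number of
    # elements reduced into each output entry (or n - 1 when unbiased=True).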
u = mean(x, axis, True, name)
out = paddle.sum((x - u)**2, axis, keepdim=keepdim, name=name)
n = paddle.cast(paddle.numel(x), x.dtype) \
/ paddle.cast(paddle.numel(out), x.dtype)
if unbiased:
one_const = paddle.ones([1], x.dtype)
n = where(n > one_const, n - 1., one_const)
out /= n
return out
def std(x, axis=None, unbiased=True, keepdim=False, name=None):
"""
Computes the standard-deviation of ``x`` along ``axis`` .
Args:
x (Tensor): The input Tensor with data type float32, float64.
axis (int|list|tuple, optional): The axis along which to perform
standard-deviation calculations. ``axis`` should be int, list(int)
or tuple(int). If ``axis`` is a list/tuple of dimension(s),
standard-deviation is calculated along all element(s) of ``axis`` .
``axis`` or element(s) of ``axis`` should be in range [-D, D),
where D is the dimensions of ``x`` . If ``axis`` or element(s) of
``axis`` is less than 0, it works the same way as :math:`axis + D` .
If ``axis`` is None, standard-deviation is calculated over all
elements of ``x``. Default is None.
unbiased (bool, optional): Whether to use the unbiased estimation. If
``unbiased`` is True, the standard-deviation is calculated via the
unbiased estimator. If ``unbiased`` is True, the divisor used in
the computation is :math:`N - 1`, where :math:`N` represents the
number of elements along ``axis`` , otherwise the divisor is
:math:`N`. Default is True.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of standard-deviation along ``axis`` of ``x``, with the
same data type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
out1 = paddle.std(x)
# [1.63299316]
out2 = paddle.std(x, axis=1)
# [1. 2.081666]
"""
if not in_dygraph_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'std')
out = var(**locals())
return paddle.sqrt(out)
def numel(x, name=None):
"""
    Returns the number of elements for a tensor, which is an int64 Tensor with shape [1] in static mode
    or a scalar value in imperative mode.
Args:
x (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64.
Returns:
Tensor: The number of elements for the input Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.full(shape=[4, 5, 7], fill_value=0, dtype='int32')
numel = paddle.numel(x) # 140
"""
if in_dygraph_mode():
return _C_ops.size(x)
if not isinstance(x, Variable):
raise TypeError("x must be a Tensor in numel")
helper = LayerHelper('numel', **locals())
out = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.INT64)
helper.append_op(type='size', inputs={'Input': x}, outputs={'Out': out})
return out
def median(x, axis=None, keepdim=False, name=None):
"""
Compute the median along the specified axis.
Args:
x (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64.
        axis (int, optional): The axis along which to perform median calculations. ``axis`` should be int.
``axis`` should be in range [-D, D), where D is the dimensions of ``x`` .
If ``axis`` is less than 0, it works the same way as :math:`axis + D`.
If ``axis`` is None, median is calculated over all elements of ``x``. Default is None.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of median along ``axis`` of ``x``. If data type of ``x`` is float64, data type of results will be float64, otherwise data type will be float32.
Examples:
.. code-block:: python
import paddle
x = paddle.arange(12).reshape([3, 4])
# x is [[0 , 1 , 2 , 3 ],
# [4 , 5 , 6 , 7 ],
# [8 , 9 , 10, 11]]
y1 = paddle.median(x)
# y1 is [5.5]
y2 = paddle.median(x, axis=0)
# y2 is [4., 5., 6., 7.]
y3 = paddle.median(x, axis=1)
# y3 is [1.5, 5.5, 9.5]
y4 = paddle.median(x, axis=0, keepdim=True)
# y4 is [[4., 5., 6., 7.]]
"""
if not isinstance(x, Variable):
raise TypeError("In median, the input x should be a Tensor.")
is_flatten = axis is None
dims = len(x.shape)
if is_flatten:
x = paddle.flatten(x)
axis = 0
else:
if not isinstance(axis, int) or not (axis < dims and axis >= -dims):
raise ValueError(
"In median, axis should be none or an integer in range [-rank(x), rank(x))."
)
if axis < 0:
axis += dims
sz = x.shape[axis]
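    # topk with largest=False yields the (kth + 1) smallest values along ``axis``;
    # the median is the last of them for odd sizes, or the mean of the last two
    # (positions kth - 1 and kth) for even sizes.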
kth = sz >> 1
tensor_topk, idx = paddle.topk(x, kth + 1, axis=axis, largest=False)
dtype = 'float64' if x.dtype == core.VarDesc.VarType.FP64 else 'float32'
if sz & 1 == 0:
out_tensor = paddle.slice(
tensor_topk, axes=[axis], starts=[kth - 1],
ends=[kth]) + paddle.slice(
tensor_topk, axes=[axis], starts=[kth], ends=[kth + 1])
out_tensor = paddle.cast(out_tensor, dtype=dtype) / 2
else:
out_tensor = paddle.cast(
paddle.slice(
tensor_topk, axes=[axis], starts=[kth], ends=[kth + 1]),
dtype=dtype)
if not keepdim or is_flatten:
if not is_flatten:
newshape = x.shape[:axis] + x.shape[axis + 1:]
elif not keepdim:
newshape = [1]
else:
newshape = [1] * dims
else:
newshape = out_tensor.shape
out_tensor = out_tensor.reshape(newshape, name=name)
return out_tensor
def quantile(x, q, axis=None, keepdim=False, name=None):
"""
Compute the quantile of the input along the specified axis.
Args:
x (Tensor): The input Tensor, it's data type can be float32, float64.
        q (int|float|list): The quantile(s) to compute, which should be in range [0, 1]. If q is a list,
            each q will be calculated and the first dimension of the output is the same as the number of ``q`` .
axis (int|list, optional): The axis along which to calculate quantile. ``axis`` should be int or list of int.
``axis`` should be in range [-D, D), where D is the dimensions of ``x`` .
If ``axis`` is less than 0, it works the same way as :math:`axis + D`.
            If ``axis`` is a list, quantile is calculated over all elements of the given axes.
If ``axis`` is None, quantile is calculated over all elements of ``x``. Default is None.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of quantile along ``axis`` of ``x``. If data type of ``x`` is float64, data type of results will be float64, otherwise data type will be float32.
Examples:
.. code-block:: python
import paddle
x = paddle.randn((2,3))
#[[-1.28740597, 0.49533170, -1.00698614],
# [-1.11656201, -1.01010525, -2.23457789]])
y1 = paddle.quantile(x, q=0.5, axis=[0, 1])
# y1 = -1.06333363
y2 = paddle.quantile(x, q=0.5, axis=1)
# y2 = [-1.00698614, -1.11656201]
y3 = paddle.quantile(x, q=[0.3, 0.5], axis=1)
# y3 =[[-1.11915410, -1.56376839],
# [-1.00698614, -1.11656201]]
y4 = paddle.quantile(x, q=0.8, axis=1, keepdim=True)
# y4 = [[-0.10559537],
# [-1.05268800]])
"""
if not isinstance(x, Variable):
raise TypeError("input x should be a Tensor.")
dims = len(x.shape)
out_shape = x.shape
if axis is None:
x = paddle.flatten(x)
axis = 0
out_shape = [1] * dims
else:
if isinstance(axis, list):
if (len(axis) <= 0):
raise ValueError("axis should not be empty")
axis_src, axis_dst = [], []
for axis_single in axis:
if not isinstance(axis_single, int) or not (
axis_single < dims and axis_single >= -dims):
raise ValueError(
"Axis should be None, int, or a list, element should in range [-rank(x), rank(x))."
)
if axis_single < 0:
axis_single = axis_single + dims
axis_src.append(axis_single)
out_shape[axis_single] = 1
axis_dst = list(range(-len(axis), 0))
x = paddle.moveaxis(x, axis_src, axis_dst)
x = paddle.flatten(x, axis_dst[0], axis_dst[-1])
axis = axis_dst[0]
else:
if not isinstance(axis, int) or not (axis < dims and axis >= -dims):
raise ValueError(
"Axis should be None, int, or a list, element should in range [-rank(x), rank(x))."
)
if axis < 0:
axis += dims
out_shape[axis] = 1
indices = []
if isinstance(q, (int, float)):
if q < 0 or q > 1:
raise ValueError("q should be in range [0, 1]")
indices.append(q * (x.shape[axis] - 1))
elif isinstance(q, (list, tuple)):
if len(q) <= 0:
raise ValueError("q should not be empty")
for q_num in q:
if q_num < 0 or q_num > 1:
raise ValueError("q should be in range [0, 1]")
indices.append(q_num * (x.shape[axis] - 1))
else:
raise TypeError("Type of q should be int, float, list or tuple.")
indices = paddle.to_tensor(indices).astype(paddle.float32)
sorted_tensor = paddle.sort(x, axis)
indices_below = paddle.floor(indices).astype(paddle.int32)
indices_upper = paddle.ceil(indices).astype(paddle.int32)
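    # Each quantile is linearly interpolated between the two nearest ranks,
    # floor(q * (N - 1)) and ceil(q * (N - 1)), of the sorted tensor.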
outputs = []
def expand_dim(indices, sorted_tensor_shape, axis):
assert axis < len(list(sorted_tensor_shape))
expanded_shape = [1] * len(list(sorted_tensor_shape))
expanded_shape[axis] = len(indices)
expanded_shape = tuple(expanded_shape)
indices = indices.reshape(expanded_shape)
return indices
# TODO(chenjianye): replace the for-loop to directly take elements.
for i in range(len(indices)):
if (indices_upper[i] != indices_below[i]):
tensor_below = paddle.take_along_axis(
sorted_tensor,
expand_dim(indices_below[i], sorted_tensor.shape, axis), axis)
tensor_upper = paddle.take_along_axis(
sorted_tensor,
expand_dim(indices_upper[i], sorted_tensor.shape, axis), axis)
weights = (indices[i] - indices_below[i]).astype(x.dtype)
out = paddle.lerp(tensor_below, tensor_upper, weights)
else:
out = paddle.take_along_axis(
sorted_tensor,
expand_dim(indices_below[i], sorted_tensor.shape, axis), axis)
if not keepdim:
out = paddle.squeeze(out, axis=axis)
else:
out = out.reshape(out_shape)
outputs.append(out)
if isinstance(q, (list, tuple)):
return paddle.stack(outputs, 0)
else:
return outputs[0]
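# Illustrative cross-check (hypothetical, not part of this module): the interpolation
# above matches numpy's default linear interpolation, e.g. for a float32 tensor ``t``,
#     np.quantile(t.numpy(), 0.3, axis=1)
# should agree with paddle.quantile(t, q=0.3, axis=1).numpy() up to float precision.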
|
|
# Copyright 2008-2009 ITA Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import subprocess
from twisted.internet import protocol, reactor
from twisted.python import log
from twisted.trial import unittest
from nagcat.plugins import query_oracle
from nagcat.unittests.queries import QueryTestCase
from nagcat import errors
from coil.struct import Struct
try:
import cx_Oracle
from lxml import etree
except ImportError:
cx_Oracle = None
etree = None
class OracleBase(QueryTestCase):
if not cx_Oracle or not etree:
skip = "Missing cx_Oracle or lxml"
elif not ('ORA_DSN' in os.environ and
'ORA_USER' in os.environ and
'ORA_PASS' in os.environ):
skip = "Missing oracle credentials"
SQL_SETUP = ()
SQL_CLEAN = ()
QUERY_TYPE = "oracle_sql"
def setUp(self):
super(OracleBase, self).setUp()
self.config = {
'type': self.QUERY_TYPE,
'user': os.environ['ORA_USER'],
'password':os.environ['ORA_PASS'],
'dsn':os.environ['ORA_DSN']}
if self.SQL_SETUP:
self.execute(self.SQL_SETUP)
def tearDown(self):
if self.SQL_CLEAN:
self.execute(self.SQL_CLEAN)
def execute(self, sqlseq):
conn = cx_Oracle.Connection(user=self.config['user'],
password=self.config['password'],
dsn=self.config['dsn'],
threaded=True)
self.execute_in_connection(sqlseq, conn)
conn.close()
def execute_in_connection(self, sqlseq, conn):
cursor = conn.cursor()
for sql in sqlseq:
try:
cursor.execute(sql)
except cx_Oracle.DatabaseError, ex:
raise Exception("%s: %s" % (ex, sql))
cursor.close()
def startQuery(self, **kwargs):
return super(OracleBase, self).startQuery(self.config, **kwargs)
def assertEqualsXML(self, result, expect):
# Parse the xml, strip white space, and convert back
# this allows us to compare if they are logically equal
parser = etree.XMLParser(remove_blank_text=True)
result = etree.tostring(etree.XML(result, parser))
expect = etree.tostring(etree.XML(expect, parser))
self.assertEquals(result, expect)
class SimpleTestCase(OracleBase):
def testSimple(self):
def check(result):
self.assertEqualsXML(result, (
'<queryresult><row>'
'<data type="NUMBER">1</data>'
'</row></queryresult>'))
d = self.startQuery(sql='select 1 as data from dual')
d.addCallback(check)
return d
def testBinds(self):
def check(result):
self.assertEqualsXML(result, (
'<queryresult><row>'
'<data type="NUMBER">1</data>'
'</row></queryresult>'))
d = self.startQuery(
sql='select :blah as data from dual',
binds=[1])
d.addCallback(check)
return d
def testParams1(self):
def check(result):
self.assertEqualsXML(result, (
'<queryresult><row>'
'<data type="NUMBER">2</data>'
'</row></queryresult>'))
d = self.startQuery(
sql='select :blah as data from dual',
parameters=[2])
d.addCallback(check)
return d
def testParams2(self):
def check(result):
self.assertEqualsXML(result, (
'<queryresult><row>'
'<data type="NUMBER">2</data>'
'</row></queryresult>'))
d = self.startQuery(
sql='select :blah as data from dual',
parameters=Struct({'blah': 2}))
d.addCallback(check)
return d
def testString(self):
def check(result):
self.assertEqualsXML(result, (
'<queryresult><row>'
'<data type="FIXED_CHAR">foo</data>'
'</row></queryresult>'))
d = self.startQuery(sql="select 'foo' as data from dual")
d.addCallback(check)
return d
def testBadQuery(self):
def check(result):
self.assertIsInstance(result, errors.Failure)
d = self.startQuery(sql='select 1')
d.addBoth(check)
return d
def testBadUser(self):
def check(result):
self.assertIsInstance(result, errors.Failure)
d = self.startQuery(sql='select 1 from dual', user='baduser')
d.addBoth(check)
return d
class DataTestCase(OracleBase):
SQL_SETUP = (
"create table test (a number, b varchar2(10))",
"insert into test values (1, 'aaa')",
"insert into test values (2, 'bbb')",
"insert into test values (3, 'ccc')",
"insert into test values (4, 'ddd')",
"insert into test values (5, 'eee')",
"commit")
SQL_CLEAN = ("drop table test", "commit")
def testSelectAll(self):
def check(result):
self.assertEqualsXML(result, """<queryresult>
<row><a type="NUMBER">1</a><b type="STRING">aaa</b></row>
<row><a type="NUMBER">2</a><b type="STRING">bbb</b></row>
<row><a type="NUMBER">3</a><b type="STRING">ccc</b></row>
<row><a type="NUMBER">4</a><b type="STRING">ddd</b></row>
<row><a type="NUMBER">5</a><b type="STRING">eee</b></row>
</queryresult>""")
d = self.startQuery(sql='select * from test')
d.addCallback(check)
return d
def testSelectCount(self):
def check(result):
self.assertEqualsXML(result, """<queryresult>
<row><count type="NUMBER">5</count></row>
</queryresult>""")
d = self.startQuery(sql='select count(*) from test')
d.addCallback(check)
return d
def testNonSelect(self):
# The result should be empty if we didn't actually select data
def check(result):
self.assertEqualsXML(result, "<queryresult></queryresult>")
d = self.startQuery(sql="insert into test values (0, 'xxx')")
d.addCallback(check)
return d
class TimeoutQueryTestCase(OracleBase):
SQL_SETUP = ("create table test (a number)", "commit")
SQL_CLEAN = ("drop table test", "commit")
def setUp(self):
super(TimeoutQueryTestCase, self).setUp()
self.locked_conn = cx_Oracle.Connection(
user=self.config['user'],
password=self.config['password'],
dsn=self.config['dsn'],
threaded=True)
self.execute_in_connection((
"lock table test in exclusive mode",
), self.locked_conn)
def tearDown(self):
self.locked_conn.close()
self.locked_conn = None
super(TimeoutQueryTestCase, self).tearDown()
def test_timeout(self):
def check(result):
self.assertIsInstance(result, errors.Failure)
self.assert_(str(result.value).startswith("Timeout"),
"Wrong error, got: %s" % result.value)
deferred = self.startQuery(
sql='lock table test in exclusive mode',
timeout=0.5)
deferred.addBoth(check)
return deferred
class DummyFactory(protocol.Factory):
protocol = protocol.Protocol
class TimeoutConnectionTestCase(QueryTestCase):
"""Test killing hanging TCP connections"""
if not cx_Oracle or not etree:
skip = "Missing cx_Oracle"
def setUp(self):
super(TimeoutConnectionTestCase, self).setUp()
# I assume this test isn't going to be run on an Oracle server...
self.server = reactor.listenTCP(1521, DummyFactory())
self.config = {
'type': 'oracle_sql',
'user': 'nobody',
'password': 'ponies',
'timeout': 0.5,
'dsn': 'localhost/blackhole'}
def tearDown(self):
return self.server.stopListening()
def test_timeout(self):
def check(result):
self.assertIsInstance(result, errors.Failure)
self.assert_(str(result.value).startswith("Timeout"),
"Wrong error, got: %s" % result.value)
d = self.startQuery(self.config)
d.addBoth(check)
return d
class PLSQLTestCase(OracleBase):
SQL_CLEAN = ("drop package pltest", "commit")
QUERY_TYPE = "oracle_plsql"
def setUp(self):
super(PLSQLTestCase, self).setUp()
path = "%s/%s" % (os.path.dirname(os.path.abspath(__file__)),
"oracle_package.sql")
# For some reason running this SQL via cx_Oracle doesn't
# work, but it does with sqlplus. I don't know why. :-(
input = open(path)
proc = subprocess.Popen(
["sqlplus", "-S", "-L", "%s/%s@%s" % (
self.config['user'],
self.config['password'],
self.config['dsn'])],
stdin=input,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
input.close()
out,bleh = proc.communicate()
for line in out.splitlines():
line = line.strip()
if line:
log.msg("[sqlplus] %s" % line)
assert proc.returncode == 0
def test_1(self):
def check(result):
self.assertEqualsXML(result,
"""<result>
<p_out type="NUMBER">1.0</p_out>
</result>""")
d = self.startQuery(procedure="pltest.one",
parameters=[['out', 'p_out', "number"]])
d.addCallback(check)
return d
def test_2(self):
def check(result):
self.assertEqualsXML(result,
"""<result>
<p_out type="NUMBER">3.0</p_out>
</result>""")
d = self.startQuery(procedure="pltest.two",
parameters=[['in', 'p_in', 7],
['out', 'p_out', 'number']])
d.addCallback(check)
return d
def test_3(self):
def check(result):
self.assertEqualsXML(result,
"""<result>
<p_out>
<row><level type="NUMBER">1</level></row>
<row><level type="NUMBER">2</level></row>
<row><level type="NUMBER">3</level></row>
<row><level type="NUMBER">4</level></row>
<row><level type="NUMBER">5</level></row>
<row><level type="NUMBER">6</level></row>
<row><level type="NUMBER">7</level></row>
<row><level type="NUMBER">8</level></row>
<row><level type="NUMBER">9</level></row>
<row><level type="NUMBER">10</level></row>
</p_out>
</result>""")
d = self.startQuery(procedure="pltest.three",
parameters=[['out', 'p_out', 'cursor']])
d.addCallback(check)
return d
def test_4(self):
def check(result):
self.assertEqualsXML(result,
"""<result>
<p_one>
<row><level type="NUMBER">1</level></row>
<row><level type="NUMBER">2</level></row>
<row><level type="NUMBER">3</level></row>
<row><level type="NUMBER">4</level></row>
<row><level type="NUMBER">5</level></row>
<row><level type="NUMBER">6</level></row>
<row><level type="NUMBER">7</level></row>
<row><level type="NUMBER">8</level></row>
<row><level type="NUMBER">9</level></row>
<row><level type="NUMBER">10</level></row>
</p_one>
<p_two>
<row><level type="NUMBER">1</level></row>
<row><level type="NUMBER">2</level></row>
<row><level type="NUMBER">3</level></row>
<row><level type="NUMBER">4</level></row>
<row><level type="NUMBER">5</level></row>
<row><level type="NUMBER">6</level></row>
<row><level type="NUMBER">7</level></row>
<row><level type="NUMBER">8</level></row>
<row><level type="NUMBER">9</level></row>
<row><level type="NUMBER">10</level></row>
</p_two>
</result>""")
d = self.startQuery(procedure="pltest.four",
parameters=[['out', 'p_one', 'cursor'],
['out', 'p_two', 'cursor']])
d.addCallback(check)
return d
def test_5(self):
def check(result):
# The current behavior of the conversion to XML is to
# represent NULL as an empty element. But what about
# NULL vs empty strings? Do we care?
self.assertEqualsXML(result,
"""<result>
<p_one type="NUMBER">1.0</p_one>
<p_two type="NUMBER"/>
</result>""")
d = self.startQuery(procedure="pltest.five",
parameters=[['out', 'p_one', 'number'],
['out', 'p_two', 'number']])
d.addCallback(check)
return d
class ForkItTestCase(unittest.TestCase):
"""Test my defer-to-subprocess type class"""
def testNone(self):
proc = query_oracle.ForkIt(1, lambda: None)
d = proc.getResult()
d.addBoth(self.assertIdentical, None)
return d
def testTrue(self):
proc = query_oracle.ForkIt(1, lambda: True)
d = proc.getResult()
d.addBoth(self.assertIdentical, True)
return d
def testAbort(self):
def check(result):
self.assertIsInstance(result, errors.Failure)
self.assertIsInstance(result.value, errors.TestUnknown)
self.assertIn("subprocess exited with no results",
str(result.value))
proc = query_oracle.ForkIt(1, os._exit, 0)
d = proc.getResult()
d.addBoth(check)
return d
testAbort.skip = "Flaky test :-/"
def testTimeout(self):
def check(result):
self.assertIsInstance(result, errors.Failure)
self.assertIsInstance(result.value, errors.TestCritical)
self.assertIn("Timeout", str(result.value))
proc = query_oracle.ForkIt(0.1, time.sleep, 10)
d = proc.getResult()
d.addBoth(check)
return d
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
from concurrent import futures
import contextlib
import distutils.spawn
import errno
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
from six import moves
import grpc
from tests.unit.framework.common import test_constants
import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2
import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2
import tests.protoc_plugin.protos.service.test_service_pb2 as service_pb2
# Identifiers of entities we expect to find in the generated module.
STUB_IDENTIFIER = 'TestServiceStub'
SERVICER_IDENTIFIER = 'TestServiceServicer'
ADD_SERVICER_TO_SERVER_IDENTIFIER = 'add_TestServiceServicer_to_server'
class _ServicerMethods(object):
def __init__(self):
self._condition = threading.Condition()
self._paused = False
self._fail = False
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
@contextlib.contextmanager
def fail(self): # pylint: disable=invalid-name
with self._condition:
self._fail = True
yield
with self._condition:
self._fail = False
def _control(self): # pylint: disable=invalid-name
with self._condition:
if self._fail:
raise ValueError()
while self._paused:
self._condition.wait()
def UnaryCall(self, request, unused_rpc_context):
response = response_pb2.SimpleResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, unused_rpc_context):
for parameter in request.response_parameters:
response = response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, unused_rpc_context):
response = response_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
response.aggregated_payload_size = aggregated_payload_size
self._control()
return response
def FullDuplexCall(self, request_iter, unused_rpc_context):
for request in request_iter:
for parameter in request.response_parameters:
response = response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def HalfDuplexCall(self, request_iter, unused_rpc_context):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
response = response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
for response in responses:
yield response
class _Service(
collections.namedtuple('_Service', ('servicer_methods', 'server',
'stub',))):
"""A live and running service.
Attributes:
servicer_methods: The _ServicerMethods servicing RPCs.
server: The grpc.Server servicing RPCs.
stub: A stub on which to invoke RPCs.
"""
def _CreateService():
"""Provides a servicer backend and a stub.
Returns:
A _Service with which to test RPCs.
"""
servicer_methods = _ServicerMethods()
class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
def UnaryCall(self, request, context):
return servicer_methods.UnaryCall(request, context)
def StreamingOutputCall(self, request, context):
return servicer_methods.StreamingOutputCall(request, context)
def StreamingInputCall(self, request_iter, context):
return servicer_methods.StreamingInputCall(request_iter, context)
def FullDuplexCall(self, request_iter, context):
return servicer_methods.FullDuplexCall(request_iter, context)
def HalfDuplexCall(self, request_iter, context):
return servicer_methods.HalfDuplexCall(request_iter, context)
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
return _Service(servicer_methods, server, stub)
def _CreateIncompleteService():
"""Provides a servicer backend that fails to implement methods and its stub.
Returns:
A _Service with which to test RPCs. The returned _Service's
servicer_methods implements none of the methods required of it.
"""
class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
pass
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
return _Service(None, server, stub)
def _streaming_input_request_iterator():
for _ in range(3):
request = request_pb2.StreamingInputCallRequest()
request.payload.payload_type = payload_pb2.COMPRESSABLE
request.payload.payload_compressable = 'a'
yield request
def _streaming_output_request():
request = request_pb2.StreamingOutputCallRequest()
sizes = [1, 2, 3]
request.response_parameters.add(size=sizes[0], interval_us=0)
request.response_parameters.add(size=sizes[1], interval_us=0)
request.response_parameters.add(size=sizes[2], interval_us=0)
return request
def _full_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
class PythonPluginTest(unittest.TestCase):
"""Test case for the gRPC Python protoc-plugin.
While reading these tests, remember that the futures API
(`stub.method.future()`) only gives futures for the *response-unary*
methods and does not exist for response-streaming methods.
"""
def testImportAttributes(self):
# check that we can access the generated module and its members.
self.assertIsNotNone(getattr(service_pb2, STUB_IDENTIFIER, None))
self.assertIsNotNone(getattr(service_pb2, SERVICER_IDENTIFIER, None))
self.assertIsNotNone(
getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))
def testUpDown(self):
service = _CreateService()
self.assertIsNotNone(service.servicer_methods)
self.assertIsNotNone(service.server)
self.assertIsNotNone(service.stub)
def testIncompleteServicer(self):
service = _CreateIncompleteService()
request = request_pb2.SimpleRequest(response_size=13)
with self.assertRaises(grpc.RpcError) as exception_context:
service.stub.UnaryCall(request)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.UNIMPLEMENTED)
def testUnaryCall(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
response = service.stub.UnaryCall(request)
expected_response = service.servicer_methods.UnaryCall(
request, 'not a real context!')
self.assertEqual(expected_response, response)
def testUnaryCallFuture(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
# Check that the call does not block waiting for the server to respond.
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(request)
response = response_future.result()
expected_response = service.servicer_methods.UnaryCall(
request, 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testUnaryCallFutureExpired(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(
request, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
self.assertIs(response_future.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
def testUnaryCallFutureCancelled(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(request)
response_future.cancel()
self.assertTrue(response_future.cancelled())
self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED)
def testUnaryCallFutureFailed(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.fail():
response_future = service.stub.UnaryCall.future(request)
self.assertIsNotNone(response_future.exception())
self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
def testStreamingOutputCall(self):
service = _CreateService()
request = _streaming_output_request()
responses = service.stub.StreamingOutputCall(request)
expected_responses = service.servicer_methods.StreamingOutputCall(
request, 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(expected_responses,
responses):
self.assertEqual(expected_response, response)
def testStreamingOutputCallExpired(self):
service = _CreateService()
request = _streaming_output_request()
with service.servicer_methods.pause():
responses = service.stub.StreamingOutputCall(
request, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
list(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
def testStreamingOutputCallCancelled(self):
service = _CreateService()
request = _streaming_output_request()
responses = service.stub.StreamingOutputCall(request)
next(responses)
responses.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(responses.code(), grpc.StatusCode.CANCELLED)
def testStreamingOutputCallFailed(self):
service = _CreateService()
request = _streaming_output_request()
with service.servicer_methods.fail():
responses = service.stub.StreamingOutputCall(request)
self.assertIsNotNone(responses)
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.UNKNOWN)
def testStreamingInputCall(self):
service = _CreateService()
response = service.stub.StreamingInputCall(
_streaming_input_request_iterator())
expected_response = service.servicer_methods.StreamingInputCall(
_streaming_input_request_iterator(), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testStreamingInputCallFuture(self):
service = _CreateService()
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator())
response = response_future.result()
expected_response = service.servicer_methods.StreamingInputCall(
_streaming_input_request_iterator(), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testStreamingInputCallFutureExpired(self):
service = _CreateService()
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator(),
timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIs(response_future.exception().code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
def testStreamingInputCallFutureCancelled(self):
service = _CreateService()
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator())
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
def testStreamingInputCallFutureFailed(self):
service = _CreateService()
with service.servicer_methods.fail():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator())
self.assertIsNotNone(response_future.exception())
self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
def testFullDuplexCall(self):
service = _CreateService()
responses = service.stub.FullDuplexCall(_full_duplex_request_iterator())
expected_responses = service.servicer_methods.FullDuplexCall(
_full_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(expected_responses,
responses):
self.assertEqual(expected_response, response)
def testFullDuplexCallExpired(self):
request_iterator = _full_duplex_request_iterator()
service = _CreateService()
with service.servicer_methods.pause():
responses = service.stub.FullDuplexCall(
request_iterator, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
list(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
def testFullDuplexCallCancelled(self):
service = _CreateService()
request_iterator = _full_duplex_request_iterator()
responses = service.stub.FullDuplexCall(request_iterator)
next(responses)
responses.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.CANCELLED)
def testFullDuplexCallFailed(self):
request_iterator = _full_duplex_request_iterator()
service = _CreateService()
with service.servicer_methods.fail():
responses = service.stub.FullDuplexCall(request_iterator)
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.UNKNOWN)
def testHalfDuplexCall(self):
service = _CreateService()
def half_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
responses = service.stub.HalfDuplexCall(half_duplex_request_iterator())
expected_responses = service.servicer_methods.HalfDuplexCall(
half_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(expected_responses,
responses):
self.assertEqual(expected_response, response)
def testHalfDuplexCallWedged(self):
condition = threading.Condition()
wait_cell = [False]
@contextlib.contextmanager
def wait(): # pylint: disable=invalid-name
# Where's Python 3's 'nonlocal' statement when you need it?
with condition:
wait_cell[0] = True
yield
with condition:
wait_cell[0] = False
condition.notify_all()
def half_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
with condition:
while wait_cell[0]:
condition.wait()
service = _CreateService()
with wait():
responses = service.stub.HalfDuplexCall(
half_duplex_request_iterator(),
timeout=test_constants.SHORT_TIMEOUT)
# half-duplex waits for the client to send all info
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
# License: EPL
import os
import sys
import traceback
try:
xrange
except NameError:
xrange = range
#===============================================================================
# Things that are dependent on having the pydevd debugger
#===============================================================================
def log_debug(msg):
from _pydev_bundle import pydev_log
pydev_log.debug(msg)
def log_error_once(msg):
from _pydev_bundle import pydev_log
pydev_log.error_once(msg)
pydev_src_dir = os.path.dirname(os.path.dirname(__file__))
def _get_pydevd_args():
new_args = []
for x in sys.original_argv:
new_args.append(x)
if x == '--file':
break
return new_args
def _get_python_c_args(host, port, indC, args):
return ("import sys; sys.path.append(r'%s'); import pydevd; "
"pydevd.settrace(host='%s', port=%s, suspend=False, trace_only_current_thread=False, patch_multiprocessing=True); "
"sys.original_argv = %s; %s"
) % (
pydev_src_dir,
host,
port,
_get_pydevd_args(),
args[indC + 1])
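# Illustrative sketch (comments only): for a command such as  python -c "print('hi')"
# the function above produces a bootstrap string roughly of the form
#     "import sys; sys.path.append(r'<pydev_src_dir>'); import pydevd; "
#     "pydevd.settrace(host='127.0.0.1', port=5678, suspend=False, ...); "
#     "sys.original_argv = [...]; print('hi')"
# so the child interpreter attaches to the debugger before running the original -c code.
# The host/port shown here are hypothetical; the real values come from pydevd.dispatch().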
def _get_host_port():
import pydevd
host, port = pydevd.dispatch()
return host, port
def _is_managed_arg(arg):
if arg.endswith('pydevd.py'):
return True
return False
def _on_forked_process():
import pydevd
pydevd.threadingCurrentThread().__pydevd_main_thread = True
pydevd.settrace_forked()
def _on_set_trace_for_new_thread(global_debugger):
if global_debugger is not None:
global_debugger.SetTrace(global_debugger.trace_dispatch)
#===============================================================================
# Things related to monkey-patching
#===============================================================================
def is_python(path):
if path.endswith("'") or path.endswith('"'):
path = path[1:len(path) - 1]
filename = os.path.basename(path).lower()
for name in ['python', 'jython', 'pypy']:
if filename.find(name) != -1:
return True
return False
def remove_quotes_from_args(args):
new_args = []
for x in args:
if len(x) > 1 and x.startswith('"') and x.endswith('"'):
x = x[1:-1]
new_args.append(x)
return new_args
def quote_args(args):
if sys.platform == "win32":
quoted_args = []
for x in args:
if x.startswith('"') and x.endswith('"'):
quoted_args.append(x)
else:
if ' ' in x:
x = x.replace('"', '\\"')
quoted_args.append('"%s"' % x)
else:
quoted_args.append(x)
return quoted_args
else:
return args
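# Example (illustrative, Windows only): quote_args(['C:\\Program Files\\python.exe', '-u', 'my script.py'])
# returns ['"C:\\Program Files\\python.exe"', '-u', '"my script.py"'] -- only arguments that
# contain spaces are wrapped in double quotes (with any embedded quotes backslash-escaped).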
def patch_args(args):
try:
        log_debug("Patching args: %s" % str(args))
args = remove_quotes_from_args(args)
import sys
new_args = []
i = 0
if len(args) == 0:
return args
if is_python(args[0]):
try:
indC = args.index('-c')
except ValueError:
indC = -1
if indC != -1:
host, port = _get_host_port()
if port is not None:
new_args.extend(args)
new_args[indC + 1] = _get_python_c_args(host, port, indC, args)
return quote_args(new_args)
else:
# Check for Python ZIP Applications and don't patch the args for them.
# Assumes the first non `-<flag>` argument is what we need to check.
# There's probably a better way to determine this but it works for most cases.
continue_next = False
for i in range(1, len(args)):
if continue_next:
continue_next = False
continue
arg = args[i]
if arg.startswith('-'):
# Skip the next arg too if this flag expects a value.
continue_next = arg in ['-m', '-W', '-X']
continue
if arg.rsplit('.')[-1] in ['zip', 'pyz', 'pyzw']:
log_debug('Executing a PyZip, returning')
return args
break
new_args.append(args[0])
else:
log_debug("Process is not python, returning.")
return args
i = 1
# Original args should be something as:
# ['X:\\pysrc\\pydevd.py', '--multiprocess', '--print-in-debugger-startup',
# '--vm_type', 'python', '--client', '127.0.0.1', '--port', '56352', '--file', 'x:\\snippet1.py']
original = sys.original_argv[:]
while i < len(args):
if args[i] == '-m':
# Always insert at pos == 1 (i.e.: pydevd "--module" --multiprocess ...)
original.insert(1, '--module')
else:
if args[i].startswith('-'):
new_args.append(args[i])
else:
break
i += 1
# Note: undoing https://github.com/Elizaveta239/PyDev.Debugger/commit/053c9d6b1b455530bca267e7419a9f63bf51cddf
# (i >= len(args) instead of i < len(args))
# in practice it'd raise an exception here and would return original args, which is not what we want... providing
# a proper fix for https://youtrack.jetbrains.com/issue/PY-9767 elsewhere.
if i >= len(args) or _is_managed_arg(args[i]): # no need to add pydevd twice
return args
for x in original:
new_args.append(x)
if x == '--file':
break
while i < len(args):
new_args.append(args[i])
i += 1
return quote_args(new_args)
except:
traceback.print_exc()
return args
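# Summary of patch_args(): for ``python -c <code>`` commands the code string is rewritten
# (via _get_python_c_args) so that pydevd attaches before the original code runs; for
# ``python script.py ...`` commands the pydevd launcher arguments recorded in
# sys.original_argv are inserted in front of the target script so the child process is
# debugged as well. Non-Python commands and Python zip applications are returned untouched.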
def str_to_args_windows(args):
    # see http://msdn.microsoft.com/en-us/library/a1y7w461.aspx
result = []
DEFAULT = 0
ARG = 1
IN_DOUBLE_QUOTE = 2
state = DEFAULT
backslashes = 0
buf = ''
    args_len = len(args)
    skip_next_char = False  # set when the second quote of an escaped "" pair has been consumed
    for i in xrange(args_len):
        if skip_next_char:
            skip_next_char = False
            continue
        ch = args[i]
if (ch == '\\'):
backslashes += 1
continue
elif (backslashes != 0):
if ch == '"':
while backslashes >= 2:
backslashes -= 2
buf += '\\'
if (backslashes == 1):
if (state == DEFAULT):
state = ARG
buf += '"'
backslashes = 0
continue
# else fall through to switch
else:
# false alarm, treat passed backslashes literally...
if (state == DEFAULT):
state = ARG
while backslashes > 0:
backslashes -= 1
buf += '\\'
# fall through to switch
if ch in (' ', '\t'):
if (state == DEFAULT):
# skip
continue
elif (state == ARG):
state = DEFAULT
result.append(buf)
buf = ''
continue
if state in (DEFAULT, ARG):
if ch == '"':
state = IN_DOUBLE_QUOTE
else:
state = ARG
buf += ch
elif state == IN_DOUBLE_QUOTE:
if ch == '"':
if (i + 1 < args_len and args[i + 1] == '"'):
# Undocumented feature in Windows:
# Two consecutive double quotes inside a double-quoted argument are interpreted as
# a single double quote.
buf += '"'
                    skip_next_char = True  # consume the second quote of the pair
elif len(buf) == 0:
# empty string on Windows platform. Account for bug in constructor of
# JDK's java.lang.ProcessImpl.
result.append("\"\"")
state = DEFAULT
else:
state = ARG
else:
buf += ch
else:
raise RuntimeError('Illegal condition')
if len(buf) > 0 or state != DEFAULT:
result.append(buf)
return result
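def _str_to_args_windows_examples():
    # Illustrative sketch only (not part of the original module); call manually to see the
    # MSDN splitting rules implemented above in action. Nothing here runs at import time.
    assert str_to_args_windows('python -u "my script.py"') == ['python', '-u', 'my script.py']
    assert str_to_args_windows('a "b c" d') == ['a', 'b c', 'd']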
def patch_arg_str_win(arg_str):
args = str_to_args_windows(arg_str)
# Fix https://youtrack.jetbrains.com/issue/PY-9767 (args may be empty)
if not args or not is_python(args[0]):
return arg_str
arg_str = ' '.join(patch_args(args))
log_debug("New args: %s" % arg_str)
return arg_str
def monkey_patch_module(module, funcname, create_func):
if hasattr(module, funcname):
original_name = 'original_' + funcname
if not hasattr(module, original_name):
setattr(module, original_name, getattr(module, funcname))
setattr(module, funcname, create_func(original_name))
def monkey_patch_os(funcname, create_func):
monkey_patch_module(os, funcname, create_func)
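def _monkey_patch_example():
    # Minimal sketch (not part of the original module) of the monkey_patch_module()
    # pattern, applied to a hypothetical stand-in object instead of os/_subprocess.
    class _FakeModule(object):
        pass
    fake_mod = _FakeModule()
    fake_mod.spawn = lambda cmd: 'spawned %s' % cmd

    def create_logged_spawn(original_name):
        def logged_spawn(cmd):
            # Delegate to the saved original, just like create_execl() etc. above.
            return getattr(fake_mod, original_name)(cmd)
        return logged_spawn

    monkey_patch_module(fake_mod, 'spawn', create_logged_spawn)
    # The unpatched callable is preserved as fake_mod.original_spawn.
    return fake_mod.spawn('demo'), fake_mod.original_spawn('demo')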
def warn_multiproc():
log_error_once(
"pydev debugger: New process is launching (breakpoints won't work in the new process).\n"
"pydev debugger: To debug that process please enable 'Attach to subprocess automatically while debugging?' option in the debugger settings.\n")
def create_warn_multiproc(original_name):
def new_warn_multiproc(*args):
import os
warn_multiproc()
return getattr(os, original_name)(*args)
return new_warn_multiproc
def create_execl(original_name):
def new_execl(path, *args):
"""
os.execl(path, arg0, arg1, ...)
os.execle(path, arg0, arg1, ..., env)
os.execlp(file, arg0, arg1, ...)
os.execlpe(file, arg0, arg1, ..., env)
"""
import os
args = patch_args(args)
return getattr(os, original_name)(path, *args)
return new_execl
def create_execv(original_name):
def new_execv(path, args):
"""
os.execv(path, args)
os.execvp(file, args)
"""
import os
return getattr(os, original_name)(path, patch_args(args))
return new_execv
def create_execve(original_name):
"""
os.execve(path, args, env)
os.execvpe(file, args, env)
"""
def new_execve(path, args, env):
import os
return getattr(os, original_name)(path, patch_args(args), env)
return new_execve
def create_spawnl(original_name):
def new_spawnl(mode, path, *args):
"""
os.spawnl(mode, path, arg0, arg1, ...)
os.spawnlp(mode, file, arg0, arg1, ...)
"""
import os
args = patch_args(args)
return getattr(os, original_name)(mode, path, *args)
return new_spawnl
def create_spawnv(original_name):
def new_spawnv(mode, path, args):
"""
os.spawnv(mode, path, args)
os.spawnvp(mode, file, args)
"""
import os
return getattr(os, original_name)(mode, path, patch_args(args))
return new_spawnv
def create_spawnve(original_name):
"""
os.spawnve(mode, path, args, env)
os.spawnvpe(mode, file, args, env)
"""
def new_spawnve(mode, path, args, env):
import os
return getattr(os, original_name)(mode, path, patch_args(args), env)
return new_spawnve
def create_fork_exec(original_name):
"""
_posixsubprocess.fork_exec(args, executable_list, close_fds, ... (13 more))
"""
def new_fork_exec(args, *other_args):
import _posixsubprocess # @UnresolvedImport
args = patch_args(args)
return getattr(_posixsubprocess, original_name)(args, *other_args)
return new_fork_exec
def create_warn_fork_exec(original_name):
"""
_posixsubprocess.fork_exec(args, executable_list, close_fds, ... (13 more))
"""
def new_warn_fork_exec(*args):
try:
import _posixsubprocess
warn_multiproc()
return getattr(_posixsubprocess, original_name)(*args)
except:
pass
return new_warn_fork_exec
def create_CreateProcess(original_name):
"""
CreateProcess(*args, **kwargs)
"""
def new_CreateProcess(app_name, cmd_line, *args):
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
return getattr(_subprocess, original_name)(app_name, patch_arg_str_win(cmd_line), *args)
return new_CreateProcess
def create_CreateProcessWarnMultiproc(original_name):
"""
CreateProcess(*args, **kwargs)
"""
def new_CreateProcess(*args):
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
warn_multiproc()
return getattr(_subprocess, original_name)(*args)
return new_CreateProcess
def create_fork(original_name):
def new_fork():
import os
# A simple fork will result in a new python process
is_new_python_process = True
frame = sys._getframe()
while frame is not None:
if frame.f_code.co_name == '_execute_child' and 'subprocess' in frame.f_code.co_filename:
# If we're actually in subprocess.Popen creating a child, it may
# result in something which is not a Python process, (so, we
# don't want to connect with it in the forked version).
executable = frame.f_locals.get('executable')
if executable is not None:
is_new_python_process = False
if is_python(executable):
is_new_python_process = True
break
frame = frame.f_back
frame = None # Just make sure we don't hold on to it.
child_process = getattr(os, original_name)() # fork
if not child_process:
if is_new_python_process:
_on_forked_process()
return child_process
return new_fork
def patch_new_process_functions():
# os.execl(path, arg0, arg1, ...)
# os.execle(path, arg0, arg1, ..., env)
# os.execlp(file, arg0, arg1, ...)
# os.execlpe(file, arg0, arg1, ..., env)
# os.execv(path, args)
# os.execve(path, args, env)
# os.execvp(file, args)
# os.execvpe(file, args, env)
monkey_patch_os('execl', create_execl)
monkey_patch_os('execle', create_execl)
monkey_patch_os('execlp', create_execl)
monkey_patch_os('execlpe', create_execl)
monkey_patch_os('execv', create_execv)
monkey_patch_os('execve', create_execve)
monkey_patch_os('execvp', create_execv)
monkey_patch_os('execvpe', create_execve)
# os.spawnl(mode, path, ...)
# os.spawnle(mode, path, ..., env)
# os.spawnlp(mode, file, ...)
# os.spawnlpe(mode, file, ..., env)
# os.spawnv(mode, path, args)
# os.spawnve(mode, path, args, env)
# os.spawnvp(mode, file, args)
# os.spawnvpe(mode, file, args, env)
monkey_patch_os('spawnl', create_spawnl)
monkey_patch_os('spawnle', create_spawnl)
monkey_patch_os('spawnlp', create_spawnl)
monkey_patch_os('spawnlpe', create_spawnl)
monkey_patch_os('spawnv', create_spawnv)
monkey_patch_os('spawnve', create_spawnve)
monkey_patch_os('spawnvp', create_spawnv)
monkey_patch_os('spawnvpe', create_spawnve)
if sys.platform != 'win32':
monkey_patch_os('fork', create_fork)
try:
import _posixsubprocess
monkey_patch_module(_posixsubprocess, 'fork_exec', create_fork_exec)
except ImportError:
pass
else:
# Windows
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
monkey_patch_module(_subprocess, 'CreateProcess', create_CreateProcess)
def patch_new_process_functions_with_warning():
monkey_patch_os('execl', create_warn_multiproc)
monkey_patch_os('execle', create_warn_multiproc)
monkey_patch_os('execlp', create_warn_multiproc)
monkey_patch_os('execlpe', create_warn_multiproc)
monkey_patch_os('execv', create_warn_multiproc)
monkey_patch_os('execve', create_warn_multiproc)
monkey_patch_os('execvp', create_warn_multiproc)
monkey_patch_os('execvpe', create_warn_multiproc)
monkey_patch_os('spawnl', create_warn_multiproc)
monkey_patch_os('spawnle', create_warn_multiproc)
monkey_patch_os('spawnlp', create_warn_multiproc)
monkey_patch_os('spawnlpe', create_warn_multiproc)
monkey_patch_os('spawnv', create_warn_multiproc)
monkey_patch_os('spawnve', create_warn_multiproc)
monkey_patch_os('spawnvp', create_warn_multiproc)
monkey_patch_os('spawnvpe', create_warn_multiproc)
if sys.platform != 'win32':
monkey_patch_os('fork', create_warn_multiproc)
try:
import _posixsubprocess
monkey_patch_module(_posixsubprocess, 'fork_exec', create_warn_fork_exec)
except ImportError:
pass
else:
# Windows
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
monkey_patch_module(_subprocess, 'CreateProcess', create_CreateProcessWarnMultiproc)
class _NewThreadStartupWithTrace:
def __init__(self, original_func, args, kwargs):
self.original_func = original_func
self.args = args
self.kwargs = kwargs
self.global_debugger = self.get_debugger()
def get_debugger(self):
from _pydevd_bundle.pydevd_comm import get_global_debugger
return get_global_debugger()
def __call__(self):
_on_set_trace_for_new_thread(self.global_debugger)
global_debugger = self.global_debugger
if global_debugger is not None and global_debugger.thread_analyser is not None:
# we can detect start_new_thread only here
try:
from pydevd_concurrency_analyser.pydevd_concurrency_logger import log_new_thread
log_new_thread(global_debugger)
except:
                sys.stderr.write("Failed to detect new thread for visualization\n")
return self.original_func(*self.args, **self.kwargs)
class _NewThreadStartupWithoutTrace:
def __init__(self, original_func, args, kwargs):
self.original_func = original_func
self.args = args
self.kwargs = kwargs
def __call__(self):
return self.original_func(*self.args, **self.kwargs)
_UseNewThreadStartup = _NewThreadStartupWithTrace
def _get_threading_modules_to_patch():
threading_modules_to_patch = []
try:
import thread as _thread
except:
import _thread
threading_modules_to_patch.append(_thread)
return threading_modules_to_patch
threading_modules_to_patch = _get_threading_modules_to_patch()
def patch_thread_module(thread):
if getattr(thread, '_original_start_new_thread', None) is None:
_original_start_new_thread = thread._original_start_new_thread = thread.start_new_thread
else:
_original_start_new_thread = thread._original_start_new_thread
class ClassWithPydevStartNewThread:
def pydev_start_new_thread(self, function, args=(), kwargs={}):
'''
We need to replace the original thread.start_new_thread with this function so that threads started
through it and not through the threading module are properly traced.
'''
return _original_start_new_thread(_UseNewThreadStartup(function, args, kwargs), ())
# This is a hack for the situation where the thread.start_new_thread is declared inside a class, such as the one below
# class F(object):
# start_new_thread = thread.start_new_thread
#
# def start_it(self):
# self.start_new_thread(self.function, args, kwargs)
# So, if it's an already bound method, calling self.start_new_thread won't really receive a different 'self' -- it
# does work in the default case because in builtins self isn't passed either.
pydev_start_new_thread = ClassWithPydevStartNewThread().pydev_start_new_thread
try:
# We need to replace the original thread.start_new_thread with this function so that threads started through
# it and not through the threading module are properly traced.
thread.start_new_thread = pydev_start_new_thread
thread.start_new = pydev_start_new_thread
except:
pass
def patch_thread_modules():
for t in threading_modules_to_patch:
patch_thread_module(t)
def undo_patch_thread_modules():
for t in threading_modules_to_patch:
try:
t.start_new_thread = t._original_start_new_thread
except:
pass
try:
t.start_new = t._original_start_new_thread
except:
pass
def disable_trace_thread_modules():
'''
Can be used to temporarily stop tracing threads created with thread.start_new_thread.
'''
global _UseNewThreadStartup
_UseNewThreadStartup = _NewThreadStartupWithoutTrace
def enable_trace_thread_modules():
'''
Can be used to start tracing threads created with thread.start_new_thread again.
'''
global _UseNewThreadStartup
_UseNewThreadStartup = _NewThreadStartupWithTrace
def get_original_start_new_thread(threading_module):
try:
return threading_module._original_start_new_thread
except:
return threading_module.start_new_thread
|
|
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2020 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier ([email protected])
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from .version import __version__
import os
import math, operator
import numpy
from .. import output_dir, __CHGDIR_ON_START__
if __CHGDIR_ON_START__:
CWD = output_dir
else:
CWD = os.getcwd()
from .InfixParser import MyInfixParser
InfixParser = MyInfixParser()
InfixParser.buildlexer()
InfixParser.buildparser(
debug=0,
debugfile='infix.dbg',
tabmodule='infix_tabmodule',
outputdir=output_dir,
)
InfixParser.setNameStr('self.', '()')
os.chdir(CWD)
class MapList(list):
def __init__(self, *args):
list.__init__(self, *args)
def asSet(self):
        return set(self)
class NewCoreBase(object):
__DEBUG__ = False
name = None
annotations = None
def getName(self):
return self.name
def setName(self, name):
self.name = name
def get(self, attr):
"""Return an attribute whose name is str(attr)"""
return self.__getattribute__(attr)
def getAnnotation(self):
"""Returns an annotation dictionary"""
if self.annotations == None:
self.annotations = {}
return self.annotations.copy()
def setAnnotation(self, key, value):
"""Set an annotation as a key:value pair"""
if self.annotations == None:
self.annotations = {}
self.annotations.update({key: value})
class NumberBase(NewCoreBase):
value = None
value_initial = None
def __call__(self):
return self.value
def getValue(self):
return self.value
def setValue(self, v):
self.value = v
class Compartment(NewCoreBase):
size = None
dimensions = None
Compartment = None
reactions = None
species = None
area = None
def __init__(self, name, compartment=None):
self.name = name
self.Compartment = compartment
self.reactions = []
self.species = []
def __call__(self):
return self.size
def setSize(self, size, dim):
self.size = size
assert dim in [0, 1, 2, 3], '\nOkeee! %s dimensions?' % dim
self.dimensions = dim
def setArea(self, area=None):
if area == None and self.dimensions == 2:
self.area = self.size
if self.__DEBUG__:
print('Setting reactive area to size for 2D compartment %s' % self.name)
elif area == None and self.dimensions == 3:
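            # 36*pi ~= 113.0973; (36*pi*V**2)**(1/3) is the surface area of a sphere of volume V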
self.area = (113.09733552923255 * self.size ** 2.0) ** (0.33333333333333331)
if self.__DEBUG__:
print(
'Setting reactive area to surface area for 3D compartment %s (assuming a sphere geometry)'
% self.name
)
        else:
            self.area = area
def hasReactions(self):
return MapList([r.name for r in self.reactions])
def hasSpecies(self):
return MapList([s.name for s in self.species])
def addReaction(self, reaction):
if reaction.name not in self.hasReactions():
self.reactions.append(reaction)
self.__setattr__(reaction.name, reaction)
if self.__DEBUG__:
print('Adding reaction %s' % reaction.name)
def addSpecies(self, species):
if species.name not in self.hasSpecies():
self.species.append(species)
self.__setattr__(species.name, species)
if self.__DEBUG__:
print('Adding species %s' % species.name)
else:
if self.__DEBUG__:
print('Species %s already added' % species.name)
def getDimensions(self):
return self.dimensions
def getCompartment(self):
return self.Compartment
def hasCompartment(self):
if self.Compartment != None:
return True
else:
return False
def isVolume(self):
if self.dimensions == 3:
return True
else:
return False
def isArea(self):
if self.dimensions == 2:
return True
else:
return False
def isLength(self):
if self.dimensions == 1:
return True
else:
return False
def isPoint(self):
if self.dimensions == 0:
return True
else:
return False
class BaseUnit(NewCoreBase):
    '''BaseUnit can be of type: time, substance, volume, area or length'''
_types = ('time', 'substance', 'volume', 'area', 'length')
value = 1.0
type = None
def __init__(self, name, type):
self.name = name
assert type in self._types, '\nType must be one of: %s' % str(self._types)
self.type = type
def __call__(self):
return self.value
def getType(self):
return self.type
class SimpleUnit(NewCoreBase):
exponent = 1.0
scale = 0.0
multiplier = 1.0
baseunit = None
type = None
def __init__(self, baseunit, name, exp=1.0, scale=0.0, mult=1.0):
self.baseunit = baseunit
self.exponent = exp
self.scale = scale
self.multiplier = mult
self.name = name
self.type = baseunit.type
def __call__(self):
return (self.multiplier * self.baseunit() * 10 ** self.scale) ** self.exponent
def getType(self):
return self.type
class CompoundUnit(NewCoreBase):
units = None
_HAS_USERNAME = False
def __init__(self, name=None):
self.units = []
if name != None:
self.name = name
self._HAS_USERNAME = True
else:
self.name = ''
def __call__(self):
U = 1.0
for u in self.units:
U *= u()
return U
def addUnit(self, unit):
self.units.append(unit)
if not self._HAS_USERNAME:
self.name = '%s%s' % (self.name, unit.getName())
def getUnits(self):
return self.units
def hasUnits(self):
return MapList([u.getName() for u in self.units])
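def _unit_example():
    # Illustrative sketch only (not part of the original module): a 'millilitre' built from a
    # 'litre' base unit, i.e. (multiplier * base * 10**scale) ** exponent = 1e-3, and a
    # compound 'per millilitre' unit that evaluates to 1e3.
    litre = BaseUnit('litre', 'volume')
    millilitre = SimpleUnit(litre, 'millilitre', exp=1.0, scale=-3.0, mult=1.0)
    per_ml = CompoundUnit('per_millilitre')
    per_ml.addUnit(SimpleUnit(litre, 'inv_millilitre', exp=-1.0, scale=-3.0))
    return millilitre(), per_ml()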
class Species(NumberBase):
subs = None
prods = None
mods = None
fixed = False
Compartment = None
__amount__ = False
def __init__(self, name, value):
self.setName(name)
self.value = value
self.value_initial = value
self.subs = []
self.prods = []
self.mods = []
def getCompartment(self):
return self.Compartment
def setCompartment(self, c):
self.Compartment = c
def hasCompartment(self):
if self.Compartment != None:
return True
else:
return False
def setSubstrate(self, reaction):
self.__setattr__(reaction.name, reaction)
self.subs.append(reaction)
def setProduct(self, reaction):
self.__setattr__(reaction.name, reaction)
self.prods.append(reaction)
def setModifier(self, reaction):
self.__setattr__(reaction.name, reaction)
self.mods.append(reaction)
def isSubstrateOf(self):
return MapList([r.name for r in self.subs])
def isProductOf(self):
return MapList([r.name for r in self.prods])
def isModifierOf(self):
return MapList([r.name for r in self.mods])
def isReagentOf(self):
return MapList(self.isSubstrateOf() + self.isProductOf())
def setAmount(self, b):
self.__amount__ = bool(b)
def isAmount(self):
return self.__amount__
class SpeciesAssignmentRule(Species):
formula = None
code_string = None
_names = None
_functions = None
type = 'assignment'
_TIME_ = None
def __init__(self, name, value):
Species.__init__(self, name, value)
def __call__(self):
exec(self.xcode)
return self.value
def addFormula(self, formula):
formula = formula.replace('self.', '')
self.formula = formula
InfixParser.setNameStr('self.', '()')
InfixParser.parse(formula)
self.code_string = 'self.value=%s' % InfixParser.output
self._names = InfixParser.names
self._functions = InfixParser.functions
self.xcode = compile(self.code_string, '<string>', 'exec')
def addModelAttr(self, obj):
self.__setattr__(obj.name, obj)
class Function(NewCoreBase):
formula = None
code_string = None
xcode = None
value = None
_names = None
args = None
_TIME_ = None
def __init__(self, name):
self.setName(name)
self.args = []
def __call__(self, *args):
for ar in range(len(args)):
self.__setattr__(self.args[ar], args[ar])
exec(self.xcode)
return self.value
def setArg(self, var, value=None):
self.__setattr__(var, value)
self.args.append(var)
def addFormula(self, formula):
formula = formula.replace('self.', '')
self.formula = formula
InfixParser.setNameStr('self.', '')
InfixParser.SymbolReplacements = {'_TIME_': '_TIME_()'}
InfixParser.parse(formula)
self._names = InfixParser.names
self.code_string = 'self.value=%s' % InfixParser.output
self.xcode = compile(self.code_string, '<string>', 'exec')
class Reaction(NewCoreBase):
modifiers = None
substrates = None
products = None
stoichiometry = None
multistoich = None
multistoich_enabled = False
parameters = None
functions = None
reversible = True
formula = None
code_string = None
rate = None
xcode = None
_names = None
_functions = None
_TIME_ = None
Compartment = None
def __call__(self):
exec(self.xcode)
return self.rate
def __init__(self, name):
self.setName(name)
self.modifiers = []
self.substrates = []
self.products = []
self.stoichiometry = {}
self.parameters = []
self.functions = []
self.multistoich = []
def addSubstrate(self, species):
self.__setattr__(species.name, species)
self.substrates.append(species)
def addProduct(self, species):
self.__setattr__(species.name, species)
self.products.append(species)
def addModifier(self, species):
self.__setattr__(species.name, species)
self.modifiers.append(species)
def addFormula(self, formula):
formula = formula.replace('self.', '')
self.formula = formula
InfixParser.setNameStr('self.', '()')
InfixParser.parse(formula)
self._names = InfixParser.names
self._functions = InfixParser.functions
self.code_string = 'self.rate=%s' % InfixParser.output
self.xcode = compile(self.code_string, '<string>', 'exec')
def addParameter(self, par):
self.__setattr__(par.name, par)
self.parameters.append(par)
def addFunction(self, func):
self.__setattr__(func.name, func)
self.functions.append(func)
    def hasProducts(self):
return MapList([p.name for p in self.products])
def hasSubstrates(self):
return MapList([s.name for s in self.substrates])
def hasModifiers(self):
return MapList([m.name for m in self.modifiers])
def hasParameters(self):
return MapList([p.name for p in self.parameters])
def hasReagents(self):
return MapList(self.hasSubstrates() + self.hasProducts())
def setCompartment(self, compartment):
self.Compartment = compartment
def getCompartment(self):
return self.Compartment
def hasCompartment(self):
if self.Compartment != None:
return True
else:
return False
class Parameter(NumberBase):
association = None
def __init__(self, name, value):
self.name = name
self.value = value
self.value_initial = value
self.association = []
def setAssociation(self, reac):
self.association.append(reac)
self.__setattr__(reac.name, reac)
def isParameterOf(self):
return MapList([a.name for a in self.association])
class AssignmentRule(Parameter):
formula = None
code_string = None
_names = None
_functions = None
type = 'assignment'
_TIME_ = None
fixed = False # added so that assignment rules can modify fixed species
def __init__(self, name, value):
Parameter.__init__(self, name, value)
def __call__(self):
exec(self.xcode)
return self.value
def addFormula(self, formula):
formula = formula.replace('self.', '')
self.formula = formula
InfixParser.setNameStr('self.', '()')
InfixParser.parse(formula)
self.code_string = 'self.value=%s' % InfixParser.output
self._names = InfixParser.names
self._functions = InfixParser.functions
self.xcode = compile(self.code_string, '<string>', 'exec')
def addModelAttr(self, obj):
self.__setattr__(obj.name, obj)
class RateRule(NewCoreBase):
formula = None
rate = None
xcode = None
code_string = None
_names = None
_functions = None
compartment = None
def __init__(self, name, formula):
self.name = name
self.addFormula(formula)
def __call__(self):
exec(self.xcode)
return self.rate
def addFormula(self, formula):
formula = formula.replace('self.', '')
self.formula = formula.replace('()', '')
InfixParser.setNameStr('self.', '()')
InfixParser.parse(self.formula)
self.code_string = 'self.rate=%s' % InfixParser.output
self._names = InfixParser.names
self._functions = InfixParser.functions
self.xcode = compile(self.code_string, 'RateRule: %s' % self.name, 'exec')
def getFormula(self):
return self.formula
def addModelAttr(self, obj):
self.__setattr__(obj.name, obj)
class ODE(NewCoreBase):
sdot = None
value = None
coefficients = None
reactions = None
independent = None
ode_terms = None
formula = ''
formula_alt = ''
code_string = 'self.value='
code_string_alt = 'sdot='
def __init__(self, species, independent=True):
self.sdot = species
self.name = 'ODE_' + species.name
self.reactions = []
self.coefficients = []
self.ode_terms = []
self.independent = independent
def __call__(self):
exec(self.code_string)
return self.value
def addReaction(self, reaction, coefficient):
self.reactions.append(reaction)
self.coefficients.append(coefficient)
if coefficient > 0.0:
if coefficient == 1.0:
term = '+self.%s() ' % (reaction.name)
aterm = '+(%s) ' % (reaction.code_string.replace('self.rate=', ''))
fterm = '+%s' % (reaction.name)
afterm = '+ (%s) ' % (reaction.formula)
else:
term = '+%g*self.%s() ' % (abs(coefficient), reaction.name)
aterm = '+%g*(%s) ' % (
abs(coefficient),
reaction.code_string.replace('self.rate=', ''),
)
fterm = '+%g*%s' % (abs(coefficient), reaction.name)
afterm = '+ %g*(%s) ' % (abs(coefficient), reaction.formula)
else:
if coefficient == -1.0:
term = '-self.%s() ' % (reaction.name)
aterm = '-(%s) ' % (reaction.code_string.replace('self.rate=', ''))
fterm = '-%s' % (reaction.name)
afterm = '- (%s) ' % (reaction.formula)
else:
term = '-%g*self.%s() ' % (abs(coefficient), reaction.name)
aterm = '-%g*(%s) ' % (
abs(coefficient),
reaction.code_string.replace('self.rate=', ''),
)
fterm = '-%g*%s' % (abs(coefficient), reaction.name)
afterm = '- %g*(%s) ' % (abs(coefficient), reaction.formula)
self.ode_terms.append(term)
self.code_string += term
self.code_string_alt += aterm
self.formula += fterm
self.formula_alt += afterm
self.__setattr__(reaction.name, reaction)
def hasReactions(self):
return MapList([r.name for r in self.reactions])
def getFormula(self):
return self.code_string
def getGlobalFormula(self):
return self.code_string_alt
class StructMatrix(NewCoreBase):
"""
    This class is specifically designed to store structural matrix information.
    Given an array and row/col index permutations it can generate its own
    row/col labels from a supplied label source.
"""
array = None
ridx = None
cidx = None
row = None
col = None
def __init__(self, array, ridx, cidx, row=None, col=None):
"""
Instantiate with array and matching row/col index arrays, optional label arrays
"""
self.array = array
self.ridx = ridx
self.cidx = cidx
self.row = row
self.col = col
self.shape = array.shape
def __call__(self):
return self.array
def getRowsByIdx(self, *args):
"""Return the rows referenced by index (1,3,5)"""
return self.array.take(args, axis=0)
def getColsByIdx(self, *args):
"""Return the columns referenced by index (1,3,5)"""
return self.array.take(args, axis=1)
def setRow(self, src):
"""
        Assuming that the row index array is a permutation (full/subset)
        of a source label array, supplying that source to setRow maps
        the row labels to ridx and creates self.row (the row label list).
"""
self.row = [src[r] for r in self.ridx]
def setCol(self, src):
"""
        Assuming that the col index array is a permutation (full/subset)
        of a source label array, supplying that source to setCol maps
        the column labels to cidx and creates self.col (the column label list).
"""
self.col = [src[c] for c in self.cidx]
def getRowsByName(self, *args):
"""Return the rows referenced by label ('s','x','d')"""
assert self.row != None, "\nI need row labels"
try:
return self.array.take([self.row.index(l) for l in args], axis=0)
except Exception as ex:
print(ex)
print("\nValid row labels are: %s" % self.row)
return None
def getColsByName(self, *args):
"""Return the columns referenced by label ('s','x','d')"""
assert self.col != None, "\nI need column labels"
try:
return self.array.take([self.col.index(l) for l in args], axis=1)
except Exception as ex:
print(ex)
print("Valid column labels are: %s" % self.col)
return None
def getLabels(self, axis='all'):
"""Return the matrix labels ([rows],[cols]) where axis='row'/'col'/'all'"""
if axis == 'row':
return self.row
elif axis == 'col':
return self.col
else:
return self.row, self.col
def getIndexes(self, axis='all'):
"""Return the matrix indexes ([rows],[cols]) where axis='row'/'col'/'all'"""
if axis == 'row':
return self.ridx
elif axis == 'col':
return self.cidx
else:
return self.ridx, self.cidx
def getByIdx(self, row, col):
assert row in self.ridx, '\n%s is an invalid index' % row
assert col in self.cidx, '\n%s is an invalid index' % col
return self.array[row, col]
def getByName(self, row, col):
assert row in self.row, '\n%s is an invalid name' % row
assert col in self.col, '\n%s is an invalid name' % col
return self.array[self.row.index(row), self.col.index(col)]
def setByIdx(self, row, col, val):
assert row in self.ridx, '\n%s is an invalid index' % row
assert col in self.cidx, '\n%s is an invalid index' % col
self.array[row, col] = val
def setByName(self, row, col, val):
assert row in self.row, '\n%s is an invalid name' % row
assert col in self.col, '\n%s is an invalid name' % col
self.array[self.row.index(row), self.col.index(col)] = val
def shape(self):
return self.array.shape
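def _struct_matrix_example():
    # Illustrative sketch only (not part of the original module): label a small
    # stoichiometry-style array and read an element back by name.
    arr = numpy.array([[1.0, -1.0], [0.0, 2.0]])
    sm = StructMatrix(arr, [0, 1], [0, 1])
    sm.setRow(['S1', 'S2'])
    sm.setCol(['R1', 'R2'])
    return sm.getByName('S2', 'R2')  # 2.0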
class EventAssignment(NumberBase):
variable = None
_names = None
formula = None
code_string = None
xcode = None
def __call__(self):
self.variable.value = self.value
if self.__DEBUG__:
print('\tAssigning %s = %s' % (self.variable.name, self.value))
return True
def __init__(self, name='None'):
self.setName(name)
def setVariable(self, var):
self.variable = var
def setFormula(self, formula):
self.formula = formula
InfixParser.setNameStr('self.', '()')
## InfixParser.SymbolReplacements = {'_TIME_':'_TIME_()'}
InfixParser.parse(formula)
self._names = InfixParser.names
self.code_string = 'self.value=%s' % InfixParser.output
self.xcode = compile(self.code_string, '<string>', 'exec')
        if self.__DEBUG__:
            print('\t', self.name, self.code_string)
def evaluateAssignment(self):
exec(self.xcode)
class Event(NewCoreBase):
trigger = None
delay = 0.0
formula = None
code_string = None
xcode = None
state0 = False
state = False
assignments = None
_TIME_ = None
_ASS_TIME_ = 0.0
_need_action = False
_names = None
_time_symbol = None
def __init__(self, name):
self.setName(name)
self.assignments = []
def __call__(self, time):
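        # Trigger semantics: the compiled trigger (self.xcode) sets self.state. On a
        # False -> True transition the assignment values are evaluated immediately and the
        # event is armed; the stored assignments are applied once the current time reaches
        # trigger time + delay.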
self._TIME_.set(time)
exec(self.xcode)
if self.state0 and not self.state:
self.state0 = self.state
if not self.state0 and self.state:
for ass in self.assignments:
ass.evaluateAssignment()
self.state0 = self.state
self._need_action = True
self._ASS_TIME_ = self._TIME_() + self.delay
if self.__DEBUG__:
print('event %s is evaluating at %s' % (self.name, time))
if self._need_action and self._TIME_() >= self._ASS_TIME_:
for ass in self.assignments:
ass()
if self.__DEBUG__:
print(
'event %s is assigning at %s (delay=%s)'
% (self.name, time, self.delay)
)
self._need_action = False
def setTrigger(self, formula, delay=0.0):
self.formula = formula
self.delay = delay
InfixParser.setNameStr('self.', '()')
## print self._time_symbol
if self._time_symbol != None:
InfixParser.SymbolReplacements = {self._time_symbol: '_TIME_'}
## self.formula = formula.replace(self._time_symbol, '_TIME_')
InfixParser.parse(formula)
self._names = InfixParser.names
self.code_string = 'self.state=%s' % InfixParser.output
if self._time_symbol != None:
InfixParser.setNameStr('', '')
InfixParser.SymbolReplacements = {self._time_symbol: '_TIME_'}
InfixParser.parse(formula)
self.formula = InfixParser.output
self.xcode = compile(self.code_string, '<string>', 'exec')
        if self.__DEBUG__:
            print(self.name, self.code_string)
def setTriggerAttributes(self, core):
# TODO: experimental
for n in self._names:
self.__setattr__(n, core.__getattribute__(n))
def setAssignment(self, var, formula):
ass = EventAssignment(var.name)
ass.setVariable(var)
ass.setFormula(formula)
self.assignments.append(ass)
self.__setattr__('_' + var.name, ass)
class PieceWise(NewCoreBase):
"""
Generic piecewise class written by me!
    - *pwd* a dictionary of piecewise information generated by the InfixParser
"""
name = None
value = None
formula = None
code_string = None
xcode = None
_names = None
_TIME_ = None
def __init__(self, pwd):
pwd = pwd.copy()
if pwd['other'] != None:
other = 'self.value = %s' % pwd.pop('other')
else:
other = 'pass'
pwd.pop('other')
InfixParser.setNameStr('self.', '')
InfixParser.SymbolReplacements = {'_TIME_': '_TIME_()'}
self._names = []
if len(list(pwd.keys())) == 1:
formula = pwd[0][0]
InfixParser.parse(formula)
for n in InfixParser.names:
if n not in self._names and n != '_TIME_()':
self._names.append(n)
formula = InfixParser.output
self.code_string = 'if %s:\n self.value = %s\nelse:\n %s' % (
formula,
pwd[0][1],
other,
)
self.formula = self.code_string.replace('self.', '')
else:
formula = pwd[0][0]
InfixParser.parse(formula)
for n in InfixParser.names:
if n not in self._names and n != '_TIME_()':
self._names.append(n)
formula = InfixParser.output
self.code_string = 'if %s:\n self.value = %s\n' % (formula, pwd[0][1])
pwd.pop(0)
for p in pwd:
formula = pwd[p][0]
InfixParser.SymbolReplacements = {'_TIME_': '_TIME_()'}
InfixParser.parse(formula)
for n in InfixParser.names:
if n not in self._names and n != '_TIME_()':
self._names.append(n)
formula = InfixParser.output
self.code_string += 'elif %s:\n self.value = %s\n' % (
formula,
pwd[p][1],
)
self.code_string += 'else:\n %s' % other
self.formula = self.code_string.replace('self.', '')
self.xcode = compile(self.code_string, 'PieceWise', 'exec')
def __call__(self):
exec(self.xcode)
return self.value
class Time(object):
value = None
name = '__TIME__'
def __init__(self, t=0):
self.value = t
def __call__(self):
return self.value
def set(self, t):
self.value = t
## def delay(*args):
## print 'delay() ignored'
## return 1.0
class NewCore(NewCoreBase):
__nDict__ = None
reactions = None
species = None
species_variable = None
__model__ = None
__InitDict__ = None
__not_inited__ = None
global_parameters = None
__parameter_store__ = None
forcing_functions = None
__rules__ = None
__events__ = None
# new
__compartments__ = None
compartments = None
rate_rules = None
description = "Pysces Core2"
__uDict__ = None
stoichiometric_matrix = None
struct = None
ODEs = None
functions = None
_TIME_ = None
events = None
__sDict__ = None
__KeyWords__ = None
__piecewises__ = None
piecewise_functions = None
netStoich = None
def __init__(self, model, iValues=True, netStoich=True):
# setup core dictionaries
self.__nDict__ = model.__nDict__
self.__sDict__ = model.__sDict__
self.__KeyWords__ = model.__KeyWords__
if self.__KeyWords__['Modelname'] != None:
self.setName(self.__KeyWords__['Modelname'])
else:
self.setName('PySCeSModel')
if self.__KeyWords__['Description'] != None:
self.setDescription(self.__KeyWords__['Description'])
else:
self.setDescription('PySCeSModel')
self.__model__ = model
self.__InitDict__ = model.__InitDict__
if not iValues:
if self.__DEBUG__:
print(self.__InitDict__)
for k in list(self.__InitDict__.keys()):
self.__InitDict__[k] = getattr(self.__model__, k)
for c in model.__compartments__:
model.__compartments__[c]['size'] = getattr(self.__model__, c)
self.netStoich = netStoich
self.global_parameters = []
self.__parameter_store__ = []
self.__not_inited__ = []
self.forcing_functions = []
self.__rules__ = model.__rules__
self.__uDict__ = model.__uDict__
self.__piecewises__ = model.__piecewises__
InfixParser.__pwcntr__ = 0
# start building objects
self.__compartments__ = model.__compartments__
self.addCompartments()
self._TIME_ = Time()
self.addPieceWiseFunctions() # this adds any piecewise functions
self.addSpecies()
# the order is important from here as eg functions can occur in rate equations
try:
self.__functions__ = model.__functions__
except:
self.__functions__ = {}
if self.__DEBUG__:
print('No functions')
self.functions = []
self.addFunctions()
self.addReactions()
self.generateMappings()
self.setAssignmentRules()
self.setRateRules()
# add event support
self.__events__ = self.__model__.__eDict__
self.events = []
self.addEvents()
self.addPieceWiseFunctions(update=True) # this updates their attributes
## # get rid of _TIME_ in not intited
## if '_TIME_' in self.__not_inited__:
## self.__not_inited__.pop(self.__not_inited__.index('_TIME_'))
if len(self.__not_inited__) > 0:
print(
"\nWARNING: Uninitialised parameters: %s (value set to zero)"
% self.__not_inited__
)
def __cleanString__(self, s):
s = s.lstrip()
s = s.rstrip()
return s
## def parseForcingFunctions(self):
## self.__rules__ = {}
## try:
## ff = self.__function_forced_str__.split('\n')
## for f in ff:
## if f != '':
## f = f.split('=')
## f[0] = f[0].replace('self.','')
## f[1] = f[1].replace('self.','')
## self.__rules__.setdefault(self.__cleanString__(f[0]), self.__cleanString__(f[1]))
## except Exception, ex:
## pass
# print 'No forcing functions (%s).' % ex
def setDescription(self, txt):
self.description = str(txt)
def getDescription(self):
return str(self.description)
def setGlobalUnits(self, **kwargs):
for un in list(kwargs.keys()):
self.__uDict__[un] = (kwargs[un][0], kwargs[un][1])
if self.__DEBUG__:
print(
"Modified \"%s\" to be %i*%s*10**%i"
% (un, kwargs[un][0], un, kwargs[un][1])
)
def getGlobalUnits(self):
return self.__uDict__
def addPieceWiseFunctions(self, update=False):
if not update:
self.piecewise_functions = []
for pw in list(self.__piecewises__.keys()):
if self.__DEBUG__:
print('Info: adding piecewise function:%s' % pw)
P = PieceWise(self.__piecewises__[pw])
P.setName(pw)
P.__setattr__('_TIME_', self.__getattribute__('_TIME_'))
self.piecewise_functions.append(P)
self.__setattr__(pw, P)
else:
for pw in self.piecewise_functions:
for a in pw._names:
pw.__setattr__(a, self.__getattribute__(a))
def addOneCompartment(self, name, size, dimensions, compartment=None, area=None):
C = Compartment(name, compartment)
C.setSize(size, dimensions)
## C.setArea(area)
self.compartments.append(C)
self.__setattr__(name, C)
def addCompartments(self):
self.compartments = []
for C in self.__compartments__:
c2 = self.__compartments__[C]
if self.__DEBUG__:
print('Adding compartment %s' % c2['name'])
self.addOneCompartment(
c2['name'],
c2['size'],
c2['dimensions'],
compartment=c2['compartment'],
area=None,
)
def addOneSpecies(
self, species, value, fix=False, comp=None, amount=False, fullName=None
):
s = Species(species, value)
## if comp != None:
s.setCompartment(comp)
s.setAmount(amount)
s.setAnnotation('sbml_name', fullName)
if fix:
s.fixed = True
self.__setattr__(species, s)
self.species.append(s)
if not fix:
self.species_variable.append(s)
if comp != None:
comp.addSpecies(s)
def addSpecies(self):
self.species = []
self.species_variable = []
for s in self.__sDict__:
## print s
## print self.__sDict__[s]
name = self.__sDict__[s]['name']
if s in self.__InitDict__:
val = self.__InitDict__[s]
else:
val = 0.0
## print val
fix = self.__sDict__[s]['fixed']
if self.__sDict__[s]['compartment'] != None:
comp = self.__getattribute__(self.__sDict__[s]['compartment'])
else:
comp = None
amount = self.__sDict__[s]['isamount']
fullName = None
if 'fullName' in self.__sDict__[s]:
fullName = self.__sDict__[s]['fullName']
self.addOneSpecies(
name, val, fix=fix, comp=comp, amount=amount, fullName=fullName
)
def addOneFunction(self, name, args, formula):
func = Function(name)
# TODO: make better
setattr(func, '_TIME_', self._TIME_)
for a in args:
func.setArg(a)
func.addFormula(formula)
self.functions.append(func)
self.__setattr__(name, func)
def addFunctions(self):
for f in list(self.__functions__.keys()):
self.addOneFunction(
f, self.__functions__[f]['args'], self.__functions__[f]['formula']
)
def addOneReaction(self, rDict):
r = Reaction(rDict['name'])
if rDict['compartment'] != None:
C = self.__getattribute__(rDict['compartment'])
r.setCompartment(C)
C.addReaction(r)
fullName = None
if 'fullName' in rDict:
r.setAnnotation('sbml_name', rDict['fullName'])
# add dummy reaction kinetic law
# TODO: make better
setattr(r, '_TIME_', self._TIME_)
if rDict['RateEq'] != None:
r.addFormula(rDict['RateEq'].replace('self.', ''))
else:
r.addFormula('J')
self.addParameter('J')
if rDict['Type'] == 'Irrev':
r.reversible = False
        # now we can add the functions that occurred in the rate equation
if len(r._functions) > 0:
for func in r._functions:
try:
r.addFunction(self.__getattribute__(func))
except Exception as ex:
print(ex)
                    print('\nHave you added the function objects yet (addFunctions())?')
# fxnames = self.hasFixedSpecies()
processed_parameter = []
# where parameters are defined `locally' per reaction
for p in rDict['Params']:
p = p.replace('self.', '')
if p not in self.hasGlobalParameters() and not (
p in self.hasFixedSpecies() or p in self.__compartments__
):
if self.__DEBUG__:
print("Adding parameter %s from networkdict" % p)
self.addParameter(p)
par = self.__getattribute__(p)
par.setAssociation(r)
r.addParameter(par)
processed_parameter.append(p)
elif not (p in self.hasFixedSpecies() or p in self.__compartments__):
if self.__DEBUG__:
print("Updating parameter %s from networkdict" % p)
pidx = self.hasGlobalParameters().index(p)
self.global_parameters[pidx].setAssociation(r)
r.addParameter(self.global_parameters[pidx])
processed_parameter.append(p)
# print self.hasGlobalParameters()
# where parameters are not `locally' defined and are extracted from Req (ie from SBML)
for p in r._names:
p = p.replace('self.', '')
if p == '_TIME_':
pass
elif p in [pw.name for pw in self.piecewise_functions]:
pass
elif p in self.hasCompartments() and p not in processed_parameter:
C = self.__getattribute__(p)
C.addReaction(r)
# TODO: this will work until isParameterOf is called on a compartment object
r.addParameter(C)
# dirty alternative
# setattr(r, C.name, C)
processed_parameter.append(p)
elif (
p not in processed_parameter
and p not in self.hasGlobalParameters()
and p not in self.hasSpecies()
):
if self.__DEBUG__:
print("Adding parameter %s from global" % p)
self.addParameter(p)
par = self.__getattribute__(p)
par.setAssociation(r)
r.addParameter(par)
processed_parameter.append(p)
elif p not in processed_parameter and p not in self.hasSpecies():
if self.__DEBUG__:
print("Updating parameter %s from global" % p)
pidx = self.hasGlobalParameters().index(p)
self.global_parameters[pidx].setAssociation(r)
r.addParameter(self.global_parameters[pidx])
processed_parameter.append(p)
self.__setattr__(rDict['name'], r)
self.reactions.append(r)
def addParameter(self, name):
if name not in self.__piecewises__:
if name in self.__InitDict__:
par = Parameter(name, self.__InitDict__[name])
else:
par = Parameter(name, 0.0)
if name not in self.__not_inited__:
self.__not_inited__.append(name)
self.global_parameters.append(par)
self.__setattr__(name, par)
def addReactions(self):
self.reactions = []
for r in self.__model__.reactions:
self.addOneReaction(self.__nDict__[r])
non_parameters = (
self.hasGlobalParameters() + self.hasSpecies() + self.hasFixedSpecies()
)
for k in list(self.__InitDict__.keys()):
if k not in non_parameters:
if self.__DEBUG__:
print('Adding new parameter:', k)
self.addParameter(k)
def replaceParameterWithRule(self, ar):
par = self.__getattribute__(ar.name)
for r in par.association:
ar.setAssociation(r)
setattr(r, ar.name, ar)
r.parameters[r.hasParameters().index(ar.name)] = ar
self.global_parameters[self.hasGlobalParameters().index(ar.name)] = ar
self.__setattr__(ar.name, ar)
def replaceFixedSpeciesWithRule(self, ar):
fs = self.__getattribute__(ar.name)
ar.fixed = fs.fixed
for r in fs.subs:
ar.setSubstrate(r)
setattr(r, ar.name, ar)
r.substrates[r.hasSubstrates().index(ar.name)] = ar
for r in fs.prods:
ar.setProduct(r)
setattr(r, ar.name, ar)
r.products[r.hasProducts().index(ar.name)] = ar
for r in fs.mods:
ar.setModifier(r)
setattr(r, ar.name, ar)
r.modifiers[r.hasModifiers().index(ar.name)] = ar
self.species[self.hasSpecies().index(ar.name)] = ar
self.__setattr__(ar.name, ar)
def replaceSpeciesWithRule(self, ar):
fs = self.__getattribute__(ar.name)
for r in fs.subs:
ar.setSubstrate(r)
setattr(r, ar.name, ar)
r.substrates[r.hasSubstrates().index(ar.name)] = ar
for r in fs.prods:
ar.setProduct(r)
setattr(r, ar.name, ar)
r.products[r.hasProducts().index(ar.name)] = ar
for r in fs.mods:
ar.setModifier(r)
setattr(r, ar.name, ar)
r.modifiers[r.hasModifiers().index(ar.name)] = ar
self.species[self.hasSpecies().index(ar.name)] = ar
self.species_variable[self.hasVariableSpecies().index(ar.name)] = ar
self.__setattr__(ar.name, ar)
def setAssignmentRules(self):
aps = [
self.__rules__[ar]['name']
for ar in self.__rules__
if self.__rules__[ar]['type'] == 'assignment'
]
## for p in self.global_parameters + [self.get(fs) for fs in self.hasFixedSpecies()]:
for p in self.global_parameters + self.species:
# print p.name
if p.name in aps:
if self.__DEBUG__:
print(
'Assigning: %s = %s'
% (p.name, self.__rules__[p.name]['formula'])
)
p2 = None
# TODO: make better
if p.name in self.hasGlobalParameters():
p2 = AssignmentRule(p.name, self.__InitDict__[p.name])
setattr(p2, '_TIME_', self._TIME_)
self.replaceParameterWithRule(p2)
elif p.name in self.hasFixedSpecies():
p2 = SpeciesAssignmentRule(p.name, self.__InitDict__[p.name])
p2.setCompartment(p.getCompartment())
setattr(p2, '_TIME_', self._TIME_)
self.replaceFixedSpeciesWithRule(p2)
elif p.name in self.hasVariableSpecies():
p2 = SpeciesAssignmentRule(p.name, self.__InitDict__[p.name])
p2.setCompartment(p.getCompartment())
setattr(p2, '_TIME_', self._TIME_)
self.replaceSpeciesWithRule(p2)
assert isinstance(p2, AssignmentRule) or isinstance(
p2, SpeciesAssignmentRule
), "\nHappy assertion error"
# print type(p2)
p2.addFormula(self.__rules__[p.name]['formula'])
## print p2._names
for n in p2._names + p2._functions:
p2.addModelAttr(self.__getattribute__(n))
## # setup initial values
## p2.value_initial = self.p2()
if p2.name in self.__not_inited__:
self.__not_inited__.pop(self.__not_inited__.index(p.name))
for p in self.global_parameters:
if p.name in self.hasAssignmentRules():
# TODO assignment rules need a list of properties
for ar in p._names:
if ar in self.hasAssignmentRules():
setattr(p, ar, self.__getattribute__(ar))
# TODO this is where things will go wrong if fs --> ar contains nested ar's
def setRateRules(self):
# TODO maybe split into two methods; for now read from self.__rules__
# TODO add functions to rules
ars = [
self.__rules__[ar]['name']
for ar in self.__rules__
if self.__rules__[ar]['type'] == 'rate'
]
self.rate_rules = []
for rr in ars:
rrobj = RateRule(self.__rules__[rr]['name'], self.__rules__[rr]['formula'])
## print 'RR:', rrobj.name, rrobj._names, rrobj._functions
for symb in rrobj._names + rrobj._functions:
rrobj.addModelAttr(self.__getattribute__(symb))
self.rate_rules.append(rrobj)
# TODO investigate this as it is problematic: the rate rule
# is not a model property as such, more an ODE property
## self.__setattr__(rrobj.name, rrobj)
if self.__DEBUG__:
print(
'Adding RateRule %s with formula: %s' % (rrobj.name, rrobj.formula)
)
def addOneEvent(self, e):
"""Add a single event using an event dictionary """
# translate self.__events__[e] to e
ev = Event(e['name'])
ev._time_symbol = e['tsymb']
ev.setTrigger(e['trigger'], e['delay'])
# associate model attributes with event
# TODO: check that this still works
ev.setTriggerAttributes(self)
## for n in ev._names:
## setattr(ev, n, self.__getattribute__(n))
# for each assignment
for ass in e['assignments']:
ev.setAssignment(self.__getattribute__(ass), e['assignments'][ass])
assref = getattr(ev, '_' + ass) # don't like this at all :-(
# associate model attributes with assignment
for n in assref._names:
setattr(assref, n, self.__getattribute__(n))
self.events.append(ev)
self.__setattr__(ev.name, ev)
setattr(ev, '_TIME_', self._TIME_)
def addEvents(self):
# TODO: check that you can change the trigger on the fly (might need a setAttr thing in event obj)
self.events = []
# for each event
for e in self.__events__:
self.addOneEvent(self.__events__[e])
def generateMappings(self):
## self.netStoich = False
for reac in self.reactions:
if self.netStoich:
for reag in self.__nDict__[reac.name]['Reagents']:
if self.__nDict__[reac.name]['Reagents'][reag] < 0.0:
reac.addSubstrate(
self.__getattribute__(reag.replace('self.', ''))
)
self.__getattribute__(reag.replace('self.', '')).setSubstrate(
self.__getattribute__(reac.name)
)
else:
reac.addProduct(
self.__getattribute__(reag.replace('self.', ''))
)
self.__getattribute__(reag.replace('self.', '')).setProduct(
self.__getattribute__(reac.name)
)
reac.stoichiometry.setdefault(
reag.replace('self.', ''),
self.__nDict__[reac.name]['Reagents'][reag],
)
else:
for reag in self.__nDict__[reac.name]['AllReagents']:
if reag[1] < 0.0:
reac.addSubstrate(
self.__getattribute__(reag[0].replace('self.', ''))
)
self.__getattribute__(
reag[0].replace('self.', '')
).setSubstrate(self.__getattribute__(reac.name))
else:
reac.addProduct(
self.__getattribute__(reag[0].replace('self.', ''))
)
self.__getattribute__(reag[0].replace('self.', '')).setProduct(
self.__getattribute__(reac.name)
)
reac.multistoich.append((reag[0].replace('self.', ''), reag[1]))
if reag[0].replace('self.', '') in reac.stoichiometry:
reac.multistoich_enabled = True
reac.stoichiometry.setdefault(reag[0].replace('self.', ''), reag[1])
for mod in self.__nDict__[reac.name]['Modifiers']:
reac.addModifier(self.__getattribute__(mod.replace('self.', '')))
self.__getattribute__(mod.replace('self.', '')).setModifier(
self.__getattribute__(reac.name)
)
## print 'I AM LEGEND'
## print reac.stoichiometry
## print reac.multistoich
## print 'reac.multistoich_enabled', reac.multistoich_enabled
## print self.__nDict__[reac.name]['Reagents']
## print self.__nDict__[reac.name]['AllReagents']
def setStoichiometricMatrix(self):
vspec = self.hasVariableSpecies()
react = self.hasReactions()
nm = numpy.zeros((len(vspec), len(react)), 'd')
for sp in vspec:
for r in self.get(sp).isReagentOf():
nm[vspec.index(sp)][react.index(r)] = self.get(r).stoichiometry[sp]
# this is if absolute stoichiometry value is used
## for r in self.get(sp).isSubstrateOf():
## nm[vspec.index(sp)][react.index(r)] = abs(self.get(r).stoichiometry[sp])
## for r in self.get(sp).isProductOf():
## nm[vspec.index(sp)][react.index(r)] = -abs(self.get(r).stoichiometry[sp])
self.stoichiometric_matrix = StructMatrix(
nm, list(range(len(vspec))), list(range(len(react)))
)
self.stoichiometric_matrix.setRow(vspec)
self.stoichiometric_matrix.setCol(react)
def addODEs(self):
self.ODEs = []
for varspec in self.stoichiometric_matrix.row:
if self.struct != None:
if varspec not in self.struct.Nr.row:
if self.__DEBUG__:
print('Creating dependent ODE_%s' % varspec)
ode = ODE(self.get(varspec), independent=False)
else:
if self.__DEBUG__:
print('Creating independent ODE_%s' % varspec)
ode = ODE(self.get(varspec), independent=True)
else:
if self.__DEBUG__:
print(
'Creating independent* ODE_%s (*assumed - no structural information available)'
% varspec
)
ode = ODE(self.get(varspec), independent=True)
mrow = self.stoichiometric_matrix.getRowsByName(varspec)
for e in range(len(mrow[0])):
if mrow[0, e] != 0.0:
print(
'Adding term: %s*%s'
% (mrow[0, e], self.stoichiometric_matrix.col[e])
)
ode.addReaction(
self.get(self.stoichiometric_matrix.col[e]), mrow[0, e]
)
self.__setattr__(ode.name, ode)
self.ODEs.append(ode)
self.__setattr__(
'xcode_' + ode.name, compile(ode.getGlobalFormula(), '<string>', 'exec')
)
def hasODEs(self):
return MapList([o.name for o in self.ODEs])
def evalODEs(self, odes):
return [v() for v in odes]
def evalXcode(self, ode):
exec(self.__getattribute__('xcode_' + ode.name))
return sdot
def hasFunctions(self):
return MapList([f.name for f in self.functions])
def hasReactions(self):
return MapList([r.name for r in self.reactions])
def hasSpecies(self):
return MapList([s.name for s in self.species])
def hasFixedSpecies(self):
return MapList([s.name for s in self.species if s.fixed])
def hasVariableSpecies(self):
return MapList([s.name for s in self.species if not s.fixed])
def findReactionsThatIncludeAllSpecifiedReagents(self, *args):
assert len(args) > 1, '\nNeed two or more species for this one!'
setlist = [self.__getattribute__(s).isReagentOf().asSet() for s in args]
isect = setlist[0]
for s in setlist:
isect.intersection_update(s)
return MapList(isect)
def hasGlobalParameters(self):
return MapList(p.name for p in self.global_parameters)
def hasAssignmentRules(self):
return MapList(
[
ar.name
for ar in self.global_parameters + self.species
if getattr(ar, 'type', None) == 'assignment'
]
)
def hasRateRules(self):
return MapList(
[
ar.name
for ar in self.global_parameters + self.species
if getattr(ar, 'type', None) == 'rate'
]
)
def hasEvents(self):
return MapList(e.name for e in self.events)
def hasCompartments(self):
return MapList(c.name for c in self.compartments)
## if __psyco_active__:
## psyco.bind(NewCoreBase)
## psyco.bind(NumberBase)
## psyco.bind(Species)
## psyco.bind(Parameter)
## psyco.bind(AssignmentRule)
## psyco.bind(Reaction)
## psyco.bind(ODE)
## psyco.bind(NewCore)
|
|
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
"""
Set of Classes for executing unix commands.
"""
import os
import platform
import socket
import sys
import signal
import time
import psi.process
from gppylib.gplog import *
from gppylib.commands.base import *
logger = gplog.get_default_logger()
#---------------platforms--------------------
#global variable for our platform
SYSTEM="unknown"
SUNOS="sunos"
LINUX="linux"
DARWIN="darwin"
FREEBSD="freebsd"
platform_list = [SUNOS,LINUX,DARWIN,FREEBSD]
curr_platform = platform.uname()[0].lower()
GPHOME=os.environ.get('GPHOME', None)
#---------------command path--------------------
CMDPATH = ['/usr/kerberos/bin', '/usr/sfw/bin', '/opt/sfw/bin', '/bin', '/usr/local/bin',
'/usr/bin', '/sbin', '/usr/sbin', '/usr/ucb', '/sw/bin', '/opt/Navisphere/bin']
if GPHOME:
CMDPATH.append(GPHOME)
CMD_CACHE = {}
#----------------------------------
class CommandNotFoundException(Exception):
def __init__(self,cmd,paths):
self.cmd=cmd
self.paths=paths
def __str__(self):
return "Could not locate command: '%s' in this set of paths: %s" % (self.cmd,repr(self.paths))
def findCmdInPath(cmd, additionalPaths=[], printError=True):
global CMD_CACHE
if cmd not in CMD_CACHE:
# Search additional paths and don't add to cache.
for p in additionalPaths:
f = os.path.join(p, cmd)
if os.path.exists(f):
return f
for p in CMDPATH:
f = os.path.join(p, cmd)
if os.path.exists(f):
CMD_CACHE[cmd] = f
return f
if printError:
logger.critical('Command %s not found' % cmd)
search_path = CMDPATH[:]
search_path.extend(additionalPaths)
raise CommandNotFoundException(cmd,search_path)
else:
return CMD_CACHE[cmd]
#For now we'll leave some generic functions outside of the Platform framework
def getLocalHostname():
return socket.gethostname().split('.')[0]
def getUserName():
return os.environ.get('LOGNAME') or os.environ.get('USER')
def check_pid(pid):
""" Check For the existence of a unix pid. """
if pid == 0:
return False
try:
os.kill(int(pid), signal.SIG_DFL)
except OSError:
return False
else:
return True
def logandkill(pid, sig):
msgs = {
signal.SIGCONT: "Sending SIGCONT to %d",
signal.SIGTERM: "Sending SIGTERM to %d (smart shutdown)",
signal.SIGINT: "Sending SIGINT to %d (fast shutdown)",
signal.SIGQUIT: "Sending SIGQUIT to %d (immediate shutdown)",
signal.SIGABRT: "Sending SIGABRT to %d"
}
logger.info(msgs[sig] % pid)
os.kill(pid, sig)
def kill_sequence(pid):
if not check_pid(pid): return
# first send SIGCONT in case the process is stopped
logandkill(pid, signal.SIGCONT)
# next try SIGTERM (smart shutdown)
logandkill(pid, signal.SIGTERM)
# give process a few seconds to exit
for i in range(0,3):
time.sleep(1)
if not check_pid(pid):
return
# next try SIGINT (fast shutdown)
logandkill(pid, signal.SIGINT)
# give process a few more seconds to exit
for i in range(0,3):
time.sleep(1)
if not check_pid(pid):
return
# next try SIGQUIT (immediate shutdown)
logandkill(pid, signal.SIGQUIT)
# give process a final few seconds to exit
for i in range(0,5):
time.sleep(1)
if not check_pid(pid):
return
# all else failed - try SIGABRT
logandkill(pid, signal.SIGABRT)
#---------------Platform Framework--------------------
""" The following platform framework is used to handle any differences between
the platform's we support. The GenericPlatform class is the base class
that a supported platform extends from and overrides any of the methods
as necessary.
TODO: should the platform stuff be broken out to separate module?
"""
class GenericPlatform():
def getName(self):
"unsupported"
def getDefaultLocale(self):
return 'en_US.utf-8'
def get_machine_arch_cmd(self):
return 'uname -i'
def getPingOnceCmd(self):
pass
def getDiskFreeCmd(self):
return findCmdInPath('df') + " -k"
def getTarCmd(self):
return findCmdInPath('tar')
def getCpCmd(self):
return findCmdInPath('cp')
def getSadcCmd(self, interval, outfilename):
return None
def getIfconfigCmd(self):
return findCmdInPath('ifconfig')
def getMountDevFirst(self):
return True
class LinuxPlatform(GenericPlatform):
def __init__(self):
pass
def getName(self):
return "linux"
def getDefaultLocale(self):
return 'en_US.utf8'
def getDiskFreeCmd(self):
# -P is for POSIX formatting. Prevents error
# on lines that would wrap
return findCmdInPath('df') + " -Pk"
def getSadcCmd(self, interval, outfilename):
cmd = "/usr/lib64/sa/sadc -F -d " + str(interval) + " " + outfilename
return cmd
def getMountDevFirst(self):
return True
def getPing6(self):
return findCmdInPath('ping6')
class SolarisPlatform(GenericPlatform):
def __init__(self):
pass
def getName(self):
return "sunos"
def getDefaultLocale(self):
return 'en_US.UTF-8'
def getDiskFreeCmd(self):
return findCmdInPath('df') + " -bk"
def getTarCmd(self):
return findCmdInPath('gtar')
def getSadcCmd(self, interval, outfilename):
cmd = "/usr/lib/sa/sadc " + str(interval) + " 100000 " + outfilename
return cmd
def getIfconfigCmd(self):
return findCmdInPath('ifconfig') + ' -a inet'
def getMountDevFirst(self):
return False
class DarwinPlatform(GenericPlatform):
def __init__(self):
pass
def getName(self):
return "darwin"
def get_machine_arch_cmd(self):
return 'uname -m'
def getMountDevFirst(self):
return True
def getPing6(self):
return findCmdInPath('ping6')
class FreeBsdPlatform(GenericPlatform):
def __init__(self):
pass
def getName(self):
return "freebsd"
def get_machine_arch_cmd(self):
return 'uname -m'
def getMountDevFirst(self):
return True
""" if self.SYSTEM == 'sunos':
self.IFCONFIG_TXT='-a inet'
self.PS_TXT='ef'
self.LIB_TYPE='LD_LIBRARY_PATH'
self.ZCAT='gzcat'
self.PG_METHOD='trust'
self.NOLINE_ECHO='/usr/bin/echo'
self.MAIL='/bin/mailx'
self.PING_TIME='1'
self.DF=findCmdInPath('df')
self.DU_TXT='-s'
elif self.SYSTEM == 'linux':
self.IFCONFIG_TXT=''
self.PS_TXT='ax'
self.LIB_TYPE='LD_LIBRARY_PATH'
self.PG_METHOD='ident sameuser'
self.NOLINE_ECHO='%s -e' % self.ECHO
self.PING_TIME='-c 1'
self.DF='%s -P' % findCmdInPath('df')
self.DU_TXT='c'
elif self.SYSTEM == 'darwin':
self.IFCONFIG_TXT=''
self.PS_TXT='ax'
self.LIB_TYPE='DYLD_LIBRARY_PATH'
self.PG_METHOD='ident sameuser'
self.NOLINE_ECHO= self.ECHO
self.PING_TIME='-c 1'
self.DF='%s -P' % findCmdInPath('df')
self.DU_TXT='-c'
elif self.SYSTEM == 'freebsd':
self.IFCONFIG_TXT=''
self.PS_TXT='ax'
self.LIB_TYPE='LD_LIBRARY_PATH'
self.PG_METHOD='ident sameuser'
self.NOLINE_ECHO='%s -e' % self.ECHO
self.PING_TIME='-c 1'
self.DF='%s -P' % findCmdInPath('df')
self.DU_TXT='-c'
"""
#---------------ping--------------------
class Ping(Command):
def __init__(self,name,hostToPing,ctxt=LOCAL,remoteHost=None,obj=None):
self.hostToPing=hostToPing
self.obj=obj
pingToUse = findCmdInPath('ping')
if curr_platform == LINUX or curr_platform == DARWIN:
# Get the family of the address we need to ping. If it's AF_INET6
# we must use ping6 to ping it.
addrinfo = socket.getaddrinfo(hostToPing, None)
if addrinfo and addrinfo[0] and addrinfo[0][0] == socket.AF_INET6:
pingToUse = SYSTEM.getPing6()
cmdStr = "%s -c 1 %s" % (pingToUse,hostToPing)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def ping_list(host_list):
for host in host_list:
yield Ping("ping",host,ctxt=LOCAL,remoteHost=None)
@staticmethod
def local(name,hostToPing):
p=Ping(name,hostToPing)
p.run(validateAfter=True)
@staticmethod
def remote(name,hostToPing,hostToPingFrom):
p=Ping(name,hostToPing,ctxt=REMOTE,remoteHost=hostToPingFrom)
p.run(validateAfter=True)
#---------------du--------------------
class DiskUsage(Command):
def __init__(self,name,directory,ctxt=LOCAL,remoteHost=None):
self.directory=directory
cmdStr="ls -l -R %s | %s ^- | %s '{t+=\$5;} END{print t}'" % (directory,findCmdInPath('grep'), findCmdInPath('awk'))
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def get_size(name,remote_host,directory):
duCmd=DiskUsage(name,directory,ctxt=REMOTE,remoteHost=remote_host)
duCmd.run(validateAfter=True)
return duCmd.get_bytes_used()
def get_bytes_used(self):
rawIn=self.results.stdout.split('\t')[0].strip()
#TODO: revisit this idea of parsing '' and making it a 0. seems dangerous.
if rawIn == '':
return 0
if rawIn[0] == 'd':
raise ExecutionError("du command could not find directory: cmd: %s"
"resulted in stdout: '%s' stderr: '%s'" %
(self.cmdStr, self.results.stdout,
self.results.stderr),
self)
else:
dirBytes=int(rawIn)
return dirBytes
#-------------df----------------------
class DiskFree(Command):
def __init__(self,name,directory,ctxt=LOCAL,remoteHost=None):
self.directory=directory
cmdStr="%s %s" % (SYSTEM.getDiskFreeCmd(),directory)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def get_size(name,remote_host,directory):
dfCmd=DiskFree(name,directory,ctxt=REMOTE,remoteHost=remote_host)
dfCmd.run(validateAfter=True)
return dfCmd.get_bytes_free()
@staticmethod
def get_size_local(name, directory):
dfCmd=DiskFree(name,directory)
dfCmd.run(validateAfter=True)
return dfCmd.get_bytes_free()
@staticmethod
def get_disk_free_info_local(name, directory):
dfCmd = DiskFree(name,directory)
dfCmd.run(validateAfter=True)
return dfCmd.get_disk_free_output()
def get_disk_free_output(self):
'''expected output of the form:
Filesystem 512-blocks Used Available Capacity Mounted on
/dev/disk0s2 194699744 158681544 35506200 82% /
Returns data in list format:
['/dev/disk0s2', '194699744', '158681544', '35506200', '82%', '/']
'''
rawIn = self.results.stdout.split('\n')[1]
return rawIn.split();
def get_bytes_free(self):
disk_free = self.get_disk_free_output()
bytesFree = int(disk_free[3])*1024
return bytesFree
#-------------mkdir------------------
class MakeDirectory(Command):
def __init__(self,name,directory,ctxt=LOCAL,remoteHost=None):
self.directory=directory
cmdStr="%s -p %s" % (findCmdInPath('mkdir'),directory)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def local(name,directory):
mkdirCmd=MakeDirectory(name,directory)
mkdirCmd.run(validateAfter=True)
@staticmethod
def remote(name,remote_host,directory):
mkdirCmd=MakeDirectory(name,directory,ctxt=REMOTE,remoteHost=remote_host)
mkdirCmd.run(validateAfter=True)
#-------------mv------------------
class MoveDirectory(Command):
def __init__(self,name,srcDirectory,dstDirectory,ctxt=LOCAL,remoteHost=None):
self.srcDirectory=srcDirectory
self.dstDirectory=dstDirectory
cmdStr="%s -f %s %s" % (findCmdInPath('mv'),srcDirectory,dstDirectory)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
#-------------append------------------
class AppendTextToFile(Command):
def __init__(self,name,file,text,ctxt=LOCAL,remoteHost=None):
cmdStr="echo '%s' >> %s" % (text, file)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
#-------------inline perl replace------
class InlinePerlReplace(Command):
def __init__(self, name, fromStr, toStr, file, ctxt=LOCAL,remoteHost=None):
cmdStr="%s -pi.bak -e's/%s/%s/g' %s" % (findCmdInPath('perl'), fromStr, toStr, file)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
#-------------rmdir------------------
class RemoveDirectory(Command):
def __init__(self,name,directory,ctxt=LOCAL,remoteHost=None):
self.directory=directory
cmdStr="%s %s" % (findCmdInPath('rmdir'),directory)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def local(name,directory):
rmCmd=RemoveDirectory(name,directory)
rmCmd.run(validateAfter=True)
@staticmethod
def remote(name,remote_host,directory):
rmCmd=RemoveDirectory(name,directory,ctxt=REMOTE,remoteHost=remote_host)
rmCmd.run(validateAfter=True)
#-------------rm -rf ------------------
class RemoveFiles(Command):
def __init__(self,name,directory,ctxt=LOCAL,remoteHost=None):
self.directory=directory
cmdStr="%s -rf %s" % (findCmdInPath('rm'),directory)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def remote(name,remote_host,directory):
rmCmd=RemoveFiles(name,directory,ctxt=REMOTE,remoteHost=remote_host)
rmCmd.run(validateAfter=True)
@staticmethod
def local(name,directory):
rmCmd=RemoveFiles(name,directory)
rmCmd.run(validateAfter=True)
#-------------file and dir existence -------------
class PathIsDirectory(Command):
def __init__(self,name,directory,ctxt=LOCAL,remoteHost=None):
self.directory=directory
cmdStr="""python -c "import os; print os.path.isdir('%s')" """ % directory
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def remote(name,remote_host,directory):
cmd=PathIsDirectory(name,directory,ctxt=REMOTE,remoteHost=remote_host)
cmd.run(validateAfter=True)
return cmd.isDir()
def isDir(self):
return self.results.stdout.strip().upper()=='TRUE'
#--------------------------
class FileDirExists(Command):
def __init__(self,name,directory,ctxt=LOCAL,remoteHost=None):
self.directory=directory
cmdStr="""python -c "import os; print os.path.exists('%s')" """ % directory
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def remote(name,remote_host,directory):
cmd=FileDirExists(name,directory,ctxt=REMOTE,remoteHost=remote_host)
cmd.run(validateAfter=True)
return cmd.filedir_exists()
def filedir_exists(self):
return self.results.stdout.strip().upper()=='TRUE'
class CreateDirIfNecessary(Command):
def __init__(self,name,directory,ctxt=LOCAL,remoteHost=None):
self.directory=directory
cmdStr="""python -c "import sys, os, errno;
try:
os.mkdir('%s')
except OSError, ex:
if ex.errno != errno.EEXIST:
raise
" """ % (directory)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def remote(name,remote_host,directory):
cmd=CreateDirIfNecessary(name,directory,ctxt=REMOTE,remoteHost=remote_host)
cmd.run(validateAfter=True)
class DirectoryIsEmpty(Command):
def __init__(self,name,directory,ctxt=LOCAL,remoteHost=None):
self.directory=directory
cmdStr="""python -c "import os; print len(os.listdir('%s')) == 0" """ % self.directory
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def remote(name,remote_host,directory):
cmd=DirectoryIsEmpty(name,directory,ctxt=REMOTE,remoteHost=remote_host)
cmd.run(validateAfter=True)
return cmd.isEmpty()
def isEmpty(self):
return self.results.stdout.strip().upper()=='TRUE'
#-------------scp------------------
# MPP-13617
def canonicalize(addr):
if ':' not in addr: return addr
if '[' in addr: return addr
return '[' + addr + ']'
class RemoteCopy(Command):
def __init__(self,name,srcDirectory,dstHost,dstDirectory,ctxt=LOCAL,remoteHost=None):
self.srcDirectory=srcDirectory
self.dstHost=dstHost
self.dstDirectory=dstDirectory
cmdStr="%s -o 'StrictHostKeyChecking no' -r %s %s:%s" % (findCmdInPath('scp'),srcDirectory,canonicalize(dstHost),dstDirectory)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
class Scp(Command):
def __init__(self,name,srcFile, dstFile, srcHost=None, dstHost=None,recursive=False,ctxt=LOCAL,remoteHost=None):
cmdStr = findCmdInPath('scp') + " "
if recursive:
cmdStr = cmdStr + "-r "
if srcHost:
cmdStr = cmdStr + canonicalize(srcHost) + ":"
cmdStr = cmdStr + srcFile + " "
if dstHost:
cmdStr = cmdStr + canonicalize(dstHost) + ":"
cmdStr = cmdStr + dstFile
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
#-------------local copy------------------
class LocalDirCopy(Command):
def __init__(self,name,srcDirectory,dstDirectory):
# tar is much faster than cp for directories with lots of files
self.srcDirectory=srcDirectory
self.dstDirectory=dstDirectory
tarCmd = SYSTEM.getTarCmd()
cmdStr="%s -cf - -C %s . | %s -xf - -C %s" % (tarCmd,srcDirectory,tarCmd,dstDirectory)
Command.__init__(self,name,cmdStr,LOCAL, None)
#-------------local copy------------------
class LocalCopy(Command):
def __init__(self,name,srcFile,dstFile):
# tar is much faster than cp for directories with lots of files
cpCmd = SYSTEM.getCpCmd()
cmdStr="%s %s %s" % (cpCmd,srcFile,dstFile)
Command.__init__(self,name,cmdStr,LOCAL, None)
#------------ ssh + tar ------------------
#TODO: impl this.
#tar czf - srcDir/ | ssh user@dstHost tar xzf - -C dstDir
#-------------create tar------------------
class CreateTar(Command):
def __init__(self,name,srcDirectory,dstTarFile,ctxt=LOCAL,remoteHost=None,exclude=""):
self.srcDirectory=srcDirectory
self.dstTarFile=dstTarFile
tarCmd = SYSTEM.getTarCmd()
if exclude:
exclude = ' --exclude=' + exclude
cmdStr="%s cvPf %s %s -C %s ." % (tarCmd, self.dstTarFile, exclude,srcDirectory)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
#-------------extract tar---------------------
class ExtractTar(Command):
def __init__(self,name,srcTarFile,dstDirectory,ctxt=LOCAL,remoteHost=None):
self.srcTarFile=srcTarFile
self.dstDirectory=dstDirectory
tarCmd = SYSTEM.getTarCmd()
cmdStr="%s -C %s -xf %s" % (tarCmd, dstDirectory, srcTarFile)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
#--------------kill ----------------------
class Kill(Command):
def __init__(self,name,pid,signal,ctxt=LOCAL,remoteHost=None):
self.pid=pid
self.signal=signal
cmdStr="%s -s %s %s" % (findCmdInPath('kill'), signal, pid)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def local(name,pid,signal):
cmd=Kill(name,pid,signal)
cmd.run(validateAfter=True)
@staticmethod
def remote(name,pid,signal,remote_host):
cmd=Kill(name,pid,signal,ctxt=REMOTE,remoteHost=remote_host)
cmd.run(validateAfter=True)
#--------------kill children--------------
class KillChildren(Command):
def __init__(self,name,pid,signal,ctxt=LOCAL,remoteHost=None):
self.pid=pid
self.signal=signal
cmdStr="%s -%s -P %s" % (findCmdInPath('pkill'), signal, pid)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def local(name,pid,signal):
cmd=KillChildren(name,pid,signal)
cmd.run(validateAfter=True)
@staticmethod
def remote(name,pid,signal,remote_host):
cmd=KillChildren(name,pid,signal,ctxt=REMOTE,remoteHost=remote_host)
cmd.run(validateAfter=True)
#--------------pkill----------------------
class Pkill(Command):
def __init__(self,name,processname,signal=signal.SIGTERM,ctxt=LOCAL,remoteHost=None):
cmdStr="%s -%s %s" % (findCmdInPath('pkill'), signal, processname)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
#--------------sadc-----------------------
class Sadc(Command):
def __init__(self,name, outfilename, interval=5, background=False, ctxt=LOCAL, remoteHost=None):
cmdStr = SYSTEM.getSadcCmd(interval, outfilename)
if background:
cmdStr = "rm " + outfilename + "; nohup " + cmdStr + " < /dev/null > /dev/null 2>&1 &"
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def local(name, outfilename, interval, background):
cmd=Sadc(name, outfilename, interval, background)
cmd.run(validateAfter=True)
@staticmethod
def remote(name, outfilename, interval, background, remote_host):
cmd=Sadc(name, outfilename, interval, background, ctxt=REMOTE, remoteHost=remote_host)
cmd.run(validateAfter=True)
#--------------hostname ----------------------
class Hostname(Command):
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
self.remotehost=remoteHost
Command.__init__(self, name, findCmdInPath('hostname'), ctxt, remoteHost)
def get_hostname(self):
if not self.results:
raise Exception, 'Command not yet executed'
return self.results.stdout.strip()
class InterfaceAddrs(Command):
"""Returns list of interface IP Addresses. List does not include loopback."""
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
ifconfig = SYSTEM.getIfconfigCmd()
grep = findCmdInPath('grep')
awk = findCmdInPath('awk')
cut = findCmdInPath('cut')
cmdStr = '%s|%s "inet "|%s -v "127.0.0"|%s \'{print \$2}\'|%s -d: -f2' % (ifconfig, grep, grep, awk, cut)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def local(name):
cmd=InterfaceAddrs(name)
cmd.run(validateAfter=True)
return cmd.get_results().stdout.split()
@staticmethod
def remote(name, remoteHost):
cmd=InterfaceAddrs(name, ctxt=REMOTE, remoteHost=remoteHost)
cmd.run(validateAfter=True)
return cmd.get_results().stdout.split()
class FileContainsTerm(Command):
def __init__(self, name, search_term, file, ctxt=LOCAL, remoteHost=None):
self.search_term=search_term
self.file=file
cmdStr="%s -c %s %s" % (findCmdInPath('grep'),search_term,file)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
def contains_term(self):
if not self.results:
raise Exception, 'Command not yet executed'
count=int(self.results.stdout.strip())
if count == 0:
return False
else:
return True
#--------------tcp port is active -----------------------
class PgPortIsActive(Command):
def __init__(self,name,port,file,ctxt=LOCAL, remoteHost=None):
self.port=port
cmdStr="%s -an 2>/dev/null | %s %s | %s '{print $NF}'" %\
(findCmdInPath('netstat'),findCmdInPath('grep'),file,findCmdInPath('awk'))
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
def contains_port(self):
rows=self.results.stdout.strip().split()
if len(rows) == 0:
return False
for r in rows:
val = r.split('.')
netstatport = int(val[ len(val) - 1 ])
if netstatport == self.port:
return True
return False
@staticmethod
def local(name,file,port):
cmd=PgPortIsActive(name,port,file)
cmd.run(validateAfter=True)
return cmd.contains_port()
@staticmethod
def remote(name,file,port,remoteHost):
cmd=PgPortIsActive(name,port,file,ctxt=REMOTE,remoteHost=remoteHost)
cmd.run(validateAfter=True)
return cmd.contains_port()
#--------------chmod ----------------------
class Chmod(Command):
def __init__(self, name, dir, perm, ctxt=LOCAL, remoteHost=None):
cmdStr = '%s %s %s' % (findCmdInPath('chmod'), perm, dir)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def local(name, dir, perm):
cmd=Chmod(name, dir, perm)
cmd.run(validateAfter=True)
@staticmethod
def remote(name, hostname, dir, perm):
cmd=Chmod(name, dir, perm, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
#--------------echo ----------------------
class Echo(Command):
def __init__(self, name, echoString, ctxt=LOCAL, remoteHost=None):
cmdStr = '%s "%s"' % (findCmdInPath('echo'), echoString)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, echoString, hostname):
cmd = Echo(name, echoString, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
#--------------touch ----------------------
class Touch(Command):
def __init__(self, name, file, ctxt=LOCAL, remoteHost=None):
cmdStr = '%s %s' % (findCmdInPath('touch'), file)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, file, hostname):
cmd = Touch(name, file, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
#--------------get user id ----------------------
class UserId(Command):
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
idCmd = findCmdInPath('id')
trCmd = findCmdInPath('tr')
awkCmd = findCmdInPath('awk')
cmdStr = "%s|%s '(' ' '|%s ')' ' '|%s '{print $2}'" % (idCmd, trCmd, trCmd, awkCmd)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def local(name):
cmd=UserId(name)
cmd.run(validateAfter=True)
return cmd.results.stdout.strip()
#-------------- test file for setuid bit ----------------------
class FileTestSuid(Command):
def __init__(self,name,filename,ctxt=LOCAL,remoteHost=None):
cmdStr="""python -c "import os; import stat; testRes = os.stat('%s'); print (testRes.st_mode & stat.S_ISUID) == stat.S_ISUID" """ % filename
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def remote(name,remote_host,filename):
cmd=FileTestSuid(name,filename,ctxt=REMOTE,remoteHost=remote_host)
cmd.run(validateAfter=True)
return cmd.file_is_suid()
def file_is_suid(self):
return self.results.stdout.strip().upper()=='TRUE'
#-------------- get file owner ----------------------
class FileGetOwnerUid(Command):
def __init__(self,name,filename,ctxt=LOCAL,remoteHost=None):
cmdStr="""python -c "import os; import stat; testRes = os.stat('%s'); print testRes.st_uid " """ % filename
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
@staticmethod
def remote(name,remote_host,filename):
cmd=FileGetOwnerUid(name,filename,ctxt=REMOTE,remoteHost=remote_host)
cmd.run(validateAfter=True)
return cmd.file_uid()
def file_uid(self):
return int(self.results.stdout.strip().upper())
#--------------get list of descendant processes -------------------
def getDescendentProcesses(pid):
''' return all processes that are descendants of the given process id '''
children = list()
grandchildren = list()
for p in psi.process.ProcessTable().values():
if int(p.ppid) == int(pid):
children.append(int(p.pid))
# recursion
for child in children:
grandchildren.extend( getDescendentProcesses(child) )
return children + grandchildren
#--------------global variable initialization ----------------------
if curr_platform == SUNOS:
SYSTEM = SolarisPlatform()
elif curr_platform == LINUX:
SYSTEM = LinuxPlatform()
elif curr_platform == DARWIN:
SYSTEM = DarwinPlatform()
elif curr_platform == FREEBSD:
SYSTEM = FreeBsdPlatform()
else:
raise Exception("Platform %s is not supported. Supported platforms are:"\
" %s", SYSTEM, str(platform_list))
|
|
'''
preprocess data
1. read lines of JSON
2. sort by timestamp
3. filter by cmd and user
4. save as csv
'''
import argparse, json, csv, types, re, codecs, cStringIO
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
this_row = []
for s in row:
if isinstance(s, unicode):
s = s.encode('utf-8')
this_row.append(s)
self.writer.writerow(this_row)
#self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
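# Illustrative usage sketch for UnicodeWriter (the class is currently unused
# below, where csv.writer is called directly; the file name here is made up):
#
#     with open('example.csv', 'wb') as f:
#         writer = UnicodeWriter(f)
#         writer.writerow([u'timestamp', u'user', u'caf\xe9'])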
parser = argparse.ArgumentParser()
parser.add_argument('filename', help='mongoexport json output file')
args = parser.parse_args()
filename = args.filename
with open(filename) as f:
data_list = map(json.loads, f.readlines())
print 'data_list len', len(data_list)
data_list = sorted(data_list, cmp=lambda x,y: x['timestamp']['$date'] - y['timestamp']['$date'])
#split by cmd type
data_cmd = {}
for d in data_list:
cmd_type = d['cmd']
if not cmd_type in data_cmd:
data_cmd[cmd_type] = []
if 'msg' in d:
d['msg']['timestamp'] = d['timestamp']['$date']
d.pop('timestamp', None)
d.pop('_id', None)
if not 'user' in d['msg'] and d['user']:
d['msg']['user'] = d['user']
data_cmd[cmd_type].append(d)
#split by username
data_cmd.pop('login', None) #ignore the login data
data_cmd.pop('subSlide/null', None) #ignore the subSlide navigation data
data_cmd.pop('subSlide/101', None) #ignore the subSlide navigation data
data_cmd.pop('subSlide/102', None) #ignore the subSlide navigation data
data_cmd.pop('subSlide/103', None) #ignore the subSlide navigation data
data_cmd.pop('subSlide/104', None) #ignore the subSlide navigation data
data_cmd.pop('subSlide/105', None) #ignore the subSlide navigation data
data_cmd_username = {}
for k in data_cmd.keys():
data_cmd_username[k] = {}
# data_cmd keys
#face-count | AskQuestion | subSlide/null | subSlide/101 | slideUpdate/101 | slideUpdate/undefined | login | AlertTeacher'
# face-count {u'msg': {'timestamp': 1457844403720, u'user': {u'username': u'dave', u'role': u'student'}, u'jpg': 8211, u'faces': [{u'y': 61, u'x': 111, u'height': 111, u'width': 111}]}, u'cmd': u'face-count', u'user': {u'username': u'dave', u'role': u'student'}}
# this is for face-count
# convert into a list for CSV
# timestamp, username, facecount, width, height, x, y ...
for d in data_cmd['face-count']:
# format: timestamp, username, facecount, width, height, x, y ...
if type(d['msg']['user']) is types.DictType:
try:
entry = [ d['cmd'], d['msg']['timestamp'], d['msg']['user']['username'] ]
except:
d['msg']['user'] = {'username': 'none'}
entry = [ d['cmd'], d['msg']['timestamp'], 'none' ]
face_list = sorted(d['msg']['faces'], cmp=lambda a,b: b['height']-a['height'])
entry.append(len(face_list))
face_keys = ['width','height', 'x','y']
for f in face_list:
for k in face_keys:
entry.append(f[k])
else:
print 'error user', d
username = d['msg']['user']['username']
if not username in data_cmd_username['face-count']:
data_cmd_username['face-count'][username] = []
data_cmd_username['face-count'][username].append(entry)
for username, value in data_cmd_username['face-count'].iteritems():
with open(username+'-face.csv', 'wb') as csvfile:
this_write = csv.writer(csvfile, delimiter=',', dialect='excel')
for d in value:
this_write.writerow(d)
#AskQuestion {u'msg': {'timestamp': 1457844540749, u'questionMsg': u'it is started sir :)', u'sender': u'erin', 'user': {u'username': u'erin', u'role': u'student'}}, u'cmd': u'AskQuestion', u'user': {u'username': u'erin', u'role': u'student'}}
for d in data_cmd['AskQuestion']:
# format: timestamp, username, questionLen, question
if not 'user' in d['msg']:
d['msg']['user'] = {'username': 'none'}
if type(d['msg']['user']) is types.DictType:
try:
entry = [ d['cmd'], d['msg']['timestamp'], d['msg']['user']['username'] ]
except:
d['msg']['user'] = {'username': 'none'}
entry = [ d['cmd'], d['msg']['timestamp'], 'none' ]
else:
print 'error user', d
entry = entry + [len(d['msg']['questionMsg']), re.sub('\n',' ',d['msg']['questionMsg'])]
username = d['msg']['user']['username']
if not username in data_cmd_username['AskQuestion']:
data_cmd_username['AskQuestion'][username] = []
data_cmd_username['AskQuestion'][username].append(entry)
for username, value in data_cmd_username['AskQuestion'].iteritems():
with codecs.open(username+'-AskQuestion.csv', 'w', 'utf-8') as csvfile:
this_write = csv.writer(csvfile, delimiter=',', dialect='excel')
#this_write = UnicodeWriter(csvfile)
for d in value:
this_row = []
for s in d:
if isinstance(s, unicode):
s = s.encode('utf-8', 'ignore')
this_row.append(s)
try:
this_write.writerow(this_row)
except:
print 'error csv', this_row
#AlertTeacher {u'msg': {'timestamp': 1457844751007, u'user': u'bob'}, u'cmd': u'AlertTeacher', u'user': {u'username': u'bob', u'role': u'student'}}
for d in data_cmd['AlertTeacher']:
# format: timestamp, username, 1
# 1 means there was an alert (the value must be 1)
if not 'user' in d['msg']:
d['msg']['user'] = 'none'
try:
entry = [ d['cmd'], d['msg']['timestamp'], d['msg']['user'], 1 ]
except:
d['msg']['user'] = {'username': 'none'}
entry = [ d['cmd'], d['msg']['timestamp'], 'none' ]
username = d['msg']['user']
if not username in data_cmd_username['AlertTeacher']:
data_cmd_username['AlertTeacher'][username] = []
data_cmd_username['AlertTeacher'][username].append(entry)
for username, value in data_cmd_username['AlertTeacher'].iteritems():
with open(username+'-AlertTeacher.csv', 'wb') as csvfile:
this_write = csv.writer(csvfile, delimiter=',', dialect='excel')
for d in value:
this_write.writerow(d)
#slideUpdate/101 {u'msg': {'timestamp': 1457844366261, u'slideDeckId': u'101', 'user': {u'username': u'alice', u'role': u'lecturer'}, u'slideNoLocal': 1}, u'cmd': u'slideUpdate/101', u'user': {u'username': u'alice', u'role': u'lecturer'}}
#slideUpdate/undefined {u'msg': {'timestamp': 1457844665400, u'slideDeckId': u'101', u'slideNoLocal': 3}, u'cmd': u'slideUpdate/undefined', u'user': None}
# have to handle these two situations: with and without slide number
slideUpdateList = []
for k,v in data_cmd.iteritems():
if k[:11] == 'slideUpdate':
slideUpdateList = slideUpdateList + v
data_cmd_username['slideUpdate'] = {}
for d in slideUpdateList:
# format: timestamp, username, slideDeckNo, SlideNo
if not 'user' in d['msg']:
d['msg']['user'] = {'username': 'none'}
entry = [ d['cmd'], d['msg']['timestamp'], d['msg']['user']['username'], d['msg']['slideDeckId'], d['msg']['slideNoLocal'] ]
username = d['msg']['user']['username']
if not username in data_cmd_username['slideUpdate']:
data_cmd_username['slideUpdate'][username] = []
data_cmd_username['slideUpdate'][username].append(entry)
for username, value in data_cmd_username['slideUpdate'].iteritems():
with open(username+'-slideUpdate.csv', 'wb') as csvfile:
this_write = csv.writer(csvfile, delimiter=',', dialect='excel')
for d in value:
this_write.writerow(d)
|
|
#!/usr/bin/env python
## category DNA-seq
## desc Base/variant caller
"""
Base/variant caller
Given a BAM file and a genomic reference, for each position covered in the
BAM file, show the reference base, and the number of A/T/C/G's and InDels.
This can also be used to call variants.
You can also optionally filter out all bases whose quality score is below a
threshold, bases that aren't covered by enough reads, bases that have no
variation compared to reference, or bases whose variation is too low.
The output is a tab-delimited file that contains the following for each base:
chromosome
position (1-based)
reference base
# reads that contain this base
Consensus call
Minor call
Average mappings (number of mappings each read covering this base has)
Alternative allele frequency (optional)
Entropy
# A calls
# C calls
# G calls
# T calls
# deletions
# gaps
# inserts
If -altfreq is applied, an alternative allele percentage is calculated.
minor - background
--------------------------------
(major - background) + (minor - background)
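For example (illustrative counts): with major = 18, minor = 5 and an estimated
background of 1, the alternative allele frequency is
(5 - 1) / ((18 - 1) + (5 - 1)) = 4/21, or roughly 0.19.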
This is in lieu of using a more robust model, such as a Bayesian model like
the one used in Li, et al 2009, Genome Res.
If -showstrand is applied, a minor strand percentage is also calculated. This
is calculated as:
pct = (# reads with base on plus strand) / (# reads with base, total)
if pct > 0.5,
pct = 1 - pct
Entropy is sum(a..t) { p log2 p } where p = freq(+pseudocount) / genomic freq.
pseudo count = genomic freq * sqrt(N)
We use the following genomic frequencies: A 0.3, C 0.2, G 0.2, T 0.3
"""
import os
import sys
import math
import collections
import datetime
from ngsutils.bam import bam_iter, bam_open
from ngsutils.bed import BedFile
from eta import ETA
import pysam
def usage():
print __doc__
print """
Usage: bamutils basecall {opts} in.bam {chrom:start-end}
Options:
-ref fname Include reference basecalls from this file
-qual val Minimum base-quality level to use in calculations
(numeric, Sanger scale) (default 0)
-count val Report only bases with this minimum number of read-coverage
(matches, inserts, deletions counted) (default 0)
-mask val The bitmask to use for filtering reads by flag
(default 1540 - see SAM format for details)
-minorpct pct Require a minor call to be at least [pct] of the total count
(0.0 -> 1.0, default 0.04)
-altfreq Calculate alternative allele frequency
-showgaps Report gaps/splice-junctions in RNA-seq data
-showstrand Show the minor-strand percentages for each call
(0-0.5 only shows the minor strand %)
-bed fname Only output positions that are present in this BED file
(*must* be sorted and reduced with the -nostrand option)
-variants Only output positions that differ from reference
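Example (the file and region names here are illustrative):
bamutils basecall -ref genome.fa -qual 20 -variants in.bam chr1:1000-2000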
"""
sys.exit(1)
__genomic_freq = {'A': 0.3, 'C': 0.2, 'G': 0.2, 'T': 0.3}
def calc_entropy(a, c, t, g):
counts = {'A': a, 'C': c, 'G': g, 'T': t}
N = counts['A'] + counts['C'] + counts['G'] + counts['T']
if N == 0:
return 0
N_sqrt = math.sqrt(N)
count_pseudo = {}
N_pseudo = 0
for base in 'ATCG':
count_pseudo[base] = counts[base] + (__genomic_freq[base] * N_sqrt)
N_pseudo += count_pseudo[base]
acc = 0
for base in 'ATCG':
p = float(count_pseudo[base]) / N_pseudo / __genomic_freq[base]
acc += (p * math.log(p, 2))
return acc
MappingRecord = collections.namedtuple('MappingRecord', 'qpos cigar_op base qual read')
MappingPos = collections.namedtuple('MappingPos', 'tid pos records')
BasePosition = collections.namedtuple('BasePosition', 'tid pos total a c g t n deletions gaps insertions reads a_minor c_minor g_minor t_minor n_minor del_minor ins_minor')
class BamBaseCaller(object):
def __init__(self, bam, min_qual=0, min_count=0, regions=None, mask=1540, quiet=False):
self.bam = bam
self.min_qual = min_qual
self.min_count = min_count
self.regions = regions
self.cur_chrom = None
self.cur_start = None
self.cur_end = None
self.mask = mask
self.quiet = quiet
def _gen1():
if not self.quiet:
eta = ETA(self.regions.total)
else:
eta = None
count = 0
for region in self.regions:
working_chrom = None
if region.chrom in self.bam.references:
working_chrom = region.chrom
elif region.chrom[0:3] == 'chr':
if region.chrom[3:] in self.bam.references:
working_chrom = region.chrom[3:]
if not working_chrom:
continue
# for troubleshooting
self.cur_chrom = region.chrom
self.cur_start = region.start
self.cur_end = region.end
laststart = 0
for read in self.bam.fetch(working_chrom, region.start, region.end):
if read.pos != laststart:
count += 1
laststart = read.pos
if eta:
eta.print_status(count, extra='%s/%s %s:%s' % (count, self.regions.total, self.bam.references[read.tid], read.pos))
yield read
if eta:
eta.done()
def _gen2():
def callback(read):
return '%s:%s (%s) %s:%s-%s' % (self.bam.getrname(read.tid), read.pos, len(self.buffer), self.cur_chrom, self.cur_start, self.cur_end)
for read in bam_iter(self.bam, quiet=self.quiet, callback=callback):
yield read
if regions:
self._gen = _gen1
else:
self._gen = _gen2
self.buffer = None
self.current_tid = None
def close(self):
pass
def _calc_pos(self, tid, pos, records):
if self.cur_start and pos < self.cur_start:
return None
if self.cur_end and self.cur_end < pos:
return None
counts = {'A': 0, 'C': 0, 'G': 0, 'T': 0, 'N': 0, 'ins': 0, 'del': 0}
plus_counts = {'A': 0, 'C': 0, 'G': 0, 'T': 0, 'N': 0, 'ins': 0, 'del': 0}
insertions = {}
gaps = 0
total = 0
reads = []
for record in records:
qpos, cigar_op, base, qual, read = record
if cigar_op == 0: # M
if qual >= self.min_qual and (read.flag & self.mask) == 0:
total += 1
reads.append(record)
counts[base] += 1
if not read.is_reverse:
plus_counts[base] += 1
elif cigar_op == 1: # I
if qual >= self.min_qual and (read.flag & self.mask) == 0:
reads.append(record)
if not base in insertions:
insertions[base] = 1
else:
insertions[base] += 1
counts['ins'] += 1
if not read.is_reverse:
plus_counts['ins'] += 1
elif cigar_op == 2: # D
#total += 1 # not sure these should be included,
# samtools mpileup includes them
# IGV doesn't
counts['del'] += 1
reads.append(record)
if not read.is_reverse:
plus_counts['del'] += 1
elif cigar_op == 3: # N
gaps += 1
reads.append(record)
elif cigar_op == 4: # S - soft clipping
pass
elif cigar_op == 5: # H - hard clipping
pass
pcts = {'A': 0.0, 'C': 0.0, 'G': 0.0, 'T': 0.0, 'N': 0.0, 'ins': 0.0, 'del': 0.0}
for k in pcts:
if counts[k] > 0:
pcts[k] = float(plus_counts[k]) / counts[k]
if pcts[k] > 0.5:
pcts[k] = 1 - pcts[k]
if total >= self.min_count:
return BasePosition(tid, pos, total, counts['A'], counts['C'], counts['G'], counts['T'], counts['N'], counts['del'], gaps, insertions, reads, pcts['A'], pcts['C'], pcts['G'], pcts['T'], pcts['N'], pcts['del'], pcts['ins'])
def fetch(self):
self.current_tid = None
self.buffer = collections.deque()
for read in self._gen():
if (read.flag & self.mask) > 0:
continue
if self.current_tid != read.tid: # new chromosome
while self.buffer:
tid, pos, records = self.buffer.popleft()
y = self._calc_pos(tid, pos, records)
if y:
yield y
self.current_tid = read.tid
# handle all positions that are 5' of the current one
while self.buffer and read.pos > self.buffer[0].pos:
tid, pos, records = self.buffer.popleft()
y = self._calc_pos(tid, pos, records)
if y:
yield y
self._push_read(read)
# flush buffer for the end
while self.buffer:
tid, pos, records = self.buffer.popleft()
y = self._calc_pos(tid, pos, records)
if y:
yield y
def _push_read(self, read):
if not self.buffer:
self.buffer.append(MappingPos(read.tid, read.pos, []))
while self.buffer[-1].pos < read.aend:
self.buffer.append(MappingPos(read.tid, self.buffer[-1].pos + 1, []))
buf_idx = 0
while self.buffer[buf_idx].pos < read.pos:
buf_idx += 1
read_idx = 0
for op, length in read.cigar:
if op == 0: # M
for i in xrange(length):
try:
if read.qual:
qualval = ord(read.qual[read_idx]) - 33
else:
qualval = 0
self.buffer[buf_idx].records.append(MappingRecord(read_idx, op, read.seq[read_idx], qualval, read))
except Exception, e:
print e
sys.stderr.write('\n%s\nIf there is a BED file, is it sorted and reduced?\n' % e)
sys.stderr.write('read: %s (%s:%s-%s)\n' % (read.qname, self.bam.references[read.tid], read.pos, read.aend))
sys.stderr.write('%s\n' % str(read))
if self.cur_chrom:
sys.stderr.write('current range: %s:%s-%s\n' % (self.cur_chrom, self.cur_start, self.cur_end))
sys.exit(1)
buf_idx += 1
read_idx += 1
elif op == 1: # I
inseq = ''
inqual = 0
for i in xrange(length):
inseq += read.seq[read_idx]
if read.qual:
inqual += ord(read.qual[read_idx]) - 33
read_idx += 1
inqual = inqual / len(inseq) # use an average of the entire inserted bases
# as the quality for the whole insert
self.buffer[buf_idx].records.append(MappingRecord(read_idx, op, inseq, inqual, read))
elif op == 2: # D
mr = MappingRecord(read_idx, op, None, None, read)
for i in xrange(length):
self.buffer[buf_idx].records.append(mr)
buf_idx += 1
elif op == 3: # N
mr = MappingRecord(read_idx, op, None, None, read)
for i in xrange(length):
self.buffer[buf_idx].records.append(mr)
buf_idx += 1
elif op == 4: # S - soft clipping
read_idx += length
pass
elif op == 5: # H - hard clipping
pass
def _calculate_consensus_minor(minorpct, a, c, g, t):
consensuscalls = []
minorcalls = []
calls = [(a, 'A'), (c, 'C'), (g, 'G'), (t, 'T')]
calls.sort()
calls.reverse()
best = calls[0][0]
minor = 0
total = a + c + g + t
for count, base in calls:
if count == 0:
break
if count == best:
consensuscalls.append(base)
elif not minor:
minor = count
minorcalls.append(base)
elif count == minor:
minorcalls.append(base)
else:
# background
pass
if best == 0:
return ('N', '')
if total and (float(minor) / total) < minorpct:
minorcalls = [] # too low of a pct...
# if there is one major, there can be more than one minor
# however, if there is more than one major, there are *no* minors
if len(consensuscalls) == 1:
return (consensuscalls[0], '/'.join(minorcalls))
return ('/'.join(consensuscalls), '')
def _calculate_heterozygosity(a, c, g, t):
calls = [a, c, g, t]
calls.sort()
major = calls[-1]
minor = calls[-2]
background = calls[-3]
if minor - background <= 0:
return 0.0 # There is no minor call, so not heterozygous!
return float(minor - background) / (major - background + minor - background)
def bam_basecall(bam, ref_fname, min_qual=0, min_count=0, regions=None, mask=1540, quiet=False, showgaps=False, showstrand=False, minorpct=0.01, altfreq=False, variants=False, profiler=None, out=sys.stdout):
if ref_fname:
ref = pysam.Fastafile(ref_fname)
else:
ref = None
out.write('chrom\tpos\tref\tcount\tconsensus call\tminor call\tave mappings')
if altfreq:
out.write('\talt. allele freq')
out.write('\tentropy\tA\tC\tG\tT\tN\tDeletions\tGaps\tInsertions\tInserts')
if showstrand:
out.write('\t+ strand %\tA minor %\tC minor %\tG minor %\tT minor %\tN minor %\tDeletion minor %\tInsertion minor %')
out.write('\n')
bbc = BamBaseCaller(bam, min_qual, min_count, regions, mask, quiet)
ebi_chr_convert = False
for basepos in bbc.fetch():
if basepos.pos < 0:
continue
if profiler and profiler.abort():
break
big_total = basepos.total + basepos.deletions + len(basepos.insertions)
if big_total < min_count:
continue
if big_total == 0 and not (showgaps and basepos.gaps > 0):
continue
refbase = ''
if ref:
if not ebi_chr_convert:
refbase = ref.fetch(bbc.bam.references[basepos.tid], basepos.pos, basepos.pos + 1).upper()
if not refbase and not bbc.bam.references[basepos.tid].startswith('chr'):
ebi_chr_convert = True
if not refbase and ebi_chr_convert:
refbase = ref.fetch('chr%s' % bbc.bam.references[basepos.tid], basepos.pos, basepos.pos + 1).upper()
else:
refbase = 'N'
entropy = calc_entropy(basepos.a, basepos.c, basepos.g, basepos.t)
read_ih_acc = 0
plus_count = 0.0 # needs to be float
total_count = 0
for qpos, cigar_op, base, qual, read in basepos.reads:
total_count += 1
if not read.is_reverse:
plus_count += 1.0
if cigar_op in [0, 1, 2]:
try:
read_ih_acc += int(read.opt('IH'))
except KeyError:
read_ih_acc += 1
inserts = []
for insert in basepos.insertions:
inserts.append((basepos.insertions[insert], insert))
inserts.sort()
inserts.reverse()
insert_str_ar = []
incount = 0
for count, insert in inserts:
insert_str_ar.append('%s:%s' % (insert, count))
incount += count
if big_total > 0:
ave_mapping = (float(read_ih_acc) / big_total)
else:
ave_mapping = 0
consensuscall, minorcall = _calculate_consensus_minor(minorpct, basepos.a, basepos.c, basepos.g, basepos.t) ##TODO - add inserts and dels here
if variants and consensuscall == refbase:
continue
cols = [bbc.bam.references[basepos.tid],
basepos.pos + 1,
refbase,
basepos.total,
consensuscall,
minorcall,
ave_mapping,
]
if altfreq:
cols.append(_calculate_heterozygosity(basepos.a, basepos.c, basepos.g, basepos.t))
cols.extend([
entropy,
basepos.a,
basepos.c,
basepos.g,
basepos.t,
basepos.n,
basepos.deletions,
basepos.gaps,
incount,
','.join(insert_str_ar)])
if showstrand:
cols.append(plus_count / total_count)
cols.append(basepos.a_minor)
cols.append(basepos.c_minor)
cols.append(basepos.g_minor)
cols.append(basepos.t_minor)
cols.append(basepos.n_minor)
cols.append(basepos.del_minor)
cols.append(basepos.ins_minor)
out.write('%s\n' % '\t'.join([str(x) for x in cols]))
bbc.close()
if ref:
ref.close()
# class SingleRegion(object):
# def __init__(self, arg):
# self.chrom, startend = arg.split(':')
# if '-' in startend:
# self.start, self.end = [int(x) for x in startend.split('-')]
# else:
# self.start = int(startend)
# self.end = start
# self.start = self.start - 1
# @property
# def total(self):
# return end - start
# @property
# def regions(self):
# yield (chrom, start, end)
# class BEDRegions(object):
# def __init__(self, fname):
# self.fname = fname
# self.__total = 0
# @property
# def total(self):
# if not self.__total:
# self.__total = 0
# with open(self.fname) as f:
# for line in f:
# if line[0] == '#':
# continue
# cols = line.strip().split('\t')
# self.__total += (int(cols[2]) - int(cols[1]))
# return self.__total
# @property
# def regions(self):
# with open(self.fname) as f:
# for line in f:
# if line[0] == '#':
# continue
# cols = line.strip().split('\t')
# yield (cols[0], int(cols[1]), int(cols[2]))
class TimedProfiler(object):
def __init__(self, secs_to_run=3600): # default is to run for one hour
self.expire_ts = datetime.datetime.now() + datetime.timedelta(seconds=secs_to_run)
def abort(self):
if datetime.datetime.now() > self.expire_ts:
return True
return False
if __name__ == '__main__':
bam = None
ref = None
min_qual = 0
min_count = 0
mask = 1540
chrom = None
start = None
end = None
quiet = False
showgaps = False
showstrand = False
altfreq = False
variants = False
minorpct = 0.04
regions = None
profile = None
last = None
try:
for arg in sys.argv[1:]:
if last == '-qual':
min_qual = int(arg)
last = None
elif last == '-ref':
if os.path.exists(arg) and os.path.exists('%s.fai' % arg):
ref = arg
else:
print "Missing FASTA file or index: %s" % arg
usage()
last = None
elif last == '-count':
min_count = int(arg)
last = None
elif last == '-bed':
if os.path.exists(arg):
regions = BedFile(arg)
else:
print "BED file: %s not found!" % arg
usage()
last = None
elif last == '-mask':
mask = int(arg)
last = None
elif last == '-minorpct':
minorpct = float(arg)
last = None
elif last == '-profile':
profile = arg
last = None
elif arg == '-h':
usage()
elif arg == '-showstrand':
showstrand = True
elif arg == '-showgaps':
showgaps = True
elif arg == '-q':
quiet = True
elif arg == '-variants':
variants = True
elif arg == '-altfreq':
altfreq = True
elif arg in ['-qual', '-count', '-mask', '-ref', '-minorpct', '-profile', '-bed']:
last = arg
elif not bam and os.path.exists(arg):
if os.path.exists('%s.bai' % arg):
bam = arg
else:
print "Missing BAI index on %s" % arg
usage()
            elif not ref and os.path.exists(arg):
if os.path.exists('%s.fai' % arg):
ref = arg
else:
print "Missing FAI index on %s" % arg
usage()
elif not regions:
regions = BedFile(region=arg)
else:
print "Unknown option or missing index: %s" % arg
usage()
except Exception, e:
print e
usage()
if not bam:
usage()
else:
bamobj = bam_open(bam)
if profile:
import cProfile
def func():
bam_basecall(bamobj, ref, min_qual, min_count, regions, mask, quiet, showgaps, showstrand, minorpct, altfreq, variants, TimedProfiler())
sys.stderr.write('Profiling...\n')
cProfile.run('func()', profile)
else:
bam_basecall(bamobj, ref, min_qual, min_count, regions, mask, quiet, showgaps, showstrand, minorpct, altfreq, variants, None)
bamobj.close()
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import uuid
from datetime import date, datetime, time
import dateutil
from flask import jsonify, request, session
from marshmallow import fields, validate
from marshmallow_enum import EnumField
from werkzeug.exceptions import Forbidden
from indico.core import signals
from indico.core.db import db
from indico.core.db.sqlalchemy.links import LinkType
from indico.core.db.sqlalchemy.util.queries import db_dates_overlap
from indico.core.errors import NoReportError
from indico.legacy.common.cache import GenericCache
from indico.modules.rb import rb_settings
from indico.modules.rb.controllers import RHRoomBookingBase
from indico.modules.rb.controllers.backend.common import search_room_args
from indico.modules.rb.models.reservation_occurrences import ReservationOccurrence
from indico.modules.rb.models.reservations import RepeatFrequency, Reservation
from indico.modules.rb.models.rooms import Room
from indico.modules.rb.operations.bookings import (get_active_bookings, get_booking_edit_calendar_data,
get_matching_events, get_room_calendar, get_rooms_availability,
has_same_dates, should_split_booking, split_booking)
from indico.modules.rb.operations.suggestions import get_suggestions
from indico.modules.rb.schemas import (create_booking_args, reservation_details_schema,
reservation_linked_object_data_schema, reservation_occurrences_schema,
reservation_user_event_schema)
from indico.modules.rb.util import (generate_spreadsheet_from_occurrences, get_linked_object, group_by_occurrence_date,
is_booking_start_within_grace_period, serialize_availability,
serialize_booking_details, serialize_occurrences)
from indico.util.date_time import now_utc, utc_to_server
from indico.util.i18n import _
from indico.util.spreadsheets import send_csv, send_xlsx
from indico.web.args import use_args, use_kwargs
from indico.web.flask.util import url_for
from indico.web.util import ExpectedError
NUM_SUGGESTIONS = 5
_export_cache = GenericCache('bookings-export')
class RHTimeline(RHRoomBookingBase):
def _process_args(self):
self.room = None
if 'room_id' in request.view_args:
self.room = Room.get_one(request.view_args['room_id'], is_deleted=False)
@use_kwargs({
'start_dt': fields.DateTime(required=True),
'end_dt': fields.DateTime(required=True),
'repeat_frequency': EnumField(RepeatFrequency, missing='NEVER'),
'repeat_interval': fields.Int(missing=1),
'room_ids': fields.List(fields.Int(), missing=[]),
'skip_conflicts_with': fields.List(fields.Int(), missing=None),
'admin_override_enabled': fields.Bool(missing=False)
})
def _process(self, room_ids, **kwargs):
rooms = [self.room] if self.room else Room.query.filter(Room.id.in_(room_ids), ~Room.is_deleted).all()
date_range, availability = get_rooms_availability(rooms, **kwargs)
date_range = [dt.isoformat() for dt in date_range]
for data in availability.viewvalues():
# add additional helpful attributes
data.update({
'num_days_available': len(date_range) - len(data['conflicts']),
'all_days_available': not data['conflicts'],
'num_conflicts': len(data['conflicts'])
})
serialized = serialize_availability(availability)
if self.room:
availability = serialized[self.room.id]
else:
# keep order of original room id list
availability = sorted(serialized.items(), key=lambda x: room_ids.index(x[0]))
return jsonify(availability=availability, date_range=date_range)
class RHCalendar(RHRoomBookingBase):
@use_kwargs({
'start_date': fields.Date(missing=lambda: date.today().isoformat()),
'end_date': fields.Date(missing=None),
'my_bookings': fields.Bool(missing=False),
'show_inactive': fields.Bool(missing=False),
'room_ids': fields.List(fields.Int(), missing=None),
'text': fields.String(missing=None)
})
def _process(self, start_date, end_date, room_ids, my_bookings, show_inactive, text):
booked_for_user = session.user if my_bookings else None
if end_date is None:
end_date = start_date
calendar = get_room_calendar(start_date, end_date, room_ids, booked_for_user=booked_for_user,
include_inactive=show_inactive, text=text)
return jsonify(serialize_availability(calendar).values())
class RHActiveBookings(RHRoomBookingBase):
@use_kwargs({
'room_ids': fields.List(fields.Int(), missing=None),
'start_dt': fields.DateTime(missing=None),
'last_reservation_id': fields.Int(missing=None),
'my_bookings': fields.Bool(missing=False),
'limit': fields.Int(missing=40),
'text': fields.String(missing=None)
})
def _process(self, room_ids, start_dt, last_reservation_id, my_bookings, limit, text):
start_dt = start_dt or datetime.combine(date.today(), time(0, 0))
booked_for_user = session.user if my_bookings else None
bookings, rows_left = get_active_bookings(limit=limit,
start_dt=start_dt,
last_reservation_id=last_reservation_id,
room_ids=room_ids,
booked_for_user=booked_for_user,
text=text)
return jsonify(bookings=serialize_occurrences(bookings), rows_left=rows_left)
class RHCreateBooking(RHRoomBookingBase):
@use_args(create_booking_args)
def _process_args(self, args):
self.args = args
self.prebook = args.pop('is_prebooking')
self.room = Room.get_one(self.args.pop('room_id'), is_deleted=False)
def _check_access(self):
RHRoomBookingBase._check_access(self)
if (self.prebook and not self.room.can_prebook(session.user) or
(not self.prebook and not self.room.can_book(session.user))):
raise Forbidden('Not authorized to book this room')
def _validate_room_booking_limit(self, start_dt, end_dt, booking_limit_days):
day_start_dt = datetime.combine(start_dt.date(), time())
day_end_dt = datetime.combine(end_dt.date(), time(23, 59))
selected_period_days = (day_end_dt - day_start_dt).days
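        # e.g. a booking from Monday 10:00 to Wednesday 16:00 becomes
        # Mon 00:00 .. Wed 23:59 here, whose timedelta .days is 2, so it passes
        # a booking_limit_days of 2 or more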
return selected_period_days <= booking_limit_days
def _link_booking(self, booking, type_, id_, link_back):
obj = get_linked_object(type_, id_)
if obj is None or not obj.event.can_manage(session.user):
return
booking.linked_object = obj
if link_back:
obj.inherit_location = False
obj.room = self.room
def _process(self):
args = self.args
args.setdefault('booked_for_user', session.user)
if not is_booking_start_within_grace_period(args['start_dt'], session.user, args['admin_override_enabled']):
raise ExpectedError(_('You cannot create a booking which starts in the past'))
# Check that the booking is not longer than allowed
booking_limit_days = self.room.booking_limit_days or rb_settings.get('booking_limit')
if not self._validate_room_booking_limit(args['start_dt'], args['end_dt'], booking_limit_days):
msg = (_('Bookings for the room "{}" may not be longer than {} days')
.format(self.room.name, booking_limit_days))
raise ExpectedError(msg)
try:
resv = Reservation.create_from_data(self.room, args, session.user, prebook=self.prebook)
if args.get('link_type') is not None and args.get('link_id') is not None:
self._link_booking(resv, args['link_type'], args['link_id'], args['link_back'])
db.session.flush()
except NoReportError as e:
db.session.rollback()
raise ExpectedError(unicode(e))
serialized_occurrences = serialize_occurrences(group_by_occurrence_date(resv.occurrences.all()))
if self.prebook:
data = {'pre_bookings': serialized_occurrences}
else:
data = {'bookings': serialized_occurrences}
return jsonify(room_id=self.room.id, booking=reservation_details_schema.dump(resv), calendar_data=data)
class RHRoomSuggestions(RHRoomBookingBase):
@use_args(search_room_args)
def _process(self, args):
return jsonify(get_suggestions(args, limit=NUM_SUGGESTIONS))
class RHBookingBase(RHRoomBookingBase):
def _process_args(self):
self.booking = Reservation.get_one(request.view_args['booking_id'])
class RHBookingDetails(RHBookingBase):
def _process(self):
return jsonify(serialize_booking_details(self.booking))
class RHBookingStateActions(RHBookingBase):
def _process_args(self):
RHBookingBase._process_args(self)
self.action = request.view_args['action']
def _check_access(self):
RHBookingBase._check_access(self)
funcs = {'approve': self.booking.can_accept,
'reject': self.booking.can_reject,
'cancel': self.booking.can_cancel}
if self.action not in funcs or not funcs[self.action](session.user):
raise Forbidden
@use_kwargs({
'reason': fields.String(required=True)
})
def reject(self, reason):
self.booking.reject(session.user, reason)
def _process(self):
if self.action == 'approve':
self.booking.accept(session.user)
elif self.action == 'reject':
self.reject()
elif self.action == 'cancel':
self.booking.cancel(session.user)
return jsonify(booking=serialize_booking_details(self.booking))
class RHDeleteBooking(RHBookingBase):
def _check_access(self):
RHBookingBase._check_access(self)
if not self.booking.can_delete(session.user):
raise Forbidden
def _process(self):
booking_id = self.booking.id
room_id = self.booking.room.id
signals.rb.booking_deleted.send(self.booking)
db.session.delete(self.booking)
return jsonify(booking_id=booking_id, room_id=room_id)
class RHLinkedObjectData(RHRoomBookingBase):
"""Fetch data from event, contribution or session block"""
def _process_args(self):
type_ = LinkType[request.view_args['type']]
id_ = request.view_args['id']
self.linked_object = get_linked_object(type_, id_)
def _process(self):
if not self.linked_object or not self.linked_object.can_access(session.user):
return jsonify(can_access=False)
return jsonify(can_access=True, **reservation_linked_object_data_schema.dump(self.linked_object))
class RHBookingEditCalendars(RHBookingBase):
@use_kwargs({
'start_dt': fields.DateTime(required=True),
'end_dt': fields.DateTime(required=True),
'repeat_frequency': EnumField(RepeatFrequency, missing='NEVER'),
'repeat_interval': fields.Int(missing=1),
})
def _process(self, **kwargs):
return jsonify(get_booking_edit_calendar_data(self.booking, kwargs))
class RHUpdateBooking(RHBookingBase):
def _check_access(self):
RHBookingBase._check_access(self)
if not self.booking.can_edit(session.user):
raise Forbidden
@use_args(create_booking_args)
def _process(self, args):
new_booking_data = {
'booking_reason': args['booking_reason'],
'booked_for_user': args.get('booked_for_user', self.booking.booked_for_user),
'start_dt': args['start_dt'],
'end_dt': args['end_dt'],
'repeat_frequency': args['repeat_frequency'],
'repeat_interval': args['repeat_interval'],
}
additional_booking_attrs = {}
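        # Either modify the booking in place (downgrading it back to a pending
        # pre-booking if the dates changed and the user may only pre-book), or
        # split it into a new booking and report the new booking's id.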
if not should_split_booking(self.booking, new_booking_data):
has_date_changed = not has_same_dates(self.booking, new_booking_data)
room = self.booking.room
self.booking.modify(new_booking_data, session.user)
if (has_date_changed and not room.can_book(session.user, allow_admin=False) and
room.can_prebook(session.user, allow_admin=False) and self.booking.is_accepted):
self.booking.reset_approval(session.user)
else:
new_booking = split_booking(self.booking, new_booking_data)
additional_booking_attrs['new_booking_id'] = new_booking.id
db.session.flush()
today = date.today()
calendar = get_room_calendar(args['start_dt'] or today, args['end_dt'] or today, [args['room_id']])
return jsonify(booking=dict(serialize_booking_details(self.booking), **additional_booking_attrs),
room_calendar=serialize_availability(calendar).values())
class RHMyUpcomingBookings(RHRoomBookingBase):
def _process(self):
q = (ReservationOccurrence.query
.filter(ReservationOccurrence.start_dt > utc_to_server(now_utc()),
ReservationOccurrence.is_valid,
db.or_(Reservation.booked_for_user == session.user,
Reservation.created_by_user == session.user),
~Room.is_deleted)
.join(Reservation)
.join(Room)
.order_by(ReservationOccurrence.start_dt.asc())
.limit(5))
return jsonify(reservation_occurrences_schema.dump(q))
class RHMatchingEvents(RHRoomBookingBase):
"""Get events suitable for booking linking."""
@use_kwargs({
'start_dt': fields.DateTime(),
'end_dt': fields.DateTime(),
'repeat_frequency': EnumField(RepeatFrequency, missing='NEVER'),
'repeat_interval': fields.Int(missing=1),
})
def _process(self, start_dt, end_dt, repeat_frequency, repeat_interval):
events = get_matching_events(start_dt, end_dt, repeat_frequency, repeat_interval)
return jsonify(reservation_user_event_schema.dump(events))
class RHBookingOccurrenceStateActions(RHBookingBase):
"""Reject or cancel booking occurrence."""
def _process_args(self):
RHBookingBase._process_args(self)
date = dateutil.parser.parse(request.view_args['date'], yearfirst=True).date()
self.occurrence = self.booking.occurrences.filter(ReservationOccurrence.date == date).one()
self.action = request.view_args['action']
def _check_access(self):
RHBookingBase._check_access(self)
funcs = {'reject': self.occurrence.can_reject,
'cancel': self.occurrence.can_cancel}
if self.action not in funcs or not funcs[self.action](session.user):
raise Forbidden
@use_kwargs({
'reason': fields.String(required=True)
})
def reject(self, reason):
self.occurrence.reject(session.user, reason)
def _process(self):
if self.action == 'reject':
self.reject()
elif self.action == 'cancel':
self.occurrence.cancel(session.user)
return jsonify(occurrence=reservation_occurrences_schema.dump(self.occurrence, many=False))
class RHBookingExport(RHRoomBookingBase):
@use_kwargs({
'room_ids': fields.List(fields.Int(), required=True),
'start_date': fields.Date(required=True),
'end_date': fields.Date(required=True),
'format': fields.Str(validate.OneOf({'csv', 'xlsx'}), required=True),
})
def _process(self, room_ids, start_date, end_date, format):
occurrences = (ReservationOccurrence.query
.join(ReservationOccurrence.reservation)
.filter(Reservation.room_id.in_(room_ids),
ReservationOccurrence.is_valid,
db_dates_overlap(ReservationOccurrence,
'start_dt', datetime.combine(start_date, time()),
'end_dt', datetime.combine(end_date, time.max)))).all()
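        # Build the spreadsheet rows now, cache them for 30 minutes under a
        # random token, and return a URL from which RHBookingExportFile serves
        # the generated file.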
token = unicode(uuid.uuid4())
headers, rows = generate_spreadsheet_from_occurrences(occurrences)
_export_cache.set(token, {'headers': headers, 'rows': rows}, time=1800)
download_url = url_for('rb.export_bookings_file', format=format, token=token)
return jsonify(url=download_url)
class RHBookingExportFile(RHRoomBookingBase):
def _process(self):
data = _export_cache.get(request.args['token'])
file_format = request.view_args['format']
if file_format == 'csv':
return send_csv('bookings.csv', **data)
elif file_format == 'xlsx':
return send_xlsx('bookings.xlsx', **data)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating and loading vocab remappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_checkpoint_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
ops.NotDifferentiable("GenerateVocabRemapping")
ops.NotDifferentiable("LoadAndRemapMatrix")
def _load_and_remap_matrix(ckpt_path,
old_tensor_name,
new_row_vocab_offset,
num_rows_to_load,
new_col_vocab_size,
initializer,
old_row_vocab_size=-1,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=0,
num_col_oov_buckets=0,
max_rows_in_memory=-1):
"""Loads a 2-D (matrix) `Tensor` from checkpoint.
Generates 1D-remappings for rows and columns using the
`GenerateVocabRemapping` op, and initializes any anticipated values with the
provided initializer. Then, uses the `LoadAndRemapMatrix` op to create a
matrix that loads existing values from the checkpoint, while filling out
"missing" values with the newly initialized values. See
contrib/framework/ops/checkpoint_ops.cc for more information on the wrapped
functionality (LoadAndRemapMatrix). This wrapper can be used to perform only
row remapping or only col remapping. If only row remapping is desired,
{new,old}_col_vocab_file should be `None`, and vice versa for column
remapping.
NOTE: This only supports div-partitioning the vocabulary on the 1st dimension
(row axis) via `new_row_vocab_offset`.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
new_row_vocab_offset: A 0-indexed integer representing what line to
start reading at in the new row vocabulary. Used for partitioned
variables.
num_rows_to_load: Number of rows to load for the new vocabulary (note: to
support variable partitioning and partial loading, this does not need to
be the same as the number of entries in `new_row_vocab_file`).
new_col_vocab_size: Number of columns to load - should be the same as the
number of entries in `new_col_vocab_file`, since we don't support
partitioning along the column axis.
initializer: Callable initializer function that accepts a 1-D tensor as the
arg to specify the shape of the returned tensor. Used to initialize
missing values.
old_row_vocab_size: The number of entries to consider in the old vocabulary.
With the default value of -1, the entire old row vocabulary file will be
used. Otherwise, only the first `old_row_vocab_size` entries will be
      considered for remapping. Must be smaller than the length of
`old_row_vocab_file`. NOTE: we do not provide an equivalent
`old_col_vocab_size` for classes.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new row vocabulary file. Can be None, which represents no remapping
on the row axis - in which case, `new_row_vocab_offset` and
`num_rows_to_load` work under the assumption that the new row vocab is the
same as the old row vocab.
old_col_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old column vocabulary file. Can be None, which represents no
remapping on the column axis.
new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new column vocabulary file. Can be None, which represents no
remapping on the column axis - in which case, `new_col_vocab_size` works
under the assumption that the new col vocab is the same as the old col
vocab.
num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
to append. Must be >= 0.
num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
columns to append. Must be >= 0.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A Tensor of shape `[num_rows_to_load + num_row_oov_buckets,
new_col_vocab_size + num_col_oov_buckets]`, with values loaded from the
specified tensor in the checkpoint, and any missing or OOV values
initialized with the given `initializer`.
Raises:
ValueError: If `num_row_oov_buckets` or `num_col_oov_buckets` < 0.
ValueError: If either `old_row_vocab_file` or `new_row_vocab_file` is
provided, while the other is not. Same for `old_col_vocab_file` and
`new_col_vocab_file`.
ValueError: If neither row vocabs or col vocabs are provided.
"""
if num_row_oov_buckets < 0:
raise ValueError("num_row_oov_buckets must be >= 0, but received %d" %
num_row_oov_buckets)
if num_col_oov_buckets < 0:
raise ValueError("num_col_oov_buckets must be >= 0, but received %d" %
num_col_oov_buckets)
if bool(old_row_vocab_file) != bool(new_row_vocab_file):
raise ValueError(
"old_row_vocab_file and new_row_vocab_file must both be specified or "
"left unspecified. old_row_vocab_file='{}', new_row_vocab_file='{}'".
format(old_row_vocab_file, new_row_vocab_file))
if bool(old_col_vocab_file) != bool(new_col_vocab_file):
raise ValueError(
"old_col_vocab_file and new_col_vocab_file must both be specified or "
"left unspecified. old_col_vocab_file='{}', new_col_vocab_file='{}'".
format(old_col_vocab_file, new_col_vocab_file))
remap_rows = new_row_vocab_file and old_row_vocab_file
remap_cols = new_col_vocab_file and old_col_vocab_file
if not (remap_rows or remap_cols):
raise ValueError(
"Must provide either row or column vocab files. If no remapping is "
"necessary, consider using `tf.contrib.framework.init_from_checkpoint` "
"instead.")
num_rows_present = num_rows_to_load
if remap_rows:
row_remapping, num_rows_present = (
gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=new_row_vocab_file,
old_vocab_file=old_row_vocab_file,
new_vocab_offset=new_row_vocab_offset,
num_new_vocab=num_rows_to_load,
old_vocab_size=old_row_vocab_size))
else:
# Even when the rows are not being reordered, we still need to generate a
# remapping to account for initializing partitioned Variables (when
# new_row_vocab_offset is non-zero).
row_remapping = math_ops.range(
new_row_vocab_offset,
new_row_vocab_offset + num_rows_to_load,
dtype=dtypes.int64)
col_remapping = []
num_cols_present = new_col_vocab_size
if remap_cols:
col_remapping, num_cols_present = (
gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=new_col_vocab_file,
old_vocab_file=old_col_vocab_file,
new_vocab_offset=0, # Offset is unused for cols (no partitioning).
num_new_vocab=new_col_vocab_size))
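  # Cells that must be freshly initialized: the full [num_rows_to_load,
  # new_col_vocab_size] block minus the cells covered by rows/columns that
  # were actually found in the old vocab.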
init_vals = initializer([
num_rows_to_load * new_col_vocab_size -
num_rows_present * num_cols_present, 1
])
return_tensor = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
row_remapping=row_remapping,
col_remapping=col_remapping,
initializing_values=init_vals,
num_rows=num_rows_to_load,
num_cols=new_col_vocab_size,
max_rows_in_memory=max_rows_in_memory)
# Add OOV row(s) and column(s).
if num_row_oov_buckets > 0:
init_row_oov_val = initializer([num_row_oov_buckets, new_col_vocab_size])
init_row_oov_val = ops.convert_to_tensor(init_row_oov_val)
return_tensor = array_ops.concat([return_tensor, init_row_oov_val], 0)
if num_col_oov_buckets > 0:
# We need to add any row OOV to the new column shape.
init_col_oov_val = initializer(
[num_rows_to_load + num_row_oov_buckets, num_col_oov_buckets])
init_col_oov_val = ops.convert_to_tensor(init_col_oov_val)
return_tensor = array_ops.concat([return_tensor, init_col_oov_val], 1)
return return_tensor
def _load_and_remap_matrix_initializer(ckpt_path,
old_tensor_name,
new_row_vocab_size,
new_col_vocab_size,
old_row_vocab_size=-1,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=0,
num_col_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
r"""Returns a var initializer for loading and remapping a 2-D (matrix) tensor.
The returned initializer loads a 2-D (matrix) `Tensor` with name
`old_tensor_name` from the checkpoint at `ckpt_path`. It will reorder the
rows/columns according to the specified vocab files and append additional
out-of-vocabulary rows/columns according to the number of OOV buckets.
The format of the file at the `{old,new}_{row,col}_vocab_file` path should be
a text file, with each line containing a single entity within the vocabulary.
Let the function `line_of(f, "x")` return the 0-indexed line number of the
entity "x" in file f, and the function `entity_at(f, i)` return the entity at
line i of file f. Then, row i of the new output matrix will be taken from row
`line_of(old_row_vocab_file, entity_at(new_row_vocab_file, i))` of the old
matrix. If any entity in `new_row_vocab_file` is not found in
`old_row_vocab_file`, that row is considered a "missing" row, and its values
will be initialized using the `initializer` arg. The same logic also applies
for the columns.
For example, assuming that:
* `old_row_vocab_file` contains "mercury\nvenus\nmars"
* `new_row_vocab_file` contains "venus\njupiter\nmercury"
* `old_col_vocab_file` contains "good\nbetter\nbest"
* `new_col_vocab_file` contains "good\nbest\nfantastic"
* `initializer` returns the natural numbers `[1, 2, 3, 4, ...]`
* `w(i, j)` represents the value from row i, column j of the old matrix
Then the new output matrix will look like:
`[[w(1, 0), w(1, 2), 1],
[2, 3, 4],
[w(0, 0), w(0, 2), 5]]`
If we further specify that:
* `num_row_oov_buckets` == 2
* `num_col_oov_buckets` == 1
Then the new output matrix will look like:
`[[w(1, 0), w(1, 2), 1, 12],
[2, 3, 4, 13],
[w(0, 0), w(0, 2), 5, 14],
[6, 7, 8, 15],
[9, 10, 11, 16]]`
If `{old,new}_row_vocab_file` are None, we assume that the old and new row
vocab files are the same, and no row remapping is done. If
`{old,new}_col_vocab_file` are None, we assume that the old and new column
vocab files are the same, and no column remapping is done.
The returned initializer only supports div-partitioning along the row axis. It
does not support partitioning along the column axis or mod-partitioning.
NOTE: When this is used to warm-start variables, client code should use
`tf.lookup.index_table_from_tensor()` like
contrib/layers/python/layers/feature_column.py does, as opposed to
`tf.feature_to_id()` - in order to ensure the underlying lookup tables are the
same.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
new_row_vocab_size: `int` specifying the number of entries in
`new_row_vocab_file`. If no row remapping is needed (no row vocab
provided), this should be equal to the number of rows to load from the old
matrix (which can theoretically be smaller than the number of rows in the
old matrix).
new_col_vocab_size: `int` specifying the number of entries in
`new_col_vocab_file`. If no column remapping is needed (no column vocab
provided), this should be equal to the number of columns in the old
matrix.
old_row_vocab_size: The number of entries to consider in the old vocabulary.
With the default value of -1, the entire old row vocabulary file will be
used. Otherwise, only the first `old_row_vocab_size` entries will be
      considered for remapping. Must be smaller than the length of
`old_row_vocab_file`. NOTE: we do not provide an equivalent
`old_col_vocab_size` for classes.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new row vocabulary file. Can be None, which represents no remapping
on the row axis.
old_col_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old column vocabulary file. Can be None, which represents no
remapping on the column axis.
new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new column vocabulary file. Can be None, which represents no
remapping on the column axis.
num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
to append. Must be >= 0.
num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
columns to append. Must be >= 0.
initializer: Initializer function to initialize missing values. Accepts a
1-D tensor as the arg to specify the shape of the returned tensor. If
`None`, defaults to using `zeros_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function that should be used to initialize a
(potentially partitioned) `Variable` whose complete shape is
`[new_row_vocab_size + num_row_oov_buckets, new_col_vocab_size +
num_col_oov_buckets]`.
Raises:
TypeError: If `initializer` is specified but not callable.
"""
if initializer is None:
# TODO(b/25671353): Consider using sqrt(6/(fan_in + fan_out)) instead, from
# Glorot and Bengio, 2010.
initializer = init_ops.zeros_initializer()
if not callable(initializer):
raise TypeError(
"initializer must be callable, instead of being {} of type {}.".format(
initializer, type(initializer)))
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
"""Variable initializer.
Args:
shape: Shape of `Tensor` to return. Should include OOV on both axes.
dtype: Must be float32.
partition_info: variable_scope._PartitionInfo.
Returns:
`Tensor` of shape `shape`.
Raises:
TypeError: If `dtype` is anything other than float32.
ValueError: For shape mismatch upon invocation.
"""
# Sanity checks.
if dtype != dtypes.float32:
raise TypeError(
"Currently, only float32 is supported. Received dtype: {}".format(
dtype))
if len(shape) != 2:
raise ValueError("Expected 2-dim shape, but received: {}".format(shape))
if shape[0] <= 0:
raise ValueError(
"Expected 1st dim of shape to be > 0, but received shape: {}".format(
shape))
if shape[1] != (new_col_vocab_size + num_col_oov_buckets):
raise ValueError(
"Expected 2nd dim of shape to be new_col_vocab_size ({}) + "
"num_col_oov_buckets ({}) = {}, but received shape: {}".format(
new_col_vocab_size, num_col_oov_buckets,
new_col_vocab_size + num_col_oov_buckets, shape))
offset = 0
if partition_info is not None:
offset = partition_info.single_offset(shape)
if offset + shape[0] > new_row_vocab_size + num_row_oov_buckets:
raise ValueError(
"Trying to initialize {} additional rows after {} rows have already "
"been initialized, which would exceed expected total row count of "
"new_row_vocab_size ({}) + num_row_oov_buckets ({}) = {}.".format(
shape[0], offset, new_row_vocab_size, num_row_oov_buckets,
new_row_vocab_size + num_row_oov_buckets))
row_oov_buckets_to_use = min(shape[0],
max(0, offset + shape[0] - new_row_vocab_size))
num_rows_to_load = shape[0] - row_oov_buckets_to_use
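      # e.g. with new_row_vocab_size=8 and num_row_oov_buckets=2, a partition
      # of shape[0]=4 rows at offset=6 loads rows 6-7 from the vocab
      # (num_rows_to_load=2) and freshly initializes rows 8-9 as OOV buckets
      # (row_oov_buckets_to_use=2).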
# We may be operating on an OOV-only partition, in which case we newly
# initialize all rows of this partition.
if offset > new_row_vocab_size:
if shape[0] != row_oov_buckets_to_use:
raise ValueError(
"Partitioned variable offset is greater than new vocab size and "
"not operating on OOV-only partition.")
return initializer(shape)
return _load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
new_row_vocab_offset=offset,
num_rows_to_load=num_rows_to_load,
new_col_vocab_size=new_col_vocab_size,
initializer=initializer,
old_row_vocab_size=old_row_vocab_size,
old_row_vocab_file=old_row_vocab_file,
new_row_vocab_file=new_row_vocab_file,
old_col_vocab_file=old_col_vocab_file,
new_col_vocab_file=new_col_vocab_file,
num_row_oov_buckets=row_oov_buckets_to_use,
num_col_oov_buckets=num_col_oov_buckets,
max_rows_in_memory=max_rows_in_memory)
return _initializer
def _load_embedding_initializer(ckpt_path,
embedding_tensor_name,
new_vocab_size,
embedding_dim,
old_vocab_file,
new_vocab_file,
old_vocab_size=-1,
num_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
"""Returns a variable initializer for loading pre-trained embeddings.
Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
embedding weights and remapping according to the provided vocab files. See
docs for `load_and_remap_matrix_initializer()` for more details.
NOTE: Only for use with div-partitioned variables / vocabularies.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
embedding_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
new_vocab_size: Number of entries in the new vocab.
embedding_dim: `int` specifying the dimension of the embedding vectors from
the checkpoint. Must match the number of columns in the old embedding
matrix.
old_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old vocabulary file.
new_vocab_file: A scalar `Tensor` of type `string` containing the
path to the new vocabulary file.
old_vocab_size: The number of entries to consider in the old vocabulary.
With the default value of -1, the entire old row vocabulary file will be
used. Otherwise, only the first `old_vocab_size` entries will be
      considered for remapping. Must be smaller than the length of
      `old_vocab_file`.
num_oov_buckets: `int` specifying the number of out-of-vocabulary
buckets to use. Must be >= 0.
initializer: Initializer function that accepts a 1-D tensor as the arg to
specify the shape of the returned tensor. If `None`, defaults to using
`truncated_normal_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function.
"""
if initializer is None:
# TODO(b/25671353): This should be kept in sync with the stddev used by
# feature_column.py's _EmbeddingColumn.
initializer = init_ops.truncated_normal_initializer(
stddev=1.0 / math.sqrt(embedding_dim))
return _load_and_remap_matrix_initializer(
ckpt_path=ckpt_path,
old_tensor_name=embedding_tensor_name,
new_row_vocab_size=new_vocab_size,
new_col_vocab_size=embedding_dim,
old_row_vocab_size=old_vocab_size,
old_row_vocab_file=old_vocab_file,
new_row_vocab_file=new_vocab_file,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=num_oov_buckets,
num_col_oov_buckets=0,
initializer=initializer,
max_rows_in_memory=max_rows_in_memory)
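# A minimal usage sketch (not part of the original module), assuming TF 1.x
# graph mode: the returned initializer is handed to variable_scope.get_variable
# with a shape that already includes the OOV buckets. The checkpoint path,
# tensor name and vocab files below are hypothetical placeholders.
#
#   from tensorflow.python.ops import variable_scope
#
#   init = _load_embedding_initializer(
#       ckpt_path='/tmp/model.ckpt-1234',        # hypothetical checkpoint
#       embedding_tensor_name='emb/weights',     # hypothetical tensor name
#       new_vocab_size=100,
#       embedding_dim=16,
#       old_vocab_file='/tmp/old_vocab.txt',
#       new_vocab_file='/tmp/new_vocab.txt',
#       num_oov_buckets=1)
#   emb = variable_scope.get_variable(
#       'emb_remapped', shape=[100 + 1, 16], initializer=init)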
|
|
from luigi import Task, Parameter, IntParameter, ListParameter
from tasks.base_tasks import TempTableTask
from tasks.meta import current_session
from tasks.targets import ConstraintExistsTarget
from tasks.targets import PostgresTarget, PostgresFunctionTarget
from lib.logger import get_logger
LOGGER = get_logger(__name__)
class _CountryTask:
@property
def _country(self):
raise NotImplementedError('_CountryTask must define _country()')
class DenormalizedHierarchy(Task, _CountryTask):
year = IntParameter()
def requires(self):
'''
Subclasses must override this and return a dictionary with the following elements:
- 'function': The definition of the `GetParentsFunction`
- 'rel': The `HierarchyChildParentsUnion` defining the hierarchy relations
'''
raise NotImplementedError('DenormalizedHierarchy must define requires()')
def _create_indexes(self, session):
LOGGER.info('Creating index on {table}'.format(table=self.output().table))
query = '''
CREATE INDEX idx_{tablename} ON {table} (geoid, level);
'''.format(
tablename=self.output().tablename,
table=self.output().table,
)
session.execute(query)
LOGGER.info('Clustering table {table}'.format(table=self.output().table))
query = '''
CLUSTER {table} USING idx_{tablename};
'''.format(
tablename=self.output().tablename,
table=self.output().table,
)
session.execute(query)
def run(self):
session = current_session()
session.execute('CREATE SCHEMA IF NOT EXISTS {schema}'.format(
schema=self.output().schema)
)
LOGGER.info('Creating table {table}'.format(table=self.output().table))
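        # Denormalize the hierarchy: one row per (geoid, level) child, with the
        # parent names and ids returned by the GetParentsFunction spread into
        # columns via parent_names.*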
query = '''
CREATE TABLE {output} AS
SELECT child_id as geoid, child_level as level, parent_names.*
FROM {input} i,
{get_parents_function}(child_id, child_level) parent_names
'''.format(
output=self.output().table,
input=self.input()['rel'].table,
get_parents_function=self.input()['function'].function,
)
session.execute(query)
self._create_indexes(session)
session.commit()
def output(self):
return PostgresTarget('tiler', '{country}_dz_hierarchy_geonames_{year}'.format(country=self._country,
year=self.year))
class GetParentsFunction(Task, _CountryTask):
year = IntParameter()
def version(self):
return 2
def requires(self):
'''
Subclasses must override this and return a dictionary with the following elements:
- 'rel': The `HierarchyChildParentsUnion` defining the hierarchy relations
- 'info': The `HierarchyInfoUnion` defining the geonames
'''
raise NotImplementedError('GetParentsFunction must define requires()')
def run(self):
input_ = self.input()
rel_table = input_['rel'].table
schema = self.output().schema
function = self.output().function_name
session = current_session()
levels_query = '''
SELECT DISTINCT parent_level
FROM {input_relations}'''.format(input_relations=rel_table)
levels = [l[0] for l in session.execute(levels_query).fetchall()]
levels += ["{}_id".format(l) for l in levels]
level_types = ', '.join(['"{}" text'.format(l) for l in levels])
cols_type = """
"{schema}".{function}_levels
""".format(schema=schema, function=function)
session.execute('''
DROP TYPE IF EXISTS {cols_type} CASCADE
'''.format(cols_type=cols_type))
session.execute('''
CREATE TYPE {cols_type} as ({level_types})
'''.format(cols_type=cols_type, level_types=level_types))
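        # The {function}_json helper defined below walks the child->parent
        # relations recursively and folds them into a JSONB object of the form
        # {"<parent_level>": "<geoname>", "<parent_level>_id": "<parent_id>", ...}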
query = '''
CREATE OR REPLACE FUNCTION "{schema}".{function}_json (geoid_p TEXT, level_p TEXT)
RETURNS JSONB
AS $$
DECLARE
children JSONB DEFAULT NULL;
BEGIN
WITH RECURSIVE children(child_id, child_level, parent_id, parent_level, geoname) AS (
SELECT p.child_id, p.child_level, p.parent_id, p.parent_level, n.geoname
FROM {input_relations} p
LEFT JOIN {input_geonames} n ON p.parent_id = n.geoid AND p.parent_level = n.level
WHERE p.child_id = geoid_p AND p.child_level = level_p
UNION ALL
SELECT p.child_id, p.child_level, p.parent_id, p.parent_level, n.geoname
FROM {input_relations} p
INNER JOIN children c ON c.parent_id = p.child_id AND c.parent_level = p.child_level
LEFT JOIN {input_geonames} n ON p.parent_id = n.geoid AND p.parent_level = n.level
)
SELECT ('{{' || string_agg(
'"' || parent_level || '": "' || geoname || '", ' ||
'"' || parent_level || '_id": "' || parent_id || '"',
',') || '}}')::JSONB
INTO children
FROM children;
RETURN children;
END
$$ LANGUAGE plpgsql PARALLEL SAFE;
'''.format(
schema=schema,
function=function,
input_relations=rel_table,
input_geonames=input_['info'].table,
)
LOGGER.debug(query)
session.execute(query)
cols_query = '''
CREATE OR REPLACE FUNCTION "{schema}".{function}(geoid TEXT, level TEXT)
RETURNS {cols_type}
AS $$
DECLARE
_output {cols_type};
BEGIN
SELECT * FROM jsonb_populate_record(null::{cols_type},
"{schema}".{function}_json(geoid, level))
INTO _output;
RETURN _output;
END
$$ LANGUAGE plpgsql PARALLEL SAFE;
'''.format(
schema=schema,
function=function,
level_types=level_types,
cols_type=cols_type)
LOGGER.debug(cols_query)
session.execute(cols_query)
session.commit()
def output(self):
name = 'get_{country}_parents_{year}'.format(country=self._country,
year=self.year)
return PostgresFunctionTarget(self.input()['rel'].schema, name)
class Hierarchy(Task, _CountryTask):
year = IntParameter()
def requires(self):
'''
Subclasses must override this and return a dictionary with the following elements:
- 'info': The `HierarchyInfoUnion` defining the geonames
- 'rel': The `HierarchyChildParentsUnion` defining the hierarchy relations
'''
raise NotImplementedError('Hierarchy must define requires()')
def run(self):
session = current_session()
input_ = self.input()
session.execute('ALTER TABLE {rel_table} DROP CONSTRAINT IF EXISTS '
'{country}hierarchy_fk_parent'.format(
rel_table=input_['rel'].qualified_tablename,
country=self._country, ))
session.execute('ALTER TABLE {rel_table} ADD CONSTRAINT '
'{country}hierarchy_fk_parent '
'FOREIGN KEY (child_id, child_level) '
'REFERENCES {info_table} (geoid, level) '.format(
rel_table=input_['rel'].qualified_tablename,
info_table=input_['info'].qualified_tablename,
country=self._country, ))
session.execute('ALTER TABLE {rel_table} DROP CONSTRAINT IF EXISTS '
'{country}hierarchy_fk_child'.format(
rel_table=input_['rel'].qualified_tablename,
country=self._country, ))
session.execute('ALTER TABLE {rel_table} ADD CONSTRAINT '
'{country}hierarchy_fk_child '
'FOREIGN KEY (parent_id, parent_level) '
'REFERENCES {info_table} (geoid, level) '.format(
rel_table=input_['rel'].qualified_tablename,
info_table=input_['info'].qualified_tablename,
country=self._country, ))
session.commit()
def output(self):
table = self.input()['info']
return ConstraintExistsTarget(table.schema, table.tablename,
'{country}hierarchy_fk_child'.format(country=self._country))
class _YearCountryLevelsTask(_CountryTask):
year = IntParameter()
levels = ListParameter(significant=False)
class HierarchyInfoUnion(TempTableTask, _YearCountryLevelsTask):
def requires(self):
'''
Subclasses must override this and return a list of `LevelInfo` (one for each level)
'''
raise NotImplementedError('HierarchyInfoUnion must define requires()')
def _union_query(self, tables, output):
unions = ['''SELECT geoid, level, string_agg(geoname, ', ') geoname
FROM {table}
GROUP BY geoid, level'''.format(table=table.qualified_tablename)
for table in tables]
return 'CREATE TABLE {output} AS {unions}'.format(
output=output,
unions=' UNION ALL '.join(unions))
def run(self):
session = current_session()
tablename = self.output().qualified_tablename
session.execute(self._union_query(self.input(), tablename))
alter_sql = 'ALTER TABLE {tablename} ADD PRIMARY KEY (geoid, level)'
session.execute(alter_sql.format(tablename=tablename))
session.commit()
class HierarchyChildParentsUnion(TempTableTask, _YearCountryLevelsTask):
def requires(self):
'''
Subclasses must override this and return a dictionary with the following elements:
- 'hierarchy': a list of `HierarchyChildParent` (one for each child-parent relation)
'''
raise NotImplementedError('HierarchyChildParentsUnion must define requires()')
def _child_parents(self):
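        # Pair each level with the sub-list of levels that follow it (nearest
        # parent first), e.g. levels ['block', 'tract', 'county'] ->
        # [['block', ['tract', 'county']], ['tract', ['county']]]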
child_parents = []
previous = None
for idx, level in enumerate(self.levels):
if previous:
parents = self.levels[idx:]
if parents:
child_parents.append([previous, parents])
previous = level
return child_parents
def _union_query(self, tables, output):
unions = ['SELECT * FROM {table}'.format(table=table.qualified_tablename)
for table in tables]
return 'CREATE TABLE {output} AS {unions}'.format(
            output=output,
unions=' UNION ALL '.join(unions))
def run(self):
session = current_session()
table = self.output().tablename
qualified_table = self.output().qualified_tablename
union_sql = self._union_query(self.input()['hierarchy'], qualified_table)
session.execute(union_sql)
delete_sql = 'DELETE FROM {qualified_table} WHERE parent_id IS NULL'
session.execute(delete_sql.format(qualified_table=qualified_table))
alter_sql = 'ALTER TABLE {qualified_table} ADD PRIMARY KEY ' \
'(child_id, child_level, parent_id, parent_level)'
session.execute(alter_sql.format(qualified_table=qualified_table))
parent_index_sql = '''CREATE INDEX {table}_parent_idx
ON {qualified_table} (parent_id, parent_level)
'''.format(table=table, qualified_table=qualified_table)
session.execute(parent_index_sql)
session.commit()
class HierarchyChildParent(TempTableTask):
year = IntParameter()
current_geography = Parameter()
parent_geographies = ListParameter()
UNWEIGHTED_CHILD_SQL = """
SELECT DISTINCT child_id, child_level
FROM {table}
WHERE weight = 1
AND parent_id IS NOT NULL
GROUP BY child_id, child_level
HAVING count(1) > 1
"""
def requires(self):
'''
Subclasses must override this and return a dictionary with the following elements:
- 'level': a `LevelHierarchy`
- 'current_geom': the `TableTask` with the current geometries
- 'parent_geoms': the `TableTask` with the parent geometries
'''
raise NotImplementedError('HierarchyChildParent must define requires()')
@property
def _current_geoid_field(self):
return 'geoid'
@property
def _parent_geoid_fields(self):
return ['geoid'] * len(self.parent_geographies)
def run(self):
parent_geoid_fields = self._parent_geoid_fields
session = current_session()
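        # Children that matched more than one parent with weight 1 (see
        # UNWEIGHTED_CHILD_SQL) are re-weighted by their area of intersection
        # with each candidate parent; the heaviest parent is kept in the
        # CREATE TABLE statement below.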
sql = '''
UPDATE {table}
SET weight = ST_Area(
ST_Intersection(cgt.the_geom, pgt.the_geom), False)
FROM
observatory.{current_geom_table} cgt,
observatory.{parent_geom_table} pgt
WHERE cgt.{current_geoid_field} = {table}.child_id
AND pgt.{parent_geoid_field} = {table}.parent_id
AND (child_id, child_level) IN (
{unweighted_child_sql}
)
'''
table = self.input()['level'].qualified_tablename
for i, parent_geom in enumerate(self.input()['parent_geoms']):
session.execute(
sql.format(
table=table,
current_geom_table=self.input()['current_geom'].get(
session).tablename,
parent_geom_table=parent_geom.get(
session).tablename,
current_geoid_field=self._current_geoid_field,
parent_geoid_field=parent_geoid_fields[i],
unweighted_child_sql=self.UNWEIGHTED_CHILD_SQL.format(
table=table)
)
)
create_sql = '''
CREATE TABLE {table} AS
SELECT DISTINCT ON (child_id, child_level)
child_id, child_level, parent_id, parent_level, weight
FROM {weighted_table}
ORDER BY child_id, child_level, weight desc
'''
session.execute(create_sql.format(
table=self.output().qualified_tablename,
weighted_table=self.input()['level'].qualified_tablename
))
session.commit()
def complete(self):
try:
sql = self.UNWEIGHTED_CHILD_SQL.format(
table=self.input()['level'].qualified_tablename)
return len(current_session().execute(sql).fetchall()) == 0
except Exception as e:
# Table doesn't exist yet
LOGGER.debug("ERROR running complete")
return False
class LevelHierarchy(TempTableTask):
year = IntParameter()
current_geography = Parameter()
parent_geographies = ListParameter()
parent_geoid_fields = ListParameter(significant=False, default=None)
def requires(self):
'''
Subclasses must override this and return a dictionary with the following elements:
- 'current_info': a `LevelInfo` with the current level info
- 'current_geom': the `TableTask` with the current geometries
- 'parents_infos': a list of `LevelInfo` with level infos for the parents
        - 'parents_geoms': the list of `TableTask` with the parent geometries
'''
raise NotImplementedError('LevelHierarchy must define requires()')
def _parent_geoid_field(self, i):
return self.parent_geoid_fields[i] if self.parent_geoid_fields else self._geoid_field
@property
def _geoid_field(self):
return 'geoid'
def run(self):
session = current_session()
input_ = self.input()
current_info = input_['current_info']
schema = current_info.schema
current_info_tablename = current_info.qualified_tablename
current_geom_table = input_['current_geom'].get(session)
parent_info_tablename = input_['parents_infos'][0].qualified_tablename
parent_geom_table = input_['parents_geoms'][0].get(session)
parent_geoid_field = self._parent_geoid_field(0)
session.execute('CREATE SCHEMA IF NOT EXISTS "{}"'.format(schema))
create_table_sql = '''
CREATE TABLE {output_table} AS
{child_parent_sql}
'''
# First creation will link child with direct parents and leave nulls
# for those that don't have.
create_table_sql = create_table_sql.format(
output_table=self.output().qualified_tablename,
child_parent_sql=self._CHILD_PARENT_SQL.format(
current_info_table=current_info_tablename,
current_geom_table=current_geom_table.tablename,
parent_info_table=parent_info_tablename,
parent_geom_table=parent_geom_table.tablename,
parent_geoid_field=parent_geoid_field,
geoid_field=self._geoid_field, inner_or_left='LEFT', ))
session.execute(create_table_sql)
inputs = list(zip(input_['parents_infos'], input_['parents_geoms']))
for i, parent_info_geom in enumerate(inputs[1:]):
# For those without parents, insert the next ones
parent_info_tablename = parent_info_geom[0].qualified_tablename
parent_geom_table = parent_info_geom[1].get(session)
fill_parents_sql = '''
INSERT INTO {output_table}
(child_id, child_level, parent_id, parent_level, weight)
{child_parent_sql}
INNER JOIN {output_table} ot ON ot.child_id = cit.geoid
AND ot.child_level = cit.level
WHERE ot.parent_id IS NULL
'''
insert_into_sql = fill_parents_sql.format(
output_table=self.output().qualified_tablename,
geoid_field=self._geoid_field,
child_parent_sql=self._CHILD_PARENT_SQL.format(
current_info_table=current_info_tablename,
current_geom_table=current_geom_table.tablename,
parent_info_table=parent_info_tablename,
parent_geom_table=parent_geom_table.tablename,
parent_geoid_field=self._parent_geoid_field(i + 1),
geoid_field=self._geoid_field,
inner_or_left='INNER'))
session.execute(insert_into_sql)
# ... and then, delete the rows with null parents for those
# child that have any parent
delete_non_orphans = '''
DELETE FROM {output_table} ot
WHERE ot.parent_id IS NULL
AND (child_id, child_level) IN (
SELECT child_id, child_level
FROM {output_table}
WHERE parent_id IS NOT NULL
)
'''
session.execute(delete_non_orphans.format(
output_table=self.output().qualified_tablename))
session.commit()
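    # A child is linked to the parent polygon containing a representative point
    # of the child's geometry (ST_PointOnSurface), initially with weight 1.0.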
_CHILD_PARENT_SQL = '''
SELECT
cit.geoid AS child_id,
cit.level AS child_level,
pgt.{parent_geoid_field} AS parent_id,
pit.level AS parent_level,
1.0::FLOAT AS weight
FROM {current_info_table} cit
INNER JOIN observatory.{current_geom_table} cgt ON cit.geoid = cgt.{geoid_field}
{inner_or_left} JOIN observatory.{parent_geom_table} pgt
ON ST_Within(ST_PointOnSurface(cgt.the_geom), pgt.the_geom)
{inner_or_left} JOIN {parent_info_table} pit ON pgt.{parent_geoid_field} = pit.geoid
'''
class _YearGeographyTask:
year = IntParameter()
geography = Parameter()
class LevelInfo(TempTableTask, _YearGeographyTask):
def requires(self):
'''
Subclasses must override this and return a `TableTask` containing
a geoid field and a geoname field.
'''
raise NotImplementedError('LevelInfo must define requires()')
@property
def _geoid_field(self):
raise NotImplementedError('LevelInfo must define geoid_field()')
@property
def _geoname_field(self):
raise NotImplementedError('LevelInfo must define geoname_field()')
def run(self):
session = current_session()
input_table = self.input().get(session)
names_table = input_table.tablename
schema = self.output().schema
output_table = self.output().qualified_tablename
output_tablename = self.output().tablename
session.execute('CREATE SCHEMA IF NOT EXISTS "{}"'.format(schema))
query = '''
CREATE TABLE {output_table} AS
SELECT n.{geoid_field} geoid, '{geography}' as level, n.{geoname_field} geoname
FROM observatory.{names_table} n
'''.format(output_table=output_table,
geoid_field=self._geoid_field,
geoname_field=self._geoname_field,
geography=self.geography,
names_table=names_table)
session.execute(query)
query = '''
CREATE INDEX {output_tablename}_idx ON {output_table} (geoid)
'''.format(output_table=output_table,
output_tablename=output_tablename)
session.execute(query)
session.commit()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import contextlib
import os
from oslo.config import cfg
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
__imagebackend_opts = [
cfg.StrOpt('libvirt_images_type',
default='default',
help='VM Images format. Acceptable values are: raw, qcow2, lvm,'
' default. If default is specified,'
' then use_cow_images flag is used instead of this one.'),
cfg.StrOpt('libvirt_images_volume_group',
default=None,
help='LVM Volume Group that is used for VM images, when you'
' specify libvirt_images_type=lvm.'),
cfg.BoolOpt('libvirt_sparse_logical_volumes',
default=False,
help='Create sparse logical volumes (with virtualsize)'
' if this flag is set to True.'),
cfg.IntOpt('libvirt_lvm_snapshot_size',
default=1000,
help='The amount of storage (in megabytes) to allocate for LVM'
' snapshot copy-on-write blocks.'),
]
CONF = cfg.CONF
CONF.register_opts(__imagebackend_opts)
CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
CONF.import_opt('preallocate_images', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
class Image(object):
__metaclass__ = abc.ABCMeta
def __init__(self, source_type, driver_format, is_block_dev=False):
"""Image initialization.
:source_type: block or file
:driver_format: raw or qcow2
:is_block_dev:
"""
self.source_type = source_type
self.driver_format = driver_format
self.is_block_dev = is_block_dev
self.preallocate = False
# NOTE(mikal): We need a lock directory which is shared along with
# instance files, to cover the scenario where multiple compute nodes
# are trying to create a base file at the same time
self.lock_path = os.path.join(CONF.instances_path, 'locks')
@abc.abstractmethod
def create_image(self, prepare_template, base, size, *args, **kwargs):
"""Create image from template.
Contains specific behavior for each image type.
:prepare_template: function, that creates template.
Should accept `target` argument.
:base: Template name
:size: Size of created image in bytes
"""
pass
def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
extra_specs):
"""Get `LibvirtConfigGuestDisk` filled for this image.
:disk_dev: Disk bus device name
:disk_bus: Disk bus type
:device_type: Device type for this image.
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
"""
info = vconfig.LibvirtConfigGuestDisk()
info.source_type = self.source_type
info.source_device = device_type
info.target_bus = disk_bus
info.target_dev = disk_dev
info.driver_cache = cache_mode
info.driver_format = self.driver_format
driver_name = libvirt_utils.pick_disk_driver_name(self.is_block_dev)
info.driver_name = driver_name
info.source_path = self.path
tune_items = ['disk_read_bytes_sec', 'disk_read_iops_sec',
'disk_write_bytes_sec', 'disk_write_iops_sec',
'disk_total_bytes_sec', 'disk_total_iops_sec']
# Note(yaguang): Currently, the only tuning available is Block I/O
# throttling for qemu.
if self.source_type in ['file', 'block']:
for key, value in extra_specs.iteritems():
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in tune_items:
setattr(info, scope[1], value)
return info
def cache(self, fetch_func, filename, size=None, *args, **kwargs):
"""Creates image from template.
Ensures that template and image not already exists.
Ensures that base directory exists.
Synchronizes on template fetching.
:fetch_func: Function that creates the base image
Should accept `target` argument.
:filename: Name of the file in the image directory
:size: Size of created image in bytes (optional)
"""
@utils.synchronized(filename, external=True, lock_path=self.lock_path)
def call_if_not_exists(target, *args, **kwargs):
if not os.path.exists(target):
fetch_func(target=target, *args, **kwargs)
elif CONF.libvirt_images_type == "lvm" and \
'ephemeral_size' in kwargs:
fetch_func(target=target, *args, **kwargs)
base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
if not os.path.exists(base_dir):
fileutils.ensure_tree(base_dir)
base = os.path.join(base_dir, filename)
if not os.path.exists(self.path) or not os.path.exists(base):
self.create_image(call_if_not_exists, base, size,
*args, **kwargs)
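        # Optionally reserve the full disk space for the image up front;
        # fallocate -n allocates blocks without changing the apparent file size.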
if size and self.preallocate and self._can_fallocate():
utils.execute('fallocate', '-n', '-l', size, self.path)
def _can_fallocate(self):
"""Check once per class, whether fallocate(1) is available,
and that the instances directory supports fallocate(2).
"""
can_fallocate = getattr(self.__class__, 'can_fallocate', None)
if can_fallocate is None:
_out, err = utils.trycmd('fallocate', '-n', '-l', '1',
self.path + '.fallocate_test')
utils.delete_if_exists(self.path + '.fallocate_test')
can_fallocate = not err
self.__class__.can_fallocate = can_fallocate
if not can_fallocate:
                LOG.error('Unable to honor preallocate_images=%s at path: %s' %
(CONF.preallocate_images, self.path))
return can_fallocate
def snapshot_create(self):
raise NotImplementedError
def snapshot_extract(self, target, out_format):
raise NotImplementedError
def snapshot_delete(self):
raise NotImplementedError
class Raw(Image):
def __init__(self, instance=None, disk_name=None, path=None,
snapshot_name=None):
super(Raw, self).__init__("file", "raw", is_block_dev=False)
self.path = (path or
os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
self.snapshot_name = snapshot_name
self.preallocate = CONF.preallocate_images != 'none'
self.correct_format()
def correct_format(self):
if os.path.exists(self.path):
data = images.qemu_img_info(self.path)
self.driver_format = data.file_format or 'raw'
def create_image(self, prepare_template, base, size, *args, **kwargs):
@utils.synchronized(base, external=True, lock_path=self.lock_path)
def copy_raw_image(base, target, size):
libvirt_utils.copy_image(base, target)
if size:
disk.extend(target, size)
generating = 'image_id' not in kwargs
if generating:
            # Generating image in place
prepare_template(target=self.path, *args, **kwargs)
else:
prepare_template(target=base, *args, **kwargs)
if not os.path.exists(self.path):
with utils.remove_path_on_error(self.path):
copy_raw_image(base, self.path, size)
self.correct_format()
def snapshot_create(self):
pass
def snapshot_extract(self, target, out_format):
images.convert_image(self.path, target, out_format)
def snapshot_delete(self):
pass
class Qcow2(Image):
def __init__(self, instance=None, disk_name=None, path=None,
snapshot_name=None):
super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False)
self.path = (path or
os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
self.snapshot_name = snapshot_name
self.preallocate = CONF.preallocate_images != 'none'
def create_image(self, prepare_template, base, size, *args, **kwargs):
@utils.synchronized(base, external=True, lock_path=self.lock_path)
def copy_qcow2_image(base, target, size):
# TODO(pbrady): Consider copying the cow image here
# with preallocation=metadata set for performance reasons.
# This would be keyed on a 'preallocate_images' setting.
libvirt_utils.create_cow_image(base, target)
if size:
disk.extend(target, size)
if not os.path.exists(base):
prepare_template(target=base, *args, **kwargs)
# NOTE(cfb): Having a flavor that sets the root size to 0 and having
# nova effectively ignore that size and use the size of the
# image is considered a feature at this time, not a bug.
if size and size < disk.get_disk_size(base):
LOG.error('%s virtual size larger than flavor root disk size %s' %
(base, size))
raise exception.ImageTooLarge()
if not os.path.exists(self.path):
with utils.remove_path_on_error(self.path):
copy_qcow2_image(base, self.path, size)
def snapshot_create(self):
libvirt_utils.create_snapshot(self.path, self.snapshot_name)
def snapshot_extract(self, target, out_format):
libvirt_utils.extract_snapshot(self.path, 'qcow2',
self.snapshot_name, target,
out_format)
def snapshot_delete(self):
libvirt_utils.delete_snapshot(self.path, self.snapshot_name)
class Lvm(Image):
@staticmethod
def escape(filename):
return filename.replace('_', '__')
def __init__(self, instance=None, disk_name=None, path=None,
snapshot_name=None):
super(Lvm, self).__init__("block", "raw", is_block_dev=True)
if path:
info = libvirt_utils.logical_volume_info(path)
self.vg = info['VG']
self.lv = info['LV']
self.path = path
else:
if not CONF.libvirt_images_volume_group:
                raise RuntimeError(_('You should specify the'
' libvirt_images_volume_group'
' flag to use LVM images.'))
self.vg = CONF.libvirt_images_volume_group
self.lv = '%s_%s' % (self.escape(instance['name']),
self.escape(disk_name))
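            # e.g. /dev/<volume_group>/<instance-name>_<disk-name>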
self.path = os.path.join('/dev', self.vg, self.lv)
# TODO(pbrady): possibly deprecate libvirt_sparse_logical_volumes
# for the more general preallocate_images
self.sparse = CONF.libvirt_sparse_logical_volumes
self.preallocate = not self.sparse
if snapshot_name:
self.snapshot_name = snapshot_name
self.snapshot_path = os.path.join('/dev', self.vg,
self.snapshot_name)
def _can_fallocate(self):
return False
def create_image(self, prepare_template, base, size, *args, **kwargs):
@utils.synchronized(base, external=True, lock_path=self.lock_path)
def create_lvm_image(base, size):
base_size = disk.get_disk_size(base)
resize = size > base_size
size = size if resize else base_size
libvirt_utils.create_lvm_image(self.vg, self.lv,
size, sparse=self.sparse)
images.convert_image(base, self.path, 'raw', run_as_root=True)
if resize:
disk.resize2fs(self.path, run_as_root=True)
generated = 'ephemeral_size' in kwargs
        # Generate the image with the specified size directly on the volume
if generated and size:
libvirt_utils.create_lvm_image(self.vg, self.lv,
size, sparse=self.sparse)
with self.remove_volume_on_error(self.path):
prepare_template(target=self.path, *args, **kwargs)
else:
prepare_template(target=base, *args, **kwargs)
with self.remove_volume_on_error(self.path):
create_lvm_image(base, size)
@contextlib.contextmanager
def remove_volume_on_error(self, path):
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
libvirt_utils.remove_logical_volumes(path)
def snapshot_create(self):
size = CONF.libvirt_lvm_snapshot_size
cmd = ('lvcreate', '-L', size, '-s', '--name', self.snapshot_name,
self.path)
libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)
def snapshot_extract(self, target, out_format):
images.convert_image(self.snapshot_path, target, out_format,
run_as_root=True)
def snapshot_delete(self):
# NOTE (rmk): Snapshot volumes are automatically zeroed by LVM
cmd = ('lvremove', '-f', self.snapshot_path)
libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)
class Backend(object):
def __init__(self, use_cow):
self.BACKEND = {
'raw': Raw,
'qcow2': Qcow2,
'lvm': Lvm,
'default': Qcow2 if use_cow else Raw
}
def backend(self, image_type=None):
if not image_type:
image_type = CONF.libvirt_images_type
image = self.BACKEND.get(image_type)
if not image:
raise RuntimeError(_('Unknown image_type=%s') % image_type)
return image
def image(self, instance, disk_name, image_type=None):
"""Constructs image for selected backend
        :instance: Instance the disk belongs to
        :disk_name: Image (disk) name
:image_type: Image type.
Optional, is CONF.libvirt_images_type by default.
"""
backend = self.backend(image_type)
return backend(instance=instance, disk_name=disk_name)
def snapshot(self, disk_path, snapshot_name, image_type=None):
"""Returns snapshot for given image
        :disk_path: path to image
:snapshot_name: snapshot name
:image_type: type of image
"""
backend = self.backend(image_type)
return backend(path=disk_path, snapshot_name=snapshot_name)
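# Illustrative usage sketch (not part of this module): the libvirt driver is
# expected to construct a Backend and build per-disk images from it. The names
# `instance`, `fetch_image`, `root_fname` and `size` below are assumed to be
# provided by the caller.
#
#     image_backend = Backend(CONF.use_cow_images)
#     root_disk = image_backend.image(instance, 'disk')
#     root_disk.cache(fetch_func=fetch_image, filename=root_fname, size=size)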
|
|
"""
.. _tut_erp:
EEG processing and Event Related Potentials (ERPs)
==================================================
For a generic introduction to the computation of ERP and ERF
see :ref:`tut_epoching_and_averaging`.
.. contents:: Here we cover the specifics of EEG, namely:
:local:
:depth: 1
"""
import mne
from mne.datasets import sample
###############################################################################
# Setup for reading the raw data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# these data already have an EEG average reference
raw = mne.io.read_raw_fif(raw_fname, preload=True)
###############################################################################
# Let's restrict the data to the EEG channels
raw.pick_types(meg=False, eeg=True, eog=True)
###############################################################################
# By looking at the measurement info you will see that we now have
# 59 EEG channels and 1 EOG channel
print(raw.info)
###############################################################################
# In practice it's quite common to have some EEG channels that are actually
# EOG channels. To change a channel type you can use the
# :func:`mne.io.Raw.set_channel_types` method. For example
# to treat an EOG channel as EEG you can change its type using
raw.set_channel_types(mapping={'EOG 061': 'eeg'})
print(raw.info)
###############################################################################
# And to change the name of the EOG channel
raw.rename_channels(mapping={'EOG 061': 'EOG'})
###############################################################################
# Let's reset the EOG channel back to EOG type.
raw.set_channel_types(mapping={'EOG': 'eog'})
###############################################################################
# The EEG channels in the sample dataset already have locations.
# These locations are available in the 'loc' entry of each channel description.
# For the first channel we get
print(raw.info['chs'][0]['loc'])
###############################################################################
# And it's actually possible to plot the channel locations using
# :func:`mne.io.Raw.plot_sensors`.
raw.plot_sensors()
raw.plot_sensors('3d') # in 3D
###############################################################################
# Setting EEG Montage (using standard montages)
# ---------------------------------------------
#
# In the case where your data don't have locations you can set them
# using a :class:`mne.channels.Montage`. MNE comes with a set of default
# montages. To read one of them do:
montage = mne.channels.read_montage('standard_1020')
print(montage)
###############################################################################
# To apply a montage on your data use the ``set_montage`` method.
# We don't actually call it here, as our demo dataset already contains
# good EEG channel locations.
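# A minimal sketch of what that call would look like (not executed here):
#
#     raw.set_montage(montage)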
#
# Next we'll explore the definition of the reference.
###############################################################################
# Setting EEG reference
# ---------------------
#
# Let's first remove the reference from our Raw object.
#
# This explicitly prevents MNE from adding a default EEG average reference
# required for source localization.
raw_no_ref, _ = mne.set_eeg_reference(raw, [])
###############################################################################
# We next define Epochs and compute an ERP for the left auditory condition.
reject = dict(eeg=180e-6, eog=150e-6)
event_id, tmin, tmax = {'left/auditory': 1}, -0.2, 0.5
events = mne.read_events(event_fname)
epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,
reject=reject)
evoked_no_ref = mne.Epochs(raw_no_ref, **epochs_params).average()
del raw_no_ref # save memory
title = 'EEG Original reference'
evoked_no_ref.plot(titles=dict(eeg=title), time_unit='s')
evoked_no_ref.plot_topomap(times=[0.1], size=3., title=title, time_unit='s')
###############################################################################
# **Average reference**: This is normally added by default, but can also
# be added explicitly.
raw.del_proj()
raw_car, _ = mne.set_eeg_reference(raw, 'average', projection=True)
evoked_car = mne.Epochs(raw_car, **epochs_params).average()
del raw_car # save memory
title = 'EEG Average reference'
evoked_car.plot(titles=dict(eeg=title), time_unit='s')
evoked_car.plot_topomap(times=[0.1], size=3., title=title, time_unit='s')
###############################################################################
# **Custom reference**: Use the mean of channels EEG 001 and EEG 002 as
# a reference
raw_custom, _ = mne.set_eeg_reference(raw, ['EEG 001', 'EEG 002'])
evoked_custom = mne.Epochs(raw_custom, **epochs_params).average()
del raw_custom # save memory
title = 'EEG Custom reference'
evoked_custom.plot(titles=dict(eeg=title), time_unit='s')
evoked_custom.plot_topomap(times=[0.1], size=3., title=title, time_unit='s')
###############################################################################
# Evoked arithmetic (e.g. differences)
# ------------------------------------
#
# Trial subsets from Epochs can be selected using 'tags' separated by '/'.
# Evoked objects support basic arithmetic.
# First, we create an Epochs object containing 4 conditions.
event_id = {'left/auditory': 1, 'right/auditory': 2,
'left/visual': 3, 'right/visual': 4}
epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,
reject=reject)
epochs = mne.Epochs(raw, **epochs_params)
print(epochs)
###############################################################################
# Next, we create averages of stimulation-left vs stimulation-right trials.
# We can use basic arithmetic to, for example, construct and plot
# difference ERPs.
left, right = epochs["left"].average(), epochs["right"].average()
# create and plot difference ERP
joint_kwargs = dict(ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
mne.combine_evoked([left, -right], weights='equal').plot_joint(**joint_kwargs)
###############################################################################
# This is an equal-weighting difference. If you have imbalanced trial numbers,
# you could also consider equalizing the number of events per
# condition (using
# :meth:`epochs.equalize_event_counts <mne.Epochs.equalize_event_counts>`).
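# For instance (not run here), one could call
# ``epochs.equalize_event_counts(['left', 'right'])``.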
# As an example, first, we create individual ERPs for each condition.
aud_l = epochs["auditory", "left"].average()
aud_r = epochs["auditory", "right"].average()
vis_l = epochs["visual", "left"].average()
vis_r = epochs["visual", "right"].average()
all_evokeds = [aud_l, aud_r, vis_l, vis_r]
print(all_evokeds)
###############################################################################
# This can be simplified with a Python list comprehension:
all_evokeds = [epochs[cond].average() for cond in sorted(event_id.keys())]
print(all_evokeds)
# Then, we construct and plot an unweighted average of left vs. right trials
# this way, too:
mne.combine_evoked(
[aud_l, -aud_r, vis_l, -vis_r], weights='equal').plot_joint(**joint_kwargs)
###############################################################################
# Often, it makes sense to store Evoked objects in a dictionary or a list -
# for either different conditions or different subjects.
# If they are stored in a list, they can be easily averaged, for example,
# for a grand average across subjects (or conditions).
grand_average = mne.grand_average(all_evokeds)
mne.write_evokeds('/tmp/tmp-ave.fif', all_evokeds)
# If Evokeds objects are stored in a dictionary, they can be retrieved by name.
all_evokeds = dict((cond, epochs[cond].average()) for cond in event_id)
print(all_evokeds['left/auditory'])
# Besides explicit access, this can be used, for example, to set titles.
for cond in all_evokeds:
all_evokeds[cond].plot_joint(title=cond, **joint_kwargs)
|
|
from __future__ import unicode_literals
from decimal import Decimal as D
import logging
from django.views.generic import RedirectView, View
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.contrib import messages
from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.http import urlencode
from django.utils import six
from django.utils.translation import ugettext_lazy as _
import oscar
from oscar.apps.payment.exceptions import UnableToTakePayment
from oscar.core.exceptions import ModuleNotFoundError
from oscar.core.loading import get_class, get_model
from oscar.apps.shipping.methods import FixedPrice, NoShippingRequired
from paypal.express.facade import (
get_paypal_url, fetch_transaction_details, confirm_transaction)
from paypal.express.exceptions import (
EmptyBasketException, MissingShippingAddressException,
MissingShippingMethodException, InvalidBasket)
from paypal.exceptions import PayPalError
# Load views dynamically
PaymentDetailsView = get_class('checkout.views', 'PaymentDetailsView')
CheckoutSessionMixin = get_class('checkout.session', 'CheckoutSessionMixin')
ShippingAddress = get_model('order', 'ShippingAddress')
Country = get_model('address', 'Country')
Basket = get_model('basket', 'Basket')
Repository = get_class('shipping.repository', 'Repository')
Selector = get_class('partner.strategy', 'Selector')
Source = get_model('payment', 'Source')
SourceType = get_model('payment', 'SourceType')
try:
Applicator = get_class('offer.applicator', 'Applicator')
except ModuleNotFoundError:
# fallback for django-oscar<=1.1
Applicator = get_class('offer.utils', 'Applicator')
logger = logging.getLogger('paypal.express')
class RedirectView(CheckoutSessionMixin, RedirectView):
"""
Initiate the transaction with Paypal and redirect the user
to PayPal's Express Checkout to perform the transaction.
"""
permanent = False
# Setting to distinguish if the site has already collected a shipping
# address. This is False when redirecting to PayPal straight from the
# basket page but True when redirecting from checkout.
as_payment_method = False
def get_redirect_url(self, **kwargs):
try:
basket = self.request.basket
url = self._get_redirect_url(basket, **kwargs)
except PayPalError as ppe:
messages.error(
self.request, ppe.message)
if self.as_payment_method:
url = reverse('checkout:payment-details')
else:
url = reverse('basket:summary')
return url
except InvalidBasket as e:
messages.warning(self.request, six.text_type(e))
return reverse('basket:summary')
except EmptyBasketException:
messages.error(self.request, _("Your basket is empty"))
return reverse('basket:summary')
except MissingShippingAddressException:
messages.error(
self.request, _("A shipping address must be specified"))
return reverse('checkout:shipping-address')
except MissingShippingMethodException:
messages.error(
self.request, _("A shipping method must be specified"))
return reverse('checkout:shipping-method')
else:
# Transaction successfully registered with PayPal. Now freeze the
# basket so it can't be edited while the customer is on the PayPal
# site.
basket.freeze()
logger.info("Basket #%s - redirecting to %s", basket.id, url)
return url
def _get_redirect_url(self, basket, **kwargs):
if basket.is_empty:
raise EmptyBasketException()
params = {
'basket': basket,
'shipping_methods': [] # setup a default empty list
} # to support no_shipping
user = self.request.user
if self.as_payment_method:
if basket.is_shipping_required():
# Only check for shipping details if required.
shipping_addr = self.get_shipping_address(basket)
if not shipping_addr:
raise MissingShippingAddressException()
shipping_method = self.get_shipping_method(
basket, shipping_addr)
if not shipping_method:
raise MissingShippingMethodException()
params['shipping_address'] = shipping_addr
params['shipping_method'] = shipping_method
params['shipping_methods'] = []
else:
# Maik doubts that this code ever worked. Assigning
# shipping method instances to Paypal params
# isn't going to work, is it?
shipping_methods = Repository().get_shipping_methods(
user=user, basket=basket, request=self.request)
params['shipping_methods'] = shipping_methods
if settings.DEBUG:
# Determine the localserver's hostname to use when
# in testing mode
params['host'] = self.request.META['HTTP_HOST']
if user.is_authenticated():
params['user'] = user
params['paypal_params'] = self._get_paypal_params()
return get_paypal_url(**params)
def _get_paypal_params(self):
"""
Return any additional PayPal parameters
"""
return {}
class CancelResponseView(RedirectView):
permanent = False
def get(self, request, *args, **kwargs):
basket = get_object_or_404(Basket, id=kwargs['basket_id'],
status=Basket.FROZEN)
basket.thaw()
logger.info("Payment cancelled (token %s) - basket #%s thawed",
request.GET.get('token', '<no token>'), basket.id)
return super(CancelResponseView, self).get(request, *args, **kwargs)
def get_redirect_url(self, **kwargs):
messages.error(self.request, _("PayPal transaction cancelled"))
return reverse('basket:summary')
# Upgrading notes: when we drop support for Oscar 0.6, this class can be
# refactored to pass variables around more explicitly (instead of assigning
# things to self so they are accessible in a later method).
class SuccessResponseView(PaymentDetailsView):
template_name_preview = 'paypal/express/preview.html'
preview = True
@property
def pre_conditions(self):
return []
def get(self, request, *args, **kwargs):
"""
Fetch details about the successful transaction from PayPal. We use
these details to show a preview of the order with a 'submit' button to
place it.
"""
try:
self.payer_id = request.GET['PayerID']
self.token = request.GET['token']
except KeyError:
# Manipulation - redirect to basket page with warning message
logger.warning("Missing GET params on success response page")
messages.error(
self.request,
_("Unable to determine PayPal transaction details"))
return HttpResponseRedirect(reverse('basket:summary'))
try:
self.txn = fetch_transaction_details(self.token)
except PayPalError as e:
logger.warning(
"Unable to fetch transaction details for token %s: %s",
self.token, e)
messages.error(
self.request,
_("A problem occurred communicating with PayPal - please try again later"))
return HttpResponseRedirect(reverse('basket:summary'))
# Reload frozen basket which is specified in the URL
kwargs['basket'] = self.load_frozen_basket(kwargs['basket_id'])
if not kwargs['basket']:
logger.warning(
"Unable to load frozen basket with ID %s", kwargs['basket_id'])
messages.error(
self.request,
_("No basket was found that corresponds to your "
"PayPal transaction"))
return HttpResponseRedirect(reverse('basket:summary'))
logger.info(
"Basket #%s - showing preview with payer ID %s and token %s",
kwargs['basket'].id, self.payer_id, self.token)
return super(SuccessResponseView, self).get(request, *args, **kwargs)
def load_frozen_basket(self, basket_id):
# Lookup the frozen basket that this txn corresponds to
try:
basket = Basket.objects.get(id=basket_id, status=Basket.FROZEN)
except Basket.DoesNotExist:
return None
# Assign strategy to basket instance
if Selector:
basket.strategy = Selector().strategy(self.request)
# Re-apply any offers
Applicator().apply(request=self.request, basket=basket)
return basket
def get_context_data(self, **kwargs):
ctx = super(SuccessResponseView, self).get_context_data(**kwargs)
if not hasattr(self, 'payer_id'):
return ctx
# This context generation only runs when in preview mode
ctx.update({
'payer_id': self.payer_id,
'token': self.token,
'paypal_user_email': self.txn.value('EMAIL'),
'paypal_amount': D(self.txn.value('AMT')),
})
return ctx
def post(self, request, *args, **kwargs):
"""
Place an order.
We fetch the txn details again and then proceed with oscar's standard
payment details view for placing the order.
"""
error_msg = _(
"A problem occurred communicating with PayPal "
"- please try again later"
)
try:
self.payer_id = request.POST['payer_id']
self.token = request.POST['token']
except KeyError:
# Probably suspicious manipulation if we get here
messages.error(self.request, error_msg)
return HttpResponseRedirect(reverse('basket:summary'))
try:
self.txn = fetch_transaction_details(self.token)
except PayPalError:
# Unable to fetch txn details from PayPal - we have to bail out
messages.error(self.request, error_msg)
return HttpResponseRedirect(reverse('basket:summary'))
# Reload frozen basket which is specified in the URL
basket = self.load_frozen_basket(kwargs['basket_id'])
if not basket:
messages.error(self.request, error_msg)
return HttpResponseRedirect(reverse('basket:summary'))
submission = self.build_submission(basket=basket)
return self.submit(**submission)
def build_submission(self, **kwargs):
submission = super(
SuccessResponseView, self).build_submission(**kwargs)
# Pass the user email so it can be stored with the order
submission['order_kwargs']['guest_email'] = self.txn.value('EMAIL')
# Pass PP params
submission['payment_kwargs']['payer_id'] = self.payer_id
submission['payment_kwargs']['token'] = self.token
submission['payment_kwargs']['txn'] = self.txn
return submission
def handle_payment(self, order_number, total, **kwargs):
"""
Complete payment with PayPal - this calls the 'DoExpressCheckout'
method to capture the money from the initial transaction.
"""
try:
confirm_txn = confirm_transaction(
kwargs['payer_id'], kwargs['token'], kwargs['txn'].amount,
kwargs['txn'].currency)
except PayPalError:
raise UnableToTakePayment()
if not confirm_txn.is_successful:
raise UnableToTakePayment()
# Record payment source and event
source_type, is_created = SourceType.objects.get_or_create(
name='PayPal')
source = Source(source_type=source_type,
currency=confirm_txn.currency,
amount_allocated=confirm_txn.amount,
amount_debited=confirm_txn.amount)
self.add_payment_source(source)
self.add_payment_event('Settled', confirm_txn.amount,
reference=confirm_txn.correlation_id)
def get_shipping_address(self, basket):
"""
Return a created shipping address instance, created using
the data returned by PayPal.
"""
# Determine names - PayPal uses a single field
ship_to_name = self.txn.value('PAYMENTREQUEST_0_SHIPTONAME')
if ship_to_name is None:
return None
first_name = last_name = None
parts = ship_to_name.split()
if len(parts) == 1:
last_name = ship_to_name
elif len(parts) > 1:
first_name = parts[0]
last_name = " ".join(parts[1:])
return ShippingAddress(
first_name=first_name,
last_name=last_name,
line1=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTREET'),
line2=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTREET2', default=""),
line4=self.txn.value('PAYMENTREQUEST_0_SHIPTOCITY', default=""),
state=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTATE', default=""),
postcode=self.txn.value('PAYMENTREQUEST_0_SHIPTOZIP', default=""),
country=Country.objects.get(iso_3166_1_a2=self.txn.value('PAYMENTREQUEST_0_SHIPTOCOUNTRYCODE'))
)
def _get_shipping_method_by_name(self, name, basket, shipping_address=None):
methods = Repository().get_shipping_methods(
basket=basket, user=self.request.user,
shipping_addr=shipping_address, request=self.request)
for method in methods:
if method.name == name:
return method
def get_shipping_method(self, basket, shipping_address=None, **kwargs):
"""
Return the shipping method used
"""
if not basket.is_shipping_required():
return NoShippingRequired()
# Instantiate a new FixedPrice shipping method instance
charge_incl_tax = D(self.txn.value('PAYMENTREQUEST_0_SHIPPINGAMT'))
# Assume no tax for now
charge_excl_tax = charge_incl_tax
name = self.txn.value('SHIPPINGOPTIONNAME')
session_method = super(SuccessResponseView, self).get_shipping_method(
basket, shipping_address, **kwargs)
if not session_method or (name and name != session_method.name):
if name:
method = self._get_shipping_method_by_name(name, basket, shipping_address)
else:
method = None
if not method:
method = FixedPrice(charge_excl_tax, charge_incl_tax)
if session_method:
method.name = session_method.name
method.code = session_method.code
else:
method = session_method
return method
class ShippingOptionsView(View):
def post(self, request, *args, **kwargs):
"""
We use the shipping address given to use by PayPal to
determine the available shipping method
"""
# Basket ID is passed within the URL path. We need to do this as some
        # shipping options depend on the user and basket contents. PayPal does
        # pass back details of the basket contents, but it would be a royal
        # pain to reconstitute the basket from those - it's easier to just
        # piggy-back the basket ID in the callback URL.
basket = get_object_or_404(Basket, id=kwargs['basket_id'])
user = basket.owner
if not user:
user = AnonymousUser()
# Create a shipping address instance using the data passed back
country_code = self.request.POST.get(
'PAYMENTREQUEST_0_SHIPTOCOUNTRY', None)
try:
country = Country.objects.get(iso_3166_1_a2=country_code)
except Country.DoesNotExist:
country = Country()
shipping_address = ShippingAddress(
line1=self.request.POST.get('PAYMENTREQUEST_0_SHIPTOSTREET', ''),
line2=self.request.POST.get('PAYMENTREQUEST_0_SHIPTOSTREET2', ''),
line4=self.request.POST.get('PAYMENTREQUEST_0_SHIPTOCITY', ''),
state=self.request.POST.get('PAYMENTREQUEST_0_SHIPTOSTATE', ''),
postcode=self.request.POST.get('PAYMENTREQUEST_0_SHIPTOZIP', ''),
country=country
)
methods = Repository().get_shipping_methods(
basket=basket, shipping_addr=shipping_address,
request=self.request, user=user)
return self.render_to_response(methods, basket)
def render_to_response(self, methods, basket):
pairs = [
('METHOD', 'CallbackResponse'),
('CURRENCYCODE', self.request.POST.get('CURRENCYCODE', 'GBP')),
]
for index, method in enumerate(methods):
charge = method.calculate(basket).incl_tax
pairs.append(('L_SHIPPINGOPTIONNAME%d' % index,
six.text_type(method.name)))
pairs.append(('L_SHIPPINGOPTIONLABEL%d' % index,
six.text_type(method.name)))
pairs.append(('L_SHIPPINGOPTIONAMOUNT%d' % index, charge))
# For now, we assume tax and insurance to be zero
pairs.append(('L_TAXAMT%d' % index, D('0.00')))
pairs.append(('L_INSURANCEAMT%d' % index, D('0.00')))
# We assume that the first returned method is the default one
pairs.append(('L_SHIPPINGOPTIONISDEFAULT%d' % index, 1 if index == 0 else 0))
        if not methods:
# No shipping methods available - we flag this up to PayPal indicating that we
# do not ship to the shipping address.
pairs.append(('NO_SHIPPING_OPTION_DETAILS', 1))
payload = urlencode(pairs)
return HttpResponse(payload)
|
|
# -*- coding: utf-8 -*-
# extractDict4UI.py
'''
Created on 2011-3-15
@author: qqgit
'''
import os
# import re
import codecs
from subprocess import call
class ExtractDict(object):
    '''
    Extract a bilingual (English/Chinese) dictionary from a pair of Java
    .properties files and write the extracted dictionary, together with the
    keys that exist in only one of the two files, back to disk.
    '''
def __init__(self, enFile = None, zhFile = None, extractedFile = None, native2ascii_exe = None):
'''
        Initialize the enFile and zhFile names. enFile and zhFile should be
        full file names including the directory. Blank lines and comment
        lines (starting with "#") are ignored.
'''
# print "I'm here!"
self.enFile = enFile
self.zhFile = zhFile
self.extractedFile = extractedFile
self.native2ascii_exe = native2ascii_exe
self.sortedListOfKeyValueEnZhDict = None
self.sortedListOfExtractedDict = None
self.sortedListOfEnDiffZhDict = None
self.sortedListOfZhDiffEnDict = None
def openAndRead(self,fName):
'''
Read "key1 = value1 \n, ...\n, keyN = valueN \n" format
dictionary(properties) file. fName should be
full file names with directory.
Parse the contents of properties file (i.e. enFile
and zhFile) to form a list. Blank lines and comment lines
(starts with "#") are ignored.
Return a list of [["key1","value1"],...,["keyi","valuei"]
if fName is not a .properties file, i.e. it is not I18N ASCII file,
translate it using Java native2ascii.exe
'''
if os.path.isfile(fName) == False: return "Not a file name!"
# parse absolute filename to path, name trunk and extension
pathName,fileName = os.path.split(fName)
fileNameTrunk, fileNameExt = os.path.splitext(fileName)
# if fName is not a .properties file, i.e. it is not I18N ASCII file,
# translate it using Java native2ascii.exe
# create a file with a name of os.path.join(pathName,fileNameTrunk+fileNameExt)
if fileNameExt.upper() != ".properties".upper():
fName = self.unicode2Ascii(fName)
try:
f = codecs.open(fName,mode="r")
try:
fLns = f.readlines()
finally:
f.close()
# parse DictLns to Python dict
# delete all the elements that represent blank lines from the list
fLnsClean = [i for i in fLns if i!=u'\r\n']
fLnsNoComment = [i.rstrip() for i in fLnsClean if i[0]!='#']
# split ["key1=value1",...,"keyi=valuei"] list to
# [["key1","value1"],...,["keyi","valuei"] format
fList = [k.split("=") for k in fLnsNoComment]
fList = [k for k in fList if len(k)==2]# make sure only one "=" in each line
return fList
except IOError:
pass
def unicode2Ascii(self,unicodeFileName):
"""
        Convert a file from native encoding to an I18N ASCII file with the
        same file name and an extension of .properties
"""
if self.native2ascii_exe != None:
native2ascii_Fun = self.native2ascii_exe
else:
if os.getenv("JAVA_HOME") != None:
native2ascii_Fun = os.getenv("JAVA_HOME") + os.sep + "bin" + os.sep + "native2ascii.exe"
else:
native2ascii_Fun = os.getcwd() + os.sep + "native2ascii.exe"
if os.path.isfile(native2ascii_Fun) == False:
native2ascii_Fun = "Returned because native2ascii_Fun is Not set!"
pathName,fileName = os.path.split(unicodeFileName)
fileNameTrunk, fileNameExt = os.path.splitext(fileName)
asciiFileName = os.path.join(pathName,fileNameTrunk+".properties")
call([native2ascii_Fun,unicodeFileName,asciiFileName])
return asciiFileName
def ascii2Unicode(self,asciiFileName):
"""
        Convert a file from I18N ASCII back to a native-encoding file with
        the same file name and an extension of .txt
"""
if self.native2ascii_exe != None:
native2ascii_Fun = self.native2ascii_exe
else:
if os.getenv("JAVA_HOME") != None:
native2ascii_Fun = os.getenv("JAVA_HOME") + os.sep + "bin" + os.sep + "native2ascii.exe"
else:
native2ascii_Fun = os.getcwd() + os.sep + "native2ascii.exe"
if os.path.isfile(native2ascii_Fun) == False:
native2ascii_Fun = "Returned because native2ascii_Fun is Not set!"
pathName,fileName = os.path.split(asciiFileName)
fileNameTrunk, fileNameExt = os.path.splitext(fileName)
unicodeFileName = os.path.join(pathName,fileNameTrunk+".txt")
call([native2ascii_Fun,"-reverse",asciiFileName,unicodeFileName])
return unicodeFileName
def extractDict(self):
if self.enFile == None or self.zhFile == None:
return "Two .properties files needed."
enList = self.openAndRead(self.enFile)
zhList = self.openAndRead(self.zhFile)
# strip blanks and "\t\r\n" in all keys and values
# and use keys and values to form dictionary
enDict = dict([(k[0].strip(),k[1].strip()) for k in enList])
zhDict = dict([(k[0].strip(),k[1].strip()) for k in zhList])
# merge two dictionaries, keep only the common keys
# in the form of {key1:[enVaule1,zhValue1], ..., keyN:[enValueN,zhValueN]}
dicts = enDict,zhDict
commonKeySet = set(enDict.keys()) & set(zhDict.keys())
enDiffZhKeySet = set(enDict.keys()) - set(zhDict.keys())
zhDiffEnKeySet = set(zhDict.keys()) - set(enDict.keys())
dictKeyValueEnZh = dict((k,[d.get(k) for d in dicts]) for k in commonKeySet)
enDiffZhDict = dict((k,enDict[k]) for k in enDiffZhKeySet)
zhDiffEnDict = dict((k,zhDict[k]) for k in zhDiffEnKeySet)
        # make a dictionary of the values only, using the English words as
        # keys and eliminating duplicated keys
listValueEnZh = dictKeyValueEnZh.values()
dictValueEnZh = dict(listValueEnZh)
# Clean nonsense pairs in dict, i.e. keep only translated pairs
dictKeyValueEnZhCleaned = dict([[k,v] for k,v in \
dictKeyValueEnZh.items() if v[0]!=v[1]])
dictValueEnZhCleaned = dict([k for k in dictValueEnZh.items() if k[0]!=k[1]])
# sort dictionaries by keys
self.sortedListOfKeyValueEnZhDict = sorted(dictKeyValueEnZhCleaned.items(),key = lambda x:x[0])
self.sortedListOfExtractedDict = sorted(dictValueEnZhCleaned.items(), key = lambda x:x[0])
self.sortedListOfEnDiffZhDict = sorted(enDiffZhDict.items(), key = lambda x:x[0])
self.sortedListOfZhDiffEnDict = sorted(zhDiffEnDict.items(), key = lambda x:x[0])
def writeExtractedDict(self,fName=None, fName4UI=None):
if fName == None: fName = self.enFile
if os.path.isfile(fName) == False: return "Not a file name!"
if fName4UI == None: fName4UI = self.extractedFile
# parse absolute filename to path, name trunk and extension
pathName,fileName = os.path.split(fName)
fileNameTrunk, fileNameExt = os.path.splitext(fileName)
extractedDictFileName = os.path.join(pathName,fileNameTrunk+"_ExtractedDict.properties")
keyValueEnZhDictFileName = os.path.join(pathName,fileNameTrunk+"_KeyValueEnZhDict.properties")
EnDiffZhFileName = os.path.join(pathName,fileNameTrunk+"_EnDiffZh.properties")
ZhDiffEnFileName = os.path.join(pathName,fileNameTrunk+"_ZhDiffEn.properties")
        # if the sorted list of the merged En/Zh dict exists, write it in
        # "key = value" format
if self.sortedListOfKeyValueEnZhDict != None:
f = codecs.open(keyValueEnZhDictFileName,mode="w")
for i in self.sortedListOfKeyValueEnZhDict:
f.write(i[0]+'\t = \t'+i[1][0]+','+i[1][1]+'\n')
f.close()
self.ascii2Unicode(keyValueEnZhDictFileName)
if self.sortedListOfExtractedDict != None:
f = codecs.open(extractedDictFileName,mode="w")
if fName4UI != None:
f4UI = codecs.open(fName4UI,mode="w")
for i in self.sortedListOfExtractedDict:
f.write(i[0]+'\t = \t'+i[1]+'\n')
if fName4UI != None:
f4UI.write(i[0]+'\t = \t'+i[1]+'\n')
f.close()
if fName4UI != None:
f4UI.close()
self.ascii2Unicode(extractedDictFileName)
if self.sortedListOfEnDiffZhDict != None:
f = codecs.open(EnDiffZhFileName,mode="w")
for i in self.sortedListOfEnDiffZhDict:
f.write(i[0]+'\t = \t'+i[1]+'\n')
f.close()
if self.sortedListOfZhDiffEnDict != None:
f = codecs.open(ZhDiffEnFileName,mode="w")
for i in self.sortedListOfZhDiffEnDict:
f.write(i[0]+'\t = \t'+i[1]+'\n')
f.close()
self.ascii2Unicode(ZhDiffEnFileName)
if fName4UI != None:
return fName4UI
else:
return extractedDictFileName
def test(enFile = None, zhFile = None):
extractDict = ExtractDict(enFile,zhFile)
extractDict.extractDict()
extractDict.writeExtractedDict()
if __name__ == "__main__":
# #file and directories to be deal with
# print "I'm here!"
fPath = "D:\\workspace\\Python27\\myfirstpython\\files\\ApplicationResources"
# English and Chinese properties file used to prepare for extracting dictionary
fEnFile = os.path.join(fPath,"ApplicationResources.properties")
fZhFile = os.path.join(fPath,"ApplicationResources_zh.properties")
test(fEnFile,fZhFile)
|
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic EC2 Resource Tag / Filters and actions
These work for the whole family of resources associated
to ec2 (subnets, vpc, security-groups, volumes, instances,
snapshots).
"""
from concurrent.futures import as_completed
from datetime import datetime, timedelta
from dateutil.parser import parse
from dateutil.tz import tzutc
from c7n.actions import BaseAction as Action
from c7n.filters import Filter, OPERATORS
from c7n import utils
DEFAULT_TAG = "maid_status"
ACTIONS = [
'suspend', 'resume', 'terminate', 'stop', 'start',
'delete', 'deletion']
def register_tags(filters, actions):
filters.register('marked-for-op', TagActionFilter)
filters.register('tag-count', TagCountFilter)
actions.register('mark-for-op', TagDelayedAction)
actions.register('tag-trim', TagTrim)
actions.register('mark', Tag)
actions.register('tag', Tag)
actions.register('unmark', RemoveTag)
actions.register('untag', RemoveTag)
actions.register('remove-tag', RemoveTag)
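# Illustrative only: a resource module would wire these in roughly like
#
#     register_tags(EC2.filter_registry, EC2.action_registry)
#
# where `EC2` is that module's resource class (not defined in this file).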
class TagTrim(Action):
"""Automatically remove tags from an ec2 resource.
    EC2 resources have a limit of 10 tags. In order to free up
    additional tag space on a set of resources, this action can
be used to remove enough tags to make the desired amount of
space while preserving a given set of tags.
.. code-block :: yaml
- policies:
- name: ec2-tag-trim
comment: |
Any instances with 8 or more tags get tags removed until
              they match the target tag count, in this case 7, so that
              we free up a tag slot for another usage.
resource: ec2
filters:
# Filter down to resources which already have 8 tags
# as we need space for 3 more, this also ensures that
# metrics reporting is correct for the policy.
              - type: value
                key: "[length(Tags)][0]"
                op: ge
                value: 8
actions:
- type: tag-trim
space: 3
preserve:
- OwnerContact
- ASV
- CMDBEnvironment
- downtime
- custodian_status
"""
max_tag_count = 10
schema = utils.type_schema(
'tag-trim',
space={'type': 'integer'},
preserve={'type': 'array', 'items': {'type': 'string'}})
def process(self, resources):
self.id_key = self.manager.get_model().id
        self.preserve = set(self.data.get('preserve', ()))
self.space = self.data.get('space', 3)
with self.executor_factory(max_workers=3) as w:
list(w.map(self.process_resource, resources))
def process_resource(self, i):
        # Can't really do this in batch parallel without some more complex
        # heuristics for matching/grouping resources by common tag
        # populations.
tag_map = {
t['Key']:t['Value'] for t in i.get('Tags', [])
if not t['Key'].startswith('aws:')}
# Space == 0 means remove all but specified
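        # Worked example from the docstring above: with the 10-tag limit,
        # 8 existing (non-aws) tags and space=3, 8 + 3 > 10, so we fall
        # through below and trim enough tags to free the requested slots.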
if self.space and len(tag_map) + self.space <= self.max_tag_count:
return
keys = set(tag_map)
preserve = self.preserve.intersection(keys)
candidates = keys - self.preserve
if self.space:
# Free up slots to fit
remove = len(candidates) - (
self.max_tag_count - (self.space + len(preserve)))
candidates = list(sorted(candidates))[:remove]
if not candidates:
self.log.warning(
"Could not find any candidates to trim %s" % i[self.id_key])
return
self.process_tag_removal(i, candidates)
def process_tag_removal(self, resource, tags):
client = utils.local_session(
self.manager.session_factory).client('ec2')
client.delete_tags(
Tags=[{'Key': c} for c in tags],
Resources=[resource[self.id_key]],
DryRun=self.manager.config.dryrun)
class TagActionFilter(Filter):
"""Filter resources for tag specified future action
Filters resources by a 'custodian_status' tag which specifies a future
date for an action.
The filter parses the tag values looking for an 'op@date'
    string. The date is parsed and compared to today's date; the
    filter succeeds if today's date is greater than or equal to the
    target date.
The optional 'skew' parameter provides for incrementing today's
date a number of days into the future. An example use case might
be sending a final notice email a few days before terminating an
instance, or snapshotting a volume prior to deletion.
.. code-block :: yaml
- policies:
- name: ec2-stop-marked
resource: ec2
filters:
- type: marked-for-op
# The default tag used is custodian_status
# but that is configurable
tag: custodian_status
op: stop
# Another optional tag is skew
actions:
- stop
"""
schema = utils.type_schema(
'marked-for-op',
tag={'type': 'string'},
skew={'type': 'number', 'minimum': 0},
op={'enum': ACTIONS})
current_date = None
def __call__(self, i):
tag = self.data.get('tag', DEFAULT_TAG)
op = self.data.get('op', 'stop')
skew = self.data.get('skew', 0)
v = None
for n in i.get('Tags', ()):
if n['Key'] == tag:
v = n['Value']
break
if v is None:
return False
        if ':' not in v or '@' not in v:
return False
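        # Tag values look like '<message>: <op>@<YYYY/MM/DD>', e.g. the
        # default TagDelayedAction template yields
        # "Resource does not meet policy: stop@2016/05/01".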
msg, tgt = v.rsplit(':', 1)
action, action_date_str = tgt.strip().split('@', 1)
if action != op:
return False
try:
action_date = parse(action_date_str)
        except Exception:
            self.log.warning("could not parse tag:%s value:%s on %s" % (
                tag, v, i['InstanceId']))
            return False
if self.current_date is None:
self.current_date = datetime.now()
return self.current_date >= (action_date - timedelta(skew))
class TagCountFilter(Filter):
"""Simplify tag counting..
ie. these two blocks are equivalent
.. code-block :: yaml
- filters:
- type: value
key: "[length(Tags)][0]"
op: gte
value: 8
- filters:
- type: tag-count
value: 8
"""
schema = utils.type_schema(
'tag-count',
count={'type': 'integer', 'minimum': 0},
op={'enum': OPERATORS.keys()})
def __call__(self, i):
count = self.data.get('count', 10)
op_name = self.data.get('op', 'gte')
op = OPERATORS.get(op_name)
tag_count = len([
t['Key'] for t in i.get('Tags', [])
if not t['Key'].startswith('aws:')])
return op(tag_count, count)
class Tag(Action):
"""Tag an ec2 resource.
"""
batch_size = 150
concurrency = 2
schema = utils.type_schema(
'tag', aliases=('mark',),
tags={'type': 'object'},
key={'type': 'string'},
value={'type': 'string'},
)
def process(self, resources):
self.id_key = self.manager.get_model().id
# Legacy
msg = self.data.get('msg')
msg = self.data.get('value') or msg
tag = self.data.get('tag', DEFAULT_TAG)
tag = self.data.get('key') or tag
# Support setting multiple tags in a single go with a mapping
tags = self.data.get('tags')
if tags is None:
tags = []
else:
tags = [{'Key': k, 'Value': v} for k, v in tags.items()]
if msg:
tags.append({'Key': tag, 'Value': msg})
batch_size = self.data.get('batch_size', self.batch_size)
with self.executor_factory(max_workers=self.concurrency) as w:
futures = {}
for resource_set in utils.chunks(resources, size=batch_size):
futures[
w.submit(
self.process_resource_set, resource_set, tags)
] = resource_set
for f in as_completed(futures):
                if f.exception():
                    resource_set = futures[f]
                    self.log.error(
                        "Exception creating tags: %s on resources:%s \n %s" % (
                            tags,
                            ", ".join([r[self.id_key] for r in resource_set]),
                            f.exception()))
def process_resource_set(self, resource_set, tags):
client = utils.local_session(
self.manager.session_factory).client('ec2')
client.create_tags(
Resources=[v[self.id_key] for v in resource_set],
Tags=tags,
DryRun=self.manager.config.dryrun)
class RemoveTag(Action):
"""Remove tags from ec2 resources.
"""
batch_size = 100
concurrency = 2
schema = utils.type_schema(
'untag', aliases=('unmark', 'remove-tag'),
tags={'type': 'array', 'items': {'type': 'string'}})
def process(self, resources):
self.id_key = self.manager.get_model().id
tags = self.data.get('tags', [DEFAULT_TAG])
batch_size = self.data.get('batch_size', self.batch_size)
with self.executor_factory(max_workers=self.concurrency) as w:
futures = {}
for resource_set in utils.chunks(resources, size=batch_size):
futures[
w.submit(
self.process_resource_set, resource_set, tags)
] = resource_set
for f in as_completed(futures):
if f.exception():
resource_set = futures[f]
self.log.error(
"Exception removing tags: %s on resources:%s \n %s" % (
tags,
", ".join([r[self.id_key] for r in resource_set]),
f.exception()))
def process_resource_set(self, vol_set, tag_keys):
client = utils.local_session(
self.manager.session_factory).client('ec2')
client.delete_tags(
Resources=[v[self.id_key] for v in vol_set],
            Tags=[{'Key': k} for k in tag_keys],
DryRun=self.manager.config.dryrun)
class TagDelayedAction(Action):
"""Tag resources for future action.
.. code-block :: yaml
- policies:
- name: ec2-stop-marked
resource: ec2
filters:
- type: marked-for-op
# The default tag used is custodian_status
# but that is configurable
tag: custodian_status
op: stop
# Another optional tag is skew
actions:
- stop
"""
schema = utils.type_schema(
'mark-for-op',
tag={'type': 'string'},
msg={'type': 'string'},
days={'type': 'number', 'minimum': 0, 'exclusiveMinimum': True},
op={'enum': ACTIONS})
batch_size = 200
default_template = 'Resource does not meet policy: {op}@{action_date}'
def process(self, resources):
self.id_key = self.manager.get_model().id
# Move this to policy? / no resources bypasses actions?
if not len(resources):
return
msg_tmpl = self.data.get('msg', self.default_template)
op = self.data.get('op', 'stop')
tag = self.data.get('tag', DEFAULT_TAG)
date = self.data.get('days', 4)
n = datetime.now(tz=tzutc())
action_date = n + timedelta(days=date)
msg = msg_tmpl.format(
op=op, action_date=action_date.strftime('%Y/%m/%d'))
self.log.info("Tagging %d resources for %s on %s" % (
len(resources), op, action_date.strftime('%Y/%m/%d')))
tags = [{'Key': tag, 'Value': msg}]
with self.executor_factory(max_workers=2) as w:
futures = []
for resource_set in utils.chunks(resources, size=self.batch_size):
futures.append(
w.submit(self.process_resource_set, resource_set, tags))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception tagging resource set: %s \n %s" % (
tags, f.exception()))
def process_resource_set(self, resource_set, tags):
client = utils.local_session(self.manager.session_factory).client('ec2')
client.create_tags(
Resources=[v[self.id_key] for v in resource_set],
Tags=tags,
DryRun=self.manager.config.dryrun)
|
|
import pygame
from events import *
from GameModel import TILE_WIDTH, \
TILE_HEIGHT, \
NUM_TILES_WIDE, \
NUM_TILES_TALL
SCREEN_WIDTH = TILE_WIDTH * NUM_TILES_WIDE
SCREEN_HEIGHT = TILE_HEIGHT * NUM_TILES_TALL
#------------------------------------------------------------------------------
class PygameView:
# Be responsible for screen shifts!!!!
def __init__(self, evManager):
self.evManager = evManager
self.evManager.RegisterListener( self )
pygame.init()
self.window = pygame.display.set_mode( (SCREEN_WIDTH, SCREEN_HEIGHT) )
img_icon = pygame.image.load("images/img_icon.bmp")
pygame.display.set_icon(img_icon)
pygame.display.set_caption( 'Demo RPG' )
self.background = pygame.Surface( self.window.get_size() )
self.screenRect = self.background.get_rect()
self.background.fill( (0,0,255) ) # Blue
self.window.blit( self.background, (0,0) )
self.backSprites = pygame.sprite.RenderUpdates()
self.charSprites = pygame.sprite.RenderUpdates()
pygame.display.update()
#--------------------------------------------------------------------------
def Notify(self, event):
if isinstance( event, TickEvent ):
self.backSprites.clear( self.window, self.background )
self.charSprites.update()
dirtyRects1 = self.backSprites.draw( self.window )
dirtyRects2 = self.charSprites.draw( self.window )
dirtyRects = dirtyRects1 + dirtyRects2
pygame.display.update( dirtyRects )
elif isinstance( event, MapLoadedEvent ):
self.CreateMapTiles(event.map_screen)
elif isinstance( event, CharacterPlacementEvent ):
self.ShowCharacter(event.character)
#--------------------------------------------------------------------------
def CreateMapTiles(self, map_screen):
        # delete existing sprites?
back_map = map_screen.struct_back_layer
tileset = map_screen.tileset_back_layer
file_path = "images/" + tileset
for row in range(len(back_map)):
for col in range(len(back_map[row])):
char_map = map_screen.get_dict_back_layer(row, col)
                # char_map is a dictionary containing the 'item : value'
                # entries of the sections defined by the character at
                # position: row, col in the back_layer
tile_xcoord = int(char_map['tile_xcoord'])
tile_ycoord = int(char_map['tile_ycoord'])
image = self.GetTileImage(file_path, tile_xcoord, tile_ycoord)
newTile = TileSprite(image, row, col, self.backSprites)
#--------------------------------------------------------------------------
def GetTileImage(self, image_file, row, col):
image = pygame.image.load(image_file)
left = col * TILE_WIDTH
top = row * TILE_HEIGHT
rect = pygame.Rect(left, top, TILE_WIDTH, TILE_HEIGHT)
tile_image = image.subsurface(rect)
return tile_image
#--------------------------------------------------------------------------
def ShowCharacter(self, character):
characterSprite = CharacterSprite(character, character.masterImage, \
self.screenRect, self.charSprites)
characterSprite.rect.top = character.rect.top
characterSprite.rect.left = character.rect.left
#------------------------------------------------------------------------------
class TileSprite(pygame.sprite.Sprite):
def __init__(self, image, row, col, group = None):
pygame.sprite.Sprite.__init__(self, group)
self.image = image
self.rect = self.image.get_rect()
self.rect.top = row * TILE_HEIGHT
self.rect.left = col * TILE_WIDTH
#------------------------------------------------------------------------------
class CharacterSprite(pygame.sprite.DirtySprite):
#--------------------------------------------------------------------------
def __init__(self, character, image, screenRect, group = None):
pygame.sprite.Sprite.__init__(self, group)
self.character = character # From game model
self.masterImage = pygame.image.load(image)
self.masterImage.convert()
self.masterImage.set_colorkey( (255, 255, 255) )
self.screenRect = screenRect
self.downImages = []
self.upImages = []
self.leftImages = []
self.rightImages = []
self.imageDirection = []
self.frameSize = (TILE_WIDTH, TILE_HEIGHT)
self.frame = 0
# Variables for animation
self.pause = 0
#-----Create list of images for each direction-------------------------
for row in range(4):
for col in range(2):
if row == 0:
position = (col * self.frameSize[1], 0 )
image = self.masterImage.subsurface( position , self.frameSize )
self.downImages.append(image)
elif row == 1:
position = (col * self.frameSize[1], self.frameSize[0] )
image = self.masterImage.subsurface( position , self.frameSize )
self.upImages.append(image)
elif row == 2:
position = (col * self.frameSize[1], 2 * self.frameSize[0] )
image = self.masterImage.subsurface( position , self.frameSize )
self.leftImages.append(image)
elif row == 3:
position = (col * self.frameSize[1], 3 * self.frameSize[0] )
image = self.masterImage.subsurface( position , self.frameSize )
self.rightImages.append(image)
#----------------------------------------------------------------------
self.imageDirection = self.downImages
self.image = self.imageDirection[self.frame]
self.rect = self.image.get_rect()
#--------------------------------------------------------------------------
def update(self):
# Decide image directions
if self.character.directionFacing == 'up':
self.imageDirection = self.upImages
elif self.character.directionFacing == 'down':
self.imageDirection = self.downImages
elif self.character.directionFacing == 'left':
self.imageDirection = self.leftImages
elif self.character.directionFacing == 'right':
self.imageDirection = self.rightImages
# Remember sprite's previous position
oldRect = self.rect.copy()
# Get new position from character data
self.rect.top = self.character.rect.top
self.rect.left = self.character.rect.left
dx = self.character.speed[0]
dy = self.character.speed[1]
# Check collisions on each axis separately
if dx != 0:
self.CollideAxis(dx, 0)
if dy != 0:
self.CollideAxis(0, dy)
if self.rect != oldRect:
self.Animation()
else:
self.image = self.imageDirection[0]
def CollideAxis(self, dx, dy):
# If collision fix position
if self.screenRect.contains(self.rect) == False:
if dx > 0: # Moving right; Hit the left side of the wall
self.rect.right = self.screenRect.left
#self.character.location[0] = self.rect.left
if dx < 0: # Moving left; Hit the right side of the wall
self.rect.left = self.screenRect.right
#self.character.location[0] = self.rect.left
if dy > 0: # Moving down; Hit the top side of the wall
self.rect.bottom = self.screenRect.top
#self.character.location[1] = self.rect.top
if dy < 0: # Moving up; Hit the bottom side of the wall
self.rect.top = self.screenRect.bottom
#self.character.location[1] = self.rect.top
#--------------------------------------------------------------------------
def Animation(self):
# Determine image direction based on character speed:
if self.character.speed[1] > 0: # Moving down
if self.character.backMovementActive == False:
self.imageDirection = self.downImages
else:
self.imageDirection = self.upImages
elif self.character.speed[1] < 0: # Moving up
if self.character.backMovementActive == False:
self.imageDirection = self.upImages
else:
self.imageDirection = self.downImages
elif self.character.speed[0] < 0: # Moving left
if self.character.backMovementActive == False:
self.imageDirection = self.leftImages
else:
self.imageDirection = self.rightImages
elif self.character.speed[0] > 0: # Moving right
if self.character.backMovementActive == False:
self.imageDirection = self.rightImages
else:
self.imageDirection = self.leftImages
#----------------------------------------------------------------------
delay = 10
self.pause += 1
if self.pause >= delay:
self.pause = 0
self.frame += 1
if self.frame >= len(self.imageDirection):
self.frame = 0
self.image = self.imageDirection[self.frame]
|
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
def _sigmoid(x):
xp = cuda.get_array_module(x)
half = x.dtype.type(0.5)
return xp.tanh(x * half) * half + half
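# Reference recurrence evaluated by _peephole below (one step of a peephole
# LSTM, matching the weight layout of links.StatefulPeepholeLSTM):
#
#   a, i, f, o = split(x W_up^T + h W_lat^T)
#   c' = tanh(a) * sigmoid(i + c P_i^T) + sigmoid(f + c P_f^T) * c
#   h' = sigmoid(o + c' P_o^T) * tanh(c')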
def _peephole(func, c, h, x):
xp = cuda.get_array_module(x)
with cuda.get_device_from_array(x):
lstm_in = x.dot(func.upward.W.data.T)
lstm_in += h.dot(func.lateral.W.data.T)
lstm_in = xp.reshape(lstm_in, (len(lstm_in),
lstm_in.shape[1] // 4,
4))
a, i, f, o = xp.split(lstm_in, 4, 2)
a = xp.reshape(a, (len(a), a.shape[1]))
i = xp.reshape(i, (len(i), i.shape[1]))
f = xp.reshape(f, (len(f), f.shape[1]))
o = xp.reshape(o, (len(o), o.shape[1]))
peep_in_i = c.dot(func.peep_i.W.data.T)
peep_in_f = c.dot(func.peep_f.W.data.T)
a = xp.tanh(a)
i = _sigmoid(i + peep_in_i)
f = _sigmoid(f + peep_in_f)
c_next = a * i + f * c
peep_in_o = c_next.dot(func.peep_o.W.data.T)
o = _sigmoid(o + peep_in_o)
y = o * xp.tanh(c_next)
return c_next, y
@testing.parameterize(
{'in_size': 10, 'out_size': 10},
{'in_size': 10, 'out_size': 40},
)
class TestPeephole(unittest.TestCase):
def setUp(self):
self.link = links.StatefulPeepholeLSTM(self.in_size, self.out_size)
upward = self.link.upward.W.data
upward[...] = numpy.random.uniform(-1, 1, upward.shape)
lateral = self.link.lateral.W.data
lateral[...] = numpy.random.uniform(-1, 1, lateral.shape)
peep_i = self.link.peep_i.W.data
peep_i[...] = numpy.random.uniform(-1, 1, peep_i.shape)
peep_f = self.link.peep_f.W.data
peep_f[...] = numpy.random.uniform(-1, 1, peep_f.shape)
peep_o = self.link.peep_o.W.data
peep_o[...] = numpy.random.uniform(-1, 1, peep_o.shape)
c_shape = (1, self.out_size)
h_shape = (1, self.out_size)
x_shape = (4, self.in_size)
gy_shape = (4, self.out_size)
self.c = numpy.zeros(c_shape).astype(numpy.float32)
self.h = numpy.zeros(h_shape).astype(numpy.float32)
self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(numpy.float32)
def _forward(self, link, x):
return link(x)
def check_forward(self, c_data, h_data, x_data):
x = chainer.Variable(x_data)
h1 = self.link(x)
c1_expect, h1_expect = _peephole(self.link, c_data, h_data, x_data)
testing.assert_allclose(h1.data, h1_expect)
testing.assert_allclose(self.link.c.data, c1_expect)
testing.assert_allclose(self.link.h.data, h1_expect)
h2 = self.link(x)
c2_expect, h2_expect = _peephole(self.link,
c1_expect, h1_expect, x_data)
testing.assert_allclose(h2.data, h2_expect)
testing.assert_allclose(self.link.c.data, c2_expect)
testing.assert_allclose(self.link.h.data, h2_expect)
def test_forward_cpu(self):
self.check_forward(self.c, self.h, self.x)
@attr.gpu
def test_forward_gpu(self):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.c),
cuda.to_gpu(self.h),
cuda.to_gpu(self.x))
@attr.multi_gpu(2)
def test_forward_gpu_multi(self):
with cuda.get_device_from_id(0):
self.link.to_gpu()
c = cuda.to_gpu(self.c)
h = cuda.to_gpu(self.h)
x = cuda.to_gpu(self.x)
with cuda.get_device_from_id(1):
self.check_forward(c, h, x)
def check_backward(self, c_data, h_data, x_data, y_grad):
x = chainer.Variable(x_data)
y = self._forward(self.link, x)
y.grad = y_grad
y.backward()
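        # numerical_grad perturbs x.data in place; _peephole reads that same
        # array via x_data, so f() recomputes the reference output for each
        # perturbation.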
def f():
c, y = _peephole(self.link, c_data, h_data, x_data)
return y,
gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
testing.assert_allclose(gx, x.grad, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(self.c, self.h, self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.c),
cuda.to_gpu(self.h),
cuda.to_gpu(self.x),
cuda.to_gpu(self.gy))
class TestPeepholeState(unittest.TestCase):
def setUp(self):
in_size, out_size = 10, 8
self.link = links.StatefulPeepholeLSTM(in_size, out_size)
def check_reset_state(self):
self.link.reset_state()
self.assertIsNone(self.link.c)
self.assertIsNone(self.link.h)
def test_reset_state_cpu(self):
self.check_reset_state()
@attr.gpu
def test_reset_state_gpu(self):
self.link.to_gpu()
self.check_reset_state()
class TestPeepholeToCPUToGPU(unittest.TestCase):
def setUp(self):
in_size, out_size = 10, 8
self.link = links.StatefulPeepholeLSTM(in_size, out_size)
self.c = chainer.Variable(
numpy.random.uniform(-1, 1, (1, out_size)).astype(numpy.float32))
self.h = chainer.Variable(
numpy.random.uniform(-1, 1, (1, out_size)).astype(numpy.float32))
def check_to_cpu(self, c, h):
self.link.c = c
self.link.h = h
self.link.to_cpu()
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
self.link.to_cpu()
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
def test_to_cpu_cpu(self):
self.check_to_cpu(self.c, self.h)
@attr.gpu
def test_to_cpu_gpu(self):
self.c.to_gpu()
self.h.to_gpu()
self.check_to_cpu(self.c, self.h)
def check_to_cpu_to_gpu(self, c, h):
self.link.c = c
self.link.h = h
self.link.to_gpu()
self.assertIs(self.link.xp, cuda.cupy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
self.link.to_gpu()
self.assertIs(self.link.xp, cuda.cupy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
self.link.to_cpu()
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
self.link.to_gpu()
self.assertIs(self.link.xp, cuda.cupy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
@attr.gpu
def test_to_cpu_to_gpu_cpu(self):
self.check_to_cpu_to_gpu(self.c, self.h)
@attr.gpu
def test_to_cpu_to_gpu_gpu(self):
self.c.to_gpu()
self.h.to_gpu()
self.check_to_cpu_to_gpu(self.c, self.h)
testing.run_module(__name__, __file__)
|
|
import os
import os.path
import shutil
import subprocess
import time
import unittest
import tempfile
def my_check_output(*popenargs, **kwargs):
"""
    If we had Python 2.7, we could simply use subprocess.check_output.
    This is a stop-gap solution for Python 2.6.
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stderr=subprocess.PIPE, stdout=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise Exception("Exit code is not 0. It is %d. Command: %s" %
(retcode, cmd))
return output
def run_err_null(cmd):
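    # Silence stderr; os.system returns the shell status value, 0 on success.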
return os.system(cmd + " 2>/dev/null ")
class LDBTestCase(unittest.TestCase):
def setUp(self):
self.TMP_DIR = tempfile.mkdtemp(prefix="ldb_test_")
self.DB_NAME = "testdb"
def tearDown(self):
assert(self.TMP_DIR.strip() != "/"
and self.TMP_DIR.strip() != "/tmp"
and self.TMP_DIR.strip() != "/tmp/") #Just some paranoia
shutil.rmtree(self.TMP_DIR)
def dbParam(self, dbName):
return "--db=%s" % os.path.join(self.TMP_DIR, dbName)
def assertRunOKFull(self, params, expectedOutput, unexpected=False):
"""
All command-line params must be specified.
Allows full flexibility in testing; for example: missing db param.
"""
output = my_check_output("./ldb %s |grep -v \"Created bg thread\"" %
params, shell=True)
if not unexpected:
self.assertEqual(output.strip(), expectedOutput.strip())
else:
self.assertNotEqual(output.strip(), expectedOutput.strip())
def assertRunFAILFull(self, params):
"""
All command-line params must be specified.
Allows full flexibility in testing; for example: missing db param.
"""
try:
my_check_output("./ldb %s >/dev/null 2>&1 |grep -v \"Created bg \
thread\"" % params, shell=True)
except Exception, e:
return
self.fail(
"Exception should have been raised for command with params: %s" %
params)
def assertRunOK(self, params, expectedOutput, unexpected=False):
"""
Uses the default test db.
"""
self.assertRunOKFull("%s %s" % (self.dbParam(self.DB_NAME), params),
expectedOutput, unexpected)
def assertRunFAIL(self, params):
"""
Uses the default test db.
"""
self.assertRunFAILFull("%s %s" % (self.dbParam(self.DB_NAME), params))
def testSimpleStringPutGet(self):
print "Running testSimpleStringPutGet..."
self.assertRunFAIL("put x1 y1")
self.assertRunOK("put --create_if_missing x1 y1", "OK")
self.assertRunOK("get x1", "y1")
self.assertRunFAIL("get x2")
self.assertRunOK("put x2 y2", "OK")
self.assertRunOK("get x1", "y1")
self.assertRunOK("get x2", "y2")
self.assertRunFAIL("get x3")
self.assertRunOK("scan --from=x1 --to=z", "x1 : y1\nx2 : y2")
self.assertRunOK("put x3 y3", "OK")
self.assertRunOK("scan --from=x1 --to=z", "x1 : y1\nx2 : y2\nx3 : y3")
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3")
self.assertRunOK("scan --from=x", "x1 : y1\nx2 : y2\nx3 : y3")
self.assertRunOK("scan --to=x2", "x1 : y1")
self.assertRunOK("scan --from=x1 --to=z --max_keys=1", "x1 : y1")
self.assertRunOK("scan --from=x1 --to=z --max_keys=2",
"x1 : y1\nx2 : y2")
self.assertRunOK("scan --from=x1 --to=z --max_keys=3",
"x1 : y1\nx2 : y2\nx3 : y3")
self.assertRunOK("scan --from=x1 --to=z --max_keys=4",
"x1 : y1\nx2 : y2\nx3 : y3")
self.assertRunOK("scan --from=x1 --to=x2", "x1 : y1")
self.assertRunOK("scan --from=x2 --to=x4", "x2 : y2\nx3 : y3")
self.assertRunFAIL("scan --from=x4 --to=z") # No results => FAIL
self.assertRunFAIL("scan --from=x1 --to=z --max_keys=foo")
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3")
self.assertRunOK("delete x1", "OK")
self.assertRunOK("scan", "x2 : y2\nx3 : y3")
self.assertRunOK("delete NonExistentKey", "OK")
        # It is weird that GET and SCAN raise an exception for a
        # non-existent key, while DELETE does not
self.assertRunOK("checkconsistency", "OK")
def dumpDb(self, params, dumpFile):
return 0 == run_err_null("./ldb dump %s > %s" % (params, dumpFile))
def loadDb(self, params, dumpFile):
return 0 == run_err_null("cat %s | ./ldb load %s" % (dumpFile, params))
def testStringBatchPut(self):
print "Running testStringBatchPut..."
self.assertRunOK("batchput x1 y1 --create_if_missing", "OK")
self.assertRunOK("scan", "x1 : y1")
self.assertRunOK("batchput x2 y2 x3 y3 \"x4 abc\" \"y4 xyz\"", "OK")
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 abc : y4 xyz")
self.assertRunFAIL("batchput")
self.assertRunFAIL("batchput k1")
self.assertRunFAIL("batchput k1 v1 k2")
def testCountDelimDump(self):
print "Running testCountDelimDump..."
self.assertRunOK("batchput x.1 x1 --create_if_missing", "OK")
self.assertRunOK("batchput y.abc abc y.2 2 z.13c pqr", "OK")
self.assertRunOK("dump --count_delim", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
self.assertRunOK("dump --count_delim=\".\"", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
self.assertRunOK("batchput x,2 x2 x,abc xabc", "OK")
self.assertRunOK("dump --count_delim=\",\"", "x => count:2\tsize:14\nx.1 => count:1\tsize:5\ny.2 => count:1\tsize:4\ny.abc => count:1\tsize:8\nz.13c => count:1\tsize:8")
def testCountDelimIDump(self):
print "Running testCountDelimIDump..."
self.assertRunOK("batchput x.1 x1 --create_if_missing", "OK")
self.assertRunOK("batchput y.abc abc y.2 2 z.13c pqr", "OK")
self.assertRunOK("dump --count_delim", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
self.assertRunOK("dump --count_delim=\".\"", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
self.assertRunOK("batchput x,2 x2 x,abc xabc", "OK")
self.assertRunOK("dump --count_delim=\",\"", "x => count:2\tsize:14\nx.1 => count:1\tsize:5\ny.2 => count:1\tsize:4\ny.abc => count:1\tsize:8\nz.13c => count:1\tsize:8")
def testHexPutGet(self):
print "Running testHexPutGet..."
self.assertRunOK("put a1 b1 --create_if_missing", "OK")
self.assertRunOK("scan", "a1 : b1")
self.assertRunOK("scan --hex", "0x6131 : 0x6231")
self.assertRunFAIL("put --hex 6132 6232")
self.assertRunOK("put --hex 0x6132 0x6232", "OK")
self.assertRunOK("scan --hex", "0x6131 : 0x6231\n0x6132 : 0x6232")
self.assertRunOK("scan", "a1 : b1\na2 : b2")
self.assertRunOK("get a1", "b1")
self.assertRunOK("get --hex 0x6131", "0x6231")
self.assertRunOK("get a2", "b2")
self.assertRunOK("get --hex 0x6132", "0x6232")
self.assertRunOK("get --key_hex 0x6132", "b2")
self.assertRunOK("get --key_hex --value_hex 0x6132", "0x6232")
self.assertRunOK("get --value_hex a2", "0x6232")
self.assertRunOK("scan --key_hex --value_hex",
"0x6131 : 0x6231\n0x6132 : 0x6232")
self.assertRunOK("scan --hex --from=0x6131 --to=0x6133",
"0x6131 : 0x6231\n0x6132 : 0x6232")
self.assertRunOK("scan --hex --from=0x6131 --to=0x6132",
"0x6131 : 0x6231")
self.assertRunOK("scan --key_hex", "0x6131 : b1\n0x6132 : b2")
self.assertRunOK("scan --value_hex", "a1 : 0x6231\na2 : 0x6232")
self.assertRunOK("batchput --hex 0x6133 0x6233 0x6134 0x6234", "OK")
self.assertRunOK("scan", "a1 : b1\na2 : b2\na3 : b3\na4 : b4")
self.assertRunOK("delete --hex 0x6133", "OK")
self.assertRunOK("scan", "a1 : b1\na2 : b2\na4 : b4")
self.assertRunOK("checkconsistency", "OK")
def testTtlPutGet(self):
print "Running testTtlPutGet..."
self.assertRunOK("put a1 b1 --ttl --create_if_missing", "OK")
self.assertRunOK("scan --hex", "0x6131 : 0x6231", True)
self.assertRunOK("dump --ttl ", "a1 ==> b1", True)
self.assertRunOK("dump --hex --ttl ",
"0x6131 ==> 0x6231\nKeys in range: 1")
self.assertRunOK("scan --hex --ttl", "0x6131 : 0x6231")
self.assertRunOK("get --value_hex a1", "0x6231", True)
self.assertRunOK("get --ttl a1", "b1")
self.assertRunOK("put a3 b3 --create_if_missing", "OK")
        # fails because the timestamp's length is greater than the value's
self.assertRunFAIL("get --ttl a3")
self.assertRunOK("checkconsistency", "OK")
def testInvalidCmdLines(self):
print "Running testInvalidCmdLines..."
# db not specified
self.assertRunFAILFull("put 0x6133 0x6233 --hex --create_if_missing")
# No param called he
self.assertRunFAIL("put 0x6133 0x6233 --he --create_if_missing")
# max_keys is not applicable for put
self.assertRunFAIL("put 0x6133 0x6233 --max_keys=1 --create_if_missing")
# hex has invalid boolean value
self.assertRunFAIL("put 0x6133 0x6233 --hex=Boo --create_if_missing")
def testDumpLoad(self):
print "Running testDumpLoad..."
self.assertRunOK("batchput --create_if_missing x1 y1 x2 y2 x3 y3 x4 y4",
"OK")
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
origDbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
# Dump and load without any additional params specified
dumpFilePath = os.path.join(self.TMP_DIR, "dump1")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump1")
self.assertTrue(self.dumpDb("--db=%s" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath,
"x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
# Dump and load in hex
dumpFilePath = os.path.join(self.TMP_DIR, "dump2")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump2")
self.assertTrue(self.dumpDb("--db=%s --hex" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s --hex --create_if_missing" % loadedDbPath, dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath,
"x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
# Dump only a portion of the key range
dumpFilePath = os.path.join(self.TMP_DIR, "dump3")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump3")
self.assertTrue(self.dumpDb(
"--db=%s --from=x1 --to=x3" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath, "x1 : y1\nx2 : y2")
        # Dump up to max_keys rows
dumpFilePath = os.path.join(self.TMP_DIR, "dump4")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump4")
self.assertTrue(self.dumpDb(
"--db=%s --max_keys=3" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath,
"x1 : y1\nx2 : y2\nx3 : y3")
# Load into an existing db, create_if_missing is not specified
self.assertTrue(self.dumpDb("--db=%s" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb("--db=%s" % loadedDbPath, dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath,
"x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
# Dump and load with WAL disabled
dumpFilePath = os.path.join(self.TMP_DIR, "dump5")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump5")
self.assertTrue(self.dumpDb("--db=%s" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s --disable_wal --create_if_missing" % loadedDbPath,
dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath,
"x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
# Dump and load with lots of extra params specified
extraParams = " ".join(["--bloom_bits=14", "--block_size=1024",
"--auto_compaction=true",
"--write_buffer_size=4194304",
"--file_size=2097152"])
dumpFilePath = os.path.join(self.TMP_DIR, "dump6")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump6")
self.assertTrue(self.dumpDb(
"--db=%s %s" % (origDbPath, extraParams), dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s %s --create_if_missing" % (loadedDbPath, extraParams),
dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath,
"x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
# Dump with count_only
dumpFilePath = os.path.join(self.TMP_DIR, "dump7")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump7")
self.assertTrue(self.dumpDb(
"--db=%s --count_only" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
        # DB should have at least one value for scan to work
self.assertRunOKFull("put --db=%s k1 v1" % loadedDbPath, "OK")
self.assertRunOKFull("scan --db=%s" % loadedDbPath, "k1 : v1")
# Dump command fails because of typo in params
dumpFilePath = os.path.join(self.TMP_DIR, "dump8")
self.assertFalse(self.dumpDb(
"--db=%s --create_if_missing" % origDbPath, dumpFilePath))
def testMiscAdminTask(self):
print "Running testMiscAdminTask..."
# These tests need to be improved; for example with asserts about
# whether compaction or level reduction actually took place.
self.assertRunOK("batchput --create_if_missing x1 y1 x2 y2 x3 y3 x4 y4",
"OK")
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
origDbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
self.assertTrue(0 == run_err_null(
"./ldb compact --db=%s" % origDbPath))
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
self.assertTrue(0 == run_err_null(
"./ldb reduce_levels --db=%s --new_levels=2" % origDbPath))
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
self.assertTrue(0 == run_err_null(
"./ldb reduce_levels --db=%s --new_levels=3" % origDbPath))
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
self.assertTrue(0 == run_err_null(
"./ldb compact --db=%s --from=x1 --to=x3" % origDbPath))
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
self.assertTrue(0 == run_err_null(
"./ldb compact --db=%s --hex --from=0x6131 --to=0x6134"
% origDbPath))
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
        # TODO(dilip): Not sure what should be passed to WAL. Currently corrupted.
self.assertTrue(0 == run_err_null(
"./ldb dump_wal --db=%s --walfile=%s --header" % (
origDbPath, os.path.join(origDbPath, "LOG"))))
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
def testCheckConsistency(self):
print "Running testCheckConsistency..."
dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
self.assertRunOK("put x1 y1 --create_if_missing", "OK")
self.assertRunOK("put x2 y2", "OK")
self.assertRunOK("get x1", "y1")
self.assertRunOK("checkconsistency", "OK")
sstFilePath = my_check_output("ls %s" % os.path.join(dbPath, "*.sst"),
shell=True)
# Modify the file
my_check_output("echo 'evil' > %s" % sstFilePath, shell=True)
self.assertRunFAIL("checkconsistency")
# Delete the file
my_check_output("rm -f %s" % sstFilePath, shell=True)
self.assertRunFAIL("checkconsistency")
def dumpLiveFiles(self, params, dumpFile):
return 0 == run_err_null("./ldb dump_live_files %s > %s" % (
params, dumpFile))
def testDumpLiveFiles(self):
print "Running testDumpLiveFiles..."
dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
self.assertRunOK("put x1 y1 --create_if_missing", "OK")
self.assertRunOK("put x2 y2", "OK")
dumpFilePath = os.path.join(self.TMP_DIR, "dump1")
self.assertTrue(self.dumpLiveFiles("--db=%s" % dbPath, dumpFilePath))
self.assertRunOK("delete x1", "OK")
self.assertRunOK("put x3 y3", "OK")
dumpFilePath = os.path.join(self.TMP_DIR, "dump2")
self.assertTrue(self.dumpLiveFiles("--db=%s" % dbPath, dumpFilePath))
if __name__ == "__main__":
unittest.main()
|
|
import binascii
import hashlib
import math
import random
import re
_fmt_num_trials = 50
_mrpt_num_trials = 5 # number of bases to test
# RFC 3110
# hex 30 21 30 09 06 05 2B 0E 03 02 1A 05 00 04 14
ASN1_sha1_prefix = '\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'
def _tohex(int_value):
encoded = format(int_value, 'x')
length = len(encoded)
encoded = encoded.zfill(length+length%2)
return encoded.decode('hex')
def bit_len(n):
return int(math.ceil(math.log(n, 2)))
def modexp(base, power, modulus):
return pow(base, power, modulus)
# primality tests
def is_prime_fermat(n):
"""
Fermat's Little Theorem primality test
"""
assert n >= 2
# special case 2
if n == 2:
return True
# ensure n is odd
if n % 2 == 0:
return False
for i in range(_fmt_num_trials):
a = random.randint(1, n-1)
if modexp(a, n-1, n) != 1:
return False
return True
def is_prime_mr(n):
"""
Miller-Rabin primality test.
"""
assert n >= 2
# special case 2
if n == 2:
return True
# ensure n is odd
if n % 2 == 0:
return False
# write n-1 as 2**s * d
# repeatedly try to divide n-1 by 2
s = 0
d = n-1
while True:
quotient, remainder = divmod(d, 2)
if remainder == 1:
break
s += 1
d = quotient
assert(2**s * d == n-1)
# test the base a to see whether it is a witness for the compositeness of n
def try_composite(a):
if pow(a, d, n) == 1:
return False
for i in range(s):
if pow(a, 2**i * d, n) == n-1:
return False
return True # n is definitely composite
for i in range(_mrpt_num_trials):
a = random.randrange(2, n)
if try_composite(a):
return False
return True # no base tested showed n as composite
# multiplicative inverse modular
def invmod(a, n):
"""
    Modular multiplicative inverse using the extended Euclidean algorithm.
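
    For example, invmod(3, 11) == 4, since 3 * 4 = 12, which is 1 mod 11.
    Returns None when a is not invertible modulo n.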
"""
assert n > 1
t, r = 0, n
new_t, new_r = 1, a
while new_r != 0:
q = r/new_r
t, new_t = new_t, t - q * new_t
r, new_r = new_r, r - q * new_r
if r > 1:
print "%d is not invertible" % a
return None
if t < 0:
t = t + n
return t
# Generate a prime
def gen_prime(bitlen):
while True:
guess = random.getrandbits(bitlen)
guess |= 1
guess |= 1 << (bitlen - 1)
#if is_prime_fermat(guess):
if is_prime_mr(guess):
return guess
# RSA
def rsa_keygen(bitlen=1024, e=3):
assert bitlen % 2 == 0
# generate two primes
# make sure e and phi would be coprime
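    # (with a prime e such as the default e=3, gcd(e, p-1) is either 1 or e,
    # so rejecting candidates with p % e == 1 ensures e does not divide p-1;
    # doing the same for q keeps e coprime to phi = (p-1)*(q-1))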
p = None
while True:
p = gen_prime(bitlen/2)
if p % e != 1:
break
q = None
while True:
q = gen_prime(bitlen/2)
if q % e != 1:
break
n = p * q
#print "p = %d" % p
#print "q = %d" % q
# key gen
phi = (p - 1) * (q - 1)
#print "phi = %d" % phi
d = invmod(e, phi)
pub = (e, n)
priv = (d, n)
return (pub, priv)
def encrypt(pub, m):
(e, n) = pub
m = long(binascii.hexlify(m), 16)
assert bit_len(m) <= bit_len(n)
return modexp(m, e, n)
def decrypt(priv, c):
(d, n) = priv
m = modexp(c, d, n)
return _tohex(m)
def pkcs1_sign(priv, m):
d, n = priv
# message digest
h = hashlib.sha1(m).digest()
# padding
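    # EMSA-PKCS1-v1_5 block layout: 00h 01h | ffh ... ffh | 00h | ASN.1 DigestInfo prefix | SHA-1 digest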
byte_len = bit_len(n)/8
npad = byte_len - 2 - 1 - len(ASN1_sha1_prefix + h)
t = '\x00\x01' + '\xff'*npad + '\x00' + ASN1_sha1_prefix + h
assert len(t) == byte_len
t = long(binascii.hexlify(t), 16)
print "t is ", hex(t)
return decrypt(priv, t)
def vuln_pkcs1_verify(pub, m, sig):
e, n = pub
# encrypt the sig
t = _tohex(encrypt(pub, sig))
# deal with truncation of \x00 from the beginning
# not 100% realistic, but whatever
t = binascii.hexlify('\x00' + t)
#print "t is ", repr(t)
h = hashlib.sha1(m).hexdigest()
# bad check
    # just look for 00h 01h ffh ... 00h ASN.1_prefix hash
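    # the pattern is only anchored at the start, so neither the amount of ffh
    # padding nor the bytes after the digest are checked; that is what makes
    # low-exponent (e=3) signature forgeries against this verifier possible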
pattern = '^0001ff+00' + binascii.hexlify(ASN1_sha1_prefix) + h
if re.match(pattern, t):
return True
else:
return False
if __name__=="__main__":
bitlen = 32
print "Try to generate a prime: %d" % bitlen
print gen_prime(bitlen)
print ""
print "Testing PKCS#1.5 signatures"
pub, priv = rsa_keygen(1024)
m = "Hello World!!"
sig = pkcs1_sign(priv, m)
print "M: '%s'" % m
print "Sig: '%s'" % repr(sig)
print "Verifying ... ", vuln_pkcs1_verify(pub, m, sig)
|
|
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the network RPC API.
"""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from jacket.objects.compute import base as objects_base
from jacket import rpc
rpcapi_opts = [
cfg.StrOpt('network_topic',
default='network',
help='The topic network nodes listen on'),
cfg.BoolOpt('multi_host',
default=False,
help='Default value for multi_host in networks. Also, if set, '
'some rpc network calls will be sent directly to host.'),
]
CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
rpcapi_cap_opt = cfg.StrOpt('network',
help='Set a version cap for messages sent to network services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class NetworkAPI(object):
'''Client side of the network rpc API.
API version history:
* 1.0 - Initial version.
* 1.1 - Adds migrate_instance_[start|finish]
* 1.2 - Make migrate_instance_[start|finish] a little more flexible
* 1.3 - Adds fanout cast update_dns for multi_host networks
* 1.4 - Add get_backdoor_port()
* 1.5 - Adds associate
* 1.6 - Adds instance_uuid to _{dis,}associate_floating_ip
* 1.7 - Adds method get_floating_ip_pools to replace get_floating_pools
* 1.8 - Adds macs to allocate_for_instance
* 1.9 - Adds rxtx_factor to [add|remove]_fixed_ip, removes
instance_uuid from allocate_for_instance and
instance_get_nw_info
... Grizzly supports message version 1.9. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.9.
    * 1.10 - Adds (optional) requested_networks to deallocate_for_instance
... Havana supports message version 1.10. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.10.
* NOTE: remove unused method get_vifs_by_instance()
* NOTE: remove unused method get_vif_by_mac_address()
* NOTE: remove unused method get_network()
* NOTE: remove unused method get_all_networks()
* 1.11 - Add instance to deallocate_for_instance().
Remove instance_id, project_id, and host.
* 1.12 - Add instance to deallocate_fixed_ip()
... Icehouse supports message version 1.12. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.12.
* 1.13 - Convert allocate_for_instance()
to use NetworkRequestList objects
    ... Juno and Kilo support message version 1.13. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.13.
* NOTE: remove unused method get_floating_ips_by_fixed_address()
* NOTE: remove unused method get_instance_uuids_by_ip_filter()
* NOTE: remove unused method disassociate_network()
* NOTE: remove unused method get_fixed_ip()
* NOTE: remove unused method get_fixed_ip_by_address()
* NOTE: remove unused method get_floating_ip()
* NOTE: remove unused method get_floating_ip_pools()
* NOTE: remove unused method get_floating_ip_by_address()
* NOTE: remove unused method get_floating_ips_by_project()
* NOTE: remove unused method get_instance_id_by_floating_address()
* NOTE: remove unused method allocate_floating_ip()
* NOTE: remove unused method deallocate_floating_ip()
* NOTE: remove unused method associate_floating_ip()
* NOTE: remove unused method disassociate_floating_ip()
* NOTE: remove unused method associate()
* 1.14 - Add mac parameter to release_fixed_ip().
* 1.15 - Convert set_network_host() to use Network objects.
... Liberty supports message version 1.15. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.15.
* 1.16 - Transfer instance in addition to instance_id in
setup_networks_on_host
'''
VERSION_ALIASES = {
'grizzly': '1.9',
'havana': '1.10',
'icehouse': '1.12',
'juno': '1.13',
'kilo': '1.13',
'liberty': '1.15',
}
def __init__(self, topic=None):
super(NetworkAPI, self).__init__()
topic = topic or CONF.network_topic
target = messaging.Target(topic=topic, version='1.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.network,
CONF.upgrade_levels.network)
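        # A release alias (e.g. 'kilo') maps to its version cap; an explicit
        # version string in the config is passed through unchanged.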
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target, version_cap, serializer)
# TODO(russellb): Convert this to named arguments. It's a pretty large
# list, so unwinding it all is probably best done in its own patch so it's
# easier to review.
def create_networks(self, ctxt, **kwargs):
return self.client.call(ctxt, 'create_networks', **kwargs)
def delete_network(self, ctxt, uuid, fixed_range):
return self.client.call(ctxt, 'delete_network',
uuid=uuid, fixed_range=fixed_range)
def allocate_for_instance(self, ctxt, instance_id, project_id, host,
rxtx_factor, vpn, requested_networks, macs=None,
dhcp_options=None):
version = '1.13'
if not self.client.can_send_version(version):
version = '1.9'
if requested_networks:
requested_networks = requested_networks.as_tuples()
if CONF.multi_host:
cctxt = self.client.prepare(version=version, server=host)
else:
cctxt = self.client.prepare(version=version)
return cctxt.call(ctxt, 'allocate_for_instance',
instance_id=instance_id, project_id=project_id,
host=host, rxtx_factor=rxtx_factor, vpn=vpn,
requested_networks=requested_networks,
macs=jsonutils.to_primitive(macs))
def deallocate_for_instance(self, ctxt, instance, requested_networks=None):
cctxt = self.client
kwargs = {}
if self.client.can_send_version('1.11'):
version = '1.11'
kwargs['instance'] = instance
kwargs['requested_networks'] = requested_networks
else:
if self.client.can_send_version('1.10'):
version = '1.10'
kwargs['requested_networks'] = requested_networks
else:
version = '1.0'
kwargs['host'] = instance.host
kwargs['instance_id'] = instance.uuid
kwargs['project_id'] = instance.project_id
if CONF.multi_host:
cctxt = cctxt.prepare(server=instance.host, version=version)
return cctxt.call(ctxt, 'deallocate_for_instance', **kwargs)
def add_fixed_ip_to_instance(self, ctxt, instance_id, rxtx_factor,
host, network_id):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(ctxt, 'add_fixed_ip_to_instance',
instance_id=instance_id, rxtx_factor=rxtx_factor,
host=host, network_id=network_id)
def remove_fixed_ip_from_instance(self, ctxt, instance_id, rxtx_factor,
host, address):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(ctxt, 'remove_fixed_ip_from_instance',
instance_id=instance_id, rxtx_factor=rxtx_factor,
host=host, address=address)
def add_network_to_project(self, ctxt, project_id, network_uuid):
return self.client.call(ctxt, 'add_network_to_project',
project_id=project_id,
network_uuid=network_uuid)
def get_instance_nw_info(self, ctxt, instance_id, rxtx_factor, host,
project_id):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(ctxt, 'get_instance_nw_info',
instance_id=instance_id, rxtx_factor=rxtx_factor,
host=host, project_id=project_id)
def validate_networks(self, ctxt, networks):
return self.client.call(ctxt, 'validate_networks', networks=networks)
def get_dns_domains(self, ctxt):
return self.client.call(ctxt, 'get_dns_domains')
def add_dns_entry(self, ctxt, address, name, dns_type, domain):
return self.client.call(ctxt, 'add_dns_entry',
address=address, name=name,
dns_type=dns_type, domain=domain)
def modify_dns_entry(self, ctxt, address, name, domain):
return self.client.call(ctxt, 'modify_dns_entry',
address=address, name=name, domain=domain)
def delete_dns_entry(self, ctxt, name, domain):
return self.client.call(ctxt, 'delete_dns_entry',
name=name, domain=domain)
def delete_dns_domain(self, ctxt, domain):
return self.client.call(ctxt, 'delete_dns_domain', domain=domain)
def get_dns_entries_by_address(self, ctxt, address, domain):
return self.client.call(ctxt, 'get_dns_entries_by_address',
address=address, domain=domain)
def get_dns_entries_by_name(self, ctxt, name, domain):
return self.client.call(ctxt, 'get_dns_entries_by_name',
name=name, domain=domain)
def create_private_dns_domain(self, ctxt, domain, av_zone):
return self.client.call(ctxt, 'create_private_dns_domain',
domain=domain, av_zone=av_zone)
def create_public_dns_domain(self, ctxt, domain, project):
return self.client.call(ctxt, 'create_public_dns_domain',
domain=domain, project=project)
def setup_networks_on_host(self, ctxt, instance_id, host, teardown,
instance):
# NOTE(tr3buchet): the call is just to wait for completion
version = '1.16'
kwargs = {}
if not self.client.can_send_version(version):
version = '1.0'
else:
kwargs['instance'] = instance
cctxt = self.client.prepare(version=version)
return cctxt.call(ctxt, 'setup_networks_on_host',
instance_id=instance_id, host=host,
teardown=teardown, **kwargs)
def set_network_host(self, ctxt, network_ref):
version = '1.15'
if not self.client.can_send_version(version):
version = '1.0'
network_ref = objects_base.obj_to_primitive(network_ref)
cctxt = self.client.prepare(version=version)
return cctxt.call(ctxt, 'set_network_host', network_ref=network_ref)
def rpc_setup_network_on_host(self, ctxt, network_id, teardown, host):
# NOTE(tr3buchet): the call is just to wait for completion
cctxt = self.client.prepare(server=host)
return cctxt.call(ctxt, 'rpc_setup_network_on_host',
network_id=network_id, teardown=teardown)
# NOTE(russellb): Ideally this would not have a prefix of '_' since it is
# a part of the rpc API. However, this is how it was being called when the
# 1.0 API was being documented using this client proxy class. It should be
# changed if there was ever a 2.0.
def _rpc_allocate_fixed_ip(self, ctxt, instance_id, network_id, address,
vpn, host):
cctxt = self.client.prepare(server=host)
return cctxt.call(ctxt, '_rpc_allocate_fixed_ip',
instance_id=instance_id, network_id=network_id,
address=address, vpn=vpn)
def deallocate_fixed_ip(self, ctxt, address, host, instance):
kwargs = {}
if self.client.can_send_version('1.12'):
version = '1.12'
kwargs['instance'] = instance
else:
version = '1.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'deallocate_fixed_ip',
address=address, host=host, **kwargs)
def update_dns(self, ctxt, network_ids):
cctxt = self.client.prepare(fanout=True, version='1.3')
cctxt.cast(ctxt, 'update_dns', network_ids=network_ids)
# NOTE(russellb): Ideally this would not have a prefix of '_' since it is
# a part of the rpc API. However, this is how it was being called when the
# 1.0 API was being documented using this client proxy class. It should be
# changed if there was ever a 2.0.
def _associate_floating_ip(self, ctxt, floating_address, fixed_address,
interface, host, instance_uuid=None):
cctxt = self.client.prepare(server=host, version='1.6')
return cctxt.call(ctxt, '_associate_floating_ip',
floating_address=floating_address,
fixed_address=fixed_address,
interface=interface, instance_uuid=instance_uuid)
# NOTE(russellb): Ideally this would not have a prefix of '_' since it is
# a part of the rpc API. However, this is how it was being called when the
# 1.0 API was being documented using this client proxy class. It should be
# changed if there was ever a 2.0.
def _disassociate_floating_ip(self, ctxt, address, interface, host,
instance_uuid=None):
cctxt = self.client.prepare(server=host, version='1.6')
return cctxt.call(ctxt, '_disassociate_floating_ip',
address=address, interface=interface,
instance_uuid=instance_uuid)
def lease_fixed_ip(self, ctxt, address, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(ctxt, 'lease_fixed_ip', address=address)
def release_fixed_ip(self, ctxt, address, host, mac):
kwargs = {}
if self.client.can_send_version('1.14'):
version = '1.14'
kwargs['mac'] = mac
else:
version = '1.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'release_fixed_ip', address=address, **kwargs)
def migrate_instance_start(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
floating_addresses, host=None):
cctxt = self.client.prepare(server=host, version='1.2')
return cctxt.call(ctxt, 'migrate_instance_start',
instance_uuid=instance_uuid,
rxtx_factor=rxtx_factor,
project_id=project_id,
source=source_compute,
dest=dest_compute,
floating_addresses=floating_addresses)
def migrate_instance_finish(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
floating_addresses, host=None):
cctxt = self.client.prepare(server=host, version='1.2')
return cctxt.call(ctxt, 'migrate_instance_finish',
instance_uuid=instance_uuid,
rxtx_factor=rxtx_factor,
project_id=project_id,
source=source_compute,
dest=dest_compute,
floating_addresses=floating_addresses)
|
|
import argparse
import json
import logging
import os
import re
from collections import OrderedDict
from typing import Optional, Dict, List
import gitlab
import requests
import sys
import xmltodict
from circleci.api import Api as circle_api
from slack import WebClient as SlackClient
from Tests.Marketplace.marketplace_constants import BucketUploadFlow
from Tests.Marketplace.marketplace_services import get_upload_data
from Tests.scripts.utils.log_util import install_logging
from demisto_sdk.commands.common.tools import str2bool, run_command
DEMISTO_GREY_ICON = 'https://3xqz5p387rui1hjtdv1up7lw-wpengine.netdna-ssl.com/wp-content/' \
'uploads/2018/07/Demisto-Icon-Dark.png'
ARTIFACTS_FOLDER = os.getenv('ARTIFACTS_FOLDER', './artifacts')
UNITTESTS_TYPE = 'unittests'
TEST_PLAYBOOK_TYPE = 'test_playbooks'
SDK_UNITTESTS_TYPE = 'sdk_unittests'
SDK_FAILED_STEPS_TYPE = 'sdk_failed_steps'
SDK_RUN_AGAINST_FAILED_STEPS_TYPE = 'sdk_run_against_failed_steps'
SDK_BUILD_TITLE = 'SDK Nightly Build'
SDK_XSOAR_BUILD_TITLE = 'Demisto SDK Nightly - Run Against Cortex XSOAR'
CONTENT_CHANNEL = 'dmst-content-team'
DMST_SDK_NIGHTLY_GITLAB_JOBS_PREFIX = 'demisto-sdk-nightly'
SDK_NIGHTLY_CIRCLE_OPTS = {
SDK_UNITTESTS_TYPE, SDK_FAILED_STEPS_TYPE, SDK_RUN_AGAINST_FAILED_STEPS_TYPE
}
CONTENT_REPO_ID_CIRCLE_CI = '60525392'
def get_failed_steps_list(build_number: str):
options = options_handler()
if options.gitlab_server:
return get_gitlab_failed_steps(options.ci_token, build_number, options.gitlab_server,
options.gitlab_project_id)
return get_circle_failed_steps(options.ci_token, build_number)
def get_circle_failed_steps(ci_token, build_number):
failed_steps_list = []
circle_client = circle_api(ci_token)
vcs_type = 'github'
build_report = circle_client.get_build_info(username='demisto', project='content', build_num=build_number,
vcs_type=vcs_type)
for step in build_report.get('steps', []):
step_name = step.get('name', '')
actions = step.get('actions', [])
for action in actions:
action_status = action.get('status', '')
if action_status and action_status == 'failed':
action_name = action.get('name', '')
if action_name != step_name:
failed_steps_list.append(f'{step_name}: {action_name}')
else:
failed_steps_list.append(f'{step_name}')
return failed_steps_list
def get_gitlab_failed_steps(ci_token, build_number, server_url, project_id):
failed_steps_list = []
gitlab_client = gitlab.Gitlab(server_url, private_token=ci_token)
project = gitlab_client.projects.get(int(project_id))
job = project.jobs.get(int(build_number))
logging.info(f'status of gitlab job with id {job.id} and name {job.name} is {job.status}')
if job.status == 'failed':
logging.info(f'collecting failed job {job.name}')
logging.info(f'pipeline associated with failed job is {job.pipeline.get("web_url")}')
failed_steps_list.append(f'{job.name}')
return failed_steps_list
def http_request(url, params_dict=None, verify=True, text=False):
res = requests.request("GET",
url,
verify=verify,
params=params_dict,
)
res.raise_for_status()
if text:
return res.text
return res.json()
def options_handler():
parser = argparse.ArgumentParser(description='Parser for slack_notifier args')
parser.add_argument('-n', '--nightly', type=str2bool, help='is nightly build?', required=True)
parser.add_argument('-u', '--url', help='The url of the current build', required=True)
parser.add_argument('-b', '--buildNumber', help='The build number', required=True)
parser.add_argument('-s', '--slack', help='The token for slack', required=True)
parser.add_argument('-c', '--ci_token', help='The token for circleci/gitlab', required=True)
    parser.add_argument('-t', '--test_type', help='unittests or test_playbooks or sdk_unittests or sdk_failed_steps '
                                                  'or bucket_upload')
parser.add_argument('-f', '--env_results_file_name', help='The env results file containing the dns address')
parser.add_argument('-bu', '--bucket_upload', help='is bucket upload build?', required=True, type=str2bool)
parser.add_argument('-ca', '--ci_artifacts', help="The path to the ci artifacts directory")
parser.add_argument('-j', '--job_name', help='The job name that is running the slack notifier')
parser.add_argument('-ch', '--slack_channel', help='The slack channel in which to send the notification')
parser.add_argument('-g', '--gitlab_server', help='The gitlab server running the script, if left empty circleci '
'is assumed.')
    parser.add_argument('-gp', '--gitlab_project_id', help='The gitlab project_id. Only needed if the script is run '
'from gitlab.')
options = parser.parse_args()
return options
def get_artifact_data(artifact_relative_path: str) -> Optional[str]:
"""
Retrieves artifact data according to the artifact relative path from 'ARTIFACTS_FOLDER' given.
Args:
artifact_relative_path (str): Relative path of an artifact file.
Returns:
(Optional[str]): data of the artifact as str if exists, None otherwise.
"""
artifact_data = None
try:
file_name = os.path.join(ARTIFACTS_FOLDER, artifact_relative_path)
if os.path.isfile(file_name):
logging.info(f'Extracting {artifact_relative_path}')
with open(file_name, 'r') as file_data:
artifact_data = file_data.read()
else:
logging.info(f'Did not find {artifact_relative_path} file')
except Exception:
logging.exception(f'Error getting {artifact_relative_path} file')
return artifact_data
def get_entities_fields(entity_title: str, entities: List[str]) -> List[Dict]:
"""
Builds an entity from given entity title and entities list
Args:
entity_title (str): Title of the entity.
entities (List[str]): List of the entities.
Returns:
(List[Dict]): List of dict containing the entity. List is needed because it is the expected format by Slack API.
"""
return [{
"title": f'{entity_title}',
"value": '\n'.join(entities),
"short": False
}]
def get_failed_unit_tests_attachment(build_url: str, is_sdk_build: bool = False) -> List[Dict]:
"""
Returns the failed unit tests attachment to be reported in Slack.
Args:
build_url (str): Build URL of the given nightly.
is_sdk_build (bool): Whether build is SDK nightly or content nightly.
Returns:
(List[Dict]) Dict wrapped inside a list containing failed unit tests attachment.
"""
if artifact_data := get_artifact_data('failed_lint_report.txt'):
artifacts = artifact_data.split('\n')
unittests_fields: Optional[List[Dict]] = get_entities_fields(f'Failed Unittests - ({len(artifacts)})',
artifacts)
else:
unittests_fields = []
color: str = 'good' if not unittests_fields else 'danger'
build_type: str = 'SDK' if is_sdk_build else 'Content'
status = 'Success' if not unittests_fields else 'Failure'
title: str = f'{build_type} Nightly Unit Tests - {status}'
return [{
'fallback': title,
'color': color,
'title': title,
'title_link': build_url,
'fields': unittests_fields
}]
def get_coverage_color(coverage_percent: float) -> str:
"""
Returns color to represent coverage percent.
Args:
coverage_percent (float): Coverage percent.
Returns:
(str): Representing the color
"""
if coverage_percent <= 50.0:
return 'danger'
elif coverage_percent < 60.0:
return 'warning'
return 'good'
def get_coverage_attachment(build_number: str) -> Optional[Dict]:
"""
Returns content coverage report attachment.
Args:
build_number (str): Build number in CircleCI.
Returns:
(Dict): Attachment of the coverage if coverage report exists.
"""
xml_coverage_data: Optional[str] = get_artifact_data('coverage_report/coverage.xml')
if not xml_coverage_data:
return None
coverage_dict_data: OrderedDict = xmltodict.parse(xml_coverage_data)
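    # The (Cobertura-style) coverage.xml stores '@line-rate' as a fraction in
    # [0, 1]; it is converted to a percentage below.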
if not (coverage_percent_str := coverage_dict_data.get('coverage', {}).get('@line-rate')):
logging.error('Line coverage rate was missing from coverage data.')
return None
try:
coverage_percent: float = float(coverage_percent_str) * 100.0
except ValueError:
logging.error(
            f'Unexpected value for line coverage rate: {coverage_percent_str}. Expected float from line coverage rate.')
return None
coverage_url: str = f'https://{build_number}-{CONTENT_REPO_ID_CIRCLE_CI}-gh.circle-artifacts.com/0/artifacts' \
'/coverage_report/html/index.html'
return {
'fallback': f'Coverage Report Content: {coverage_percent:.2f}% Total Coverage',
'color': get_coverage_color(coverage_percent),
'title': f'Coverage Report Content: {coverage_percent:.2f}% Total Coverage',
'title_link': coverage_url,
'fields': []
}
def get_attachments_for_unit_test(build_url: str, build_number: str, is_sdk_build: bool = False) -> List[Dict]:
"""
Creates attachment for unit tests. Including failed unit tests attachment and coverage if exists.
Args:
build_url (str): Build URL.
build_number (str): Build number.
is_sdk_build (bool): Whether build is SDK build.
Returns:
(List[Dict]): List of attachments.
"""
unit_tests_attachments = get_failed_unit_tests_attachment(build_url, is_sdk_build)
if not is_sdk_build:
coverage_attachment = get_coverage_attachment(build_number)
if coverage_attachment:
unit_tests_attachments.append(coverage_attachment)
return unit_tests_attachments
def get_attachments_for_bucket_upload_flow(build_url, job_name, build_number, packs_results_file_path=None):
if failed_entities := get_failed_steps_list(build_number):
steps_fields = get_entities_fields(f'Failed Steps - ({len(failed_entities)})', failed_entities)
else:
steps_fields = []
color = 'good' if not steps_fields else 'danger'
title = f'{BucketUploadFlow.BUCKET_UPLOAD_BUILD_TITLE} - Success' if not steps_fields \
else f'{BucketUploadFlow.BUCKET_UPLOAD_BUILD_TITLE} - Failure'
if job_name and color == 'danger':
steps_fields = [{
"title": f'Job Failed: {job_name}',
"value": '',
"short": False
}] + steps_fields
if job_name and job_name in BucketUploadFlow.UPLOAD_JOB_NAMES:
successful_packs, failed_packs, successful_private_packs, _ = get_upload_data(
packs_results_file_path, BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
)
if successful_packs:
steps_fields += [{
"title": "Successful Packs:",
"value": "\n".join(sorted([pack_name for pack_name in {*successful_packs}], key=lambda s: s.lower())),
"short": False
}]
if failed_packs:
steps_fields += [{
"title": "Failed Packs:",
"value": "\n".join(sorted([pack_name for pack_name in {*failed_packs}], key=lambda s: s.lower())),
"short": False
}]
if successful_private_packs:
steps_fields += [{
"title": "Successful Private Packs:",
"value": "\n".join(sorted([pack_name for pack_name in {*successful_private_packs}],
key=lambda s: s.lower())),
"short": False
}]
if job_name and job_name not in BucketUploadFlow.UPLOAD_JOB_NAMES and color == 'good':
logging.info('On bucket upload flow we are not notifying on jobs that are not Upload Packs. exiting...')
sys.exit(0)
container_build_url = build_url + '#queue-placeholder/containers/0'
content_team_attachment = [{
'fallback': title,
'color': color,
'title': title,
'title_link': container_build_url,
'fields': steps_fields
}]
return content_team_attachment
def get_attachments_for_all_steps(build_url, build_title, build_number):
if failed_entities := get_failed_steps_list(build_number):
steps_fields = get_entities_fields(f'Failed Steps - ({len(failed_entities)})', failed_entities)
else:
steps_fields = []
color = 'good' if not steps_fields else 'danger'
title = f'{build_title} - Success' if not steps_fields else f'{build_title} - Failure'
container_build_url = build_url + '#queue-placeholder/containers/0'
content_team_attachment = [{
'fallback': title,
'color': color,
'title': title,
'title_link': container_build_url,
'fields': steps_fields
}]
return content_team_attachment
def get_attachments_for_test_playbooks(build_url, env_results_file_name):
if not env_results_file_name or not os.path.exists(env_results_file_name):
logging.critical('When running slack notifier for nightly build, provide env_results_file')
sys.exit(0)
with open(env_results_file_name, 'r') as env_results_file_content:
env_results = json.load(env_results_file_content)
role = env_results[0]['Role']
success_file_path = "./Tests/is_build_passed_{}.txt".format(role.replace(' ', ''))
content_team_fields, content_fields, _ = get_fields()
is_build_success = os.path.isfile(success_file_path)
color = 'good' if is_build_success else 'danger'
title = 'Content Nightly Build - Success' if is_build_success else 'Content Nightly Build - Failure'
content_team_attachment = [{
'fallback': title,
'color': color,
'title': title,
'title_link': build_url,
'fields': content_team_fields
}]
content_attachment = [{
'fallback': title,
'color': color,
'title': title,
'title_link': build_url,
'fields': content_fields
}]
return content_team_attachment, content_attachment
def get_fields():
failed_tests = []
# failed_tests.txt is copied into the artifacts directory
failed_tests_file_path = os.path.join(ARTIFACTS_FOLDER, 'failed_tests.txt')
if os.path.isfile(failed_tests_file_path):
logging.info('Extracting failed_tests')
with open(failed_tests_file_path, 'r') as failed_tests_file:
failed_tests = failed_tests_file.readlines()
failed_tests = [line.strip('\n') for line in failed_tests]
skipped_tests = []
if os.path.isfile('./Tests/skipped_tests.txt'):
logging.info('Extracting skipped_tests')
with open('./Tests/skipped_tests.txt', 'r') as skipped_tests_file:
skipped_tests = skipped_tests_file.readlines()
skipped_tests = [line.strip('\n') for line in skipped_tests]
skipped_integrations = []
if os.path.isfile('./Tests/skipped_integrations.txt'):
logging.info('Extracting skipped_integrations')
with open('./Tests/skipped_integrations.txt', 'r') as skipped_integrations_file:
skipped_integrations = skipped_integrations_file.readlines()
skipped_integrations = [line.strip('\n') for line in skipped_integrations]
content_team_fields = []
content_fields = []
if failed_tests:
field_failed_tests = {
"title": "Failed tests - ({})".format(len(failed_tests)),
"value": '\n'.join(failed_tests),
"short": False
}
content_team_fields.append(field_failed_tests)
content_fields.append(field_failed_tests)
if skipped_tests:
field_skipped_tests = {
"title": "Skipped tests - ({})".format(len(skipped_tests)),
"value": '',
"short": True
}
content_team_fields.append(field_skipped_tests)
if skipped_integrations:
field_skipped_integrations = {
"title": "Skipped integrations - ({})".format(len(skipped_integrations)),
"value": '',
"short": True
}
content_team_fields.append(field_skipped_integrations)
return content_team_fields, content_fields, failed_tests
def slack_notifier(build_url, slack_token, test_type, build_number, env_results_file_name=None, packs_results_file=None,
job_name="", slack_channel=CONTENT_CHANNEL, gitlab_server=None):
branches = run_command("git branch")
branch_name_reg = re.search(r'\* (.*)', branches)
branch_name = branch_name_reg.group(1) # type: ignore[union-attr]
if branch_name == 'master' or slack_channel.lower() != CONTENT_CHANNEL:
logging.info("Extracting build status")
if test_type == UNITTESTS_TYPE:
logging.info("Starting Slack notifications about nightly build - unit tests")
content_team_attachments = get_attachments_for_unit_test(build_url, build_number)
elif test_type == SDK_UNITTESTS_TYPE:
logging.info("Starting Slack notifications about SDK nightly build - unit tests")
content_team_attachments = get_attachments_for_unit_test(build_url, build_number, is_sdk_build=True)
        elif test_type == TEST_PLAYBOOK_TYPE:
logging.info("Starting Slack notifications about nightly build - tests playbook")
content_team_attachments, _ = get_attachments_for_test_playbooks(build_url, env_results_file_name)
elif test_type == SDK_FAILED_STEPS_TYPE:
logging.info('Starting Slack notifications about SDK nightly build - test playbook')
content_team_attachments = get_attachments_for_all_steps(build_url, SDK_BUILD_TITLE, build_number)
elif test_type == BucketUploadFlow.BUCKET_UPLOAD_TYPE:
logging.info('Starting Slack notifications about upload to production bucket build')
content_team_attachments = get_attachments_for_bucket_upload_flow(build_url, job_name, build_number,
packs_results_file)
elif test_type == SDK_RUN_AGAINST_FAILED_STEPS_TYPE:
logging.info("Starting Slack notifications about SDK nightly build - run against an xsoar instance")
content_team_attachments = get_attachments_for_all_steps(build_url, SDK_XSOAR_BUILD_TITLE, build_number)
elif job_name and test_type == job_name:
if job_name.startswith(DMST_SDK_NIGHTLY_GITLAB_JOBS_PREFIX):
                # The various CircleCI SDK nightly builds run as separate jobs in a
                # single GitLab pipeline, so they require different handling
logging.info(f"Starting Slack notifications for {job_name}")
if 'unittest' in job_name:
content_team_attachments = get_attachments_for_unit_test(build_url, build_number, is_sdk_build=True)
# override the 'title' from the attachment to be the job name
content_team_attachments[0]['title'] = content_team_attachments[0]['title'].replace(
'SDK Nightly Unit Tests', job_name
)
else:
content_team_attachments = get_attachments_for_all_steps(build_url, job_name, build_number)
# override the 'fields' from the attachment since any failure will be the same as the job name
content_team_attachments[0]['fields'] = []
else:
            raise NotImplementedError('test_type must be one of the supported test types '
                                      '(unittests, test_playbooks, the sdk_* types, bucket_upload) '
                                      'or match the running job name')
logging.info(f'Content team attachments:\n{content_team_attachments}')
logging.info(f"Sending Slack messages to {slack_channel}")
slack_client = SlackClient(slack_token)
username = 'Content GitlabCI' if gitlab_server else 'Content CircleCI'
slack_client.api_call(
"chat.postMessage",
json={'channel': slack_channel,
'username': username,
'as_user': 'False',
'attachments': content_team_attachments}
)
def main():
install_logging('Slack_Notifier.log')
options = options_handler()
nightly = options.nightly
url = options.url
slack = options.slack
test_type = options.test_type
env_results_file_name = options.env_results_file_name
bucket_upload = options.bucket_upload
ci_artifacts_path = options.ci_artifacts
job_name = options.job_name
slack_channel = options.slack_channel or CONTENT_CHANNEL
gitlab_server = options.gitlab_server
build_number = options.buildNumber
if nightly:
slack_notifier(url, slack, test_type, build_number, env_results_file_name)
elif bucket_upload:
slack_notifier(url, slack, test_type, build_number,
packs_results_file=os.path.join(
ci_artifacts_path, BucketUploadFlow.PACKS_RESULTS_FILE), job_name=job_name,
slack_channel=slack_channel, gitlab_server=gitlab_server)
elif test_type in SDK_NIGHTLY_CIRCLE_OPTS or test_type == job_name:
slack_notifier(
url, slack, test_type, build_number, job_name=job_name,
slack_channel=slack_channel, gitlab_server=gitlab_server
)
else:
logging.error("Not nightly build, stopping Slack Notifications about Content build")
if __name__ == '__main__':
main()
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for external_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# pylint: disable=g-import-not-at-top,unused-import
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
import mock
except ImportError:
try:
import unittest.mock as mock
except ImportError:
# At the moment TensorFlow does not have access to mock when in Python 2.7
# mode, although mock is part of the standard Python 3 library. If mock is
# not available, indicate this by assigning None to it.
mock = None
# pylint: enable=g-import-not-at-top,unused-import
class MockOptimizerInterface(tf.contrib.opt.ExternalOptimizerInterface):
NUM_STEP_CALLS = 5
NUM_LOSS_CALLS = 2
NUM_GRAD_CALLS = 3
def _minimize(self, initial_val, loss_func, loss_grad_func, step_callback,
optimizer_kwargs, **unused_kwargs):
"""Minimize (x - x0)**2 / 2 with respect to x."""
for _ in range(self.NUM_LOSS_CALLS):
loss_func(initial_val)
for _ in range(self.NUM_GRAD_CALLS - 1):
loss_grad_func(initial_val)
for _ in range(self.NUM_STEP_CALLS):
step_callback(initial_val)
return initial_val - loss_grad_func(initial_val)
class TestCase(tf.test.TestCase):
def assertAllClose(self, array1, array2):
array1 = np.asarray(array1)
array2 = np.asarray(array2)
if not array1.shape:
array1 = np.array([array1])
if not array2.shape:
array2 = np.array([array2])
super(TestCase, self).assertAllClose(array1, array2, rtol=1e-5, atol=1e-5)
def mock_import(self, module_name):
"""Causes importing a specific module to return a mock.MagicMock instance.
Usage:
with mock_import('scipy'):
import scipy # scipy is a MagicMock.
x = scipy.blah()[7] # x is also a MagicMock.
Args:
module_name: Name of module that should be mocked.
Returns:
A context manager for use in a with statement.
"""
orig_import = __import__
mocked_module = mock.MagicMock()
def import_mock(name, *args, **kwargs):
if name == module_name:
return mocked_module
return orig_import(name, *args, **kwargs)
return mock.patch.object(builtins, '__import__', side_effect=import_mock)
class ExternalOptimizerInterfaceTest(TestCase):
def test_optimize(self):
scalar = tf.Variable(tf.random_normal([]), 'scalar')
vector = tf.Variable(tf.random_normal([2]), 'vector')
matrix = tf.Variable(tf.random_normal([2, 3]), 'matrix')
minimum_location = tf.constant(np.arange(9), dtype=tf.float32)
loss = tf.reduce_sum(tf.square(vector - minimum_location[:2])) / 2.
loss += tf.reduce_sum(tf.square(scalar - minimum_location[2])) / 2.
loss += tf.reduce_sum(tf.square(
matrix - tf.reshape(minimum_location[3:], [2, 3]))) / 2.
optimizer = MockOptimizerInterface(loss)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
optimizer.minimize(sess)
self.assertAllClose(np.arange(2), sess.run(vector))
self.assertAllClose(np.arange(1) + 2, sess.run(scalar))
self.assertAllClose(np.arange(6).reshape(2, 3) + 3, sess.run(matrix))
def test_callbacks(self):
if mock is None:
# This test requires mock. See comment in imports section at top.
tf.logging.warning('This test requires mock and will not be run')
return
vector_val = np.array([7., -2.], dtype=np.float32)
vector = tf.Variable(vector_val, 'vector')
minimum_location_val = np.arange(2)
minimum_location = tf.constant(minimum_location_val, dtype=tf.float32)
loss = tf.reduce_sum(tf.square(vector - minimum_location)) / 2.
loss_val = ((vector_val - minimum_location_val)**2).sum() / 2.
optimizer = MockOptimizerInterface(loss)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
initial_vector_val = sess.run(vector)
extra_fetches = [loss]
step_callback = mock.Mock()
loss_callback = mock.Mock()
grad_callback = mock.Mock()
optimizer.minimize(
sess, fetches=extra_fetches, loss_callback=loss_callback,
grad_callback=grad_callback, step_callback=step_callback)
call = mock.call(loss_val)
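      # Both callbacks are invoked with the extra fetches (here just the loss),
      # so every recorded call should carry the analytically computed loss value.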
loss_calls = [call] * MockOptimizerInterface.NUM_LOSS_CALLS
loss_callback.assert_has_calls(loss_calls)
grad_calls = [call] * MockOptimizerInterface.NUM_GRAD_CALLS
grad_callback.assert_has_calls(grad_calls)
args, _ = step_callback.call_args
self.assertAllClose(initial_vector_val, args[0])
class ScipyOptimizerInterfaceTest(TestCase):
def test_unconstrained(self):
if mock is None:
# This test requires mock. See comment in imports section at top.
tf.logging.warning('This test requires mock and will not be run')
return
vector_initial_value = [7., 7.]
vector = tf.Variable(vector_initial_value, 'vector')
# Make norm as small as possible.
loss = tf.reduce_sum(tf.square(vector))
optimizer = tf.contrib.opt.ScipyOptimizerInterface(loss)
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
with self.mock_import('scipy.optimize'):
import scipy.optimize # pylint: disable=g-import-not-at-top
# scipy.optimize is now a mock.MagicMock.
optimized_vector = np.array([1.23, -0.1])
scipy.optimize.minimize.return_value = {'x': optimized_vector}
optimizer.minimize(sess)
self.assertAllClose(optimized_vector, sess.run(vector))
self.assertEqual(1, len(scipy.optimize.minimize.mock_calls))
call_signature = scipy.optimize.minimize.mock_calls[0]
args = call_signature[1]
self.assertEqual(2, len(args))
self.assertTrue(callable(args[0]))
self.assertAllClose(vector_initial_value, args[1])
kwargs = call_signature[2]
self.assertEqual(4, len(kwargs))
self.assertEqual('L-BFGS-B', kwargs['method'])
self.assertTrue(callable(kwargs['jac']))
self.assertTrue(callable(kwargs['callback']))
self.assertEqual([], kwargs['constraints'])
def test_nonlinear_programming(self):
if mock is None:
# This test requires mock. See comment in imports section at top.
tf.logging.warning('This test requires mock and will not be run')
return
vector_initial_value = [7., 7.]
vector = tf.Variable(vector_initial_value, 'vector')
# Make norm as small as possible.
loss = tf.reduce_sum(tf.square(vector))
# Ensure y = 1.
equalities = [vector[1] - 1.]
# Ensure x >= 1. Thus optimum should be at (1, 1).
inequalities = [vector[0] - 1.]
optimizer = tf.contrib.opt.ScipyOptimizerInterface(
loss, equalities=equalities, inequalities=inequalities,
method='SLSQP')
with self.test_session() as sess:
sess.run(tf.initialize_all_variables())
with self.mock_import('scipy.optimize'):
import scipy.optimize # pylint: disable=g-import-not-at-top
# scipy.optimize is now a mock.MagicMock.
optimized_vector = np.array([1.23, -0.1])
scipy.optimize.minimize.return_value = {'x': optimized_vector}
optimizer.minimize(sess)
self.assertAllClose(optimized_vector, sess.run(vector))
self.assertEqual(1, len(scipy.optimize.minimize.mock_calls))
call_signature = scipy.optimize.minimize.mock_calls[0]
args = call_signature[1]
self.assertEqual(2, len(args))
self.assertTrue(callable(args[0]))
self.assertAllClose(vector_initial_value, args[1])
kwargs = call_signature[2]
self.assertEqual(3, len(kwargs))
self.assertEqual('SLSQP', kwargs['method'])
self.assertTrue(callable(kwargs['jac']))
# No callback keyword arg since SLSQP doesn't support it.
constraints = kwargs['constraints']
self.assertEqual(2, len(constraints))
eq_constraint = constraints[0]
self.assertEqual(3, len(eq_constraint))
self.assertEqual('eq', eq_constraint['type'])
self.assertTrue(callable(eq_constraint['fun']))
self.assertTrue(callable(eq_constraint['jac']))
ineq_constraint = constraints[1]
self.assertEqual(3, len(ineq_constraint))
self.assertEqual('ineq', ineq_constraint['type'])
self.assertTrue(callable(ineq_constraint['fun']))
self.assertTrue(callable(ineq_constraint['jac']))
if __name__ == '__main__':
tf.test.main()
|
|
# Copyright (c) 2014 OpenStack Foundation
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For IronicHostManager
"""
import mock
from oslo.serialization import jsonutils
from nova import db
from nova import exception
from nova.scheduler import filters
from nova.scheduler import host_manager
from nova.scheduler import ironic_host_manager
from nova import test
from nova.tests.unit.scheduler import ironic_fakes
class FakeFilterClass1(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class FakeFilterClass2(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class IronicHostManagerTestCase(test.NoDBTestCase):
"""Test case for IronicHostManager class."""
def setUp(self):
super(IronicHostManagerTestCase, self).setUp()
self.host_manager = ironic_host_manager.IronicHostManager()
def test_manager_public_api_signatures(self):
self.assertPublicAPISignatures(host_manager.HostManager(),
self.host_manager)
def test_state_public_api_signatures(self):
self.assertPublicAPISignatures(
host_manager.HostState("dummy",
"dummy"),
ironic_host_manager.IronicNodeState("dummy",
"dummy")
)
def test_get_all_host_states(self):
        # Ensure .service is set and that the host states hold the values we expect.
context = 'fake_context'
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
self.mox.ReplayAll()
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(len(host_states_map), 4)
for i in range(4):
compute_node = ironic_fakes.COMPUTE_NODES[i]
host = compute_node['service']['host']
node = compute_node['hypervisor_hostname']
state_key = (host, node)
self.assertEqual(compute_node['service'],
host_states_map[state_key].service)
self.assertEqual(jsonutils.loads(compute_node['stats']),
host_states_map[state_key].stats)
self.assertEqual(compute_node['free_ram_mb'],
host_states_map[state_key].free_ram_mb)
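            # free_disk_gb is reported in GiB while HostState tracks
            # free_disk_mb in MiB, hence the * 1024 conversion below.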
self.assertEqual(compute_node['free_disk_gb'] * 1024,
host_states_map[state_key].free_disk_mb)
class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
"""Test case for IronicHostManager class."""
def setUp(self):
super(IronicHostManagerChangedNodesTestCase, self).setUp()
self.host_manager = ironic_host_manager.IronicHostManager()
ironic_driver = "nova.virt.ironic.driver.IronicDriver"
supported_instances = '[["i386", "baremetal", "baremetal"]]'
self.compute_node = dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
stats=jsonutils.dumps(dict(
ironic_driver=ironic_driver,
cpu_arch='i386')),
supported_instances=supported_instances,
free_disk_gb=10, free_ram_mb=1024,
hypervisor_type='ironic',
                                 hypervisor_version=1,
                                 hypervisor_hostname='fake_host')
@mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
def test_create_ironic_node_state(self, init_mock):
init_mock.return_value = None
compute = {'hypervisor_type': 'ironic'}
host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
compute=compute)
self.assertIs(ironic_host_manager.IronicNodeState, type(host_state))
@mock.patch.object(host_manager.HostState, '__init__')
def test_create_non_ironic_host_state(self, init_mock):
init_mock.return_value = None
compute = {'cpu_info': 'other cpu'}
host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
compute=compute)
self.assertIs(host_manager.HostState, type(host_state))
def test_get_all_host_states_after_delete_one(self):
context = 'fake_context'
self.mox.StubOutWithMock(db, 'compute_node_get_all')
# all nodes active for first call
db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
# remove node4 for second call
running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
if n.get('hypervisor_hostname') != 'node4uuid']
db.compute_node_get_all(context).AndReturn(running_nodes)
self.mox.ReplayAll()
self.host_manager.get_all_host_states(context)
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(3, len(host_states_map))
def test_get_all_host_states_after_delete_all(self):
context = 'fake_context'
self.mox.StubOutWithMock(db, 'compute_node_get_all')
# all nodes active for first call
db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
# remove all nodes for second call
db.compute_node_get_all(context).AndReturn([])
self.mox.ReplayAll()
self.host_manager.get_all_host_states(context)
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(0, len(host_states_map))
def test_update_from_compute_node(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
host.update_from_compute_node(self.compute_node)
self.assertEqual(1024, host.free_ram_mb)
self.assertEqual(1024, host.total_usable_ram_mb)
self.assertEqual(10240, host.free_disk_mb)
self.assertEqual(1, host.vcpus_total)
self.assertEqual(0, host.vcpus_used)
self.assertEqual(jsonutils.loads(self.compute_node['stats']),
host.stats)
self.assertEqual('ironic', host.hypervisor_type)
self.assertEqual(1, host.hypervisor_version)
self.assertEqual('fake_host', host.hypervisor_hostname)
def test_consume_identical_instance_from_compute(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
host.update_from_compute_node(self.compute_node)
instance = dict(root_gb=10, ephemeral_gb=0, memory_mb=1024, vcpus=1)
host.consume_from_instance(instance)
self.assertEqual(1, host.vcpus_used)
self.assertEqual(0, host.free_ram_mb)
self.assertEqual(0, host.free_disk_mb)
def test_consume_larger_instance_from_compute(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
host.update_from_compute_node(self.compute_node)
instance = dict(root_gb=20, ephemeral_gb=0, memory_mb=2048, vcpus=2)
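        # Ironic nodes are bare metal machines consumed whole: even an
        # oversized flavor marks the entire node as used, zeroing its free
        # RAM and disk rather than going negative.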
host.consume_from_instance(instance)
self.assertEqual(1, host.vcpus_used)
self.assertEqual(0, host.free_ram_mb)
self.assertEqual(0, host.free_disk_mb)
def test_consume_smaller_instance_from_compute(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
host.update_from_compute_node(self.compute_node)
instance = dict(root_gb=5, ephemeral_gb=0, memory_mb=512, vcpus=1)
host.consume_from_instance(instance)
self.assertEqual(1, host.vcpus_used)
self.assertEqual(0, host.free_ram_mb)
self.assertEqual(0, host.free_disk_mb)
class IronicHostManagerTestFilters(test.NoDBTestCase):
"""Test filters work for IronicHostManager."""
def setUp(self):
super(IronicHostManagerTestFilters, self).setUp()
self.flags(scheduler_available_filters=['%s.%s' % (__name__, cls) for
cls in ['FakeFilterClass1',
'FakeFilterClass2']])
self.host_manager = ironic_host_manager.IronicHostManager()
self.fake_hosts = [ironic_host_manager.IronicNodeState(
'fake_host%s' % x, 'fake-node') for x in range(1, 5)]
self.fake_hosts += [ironic_host_manager.IronicNodeState(
'fake_multihost', 'fake-node%s' % x) for x in range(1, 5)]
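        # Indices 0-3 are single-node hosts fake_host1..fake_host4; indices
        # 4-7 are nodes fake-node1..fake-node4 of fake_multihost. The filter
        # tests below refer to hosts by these positions.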
def test_choose_host_filters_not_found(self):
self.flags(scheduler_default_filters='FakeFilterClass3')
self.assertRaises(exception.SchedulerHostFilterNotFound,
self.host_manager._choose_host_filters, None)
def test_choose_host_filters(self):
self.flags(scheduler_default_filters=['FakeFilterClass2'])
        # Test that exactly one matching filter class is returned
host_filters = self.host_manager._choose_host_filters(None)
self.assertEqual(1, len(host_filters))
self.assertEqual('FakeFilterClass2',
host_filters[0].__class__.__name__)
def _mock_get_filtered_hosts(self, info, specified_filters=None):
self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
info['got_objs'] = []
info['got_fprops'] = []
def fake_filter_one(_self, obj, filter_props):
info['got_objs'].append(obj)
info['got_fprops'].append(filter_props)
return True
self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
self.host_manager._choose_host_filters(specified_filters).AndReturn(
[FakeFilterClass1()])
def _verify_result(self, info, result, filters=True):
for x in info['got_fprops']:
self.assertEqual(x, info['expected_fprops'])
if filters:
self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
self.assertEqual(set(info['expected_objs']), set(result))
def test_get_filtered_hosts(self):
fake_properties = {'moo': 1, 'cow': 2}
info = {'expected_objs': self.fake_hosts,
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_get_filtered_hosts_with_specified_filters(self):
fake_properties = {'moo': 1, 'cow': 2}
specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
info = {'expected_objs': self.fake_hosts,
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info, specified_filters)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties, filter_class_names=specified_filters)
self._verify_result(info, result)
def test_get_filtered_hosts_with_ignore(self):
fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
'fake_host5', 'fake_multihost']}
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_get_filtered_hosts_with_force_hosts(self):
fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
'fake_host5']}
# [0] and [2] are host1 and host3
info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_no_matching_force_hosts(self):
fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
        # Ensure ignore_hosts is processed before force_hosts in host filters.
fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
'ignore_hosts': ['fake_host1']}
# only fake_host3 should be left.
info = {'expected_objs': [self.fake_hosts[2]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
        # Ensure all nodes are returned for a host with many nodes
fake_properties = {'force_hosts': ['fake_multihost']}
info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
self.fake_hosts[6], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_nodes(self):
fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
'fake-node9']}
# [5] is fake-node2, [7] is fake-node4
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
        # Ensure only the overlapping host/node combination is returned when
        # both force_hosts and force_nodes are given
fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
'force_nodes': ['fake-node2', 'fake-node9']}
# [5] is fake-node2
info = {'expected_objs': [self.fake_hosts[5]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
# Ensure non-overlapping force_node and force_host yield no result
fake_properties = {'force_hosts': ['fake_multihost'],
'force_nodes': ['fake-node']}
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
# Ensure ignore_hosts can coexist with force_nodes
fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
'ignore_hosts': ['fake_host1', 'fake_host2']}
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
# Ensure ignore_hosts is processed before force_nodes
fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
'ignore_hosts': ['fake_multihost']}
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
|
|
#!/usr/bin/env python
# encoding: utf-8
import os
import numpy as np
from flask_classful import route
from flask import jsonify, request, Response
import json
from marvin import config
from marvin.api.base import BaseView, arg_validate as av
from marvin.core.exceptions import MarvinError
from marvin.utils.general import parseIdentifier, mangaid2plateifu
from marvin.tools.cube import Cube
from brain.core.exceptions import BrainError
try:
from sdss_access.path import Path
except ImportError:
Path = None
''' Functions and API views that run server-side '''
def _getCube(name, use_file=False, release=None, **kwargs):
''' Retrieve a cube using marvin tools '''
drpver, __ = config.lookUpVersions(release)
cube = None
results = {}
# parse name into either mangaid or plateifu
try:
idtype = parseIdentifier(name)
except Exception as ee:
results['error'] = 'Failed to parse input name {0}: {1}'.format(name, str(ee))
return cube, results
filename = None
plateifu = None
mangaid = None
try:
if use_file:
if idtype == 'mangaid':
plate, ifu = mangaid2plateifu(name, drpver=drpver)
elif idtype == 'plateifu':
plate, ifu = name.split('-')
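            # With use_file=True the identifier is resolved to an on-disk DRP
            # cube path via sdss_access instead of being loaded from the DB.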
if Path is not None:
filename = Path().full('mangacube', ifu=ifu, plate=plate, drpver=drpver)
assert os.path.exists(filename), 'file not found.'
else:
raise MarvinError('cannot create path for MaNGA cube.')
else:
if idtype == 'plateifu':
plateifu = name
elif idtype == 'mangaid':
mangaid = name
else:
raise MarvinError('invalid plateifu or mangaid: {0}'.format(idtype))
cube = Cube(filename=filename, mangaid=mangaid, plateifu=plateifu,
mode='local', release=release)
results['status'] = 1
except Exception as ee:
results['error'] = 'Failed to retrieve cube {0}: {1}'.format(name, str(ee))
return cube, results
class CubeView(BaseView):
''' Class describing API calls related to MaNGA Cubes '''
route_base = '/cubes/'
# decorators = [av.check_args()]
def index(self):
'''Returns general cube info
.. :quickref: Cube; Get general cube info
:query string release: the release of MaNGA
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
        :resjson json utahconfig: json of outgoing configuration
:resjson string traceback: traceback of an error, null if None
:resjson string data: data message
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin/api/cubes/ HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5"},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"data": "this is a cube!"
}
'''
av.manual_parse(self, request)
self.results['status'] = 1
self.results['data'] = 'this is a cube!'
return jsonify(self.results)
@route('/<name>/', methods=['GET', 'POST'], endpoint='getCube')
@av.check_args()
def get(self, args, name):
'''Returns the necessary information to instantiate a cube for a given plateifu.
.. :quickref: Cube; Get a cube given a plate-ifu or mangaid
:param name: The name of the cube as plate-ifu or mangaid
:form release: the release of MaNGA
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
        :resjson json utahconfig: json of outgoing configuration
:resjson string traceback: traceback of an error, null if None
:resjson json data: dictionary of returned data
:json string plateifu: id of cube
:json string mangaid: mangaid of cube
:json float ra: RA of cube
:json float dec: Dec of cube
:json string header: the cube header as a string
:json float redshift: the cube redshift
:json list wavelength: the cube wavelength array
:json string wcs_header: the cube wcs_header as a string
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin/api/cubes/8485-1901/ HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5"},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"data": {"plateifu": "8485-1901",
"mangaid": "1-209232",
"ra": 232.544703894,
"dec": 48.6902009334,
"header": "XTENSION= 'IMAGE', NAXIS=3, .... END",
"wcs_header": "WCSAXES = 3 / Number of coordindate axes .... END",
"redshift": 0.0407447,
"wavelength": [3621.6, 3622.43,...,10353.8]
}
}
'''
# Pop any args we don't want going into Cube
args = self._pop_args(args, arglist='name')
cube, res = _getCube(name, **args)
self.update_results(res)
if cube:
try:
nsa_data = cube.nsa
except (MarvinError, BrainError):
nsa_data = None
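                # NSA data can be unavailable for a target; -9999 is used
                # below as the sentinel redshift in that case.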
wavelength = (cube._wavelength.tolist() if isinstance(cube._wavelength, np.ndarray)
else cube._wavelength)
self.results['data'] = {'plateifu': name,
'mangaid': cube.mangaid,
'ra': cube.ra,
'dec': cube.dec,
'header': cube.header.tostring(),
'redshift': nsa_data.z if nsa_data else -9999,
'wavelength': wavelength,
'wcs_header': cube.wcs.to_header_string(),
'shape': cube._shape}
return jsonify(self.results)
@route('/<name>/extensions/<cube_extension>/', methods=['GET', 'POST'],
endpoint='getExtension')
@av.check_args()
def getExtension(self, args, name, cube_extension):
"""Returns the extension for a cube given a plateifu/mangaid.
.. :quickref: Cube; Gets a specified cube extension for a given plate-ifu or mangaid
:param name: The name of the cube as plate-ifu or mangaid
:param cube_extension: The name of the cube extension. Either flux, ivar, or mask.
:form release: the release of MaNGA
        :form use_file: if True, forces the cube to be loaded from a file.
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
        :resjson json utahconfig: json of outgoing configuration
:resjson string traceback: traceback of an error, null if None
:resjson json data: dictionary of returned data
:json string cube_extension: the data for the specified extension
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin/api/cubes/8485-1901/extensions/flux/ HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5"},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"data": {"extension_data": [[0,0,..0], [], ... [0, 0, 0,... 0]]
}
}
"""
# Pass the args in and get the cube
args = self._pop_args(args, arglist=['name', 'cube_extension'])
cube, res = _getCube(name, use_file=True, **args)
self.update_results(res)
if cube:
extension_data = cube.data[cube_extension.upper()].data
if extension_data is None:
self.results['data'] = {'extension_data': None}
else:
self.results['data'] = {'extension_data': extension_data.tolist()}
return Response(json.dumps(self.results), mimetype='application/json')
@route('/<name>/quantities/<x>/<y>/', methods=['GET', 'POST'],
endpoint='getCubeQuantitiesSpaxel')
@av.check_args()
def getCubeQuantitiesSpaxel(self, args, name, x, y):
"""Returns a dictionary with all the quantities.
.. :quickref: Cube; Returns a dictionary with all the quantities
:param name: The name of the cube as plate-ifu or mangaid
:param x: The x coordinate of the spaxel (origin is ``lower``)
:param y: The y coordinate of the spaxel (origin is ``lower``)
:form release: the release of MaNGA
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
        :resjson json utahconfig: json of outgoing configuration
:resjson string traceback: traceback of an error, null if None
:resjson json data: dictionary of returned data
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin/api/cubes/8485-1901/quantities/10/12/ HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5"},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"data": {"flux": {"value": [0,0,..0], "ivar": ...},
"specres": ...}
}
}
"""
# Pass the args in and get the cube
args = self._pop_args(args, arglist=['name', 'x', 'y'])
cube, res = _getCube(name, **args)
self.update_results(res)
if cube:
self.results['data'] = {}
spaxel_quantities = cube._get_spaxel_quantities(x, y)
for quant in spaxel_quantities:
spectrum = spaxel_quantities[quant]
if spectrum is None:
                    self.results['data'][quant] = {'value': None}
continue
value = spectrum.value.tolist()
ivar = spectrum.ivar.tolist() if spectrum.ivar is not None else None
mask = spectrum.mask.tolist() if spectrum.mask is not None else None
self.results['data'][quant] = {'value': value,
'ivar': ivar,
'mask': mask}
self.results['data']['wavelength'] = cube._wavelength.tolist()
return Response(json.dumps(self.results), mimetype='application/json')
|
|
# -*- test-case-name: wokkel.test.test_pubsub -*-
#
# Copyright (c) 2003-2009 Ralph Meijer
# See LICENSE for details.
"""
XMPP publish-subscribe protocol.
This protocol is specified in
U{XEP-0060<http://www.xmpp.org/extensions/xep-0060.html>}.
"""
from zope.interface import implements
from twisted.internet import defer
from twisted.python import log
from twisted.words.protocols.jabber import jid, error
from twisted.words.xish import domish
from wokkel import disco, data_form, generic, shim
from wokkel.compat import IQ
from wokkel.subprotocols import IQHandlerMixin, XMPPHandler
from wokkel.iwokkel import IPubSubClient, IPubSubService, IPubSubResource
# Iq get and set XPath queries
IQ_GET = '/iq[@type="get"]'
IQ_SET = '/iq[@type="set"]'
# Publish-subscribe namespaces
NS_PUBSUB = 'http://jabber.org/protocol/pubsub'
NS_PUBSUB_EVENT = NS_PUBSUB + '#event'
NS_PUBSUB_ERRORS = NS_PUBSUB + '#errors'
NS_PUBSUB_OWNER = NS_PUBSUB + "#owner"
NS_PUBSUB_NODE_CONFIG = NS_PUBSUB + "#node_config"
NS_PUBSUB_META_DATA = NS_PUBSUB + "#meta-data"
NS_PUBSUB_SUBSCRIBE_OPTIONS = NS_PUBSUB + "#subscribe_options"
# XPath to match pubsub requests
PUBSUB_REQUEST = '/iq[@type="get" or @type="set"]/' + \
'pubsub[@xmlns="' + NS_PUBSUB + '" or ' + \
'@xmlns="' + NS_PUBSUB_OWNER + '"]'
class SubscriptionPending(Exception):
"""
Raised when the requested subscription is pending acceptance.
"""
class SubscriptionUnconfigured(Exception):
"""
Raised when the requested subscription needs to be configured before
becoming active.
"""
class PubSubError(error.StanzaError):
"""
Exception with publish-subscribe specific condition.
"""
def __init__(self, condition, pubsubCondition, feature=None, text=None):
appCondition = domish.Element((NS_PUBSUB_ERRORS, pubsubCondition))
if feature:
appCondition['feature'] = feature
error.StanzaError.__init__(self, condition,
text=text,
appCondition=appCondition)
class BadRequest(error.StanzaError):
"""
Bad request stanza error.
"""
def __init__(self, pubsubCondition=None, text=None):
if pubsubCondition:
appCondition = domish.Element((NS_PUBSUB_ERRORS, pubsubCondition))
else:
appCondition = None
error.StanzaError.__init__(self, 'bad-request',
text=text,
appCondition=appCondition)
class Unsupported(PubSubError):
def __init__(self, feature, text=None):
self.feature = feature
PubSubError.__init__(self, 'feature-not-implemented',
'unsupported',
feature,
text)
def __str__(self):
message = PubSubError.__str__(self)
message += ', feature %r' % self.feature
return message
class Subscription(object):
"""
A subscription to a node.
@ivar nodeIdentifier: The identifier of the node subscribed to.
The root node is denoted by C{None}.
@ivar subscriber: The subscribing entity.
@ivar state: The subscription state. One of C{'subscribed'}, C{'pending'},
C{'unconfigured'}.
@ivar options: Optional list of subscription options.
@type options: C{dict}.
"""
def __init__(self, nodeIdentifier, subscriber, state, options=None):
self.nodeIdentifier = nodeIdentifier
self.subscriber = subscriber
self.state = state
self.options = options or {}
class Item(domish.Element):
"""
Publish subscribe item.
This behaves like an object providing L{domish.IElement}.
Item payload can be added using C{addChild} or C{addRawXml}, or using the
C{payload} keyword argument to C{__init__}.
"""
def __init__(self, id=None, payload=None):
"""
@param id: optional item identifier
@type id: L{unicode}
@param payload: optional item payload. Either as a domish element, or
as serialized XML.
@type payload: object providing L{domish.IElement} or L{unicode}.
"""
domish.Element.__init__(self, (NS_PUBSUB, 'item'))
if id is not None:
self['id'] = id
if payload is not None:
if isinstance(payload, basestring):
self.addRawXml(payload)
else:
self.addChild(payload)
class PubSubRequest(generic.Stanza):
"""
A publish-subscribe request.
The set of instance variables used depends on the type of request. If
a variable is not applicable or not passed in the request, its value is
C{None}.
@ivar verb: The type of publish-subscribe request. See L{_requestVerbMap}.
@type verb: C{str}.
@ivar affiliations: Affiliations to be modified.
@type affiliations: C{set}
@ivar items: The items to be published, as L{domish.Element}s.
@type items: C{list}
@ivar itemIdentifiers: Identifiers of the items to be retrieved or
retracted.
@type itemIdentifiers: C{set}
@ivar maxItems: Maximum number of items to retrieve.
@type maxItems: C{int}.
@ivar nodeIdentifier: Identifier of the node the request is about.
@type nodeIdentifier: C{unicode}
@ivar nodeType: The type of node that should be created, or for which the
configuration is retrieved. C{'leaf'} or C{'collection'}.
@type nodeType: C{str}
@ivar options: Configurations options for nodes, subscriptions and publish
requests.
@type options: L{data_form.Form}
@ivar subscriber: The subscribing entity.
@type subscriber: L{JID}
@ivar subscriptionIdentifier: Identifier for a specific subscription.
@type subscriptionIdentifier: C{unicode}
@ivar subscriptions: Subscriptions to be modified, as a set of
L{Subscription}.
@type subscriptions: C{set}
"""
verb = None
affiliations = None
items = None
itemIdentifiers = None
maxItems = None
nodeIdentifier = None
nodeType = None
options = None
subscriber = None
subscriptionIdentifier = None
subscriptions = None
# Map request iq type and subelement name to request verb
_requestVerbMap = {
('set', NS_PUBSUB, 'publish'): 'publish',
('set', NS_PUBSUB, 'subscribe'): 'subscribe',
('set', NS_PUBSUB, 'unsubscribe'): 'unsubscribe',
('get', NS_PUBSUB, 'options'): 'optionsGet',
('set', NS_PUBSUB, 'options'): 'optionsSet',
('get', NS_PUBSUB, 'subscriptions'): 'subscriptions',
('get', NS_PUBSUB, 'affiliations'): 'affiliations',
('set', NS_PUBSUB, 'create'): 'create',
('get', NS_PUBSUB_OWNER, 'default'): 'default',
('get', NS_PUBSUB_OWNER, 'configure'): 'configureGet',
('set', NS_PUBSUB_OWNER, 'configure'): 'configureSet',
('get', NS_PUBSUB, 'items'): 'items',
('set', NS_PUBSUB, 'retract'): 'retract',
('set', NS_PUBSUB_OWNER, 'purge'): 'purge',
('set', NS_PUBSUB_OWNER, 'delete'): 'delete',
('get', NS_PUBSUB_OWNER, 'affiliations'): 'affiliationsGet',
('set', NS_PUBSUB_OWNER, 'affiliations'): 'affiliationsSet',
('get', NS_PUBSUB_OWNER, 'subscriptions'): 'subscriptionsGet',
('set', NS_PUBSUB_OWNER, 'subscriptions'): 'subscriptionsSet',
}
# Map request verb to request iq type and subelement name
_verbRequestMap = dict(((v, k) for k, v in _requestVerbMap.iteritems()))
# Map request verb to parameter handler names
_parameters = {
'publish': ['node', 'items'],
'subscribe': ['nodeOrEmpty', 'jid'],
'unsubscribe': ['nodeOrEmpty', 'jid'],
'optionsGet': ['nodeOrEmpty', 'jid'],
'optionsSet': ['nodeOrEmpty', 'jid', 'options'],
'subscriptions': [],
'affiliations': [],
'create': ['nodeOrNone'],
'default': ['default'],
'configureGet': ['nodeOrEmpty'],
'configureSet': ['nodeOrEmpty', 'configure'],
'items': ['node', 'maxItems', 'itemIdentifiers'],
'retract': ['node', 'itemIdentifiers'],
'purge': ['node'],
'delete': ['node'],
'affiliationsGet': ['nodeOrEmpty'],
'affiliationsSet': [],
'subscriptionsGet': ['nodeOrEmpty'],
'subscriptionsSet': [],
}
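    # Example: an <iq type='set'> containing <pubsub><subscribe .../></pubsub>
    # maps to the verb 'subscribe'; its parameter list ['nodeOrEmpty', 'jid']
    # makes parseElement() dispatch to _parse_nodeOrEmpty and _parse_jid, and
    # send() to the corresponding _render_* methods when building a request.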
def __init__(self, verb=None):
self.verb = verb
@staticmethod
def _findForm(element, formNamespace):
"""
Find a Data Form.
Look for an element that represents a Data Form with the specified
form namespace as a child element of the given element.
"""
if not element:
return None
form = None
for child in element.elements():
try:
form = data_form.Form.fromElement(child)
except data_form.Error:
continue
            if form.formNamespace != formNamespace:
continue
return form
def _parse_node(self, verbElement):
"""
Parse the required node identifier out of the verbElement.
"""
try:
self.nodeIdentifier = verbElement["node"]
except KeyError:
raise BadRequest('nodeid-required')
def _render_node(self, verbElement):
"""
Render the required node identifier on the verbElement.
"""
if not self.nodeIdentifier:
raise Exception("Node identifier is required")
verbElement['node'] = self.nodeIdentifier
def _parse_nodeOrEmpty(self, verbElement):
"""
Parse the node identifier out of the verbElement. May be empty.
"""
self.nodeIdentifier = verbElement.getAttribute("node", '')
def _render_nodeOrEmpty(self, verbElement):
"""
Render the node identifier on the verbElement. May be empty.
"""
if self.nodeIdentifier:
verbElement['node'] = self.nodeIdentifier
def _parse_nodeOrNone(self, verbElement):
"""
Parse the optional node identifier out of the verbElement.
"""
self.nodeIdentifier = verbElement.getAttribute("node")
def _render_nodeOrNone(self, verbElement):
"""
Render the optional node identifier on the verbElement.
"""
if self.nodeIdentifier:
verbElement['node'] = self.nodeIdentifier
def _parse_items(self, verbElement):
"""
Parse items out of the verbElement for publish requests.
"""
self.items = []
for element in verbElement.elements():
if element.uri == NS_PUBSUB and element.name == 'item':
self.items.append(element)
def _render_items(self, verbElement):
"""
Render items into the verbElement for publish requests.
"""
if self.items:
for item in self.items:
verbElement.addChild(item)
def _parse_jid(self, verbElement):
"""
Parse subscriber out of the verbElement for un-/subscribe requests.
"""
try:
self.subscriber = jid.internJID(verbElement["jid"])
except KeyError:
raise BadRequest('jid-required')
def _render_jid(self, verbElement):
"""
Render subscriber into the verbElement for un-/subscribe requests.
"""
verbElement['jid'] = self.subscriber.full()
def _parse_default(self, verbElement):
"""
Parse node type out of a request for the default node configuration.
"""
form = PubSubRequest._findForm(verbElement, NS_PUBSUB_NODE_CONFIG)
if form and form.formType == 'submit':
values = form.getValues()
self.nodeType = values.get('pubsub#node_type', 'leaf')
else:
self.nodeType = 'leaf'
def _parse_configure(self, verbElement):
"""
Parse options out of a request for setting the node configuration.
"""
form = PubSubRequest._findForm(verbElement, NS_PUBSUB_NODE_CONFIG)
if form:
if form.formType == 'submit':
self.options = form.getValues()
elif form.formType == 'cancel':
self.options = {}
else:
raise BadRequest(text="Unexpected form type %r" % form.formType)
else:
raise BadRequest(text="Missing configuration form")
def _parse_itemIdentifiers(self, verbElement):
"""
Parse item identifiers out of items and retract requests.
"""
self.itemIdentifiers = []
for element in verbElement.elements():
if element.uri == NS_PUBSUB and element.name == 'item':
try:
self.itemIdentifiers.append(element["id"])
except KeyError:
raise BadRequest()
def _render_itemIdentifiers(self, verbElement):
"""
Render item identifiers into items and retract requests.
"""
if self.itemIdentifiers:
for itemIdentifier in self.itemIdentifiers:
item = verbElement.addElement('item')
item['id'] = itemIdentifier
def _parse_maxItems(self, verbElement):
"""
Parse maximum items out of an items request.
"""
value = verbElement.getAttribute('max_items')
if value:
try:
self.maxItems = int(value)
except ValueError:
raise BadRequest(text="Field max_items requires a positive " +
"integer value")
def _render_maxItems(self, verbElement):
"""
        Render maximum items into an items request.
"""
if self.maxItems:
verbElement['max_items'] = unicode(self.maxItems)
def _parse_options(self, verbElement):
form = PubSubRequest._findForm(verbElement, NS_PUBSUB_SUBSCRIBE_OPTIONS)
if form:
if form.formType == 'submit':
self.options = form.getValues()
elif form.formType == 'cancel':
self.options = {}
else:
raise BadRequest(text="Unexpected form type %r" % form.formType)
else:
raise BadRequest(text="Missing options form")
def parseElement(self, element):
"""
Parse the publish-subscribe verb and parameters out of a request.
"""
generic.Stanza.parseElement(self, element)
for child in element.pubsub.elements():
key = (self.stanzaType, child.uri, child.name)
try:
verb = self._requestVerbMap[key]
except KeyError:
continue
else:
self.verb = verb
break
if not self.verb:
raise NotImplementedError()
for parameter in self._parameters[verb]:
getattr(self, '_parse_%s' % parameter)(child)
def send(self, xs):
"""
Send this request to its recipient.
This renders all of the relevant parameters for this specific
        request into an L{IQ}, and invokes its C{send} method.
This returns a deferred that fires upon reception of a response. See
L{IQ} for details.
@param xs: The XML stream to send the request on.
@type xs: L{xmlstream.XmlStream}
@rtype: L{defer.Deferred}.
"""
try:
(self.stanzaType,
childURI,
childName) = self._verbRequestMap[self.verb]
except KeyError:
raise NotImplementedError()
iq = IQ(xs, self.stanzaType)
iq.addElement((childURI, 'pubsub'))
verbElement = iq.pubsub.addElement(childName)
if self.sender:
iq['from'] = self.sender.full()
if self.recipient:
iq['to'] = self.recipient.full()
for parameter in self._parameters[self.verb]:
getattr(self, '_render_%s' % parameter)(verbElement)
return iq.send()
class PubSubEvent(object):
"""
A publish subscribe event.
@param sender: The entity from which the notification was received.
@type sender: L{jid.JID}
@param recipient: The entity to which the notification was sent.
    @type recipient: L{jid.JID}
@param nodeIdentifier: Identifier of the node the event pertains to.
@type nodeIdentifier: C{unicode}
@param headers: SHIM headers, see L{wokkel.shim.extractHeaders}.
@type headers: L{dict}
"""
def __init__(self, sender, recipient, nodeIdentifier, headers):
self.sender = sender
self.recipient = recipient
self.nodeIdentifier = nodeIdentifier
self.headers = headers
class ItemsEvent(PubSubEvent):
"""
A publish-subscribe event that signifies new, updated and retracted items.
@param items: List of received items as domish elements.
@type items: C{list} of L{domish.Element}
"""
def __init__(self, sender, recipient, nodeIdentifier, items, headers):
PubSubEvent.__init__(self, sender, recipient, nodeIdentifier, headers)
self.items = items
class DeleteEvent(PubSubEvent):
"""
A publish-subscribe event that signifies the deletion of a node.
"""
redirectURI = None
class PurgeEvent(PubSubEvent):
"""
A publish-subscribe event that signifies the purging of a node.
"""
class PubSubClient(XMPPHandler):
"""
Publish subscribe client protocol.
"""
implements(IPubSubClient)
def connectionInitialized(self):
self.xmlstream.addObserver('/message/event[@xmlns="%s"]' %
NS_PUBSUB_EVENT, self._onEvent)
def _onEvent(self, message):
try:
sender = jid.JID(message["from"])
recipient = jid.JID(message["to"])
except KeyError:
return
actionElement = None
for element in message.event.elements():
if element.uri == NS_PUBSUB_EVENT:
actionElement = element
if not actionElement:
return
eventHandler = getattr(self, "_onEvent_%s" % actionElement.name, None)
if eventHandler:
headers = shim.extractHeaders(message)
eventHandler(sender, recipient, actionElement, headers)
message.handled = True
def _onEvent_items(self, sender, recipient, action, headers):
nodeIdentifier = action["node"]
items = [element for element in action.elements()
if element.name in ('item', 'retract')]
event = ItemsEvent(sender, recipient, nodeIdentifier, items, headers)
self.itemsReceived(event)
def _onEvent_delete(self, sender, recipient, action, headers):
nodeIdentifier = action["node"]
event = DeleteEvent(sender, recipient, nodeIdentifier, headers)
if action.redirect:
event.redirectURI = action.redirect.getAttribute('uri')
self.deleteReceived(event)
def _onEvent_purge(self, sender, recipient, action, headers):
nodeIdentifier = action["node"]
event = PurgeEvent(sender, recipient, nodeIdentifier, headers)
self.purgeReceived(event)
def itemsReceived(self, event):
pass
def deleteReceived(self, event):
pass
def purgeReceived(self, event):
pass
def createNode(self, service, nodeIdentifier=None, sender=None):
"""
Create a publish subscribe node.
@param service: The publish subscribe service to create the node at.
@type service: L{JID}
@param nodeIdentifier: Optional suggestion for the id of the node.
@type nodeIdentifier: C{unicode}
"""
request = PubSubRequest('create')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.sender = sender
def cb(iq):
try:
new_node = iq.pubsub.create["node"]
except AttributeError:
# the suggested node identifier was accepted
new_node = nodeIdentifier
return new_node
d = request.send(self.xmlstream)
d.addCallback(cb)
return d
def deleteNode(self, service, nodeIdentifier, sender=None):
"""
Delete a publish subscribe node.
@param service: The publish subscribe service to delete the node from.
@type service: L{JID}
@param nodeIdentifier: The identifier of the node.
@type nodeIdentifier: C{unicode}
"""
request = PubSubRequest('delete')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.sender = sender
return request.send(self.xmlstream)
def subscribe(self, service, nodeIdentifier, subscriber, sender=None):
"""
Subscribe to a publish subscribe node.
@param service: The publish subscribe service that keeps the node.
@type service: L{JID}
@param nodeIdentifier: The identifier of the node.
@type nodeIdentifier: C{unicode}
@param subscriber: The entity to subscribe to the node. This entity
will get notifications of new published items.
@type subscriber: L{JID}
"""
request = PubSubRequest('subscribe')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.subscriber = subscriber
request.sender = sender
def cb(iq):
subscription = iq.pubsub.subscription["subscription"]
if subscription == 'pending':
raise SubscriptionPending
elif subscription == 'unconfigured':
raise SubscriptionUnconfigured
else:
# we assume subscription == 'subscribed'
# any other value would be invalid, but that should have
# yielded a stanza error.
return None
d = request.send(self.xmlstream)
d.addCallback(cb)
return d
def unsubscribe(self, service, nodeIdentifier, subscriber, sender=None):
"""
Unsubscribe from a publish subscribe node.
@param service: The publish subscribe service that keeps the node.
@type service: L{JID}
@param nodeIdentifier: The identifier of the node.
@type nodeIdentifier: C{unicode}
@param subscriber: The entity to unsubscribe from the node.
@type subscriber: L{JID}
"""
request = PubSubRequest('unsubscribe')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.subscriber = subscriber
request.sender = sender
return request.send(self.xmlstream)
def publish(self, service, nodeIdentifier, items=None, sender=None):
"""
Publish to a publish subscribe node.
@param service: The publish subscribe service that keeps the node.
@type service: L{JID}
@param nodeIdentifier: The identifier of the node.
@type nodeIdentifier: C{unicode}
@param items: Optional list of L{Item}s to publish.
@type items: C{list}
"""
request = PubSubRequest('publish')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.items = items
request.sender = sender
return request.send(self.xmlstream)
def items(self, service, nodeIdentifier, maxItems=None, sender=None):
"""
Retrieve previously published items from a publish subscribe node.
@param service: The publish subscribe service that keeps the node.
@type service: L{JID}
@param nodeIdentifier: The identifier of the node.
@type nodeIdentifier: C{unicode}
@param maxItems: Optional limit on the number of retrieved items.
@type maxItems: C{int}
"""
request = PubSubRequest('items')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
if maxItems:
request.maxItems = str(int(maxItems))
request.sender = sender
def cb(iq):
items = []
for element in iq.pubsub.items.elements():
if element.uri == NS_PUBSUB and element.name == 'item':
items.append(element)
return items
d = request.send(self.xmlstream)
d.addCallback(cb)
return d
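    # Usage sketch (illustrative only): assuming `client` is a connected
    # PubSubClient and `service` is the jid.JID of a pubsub component, a
    # typical flow would be:
    #
    #     d = client.createNode(service, u'princely_musings')
    #     d.addCallback(lambda node: client.publish(
    #         service, node, [Item(payload=domish.Element((None, 'entry')))]))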
class PubSubService(XMPPHandler, IQHandlerMixin):
"""
Protocol implementation for a XMPP Publish Subscribe Service.
The word Service here is used as taken from the Publish Subscribe
specification. It is the party responsible for keeping nodes and their
subscriptions, and sending out notifications.
Methods from the L{IPubSubService} interface that are called as
    a result of an XMPP request may raise exceptions. Alternatively, the
    deferreds returned by these methods may have their errback called. These are
handled as follows:
- If the exception is an instance of L{error.StanzaError}, an error
response iq is returned.
- Any other exception is reported using L{log.msg}. An error response
with the condition C{internal-server-error} is returned.
    The default implementations of said methods raise an L{Unsupported}
    exception and are meant to be overridden.
@ivar discoIdentity: Service discovery identity as a dictionary with
keys C{'category'}, C{'type'} and C{'name'}.
@ivar pubSubFeatures: List of supported publish-subscribe features for
service discovery, as C{str}.
@type pubSubFeatures: C{list} or C{None}
"""
implements(IPubSubService)
iqHandlers = {
'/*': '_onPubSubRequest',
}
_legacyHandlers = {
'publish': ('publish', ['sender', 'recipient',
'nodeIdentifier', 'items']),
'subscribe': ('subscribe', ['sender', 'recipient',
'nodeIdentifier', 'subscriber']),
'unsubscribe': ('unsubscribe', ['sender', 'recipient',
'nodeIdentifier', 'subscriber']),
'subscriptions': ('subscriptions', ['sender', 'recipient']),
'affiliations': ('affiliations', ['sender', 'recipient']),
'create': ('create', ['sender', 'recipient', 'nodeIdentifier']),
'getConfigurationOptions': ('getConfigurationOptions', []),
'default': ('getDefaultConfiguration',
['sender', 'recipient', 'nodeType']),
'configureGet': ('getConfiguration', ['sender', 'recipient',
'nodeIdentifier']),
'configureSet': ('setConfiguration', ['sender', 'recipient',
'nodeIdentifier', 'options']),
'items': ('items', ['sender', 'recipient', 'nodeIdentifier',
'maxItems', 'itemIdentifiers']),
'retract': ('retract', ['sender', 'recipient', 'nodeIdentifier',
'itemIdentifiers']),
'purge': ('purge', ['sender', 'recipient', 'nodeIdentifier']),
'delete': ('delete', ['sender', 'recipient', 'nodeIdentifier']),
}
hideNodes = False
def __init__(self, resource=None):
self.resource = resource
self.discoIdentity = {'category': 'pubsub',
'type': 'generic',
'name': 'Generic Publish-Subscribe Service'}
self.pubSubFeatures = []
def connectionMade(self):
self.xmlstream.addObserver(PUBSUB_REQUEST, self.handleRequest)
def getDiscoInfo(self, requestor, target, nodeIdentifier):
def toInfo(nodeInfo, info):
if not nodeInfo:
return info
(nodeType, metaData) = nodeInfo['type'], nodeInfo['meta-data']
info.append(disco.DiscoIdentity('pubsub', nodeType))
if metaData:
form = data_form.Form(formType="result",
formNamespace=NS_PUBSUB_META_DATA)
form.addField(
data_form.Field(
var='pubsub#node_type',
value=nodeType,
label='The type of node (collection or leaf)'
)
)
for metaDatum in metaData:
form.addField(data_form.Field.fromDict(metaDatum))
info.append(form)
return info
info = []
request = PubSubRequest('discoInfo')
if self.resource is not None:
resource = self.resource.locateResource(request)
identity = resource.discoIdentity
features = resource.features
getInfo = resource.getInfo
else:
            category = self.discoIdentity['category']
            idType = self.discoIdentity['type']
            name = self.discoIdentity['name']
identity = disco.DiscoIdentity(category, idType, name)
features = self.pubSubFeatures
getInfo = self.getNodeInfo
if not nodeIdentifier:
info.append(identity)
info.append(disco.DiscoFeature(disco.NS_DISCO_ITEMS))
info.extend([disco.DiscoFeature("%s#%s" % (NS_PUBSUB, feature))
for feature in features])
d = getInfo(requestor, target, nodeIdentifier or '')
d.addCallback(toInfo, info)
d.addErrback(log.err)
return d
def getDiscoItems(self, requestor, target, nodeIdentifier):
if self.hideNodes:
d = defer.succeed([])
elif self.resource is not None:
request = PubSubRequest('discoInfo')
resource = self.resource.locateResource(request)
d = resource.getNodes(requestor, target, nodeIdentifier)
elif nodeIdentifier:
d = self.getNodes(requestor, target)
else:
d = defer.succeed([])
d.addCallback(lambda nodes: [disco.DiscoItem(target, node)
for node in nodes])
return d
def _onPubSubRequest(self, iq):
request = PubSubRequest.fromElement(iq)
if self.resource is not None:
resource = self.resource.locateResource(request)
else:
resource = self
# Preprocess the request, knowing the handling resource
try:
preProcessor = getattr(self, '_preProcess_%s' % request.verb)
except AttributeError:
pass
else:
request = preProcessor(resource, request)
if request is None:
return defer.succeed(None)
        # Process the request itself.
if resource is not self:
try:
handler = getattr(resource, request.verb)
except AttributeError:
# fix lookup feature
text = "Request verb: %s" % request.verb
return defer.fail(Unsupported('', text))
d = handler(request)
else:
handlerName, argNames = self._legacyHandlers[request.verb]
handler = getattr(self, handlerName)
args = [getattr(request, arg) for arg in argNames]
d = handler(*args)
# If needed, translate the result into a response
try:
cb = getattr(self, '_toResponse_%s' % request.verb)
except AttributeError:
pass
else:
d.addCallback(cb, resource, request)
return d
def _toResponse_subscribe(self, result, resource, request):
response = domish.Element((NS_PUBSUB, "pubsub"))
subscription = response.addElement("subscription")
if result.nodeIdentifier:
subscription["node"] = result.nodeIdentifier
subscription["jid"] = result.subscriber.full()
subscription["subscription"] = result.state
return response
def _toResponse_subscriptions(self, result, resource, request):
response = domish.Element((NS_PUBSUB, 'pubsub'))
subscriptions = response.addElement('subscriptions')
for subscription in result:
item = subscriptions.addElement('subscription')
item['node'] = subscription.nodeIdentifier
item['jid'] = subscription.subscriber.full()
item['subscription'] = subscription.state
return response
def _toResponse_affiliations(self, result, resource, request):
response = domish.Element((NS_PUBSUB, 'pubsub'))
affiliations = response.addElement('affiliations')
for nodeIdentifier, affiliation in result:
item = affiliations.addElement('affiliation')
item['node'] = nodeIdentifier
item['affiliation'] = affiliation
return response
def _toResponse_create(self, result, resource, request):
if not request.nodeIdentifier or request.nodeIdentifier != result:
response = domish.Element((NS_PUBSUB, 'pubsub'))
create = response.addElement('create')
create['node'] = result
return response
else:
return None
def _makeFields(self, options, values):
fields = []
for name, value in values.iteritems():
if name not in options:
continue
option = {'var': name}
option.update(options[name])
if isinstance(value, list):
option['values'] = value
else:
option['value'] = value
fields.append(data_form.Field.fromDict(option))
return fields
def _formFromConfiguration(self, resource, values):
options = resource.getConfigurationOptions()
fields = self._makeFields(options, values)
form = data_form.Form(formType="form",
formNamespace=NS_PUBSUB_NODE_CONFIG,
fields=fields)
return form
def _checkConfiguration(self, resource, values):
options = resource.getConfigurationOptions()
processedValues = {}
for key, value in values.iteritems():
if key not in options:
continue
option = {'var': key}
option.update(options[key])
field = data_form.Field.fromDict(option)
if isinstance(value, list):
field.values = value
else:
field.value = value
field.typeCheck()
if isinstance(value, list):
processedValues[key] = field.values
else:
processedValues[key] = field.value
return processedValues
def _preProcess_default(self, resource, request):
if request.nodeType not in ('leaf', 'collection'):
raise error.StanzaError('not-acceptable')
else:
return request
def _toResponse_default(self, options, resource, request):
response = domish.Element((NS_PUBSUB_OWNER, "pubsub"))
default = response.addElement("default")
form = self._formFromConfiguration(resource, options)
default.addChild(form.toElement())
return response
def _toResponse_configureGet(self, options, resource, request):
response = domish.Element((NS_PUBSUB_OWNER, "pubsub"))
configure = response.addElement("configure")
form = self._formFromConfiguration(resource, options)
configure.addChild(form.toElement())
if request.nodeIdentifier:
configure["node"] = request.nodeIdentifier
return response
def _preProcess_configureSet(self, resource, request):
if request.options:
request.options = self._checkConfiguration(resource,
request.options)
return request
else:
return None
def _toResponse_items(self, result, resource, request):
response = domish.Element((NS_PUBSUB, 'pubsub'))
items = response.addElement('items')
items["node"] = request.nodeIdentifier
for item in result:
items.addChild(item)
return response
def _createNotification(self, eventType, service, nodeIdentifier,
subscriber, subscriptions=None):
headers = []
if subscriptions:
for subscription in subscriptions:
if nodeIdentifier != subscription.nodeIdentifier:
headers.append(('Collection', subscription.nodeIdentifier))
message = domish.Element((None, "message"))
message["from"] = service.full()
message["to"] = subscriber.full()
event = message.addElement((NS_PUBSUB_EVENT, "event"))
element = event.addElement(eventType)
element["node"] = nodeIdentifier
if headers:
message.addChild(shim.Headers(headers))
return message
# public methods
def notifyPublish(self, service, nodeIdentifier, notifications):
for subscriber, subscriptions, items in notifications:
message = self._createNotification('items', service,
nodeIdentifier, subscriber,
subscriptions)
message.event.items.children = items
self.send(message)
def notifyDelete(self, service, nodeIdentifier, subscribers,
redirectURI=None):
for subscriber in subscribers:
message = self._createNotification('delete', service,
nodeIdentifier,
subscriber)
if redirectURI:
redirect = message.event.delete.addElement('redirect')
redirect['uri'] = redirectURI
self.send(message)
def getNodeInfo(self, requestor, service, nodeIdentifier):
return None
def getNodes(self, requestor, service):
return []
def publish(self, requestor, service, nodeIdentifier, items):
raise Unsupported('publish')
def subscribe(self, requestor, service, nodeIdentifier, subscriber):
raise Unsupported('subscribe')
def unsubscribe(self, requestor, service, nodeIdentifier, subscriber):
raise Unsupported('subscribe')
def subscriptions(self, requestor, service):
raise Unsupported('retrieve-subscriptions')
def affiliations(self, requestor, service):
raise Unsupported('retrieve-affiliations')
def create(self, requestor, service, nodeIdentifier):
raise Unsupported('create-nodes')
def getConfigurationOptions(self):
return {}
def getDefaultConfiguration(self, requestor, service, nodeType):
raise Unsupported('retrieve-default')
def getConfiguration(self, requestor, service, nodeIdentifier):
raise Unsupported('config-node')
def setConfiguration(self, requestor, service, nodeIdentifier, options):
raise Unsupported('config-node')
def items(self, requestor, service, nodeIdentifier, maxItems,
itemIdentifiers):
raise Unsupported('retrieve-items')
def retract(self, requestor, service, nodeIdentifier, itemIdentifiers):
raise Unsupported('retract-items')
def purge(self, requestor, service, nodeIdentifier):
raise Unsupported('purge-nodes')
def delete(self, requestor, service, nodeIdentifier):
raise Unsupported('delete-nodes')
class PubSubResource(object):
implements(IPubSubResource)
features = []
discoIdentity = disco.DiscoIdentity('pubsub',
'service',
'Publish-Subscribe Service')
def locateResource(self, request):
return self
def getInfo(self, requestor, service, nodeIdentifier):
return defer.succeed(None)
def getNodes(self, requestor, service, nodeIdentifier):
return defer.succeed([])
def getConfigurationOptions(self):
return {}
def publish(self, request):
return defer.fail(Unsupported('publish'))
def subscribe(self, request):
return defer.fail(Unsupported('subscribe'))
def unsubscribe(self, request):
return defer.fail(Unsupported('subscribe'))
def subscriptions(self, request):
return defer.fail(Unsupported('retrieve-subscriptions'))
def affiliations(self, request):
return defer.fail(Unsupported('retrieve-affiliations'))
def create(self, request):
return defer.fail(Unsupported('create-nodes'))
def default(self, request):
return defer.fail(Unsupported('retrieve-default'))
def configureGet(self, request):
return defer.fail(Unsupported('config-node'))
def configureSet(self, request):
return defer.fail(Unsupported('config-node'))
def items(self, request):
return defer.fail(Unsupported('retrieve-items'))
def retract(self, request):
return defer.fail(Unsupported('retract-items'))
def purge(self, request):
return defer.fail(Unsupported('purge-nodes'))
def delete(self, request):
return defer.fail(Unsupported('delete-nodes'))
def affiliationsGet(self, request):
return defer.fail(Unsupported('modify-affiliations'))
def affiliationsSet(self, request):
return defer.fail(Unsupported('modify-affiliations'))
def subscriptionsGet(self, request):
return defer.fail(Unsupported('manage-subscriptions'))
def subscriptionsSet(self, request):
return defer.fail(Unsupported('manage-subscriptions'))
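# A minimal usage sketch, not part of Wokkel itself (the class name is
# illustrative): back-ends subclass PubSubResource, advertise the features
# they support, and override the matching verb handlers. Anything left
# untouched keeps failing with Unsupported, which the service reports to the
# requester as an unimplemented feature.
class _ExamplePublishOnlyResource(PubSubResource):
    features = ['publish']
    def publish(self, request):
        # A real backend would persist request.items for request.nodeIdentifier
        # and schedule notifications before firing this deferred.
        return defer.succeed(None)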
|
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import qt.QtCore as qc
import qt.QtGui as qg
import numpy
from popupcad.graphics2d.graphicsitems import Common
import popupcad
import shapely.geometry as sg
import qt.qt_hacks as qh
class GenericText(object):
editable = ['*']
deletable = ['*']
hidden = ['id']
def __init__(self, text, pos, font='Arial', fontsize=1):
self.text = text
self.pos = pos
self.font = font
self.fontsize = fontsize
self.exteriors = []
self.id = id(self)
def copy(self, identical=True):
new = type(self)(self.text,self.pos.copy(identical),self.font,self.fontsize)
if identical:
new.id = self.id
return new
def upgrade(self, *args, **kwargs):
if self.font == 'Courier':
self.font='Courier New'
return self
def isValid(self):
return True
def is_construction(self):
return False
def to_generic_polygons(self,add_shift = True):
import idealab_tools.text_to_polygons
from matplotlib.font_manager import FontProperties
from popupcad.filetypes.genericshapes import GenericPoly
text = self.text
# small font scalings actually produce different paths. use 10pt font as invariant size
internal_font = 10
fp = {'family':self.font,'size':internal_font}
if text !='':
polygons = idealab_tools.text_to_polygons.text_to_polygons(self.text,fp,popupcad.text_approximation)
generic_polygons = []
for item in polygons:
item = numpy.array(item)
if popupcad.flip_y:
item[:,1]=-1*item[:,1]+internal_font
item*=(4/3)
item = item.tolist()
generic_polygons.append(GenericPoly.gen_from_point_lists(item,[]))
#
else:
generic_polygons = []
T = numpy.eye(3)
T[1,1]=-1
generic_polygons = [item.transform(T) for item in generic_polygons]
[item.scale(self.fontsize/internal_font) for item in generic_polygons]
if add_shift:
for item in generic_polygons:
item.shift(self.pos.getpos())
return generic_polygons
def generic_polys_to_shapely(self,generic_polygons,scaling):
shapelies = [item.to_shapely(scaling = scaling) for item in generic_polygons]
if len(shapelies) > 1:
obj1 = shapelies.pop(0)
while shapelies:
obj1 = obj1.symmetric_difference(shapelies.pop(0))
elif len(shapelies) ==1 :
obj1 = shapelies[0]
else:
obj1 = sg.Polygon()
return obj1
def to_shapely(self,add_shift = True,scaling = None):
generic_polygons = self.to_generic_polygons(add_shift)
shapely = self.generic_polys_to_shapely(generic_polygons,scaling)
return shapely
def to_generics(self,add_shift = True,scaling = 1):
shapely = self.to_shapely(add_shift,scaling=scaling)
shapelies = popupcad.algorithms.csg_shapely.condition_shapely_entities(shapely)
generics = [popupcad.algorithms.csg_shapely.to_generic(item) for item in shapelies]
return generics
def painterpath(self,add_shift = True):
generics = self.to_generics(add_shift,scaling = popupcad.csg_processing_scaling)
p2 = qg.QPainterPath()
[p2.addPath(item.painterpath()) for item in generics]
return p2
def outputinteractive(self):
tp = TextParent(self)
return tp
def properties(self):
from idealab_tools.propertyeditor import PropertyEditor
return PropertyEditor(self)
def output_dxf(self,model_space,layer = None):
generics = self.to_generics(scaling = popupcad.csg_processing_scaling)
[item.output_dxf(model_space,layer) for item in generics]
def vertices(self):
return []
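# Hypothetical stand-in, not part of popupcad: it shows the small duck-typed
# interface GenericText expects from its pos object -- copy(identical),
# getpos(scaling) and setpos(point) are the only calls made by GenericText
# above and TextParent below.
class _StubPos(object):
    def __init__(self, x, y):
        self._point = (float(x), float(y))
    def copy(self, identical=True):
        return _StubPos(*self._point)
    def getpos(self, scaling=1):
        return (self._point[0] * scaling, self._point[1] * scaling)
    def setpos(self, point):
        self._point = tuple(point)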
class TextParent(qg.QGraphicsPathItem, Common):
isDeletable = True
def __init__(self, generic, *args, **kwargs):
super(TextParent, self).__init__(*args, **kwargs)
self.generic = generic
self.editchild = TextItem(generic, self)
self.setFlag(qg.QGraphicsItem.ItemIsMovable, True)
self.setFlag(qg.QGraphicsItem.ItemIsSelectable, True)
self.setFlag(qg.QGraphicsItem.ItemIsFocusable, True)
self.pen = qg.QPen(qg.QColor.fromRgbF(0,0,0,1),1,qc.Qt.SolidLine,qc.Qt.RoundCap,qc.Qt.RoundJoin)
self.pen.setCosmetic(True)
self.brush = qg.QBrush(qg.QColor.fromRgbF(0, 0, 0, .25), qc.Qt.SolidPattern)
self.setPen(self.pen)
self.setBrush(self.brush)
self.setPos(*self.generic.pos.getpos(scaling = popupcad.view_scaling))
self.setFlag(self.ItemSendsGeometryChanges, True)
self.changed_trigger = False
# def focusInEvent(self,*args,**kwargs):
# self.editmode()
def itemChange(self, change, value):
if change == self.ItemPositionHasChanged:
if self.changed_trigger:
self.changed_trigger = False
self.scene().savesnapshot.emit()
self.generic.pos.setpos(qh.to_tuple(self.pos()))
return super(TextParent, self).itemChange(change, value)
def editmode(self):
self.setPath(qg.QPainterPath())
self.editchild.updatefont()
self.editchild.setParentItem(self)
self.editchild.resetTransform()
if popupcad.flip_y:
self.editchild.scale(1, -1)
self.editchild.setTextInteractionFlags(qc.Qt.TextEditorInteraction)
self.editchild.setFocus()
def finish_edit(self):
self.editchild.setTextInteractionFlags(qc.Qt.NoTextInteraction)
self.generic.text = self.editchild.toPlainText()
self.editchild.removefromscene()
if self.generic.text == '':
self.harddelete()
self.refreshview()
# self.scene().savesnapshot.emit()
def refreshview(self):
path = self.generic.painterpath(add_shift = False)
self.setPath(path)
# path, dummy = self.generic.genpath(popupcad.view_scaling)
# self.setPath(path)
def mouseDoubleClickEvent(self, event):
self.editmode()
def mousePressEvent(self, event):
self.changed_trigger = True
self.scene().itemclicked.emit(self.generic)
super(TextParent, self).mousePressEvent(event)
def mouseReleaseEvent(self, event):
if self.changed_trigger:
self.changed_trigger = False
super(TextParent, self).mouseReleaseEvent(event)
def copy(self):
genericcopy = self.generic.copy(identical=False)
return genericcopy.outputinteractive()
def output_dxf(self,model_space,layer = None):
pass
class TextItem(qg.QGraphicsTextItem, Common):
def __init__(self, generic, parent, *args, **kwargs):
self.generic = generic
super(TextItem, self).__init__(*args, **kwargs)
self.setTextInteractionFlags(qc.Qt.TextEditorInteraction)
self.parent = parent
self.setPlainText(self.generic.text)
self.updatefont()
def focusOutEvent(self, event):
self.parent.finish_edit()
def updatefont(self):
font = qg.QFont(self.generic.font, pointSize=self.generic.fontsize * popupcad.view_scaling)
font.setStyleStrategy(font.ForceOutline)
self.setFont(font)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import gzip
import numpy as np
import codecs
from singa import device
from singa import tensor
from singa import opt
from singa import autograd
from singa import sonnx
import onnx
from utils import check_exist_or_download
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
def load_dataset():
train_x_url = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'
train_y_url = 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz'
valid_x_url = 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz'
valid_y_url = 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'
train_x = read_image_file(check_exist_or_download(train_x_url)).astype(
np.float32)
train_y = read_label_file(check_exist_or_download(train_y_url)).astype(
np.float32)
valid_x = read_image_file(check_exist_or_download(valid_x_url)).astype(
np.float32)
valid_y = read_label_file(check_exist_or_download(valid_y_url)).astype(
np.float32)
return train_x, train_y, valid_x, valid_y
def read_label_file(path):
with gzip.open(path, 'rb') as f:
data = f.read()
assert get_int(data[:4]) == 2049
length = get_int(data[4:8])
parsed = np.frombuffer(data, dtype=np.uint8, offset=8).reshape((length))
return parsed
def get_int(b):
return int(codecs.encode(b, 'hex'), 16)
def read_image_file(path):
with gzip.open(path, 'rb') as f:
data = f.read()
assert get_int(data[:4]) == 2051
length = get_int(data[4:8])
num_rows = get_int(data[8:12])
num_cols = get_int(data[12:16])
parsed = np.frombuffer(data, dtype=np.uint8, offset=16).reshape(
(length, 1, num_rows, num_cols))
return parsed
def to_categorical(y, num_classes):
y = np.array(y, dtype="int")
n = y.shape[0]
categorical = np.zeros((n, num_classes))
categorical[np.arange(n), y] = 1
categorical = categorical.astype(np.float32)
return categorical
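# Illustrative sketch, not part of the original example: to_categorical turns
# integer class labels into one-hot float32 rows, e.g. label 3 over 4 classes
# becomes [0, 0, 0, 1]. Wrapped in a function so importing this module stays
# side-effect free.
def _to_categorical_demo():
    onehot = to_categorical([1, 0, 3], 4)
    assert onehot.shape == (3, 4)
    assert onehot[0, 1] == 1.0 and onehot[1, 0] == 1.0 and onehot[2, 3] == 1.0
    return onehot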
class CNN:
def __init__(self):
self.conv1 = autograd.Conv2d(1, 20, 5, padding=0)
self.conv2 = autograd.Conv2d(20, 50, 5, padding=0)
self.linear1 = autograd.Linear(4 * 4 * 50, 500, bias=False)
self.linear2 = autograd.Linear(500, 10, bias=False)
self.pooling1 = autograd.MaxPool2d(2, 2, padding=0)
self.pooling2 = autograd.MaxPool2d(2, 2, padding=0)
def forward(self, x):
y = self.conv1(x)
y = autograd.relu(y)
y = self.pooling1(y)
y = self.conv2(y)
y = autograd.relu(y)
y = self.pooling2(y)
y = autograd.flatten(y)
y = self.linear1(y)
y = autograd.relu(y)
y = self.linear2(y)
return y
def accuracy(pred, target):
y = np.argmax(pred, axis=1)
t = np.argmax(target, axis=1)
a = y == t
return np.array(a, "int").sum() / float(len(t))
def train(model,
x,
y,
epochs=1,
batch_size=64,
dev=device.get_default_device()):
batch_number = x.shape[0] // batch_size
for i in range(epochs):
for b in range(batch_number):
l_idx = b * batch_size
r_idx = (b + 1) * batch_size
x_batch = tensor.Tensor(device=dev, data=x[l_idx:r_idx])
target_batch = tensor.Tensor(device=dev, data=y[l_idx:r_idx])
output_batch = model.forward(x_batch)
loss = autograd.softmax_cross_entropy(output_batch, target_batch)
accuracy_rate = accuracy(tensor.to_numpy(output_batch),
tensor.to_numpy(target_batch))
sgd = opt.SGD(lr=0.001)
for p, gp in autograd.backward(loss):
sgd.update(p, gp)
sgd.step()
            if b % 100 == 0:
                logging.info("acc %6.2f, loss %6.2f" %
                             (accuracy_rate, tensor.to_numpy(loss)[0]))
logging.info("training completed")
return x_batch, output_batch
def make_onnx(x, y):
return sonnx.to_onnx([x], [y])
class Infer:
def __init__(self, sg_ir):
self.sg_ir = sg_ir
for idx, tens in sg_ir.tensor_map.items():
# allow the tensors to be updated
tens.requires_grad = True
tens.stores_grad = True
def forward(self, x):
        return self.sg_ir.run([x])[0]
def re_train(sg_ir,
x,
y,
epochs=1,
batch_size=64,
dev=device.get_default_device()):
batch_number = x.shape[0] // batch_size
new_model = Infer(sg_ir)
for i in range(epochs):
for b in range(batch_number):
l_idx = b * batch_size
r_idx = (b + 1) * batch_size
x_batch = tensor.Tensor(device=dev, data=x[l_idx:r_idx])
target_batch = tensor.Tensor(device=dev, data=y[l_idx:r_idx])
output_batch = new_model.forward(x_batch)
loss = autograd.softmax_cross_entropy(output_batch, target_batch)
accuracy_rate = accuracy(tensor.to_numpy(output_batch),
tensor.to_numpy(target_batch))
sgd = opt.SGD(lr=0.01)
for p, gp in autograd.backward(loss):
sgd.update(p, gp)
sgd.step()
            if b % 100 == 0:
                logging.info("acc %6.2f, loss %6.2f" %
                             (accuracy_rate, tensor.to_numpy(loss)[0]))
logging.info("re-training completed")
return new_model
class Trans:
def __init__(self, sg_ir, last_layers):
self.sg_ir = sg_ir
self.last_layers = last_layers
self.append_linear1 = autograd.Linear(500, 128, bias=False)
self.append_linear2 = autograd.Linear(128, 32, bias=False)
self.append_linear3 = autograd.Linear(32, 10, bias=False)
def forward(self, x):
        y = self.sg_ir.run([x], last_layers=self.last_layers)[0]
y = self.append_linear1(y)
y = autograd.relu(y)
y = self.append_linear2(y)
y = autograd.relu(y)
y = self.append_linear3(y)
y = autograd.relu(y)
return y
def transfer_learning(sg_ir,
x,
y,
epochs=1,
batch_size=64,
dev=device.get_default_device()):
batch_number = x.shape[0] // batch_size
trans_model = Trans(sg_ir, -1)
for i in range(epochs):
for b in range(batch_number):
l_idx = b * batch_size
r_idx = (b + 1) * batch_size
x_batch = tensor.Tensor(device=dev, data=x[l_idx:r_idx])
target_batch = tensor.Tensor(device=dev, data=y[l_idx:r_idx])
output_batch = trans_model.forward(x_batch)
loss = autograd.softmax_cross_entropy(output_batch, target_batch)
accuracy_rate = accuracy(tensor.to_numpy(output_batch),
tensor.to_numpy(target_batch))
sgd = opt.SGD(lr=0.07)
for p, gp in autograd.backward(loss):
sgd.update(p, gp)
sgd.step()
            if b % 100 == 0:
                logging.info("acc %6.2f, loss %6.2f" %
                             (accuracy_rate, tensor.to_numpy(loss)[0]))
logging.info("transfer-learning completed")
return trans_model
def test(model, x, y, batch_size=64, dev=device.get_default_device()):
batch_number = x.shape[0] // batch_size
result = 0
for b in range(batch_number):
l_idx = b * batch_size
r_idx = (b + 1) * batch_size
x_batch = tensor.Tensor(device=dev, data=x[l_idx:r_idx])
target_batch = tensor.Tensor(device=dev, data=y[l_idx:r_idx])
output_batch = model.forward(x_batch)
result += accuracy(tensor.to_numpy(output_batch),
tensor.to_numpy(target_batch))
logging.info("testing acc %6.2f" % (result / batch_number))
if __name__ == "__main__":
# create device
dev = device.create_cuda_gpu()
#dev = device.get_default_device()
# create model
model = CNN()
# load data
train_x, train_y, valid_x, valid_y = load_dataset()
# normalization
train_x = train_x / 255
valid_x = valid_x / 255
train_y = to_categorical(train_y, 10)
valid_y = to_categorical(valid_y, 10)
# do training
autograd.training = True
x, y = train(model, train_x, train_y, dev=dev)
onnx_model = make_onnx(x, y)
# logging.info('The model is:\n{}'.format(onnx_model))
# Save the ONNX model
model_path = os.path.join('/', 'tmp', 'mnist.onnx')
onnx.save(onnx_model, model_path)
logging.info('The model is saved.')
# load the ONNX model
onnx_model = onnx.load(model_path)
sg_ir = sonnx.prepare(onnx_model, device=dev)
# inference
autograd.training = False
logging.info('The inference result is:')
test(Infer(sg_ir), valid_x, valid_y, dev=dev)
# re-training
autograd.training = True
new_model = re_train(sg_ir, train_x, train_y, dev=dev)
autograd.training = False
test(new_model, valid_x, valid_y, dev=dev)
# transfer-learning
autograd.training = True
new_model = transfer_learning(sg_ir, train_x, train_y, dev=dev)
autograd.training = False
test(new_model, valid_x, valid_y, dev=dev)
|
|
""" miscellaneous sorting / groupby utilities """
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Callable,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Tuple,
Union,
)
import numpy as np
from pandas._libs import algos, hashtable, lib
from pandas._libs.hashtable import unique_label_indices
from pandas._typing import IndexKeyFunc
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_extension_array_dtype,
)
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algorithms
from pandas.core.construction import extract_array
if TYPE_CHECKING:
from pandas import MultiIndex
from pandas.core.indexes.base import Index
_INT64_MAX = np.iinfo(np.int64).max
def get_indexer_indexer(
target: "Index",
level: Union[str, int, List[str], List[int]],
ascending: bool,
kind: str,
na_position: str,
sort_remaining: bool,
key: IndexKeyFunc,
) -> Optional[np.ndarray]:
"""
    Helper method that returns the indexer according to input parameters for
the sort_index method of DataFrame and Series.
Parameters
----------
target : Index
level : int or level name or list of ints or list of level names
ascending : bool or list of bools, default True
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
na_position : {'first', 'last'}, default 'last'
sort_remaining : bool, default True
key : callable, optional
Returns
-------
Optional[ndarray]
The indexer for the new index.
"""
target = ensure_key_mapped(target, key, levels=level)
target = target._sort_levels_monotonic()
if level is not None:
_, indexer = target.sortlevel(
level, ascending=ascending, sort_remaining=sort_remaining
)
elif isinstance(target, ABCMultiIndex):
indexer = lexsort_indexer(
target._get_codes_for_sorting(), orders=ascending, na_position=na_position
)
else:
# Check monotonic-ness before sort an index (GH 11080)
if (ascending and target.is_monotonic_increasing) or (
not ascending and target.is_monotonic_decreasing
):
return None
indexer = nargsort(
target, kind=kind, ascending=ascending, na_position=na_position
)
return indexer
def get_group_index(labels, shape, sort: bool, xnull: bool):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
    - If `sort`, the ranks of the returned ids preserve the lexical ranks of the
      labels, i.e. the returned ids can be used to do a lexical sort on the labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels : sequence of arrays
Integers identifying levels at each location
shape : sequence of ints
Number of unique levels at each location
sort : bool
If the ranks of returned ids should match lexical ranks of labels
xnull : bool
If true nulls are excluded. i.e. -1 values in the labels are
passed through.
Returns
-------
An array of type int64 where two elements are equal if their corresponding
    labels are equal at all locations.
Notes
-----
The length of `labels` and `shape` must be identical.
"""
def _int64_cut_off(shape) -> int:
acc = 1
for i, mul in enumerate(shape):
acc *= int(mul)
if not acc < _INT64_MAX:
return i
return len(shape)
def maybe_lift(lab, size):
# promote nan values (assigned -1 label in lab array)
# so that all output values are non-negative
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(ensure_int64, labels)
if not xnull:
labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
labels = list(labels)
shape = list(shape)
# Iteratively process all the labels in chunks sized so less
# than _INT64_MAX unique int ids will be required for each chunk
while True:
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype="i8")
out = stride * labels[0].astype("i8", subok=False, copy=False)
for i in range(1, nlev):
if shape[i] == 0:
stride = 0
else:
stride //= shape[i]
out += labels[i] * stride
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1
out[mask] = -1
if nlev == len(shape): # all levels done!
break
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
comp_ids, obs_ids = compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:]
return out
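# Illustrative sketch, not part of pandas: for two label arrays over levels of
# sizes (2, 3) the flat group id is simply label0 * 3 + label1, i.e. an offset
# into the 2 x 3 cartesian product -- which is what get_group_index computes,
# chunked so the running product of level sizes stays below the int64 bound.
def _get_group_index_sketch():
    labels = [np.array([0, 0, 1, 1]), np.array([0, 2, 1, 2])]
    out = get_group_index(labels, shape=[2, 3], sort=True, xnull=True)
    assert (out == labels[0] * 3 + labels[1]).all()  # [0, 2, 4, 5]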
def get_compressed_ids(labels, sizes):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
Parameters
----------
labels : list of label arrays
sizes : list of size of the levels
Returns
-------
tuple of (comp_ids, obs_group_ids)
"""
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return compress_group_index(ids, sort=True)
def is_int64_overflow_possible(shape) -> bool:
the_prod = 1
for x in shape:
the_prod *= int(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
if is_int64_overflow_possible(shape):
# at some point group indices are factorized,
# and may not be deconstructed here! wrong path!
raise ValueError("cannot deconstruct factorized group indices!")
label_list = []
factor = 1
y = 0
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull: bool):
"""
Reconstruct labels from observed group ids.
Parameters
----------
xnull : bool
If nulls are excluded; i.e. -1 labels are passed through.
"""
if not xnull:
lift = np.fromiter(((a == -1).any() for a in labels), dtype="i8")
shape = np.asarray(shape, dtype="i8") + lift
if not is_int64_overflow_possible(shape):
# obs ids are deconstructable! take the fast route!
out = decons_group_index(obs_ids, shape)
return out if xnull or not lift.any() else [x - y for x, y in zip(out, lift)]
i = unique_label_indices(comp_ids)
i8copy = lambda a: a.astype("i8", subok=False, copy=True)
return [i8copy(lab[i]) for lab in labels]
def indexer_from_factorized(labels, shape, compress: bool = True):
ids = get_group_index(labels, shape, sort=True, xnull=False)
if not compress:
ngroups = (ids.size and ids.max()) + 1
else:
ids, obs = compress_group_index(ids, sort=True)
ngroups = len(obs)
return get_group_index_sorter(ids, ngroups)
def lexsort_indexer(
keys, orders=None, na_position: str = "last", key: Optional[Callable] = None
):
"""
Performs lexical sorting on a set of keys
Parameters
----------
keys : sequence of arrays
Sequence of ndarrays to be sorted by the indexer
orders : boolean or list of booleans, optional
Determines the sorting order for each element in keys. If a list,
it must be the same length as keys. This determines whether the
corresponding element in keys should be sorted in ascending
(True) or descending (False) order. if bool, applied to all
elements as above. if None, defaults to True.
na_position : {'first', 'last'}, default 'last'
Determines placement of NA elements in the sorted list ("last" or "first")
key : Callable, optional
Callable key function applied to every element in keys before sorting
.. versionadded:: 1.0.0
"""
from pandas.core.arrays import Categorical
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
keys = [ensure_key_mapped(k, key) for k in keys]
for k, order in zip(keys, orders):
cat = Categorical(k, ordered=True)
if na_position not in ["last", "first"]:
raise ValueError(f"invalid na_position: {na_position}")
n = len(cat.categories)
codes = cat.codes.copy()
mask = cat.codes == -1
if order: # ascending
if na_position == "last":
codes = np.where(mask, n, codes)
elif na_position == "first":
codes += 1
else: # not order means descending
if na_position == "last":
codes = np.where(mask, n, n - codes - 1)
elif na_position == "first":
codes = np.where(mask, 0, n - codes)
if mask.any():
n += 1
shape.append(n)
labels.append(codes)
return indexer_from_factorized(labels, shape)
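# Illustrative sketch, not part of pandas: rows are ordered by the first key
# with ties broken by the second, so ('a', 'x') < ('b', 'x') < ('b', 'y') and
# the resulting indexer is [1, 2, 0].
def _lexsort_indexer_sketch():
    keys = [["b", "a", "b"], ["y", "x", "x"]]
    assert list(lexsort_indexer(keys)) == [1, 2, 0]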
def nargsort(
items,
kind: str = "quicksort",
ascending: bool = True,
na_position: str = "last",
key: Optional[Callable] = None,
mask: Optional[np.ndarray] = None,
):
"""
Intended to be a drop-in replacement for np.argsort which handles NaNs.
Adds ascending, na_position, and key parameters.
(GH #6399, #5231, #27237)
Parameters
----------
kind : str, default 'quicksort'
ascending : bool, default True
na_position : {'first', 'last'}, default 'last'
key : Optional[Callable], default None
mask : Optional[np.ndarray], default None
Passed when called by ExtensionArray.argsort.
"""
if key is not None:
items = ensure_key_mapped(items, key)
return nargsort(
items,
kind=kind,
ascending=ascending,
na_position=na_position,
key=None,
mask=mask,
)
items = extract_array(items)
if mask is None:
mask = np.asarray(isna(items))
if is_extension_array_dtype(items):
return items.argsort(ascending=ascending, kind=kind, na_position=na_position)
else:
items = np.asanyarray(items)
idx = np.arange(len(items))
non_nans = items[~mask]
non_nan_idx = idx[~mask]
nan_idx = np.nonzero(mask)[0]
if not ascending:
non_nans = non_nans[::-1]
non_nan_idx = non_nan_idx[::-1]
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
if not ascending:
indexer = indexer[::-1]
# Finally, place the NaNs at the end or the beginning according to
# na_position
if na_position == "last":
indexer = np.concatenate([indexer, nan_idx])
elif na_position == "first":
indexer = np.concatenate([nan_idx, indexer])
else:
raise ValueError(f"invalid na_position: {na_position}")
return indexer
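# Illustrative sketch, not part of pandas: NaNs are pulled out, the remaining
# values are argsorted, and the NaN positions are appended at the end chosen
# by na_position -- here the single NaN (position 1) lands last.
def _nargsort_sketch():
    values = np.array([3.0, np.nan, 1.0, 2.0])
    assert list(nargsort(values, ascending=True, na_position="last")) == [2, 3, 0, 1]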
def nargminmax(values, method: str):
"""
Implementation of np.argmin/argmax but for ExtensionArray and which
handles missing values.
Parameters
----------
values : ExtensionArray
method : {"argmax", "argmin"}
Returns
-------
int
"""
assert method in {"argmax", "argmin"}
func = np.argmax if method == "argmax" else np.argmin
mask = np.asarray(isna(values))
values = values._values_for_argsort()
idx = np.arange(len(values))
non_nans = values[~mask]
non_nan_idx = idx[~mask]
return non_nan_idx[func(non_nans)]
def _ensure_key_mapped_multiindex(
index: "MultiIndex", key: Callable, level=None
) -> "MultiIndex":
"""
Returns a new MultiIndex in which key has been applied
to all levels specified in level (or all levels if level
is None). Used for key sorting for MultiIndex.
Parameters
----------
index : MultiIndex
Index to which to apply the key function on the
specified levels.
key : Callable
Function that takes an Index and returns an Index of
the same shape. This key is applied to each level
separately. The name of the level can be used to
distinguish different levels for application.
level : list-like, int or str, default None
Level or list of levels to apply the key function to.
If None, key function is applied to all levels. Other
levels are left unchanged.
Returns
-------
labels : MultiIndex
Resulting MultiIndex with modified levels.
"""
if level is not None:
if isinstance(level, (str, int)):
sort_levels = [level]
else:
sort_levels = level
sort_levels = [index._get_level_number(lev) for lev in sort_levels]
else:
sort_levels = list(range(index.nlevels)) # satisfies mypy
mapped = [
ensure_key_mapped(index._get_level_values(level), key)
if level in sort_levels
else index._get_level_values(level)
for level in range(index.nlevels)
]
labels = type(index).from_arrays(mapped)
return labels
def ensure_key_mapped(values, key: Optional[Callable], levels=None):
"""
Applies a callable key function to the values function and checks
that the resulting value has the same shape. Can be called on Index
subclasses, Series, DataFrames, or ndarrays.
Parameters
----------
values : Series, DataFrame, Index subclass, or ndarray
key : Optional[Callable], key to be called on the values array
levels : Optional[List], if values is a MultiIndex, list of levels to
apply the key to.
"""
from pandas.core.indexes.api import Index
if not key:
return values
if isinstance(values, ABCMultiIndex):
return _ensure_key_mapped_multiindex(values, key, level=levels)
result = key(values.copy())
if len(result) != len(values):
raise ValueError(
"User-provided `key` function must not change the shape of the array."
)
try:
if isinstance(
values, Index
): # convert to a new Index subclass, not necessarily the same
result = Index(result)
else:
type_of_values = type(values)
result = type_of_values(result) # try to revert to original type otherwise
except TypeError:
raise TypeError(
f"User-provided `key` function returned an invalid type {type(result)} \
which could not be converted to {type(values)}."
)
return result
def get_flattened_list(
comp_ids: np.ndarray,
ngroups: int,
levels: Iterable["Index"],
labels: Iterable[np.ndarray],
) -> List[Tuple]:
"""Map compressed group id -> key tuple."""
comp_ids = comp_ids.astype(np.int64, copy=False)
arrays: DefaultDict[int, List[int]] = defaultdict(list)
for labs, level in zip(labels, levels):
table = hashtable.Int64HashTable(ngroups)
table.map(comp_ids, labs.astype(np.int64, copy=False))
for i in range(ngroups):
arrays[i].append(level[table.get_item(i)])
return [tuple(array) for array in arrays.values()]
def get_indexer_dict(
label_list: List[np.ndarray], keys: List["Index"]
) -> Dict[Union[str, Tuple], np.ndarray]:
"""
Returns
-------
dict:
Labels mapped to indexers.
"""
shape = [len(x) for x in keys]
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
if np.all(group_index == -1):
# Short-circuit, lib.indices_fast will return the same
return {}
ngroups = (
((group_index.size and group_index.max()) + 1)
if is_int64_overflow_possible(shape)
else np.prod(shape, dtype="i8")
)
sorter = get_group_index_sorter(group_index, ngroups)
sorted_labels = [lab.take(sorter) for lab in label_list]
group_index = group_index.take(sorter)
return lib.indices_fast(sorter, group_index, keys, sorted_labels)
# ----------------------------------------------------------------------
# sorting levels...cleverly?
def get_group_index_sorter(group_index, ngroups: int):
"""
algos.groupsort_indexer implements `counting sort` and it is at least
O(ngroups), where
ngroups = prod(shape)
shape = map(len, keys)
that is, linear in the number of combinations (cartesian product) of unique
values of groupby keys. This can be huge when doing multi-key groupby.
np.argsort(kind='mergesort') is O(count x log(count)) where count is the
length of the data-frame;
Both algorithms are `stable` sort and that is necessary for correctness of
groupby operations. e.g. consider:
df.groupby(key)[col].transform('first')
"""
count = len(group_index)
alpha = 0.0 # taking complexities literally; there may be
beta = 1.0 # some room for fine-tuning these parameters
do_groupsort = count > 0 and ((alpha + beta * ngroups) < (count * np.log(count)))
if do_groupsort:
sorter, _ = algos.groupsort_indexer(ensure_int64(group_index), ngroups)
return ensure_platform_int(sorter)
else:
return group_index.argsort(kind="mergesort")
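# Illustrative arithmetic for the heuristic above, not part of pandas: with
# count = 1_000_000 rows and ngroups = 1_000 groups, counting sort wins since
# alpha + beta * ngroups = 1000 is far below count * log(count) ~ 1.4e7; as
# ngroups grows toward count * log(count) the comparison flips to mergesort.
def _groupsort_choice_sketch(count=1_000_000, ngroups=1_000):
    alpha, beta = 0.0, 1.0
    use_counting_sort = count > 0 and (alpha + beta * ngroups) < (count * np.log(count))
    return "groupsort_indexer" if use_counting_sort else "argsort(kind='mergesort')"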
def compress_group_index(group_index, sort: bool = True):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
"""
size_hint = min(len(group_index), hashtable.SIZE_HINT_LIMIT)
table = hashtable.Int64HashTable(size_hint)
group_index = ensure_int64(group_index)
# note, group labels come out ascending (ie, 1,2,3 etc)
comp_ids, obs_group_ids = table.get_labels_groupby(group_index)
if sort and len(obs_group_ids) > 0:
obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)
return ensure_int64(comp_ids), ensure_int64(obs_group_ids)
def _reorder_by_uniques(uniques, labels):
# sorter is index where elements ought to go
sorter = uniques.argsort()
# reverse_indexer is where elements came from
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
# move labels to right locations (ie, unsort ascending labels)
labels = algorithms.take_nd(reverse_indexer, labels, allow_fill=False)
np.putmask(labels, mask, -1)
# sort observed ids
uniques = algorithms.take_nd(uniques, sorter, allow_fill=False)
return uniques, labels
|
|
from test import support
import types
import unittest
def global_function():
def inner_function():
class LocalClass:
pass
global inner_global_function
def inner_global_function():
def inner_function2():
pass
return inner_function2
return LocalClass
return lambda: inner_function
class FuncAttrsTest(unittest.TestCase):
def setUp(self):
class F:
def a(self):
pass
def b():
return 3
self.fi = F()
self.F = F
self.b = b
def cannot_set_attr(self, obj, name, value, exceptions):
try:
setattr(obj, name, value)
except exceptions:
pass
else:
self.fail("shouldn't be able to set %s to %r" % (name, value))
try:
delattr(obj, name)
except exceptions:
pass
else:
self.fail("shouldn't be able to del %s" % name)
class FunctionPropertiesTest(FuncAttrsTest):
# Include the external setUp method that is common to all tests
def test_module(self):
self.assertEqual(self.b.__module__, __name__)
def test_dir_includes_correct_attrs(self):
self.b.known_attr = 7
self.assertIn('known_attr', dir(self.b),
"set attributes not in dir listing of method")
# Test on underlying function object of method
self.F.a.known_attr = 7
self.assertIn('known_attr', dir(self.fi.a), "set attribute on function "
"implementations, should show up in next dir")
def test_duplicate_function_equality(self):
# Body of `duplicate' is the exact same as self.b
def duplicate():
'my docstring'
return 3
self.assertNotEqual(self.b, duplicate)
def test_copying___code__(self):
def test(): pass
self.assertEqual(test(), None)
test.__code__ = self.b.__code__
self.assertEqual(test(), 3) # self.b always returns 3, arbitrarily
def test___globals__(self):
self.assertIs(self.b.__globals__, globals())
self.cannot_set_attr(self.b, '__globals__', 2,
(AttributeError, TypeError))
def test___closure__(self):
a = 12
def f(): print(a)
c = f.__closure__
self.assertIsInstance(c, tuple)
self.assertEqual(len(c), 1)
# don't have a type object handy
self.assertEqual(c[0].__class__.__name__, "cell")
self.cannot_set_attr(f, "__closure__", c, AttributeError)
def test_empty_cell(self):
def f(): print(a)
try:
f.__closure__[0].cell_contents
except ValueError:
pass
else:
self.fail("shouldn't be able to read an empty cell")
a = 12
def test___name__(self):
self.assertEqual(self.b.__name__, 'b')
self.b.__name__ = 'c'
self.assertEqual(self.b.__name__, 'c')
self.b.__name__ = 'd'
self.assertEqual(self.b.__name__, 'd')
        # __name__ must be a string
self.cannot_set_attr(self.b, '__name__', 7, TypeError)
# __name__ must be available when in restricted mode. Exec will raise
# AttributeError if __name__ is not available on f.
s = """def f(): pass\nf.__name__"""
exec(s, {'__builtins__': {}})
# Test on methods, too
self.assertEqual(self.fi.a.__name__, 'a')
self.cannot_set_attr(self.fi.a, "__name__", 'a', AttributeError)
def test___qualname__(self):
# PEP 3155
self.assertEqual(self.b.__qualname__, 'FuncAttrsTest.setUp.<locals>.b')
self.assertEqual(FuncAttrsTest.setUp.__qualname__, 'FuncAttrsTest.setUp')
self.assertEqual(global_function.__qualname__, 'global_function')
self.assertEqual(global_function().__qualname__,
'global_function.<locals>.<lambda>')
self.assertEqual(global_function()().__qualname__,
'global_function.<locals>.inner_function')
self.assertEqual(global_function()()().__qualname__,
'global_function.<locals>.inner_function.<locals>.LocalClass')
self.assertEqual(inner_global_function.__qualname__, 'inner_global_function')
self.assertEqual(inner_global_function().__qualname__, 'inner_global_function.<locals>.inner_function2')
self.b.__qualname__ = 'c'
self.assertEqual(self.b.__qualname__, 'c')
self.b.__qualname__ = 'd'
self.assertEqual(self.b.__qualname__, 'd')
# __qualname__ must be a string
self.cannot_set_attr(self.b, '__qualname__', 7, TypeError)
def test___code__(self):
num_one, num_two = 7, 8
def a(): pass
def b(): return 12
def c(): return num_one
def d(): return num_two
def e(): return num_one, num_two
for func in [a, b, c, d, e]:
self.assertEqual(type(func.__code__), types.CodeType)
self.assertEqual(c(), 7)
self.assertEqual(d(), 8)
d.__code__ = c.__code__
self.assertEqual(c.__code__, d.__code__)
self.assertEqual(c(), 7)
# self.assertEqual(d(), 7)
try:
b.__code__ = c.__code__
except ValueError:
pass
else:
self.fail("__code__ with different numbers of free vars should "
"not be possible")
try:
e.__code__ = d.__code__
except ValueError:
pass
else:
self.fail("__code__ with different numbers of free vars should "
"not be possible")
def test_blank_func_defaults(self):
self.assertEqual(self.b.__defaults__, None)
del self.b.__defaults__
self.assertEqual(self.b.__defaults__, None)
def test_func_default_args(self):
def first_func(a, b):
return a+b
def second_func(a=1, b=2):
return a+b
self.assertEqual(first_func.__defaults__, None)
self.assertEqual(second_func.__defaults__, (1, 2))
first_func.__defaults__ = (1, 2)
self.assertEqual(first_func.__defaults__, (1, 2))
self.assertEqual(first_func(), 3)
self.assertEqual(first_func(3), 5)
self.assertEqual(first_func(3, 5), 8)
del second_func.__defaults__
self.assertEqual(second_func.__defaults__, None)
try:
second_func()
except TypeError:
pass
else:
self.fail("__defaults__ does not update; deleting it does not "
"remove requirement")
class InstancemethodAttrTest(FuncAttrsTest):
def test___class__(self):
self.assertEqual(self.fi.a.__self__.__class__, self.F)
self.cannot_set_attr(self.fi.a, "__class__", self.F, TypeError)
def test___func__(self):
self.assertEqual(self.fi.a.__func__, self.F.a)
self.cannot_set_attr(self.fi.a, "__func__", self.F.a, AttributeError)
def test___self__(self):
self.assertEqual(self.fi.a.__self__, self.fi)
self.cannot_set_attr(self.fi.a, "__self__", self.fi, AttributeError)
def test___func___non_method(self):
# Behavior should be the same when a method is added via an attr
# assignment
self.fi.id = types.MethodType(id, self.fi)
self.assertEqual(self.fi.id(), id(self.fi))
# Test usage
try:
self.fi.id.unknown_attr
except AttributeError:
pass
else:
self.fail("using unknown attributes should raise AttributeError")
# Test assignment and deletion
self.cannot_set_attr(self.fi.id, 'unknown_attr', 2, AttributeError)
class ArbitraryFunctionAttrTest(FuncAttrsTest):
def test_set_attr(self):
self.b.known_attr = 7
self.assertEqual(self.b.known_attr, 7)
try:
self.fi.a.known_attr = 7
except AttributeError:
pass
else:
self.fail("setting attributes on methods should raise error")
def test_delete_unknown_attr(self):
try:
del self.b.unknown_attr
except AttributeError:
pass
else:
self.fail("deleting unknown attribute should raise TypeError")
def test_unset_attr(self):
for func in [self.b, self.fi.a]:
try:
func.non_existent_attr
except AttributeError:
pass
else:
self.fail("using unknown attributes should raise "
"AttributeError")
class FunctionDictsTest(FuncAttrsTest):
def test_setting_dict_to_invalid(self):
self.cannot_set_attr(self.b, '__dict__', None, TypeError)
from collections import UserDict
d = UserDict({'known_attr': 7})
self.cannot_set_attr(self.fi.a.__func__, '__dict__', d, TypeError)
def test_setting_dict_to_valid(self):
d = {'known_attr': 7}
self.b.__dict__ = d
# Test assignment
self.assertIs(d, self.b.__dict__)
# ... and on all the different ways of referencing the method's func
self.F.a.__dict__ = d
self.assertIs(d, self.fi.a.__func__.__dict__)
self.assertIs(d, self.fi.a.__dict__)
# Test value
self.assertEqual(self.b.known_attr, 7)
self.assertEqual(self.b.__dict__['known_attr'], 7)
# ... and again, on all the different method's names
self.assertEqual(self.fi.a.__func__.known_attr, 7)
self.assertEqual(self.fi.a.known_attr, 7)
def test_delete___dict__(self):
try:
del self.b.__dict__
except TypeError:
pass
else:
self.fail("deleting function dictionary should raise TypeError")
def test_unassigned_dict(self):
self.assertEqual(self.b.__dict__, {})
def test_func_as_dict_key(self):
value = "Some string"
d = {}
d[self.b] = value
self.assertEqual(d[self.b], value)
class FunctionDocstringTest(FuncAttrsTest):
def test_set_docstring_attr(self):
self.assertEqual(self.b.__doc__, None)
docstr = "A test method that does nothing"
self.b.__doc__ = docstr
self.F.a.__doc__ = docstr
self.assertEqual(self.b.__doc__, docstr)
self.assertEqual(self.fi.a.__doc__, docstr)
self.cannot_set_attr(self.fi.a, "__doc__", docstr, AttributeError)
def test_delete_docstring(self):
self.b.__doc__ = "The docstring"
del self.b.__doc__
self.assertEqual(self.b.__doc__, None)
def cell(value):
"""Create a cell containing the given value."""
def f():
print(a)
a = value
return f.__closure__[0]
def empty_cell(empty=True):
"""Create an empty cell."""
def f():
print(a)
# the intent of the following line is simply "if False:"; it's
# spelt this way to avoid the danger that a future optimization
# might simply remove an "if False:" code block.
if not empty:
a = 1729
return f.__closure__[0]
class CellTest(unittest.TestCase):
def test_comparison(self):
# These tests are here simply to exercise the comparison code;
# their presence should not be interpreted as providing any
# guarantees about the semantics (or even existence) of cell
# comparisons in future versions of CPython.
self.assertTrue(cell(2) < cell(3))
self.assertTrue(empty_cell() < cell('saturday'))
self.assertTrue(empty_cell() == empty_cell())
self.assertTrue(cell(-36) == cell(-36.0))
self.assertTrue(cell(True) > empty_cell())
class StaticMethodAttrsTest(unittest.TestCase):
def test_func_attribute(self):
def f():
pass
c = classmethod(f)
self.assertTrue(c.__func__ is f)
s = staticmethod(f)
self.assertTrue(s.__func__ is f)
class BuiltinFunctionPropertiesTest(unittest.TestCase):
# XXX Not sure where this should really go since I can't find a
# test module specifically for builtin_function_or_method.
def test_builtin__qualname__(self):
import time
# builtin function:
self.assertEqual(len.__qualname__, 'len')
self.assertEqual(time.time.__qualname__, 'time')
# builtin classmethod:
self.assertEqual(dict.fromkeys.__qualname__, 'dict.fromkeys')
self.assertEqual(float.__getformat__.__qualname__,
'float.__getformat__')
# builtin staticmethod:
self.assertEqual(str.maketrans.__qualname__, 'str.maketrans')
self.assertEqual(bytes.maketrans.__qualname__, 'bytes.maketrans')
# builtin bound instance method:
self.assertEqual([1, 2, 3].append.__qualname__, 'list.append')
self.assertEqual({'foo': 'bar'}.pop.__qualname__, 'dict.pop')
def test_main():
support.run_unittest(FunctionPropertiesTest, InstancemethodAttrTest,
ArbitraryFunctionAttrTest, FunctionDictsTest,
FunctionDocstringTest, CellTest,
StaticMethodAttrsTest,
BuiltinFunctionPropertiesTest)
if __name__ == "__main__":
test_main()
=======
from test import support
import types
import unittest
def global_function():
def inner_function():
class LocalClass:
pass
global inner_global_function
def inner_global_function():
def inner_function2():
pass
return inner_function2
return LocalClass
return lambda: inner_function
class FuncAttrsTest(unittest.TestCase):
def setUp(self):
class F:
def a(self):
pass
def b():
return 3
self.fi = F()
self.F = F
self.b = b
def cannot_set_attr(self, obj, name, value, exceptions):
try:
setattr(obj, name, value)
except exceptions:
pass
else:
self.fail("shouldn't be able to set %s to %r" % (name, value))
try:
delattr(obj, name)
except exceptions:
pass
else:
self.fail("shouldn't be able to del %s" % name)
class FunctionPropertiesTest(FuncAttrsTest):
# Include the external setUp method that is common to all tests
def test_module(self):
self.assertEqual(self.b.__module__, __name__)
def test_dir_includes_correct_attrs(self):
self.b.known_attr = 7
self.assertIn('known_attr', dir(self.b),
"set attributes not in dir listing of method")
# Test on underlying function object of method
self.F.a.known_attr = 7
self.assertIn('known_attr', dir(self.fi.a), "set attribute on function "
"implementations, should show up in next dir")
def test_duplicate_function_equality(self):
# Body of `duplicate' is the exact same as self.b
def duplicate():
'my docstring'
return 3
self.assertNotEqual(self.b, duplicate)
def test_copying___code__(self):
def test(): pass
self.assertEqual(test(), None)
test.__code__ = self.b.__code__
self.assertEqual(test(), 3) # self.b always returns 3, arbitrarily
def test___globals__(self):
self.assertIs(self.b.__globals__, globals())
self.cannot_set_attr(self.b, '__globals__', 2,
(AttributeError, TypeError))
def test___closure__(self):
a = 12
def f(): print(a)
c = f.__closure__
self.assertIsInstance(c, tuple)
self.assertEqual(len(c), 1)
# don't have a type object handy
self.assertEqual(c[0].__class__.__name__, "cell")
self.cannot_set_attr(f, "__closure__", c, AttributeError)
def test_empty_cell(self):
def f(): print(a)
try:
f.__closure__[0].cell_contents
except ValueError:
pass
else:
self.fail("shouldn't be able to read an empty cell")
a = 12
def test___name__(self):
self.assertEqual(self.b.__name__, 'b')
self.b.__name__ = 'c'
self.assertEqual(self.b.__name__, 'c')
self.b.__name__ = 'd'
self.assertEqual(self.b.__name__, 'd')
# __name__ and __name__ must be a string
self.cannot_set_attr(self.b, '__name__', 7, TypeError)
# __name__ must be available when in restricted mode. Exec will raise
# AttributeError if __name__ is not available on f.
s = """def f(): pass\nf.__name__"""
exec(s, {'__builtins__': {}})
# Test on methods, too
self.assertEqual(self.fi.a.__name__, 'a')
self.cannot_set_attr(self.fi.a, "__name__", 'a', AttributeError)
def test___qualname__(self):
# PEP 3155
self.assertEqual(self.b.__qualname__, 'FuncAttrsTest.setUp.<locals>.b')
self.assertEqual(FuncAttrsTest.setUp.__qualname__, 'FuncAttrsTest.setUp')
self.assertEqual(global_function.__qualname__, 'global_function')
self.assertEqual(global_function().__qualname__,
'global_function.<locals>.<lambda>')
self.assertEqual(global_function()().__qualname__,
'global_function.<locals>.inner_function')
self.assertEqual(global_function()()().__qualname__,
'global_function.<locals>.inner_function.<locals>.LocalClass')
self.assertEqual(inner_global_function.__qualname__, 'inner_global_function')
self.assertEqual(inner_global_function().__qualname__, 'inner_global_function.<locals>.inner_function2')
self.b.__qualname__ = 'c'
self.assertEqual(self.b.__qualname__, 'c')
self.b.__qualname__ = 'd'
self.assertEqual(self.b.__qualname__, 'd')
# __qualname__ must be a string
self.cannot_set_attr(self.b, '__qualname__', 7, TypeError)
def test___code__(self):
num_one, num_two = 7, 8
def a(): pass
def b(): return 12
def c(): return num_one
def d(): return num_two
def e(): return num_one, num_two
for func in [a, b, c, d, e]:
self.assertEqual(type(func.__code__), types.CodeType)
self.assertEqual(c(), 7)
self.assertEqual(d(), 8)
d.__code__ = c.__code__
self.assertEqual(c.__code__, d.__code__)
self.assertEqual(c(), 7)
# self.assertEqual(d(), 7)
try:
b.__code__ = c.__code__
except ValueError:
pass
else:
self.fail("__code__ with different numbers of free vars should "
"not be possible")
try:
e.__code__ = d.__code__
except ValueError:
pass
else:
self.fail("__code__ with different numbers of free vars should "
"not be possible")
def test_blank_func_defaults(self):
self.assertEqual(self.b.__defaults__, None)
del self.b.__defaults__
self.assertEqual(self.b.__defaults__, None)
def test_func_default_args(self):
def first_func(a, b):
return a+b
def second_func(a=1, b=2):
return a+b
self.assertEqual(first_func.__defaults__, None)
self.assertEqual(second_func.__defaults__, (1, 2))
first_func.__defaults__ = (1, 2)
self.assertEqual(first_func.__defaults__, (1, 2))
self.assertEqual(first_func(), 3)
self.assertEqual(first_func(3), 5)
self.assertEqual(first_func(3, 5), 8)
del second_func.__defaults__
self.assertEqual(second_func.__defaults__, None)
try:
second_func()
except TypeError:
pass
else:
self.fail("__defaults__ does not update; deleting it does not "
"remove requirement")
class InstancemethodAttrTest(FuncAttrsTest):
def test___class__(self):
self.assertEqual(self.fi.a.__self__.__class__, self.F)
self.cannot_set_attr(self.fi.a, "__class__", self.F, TypeError)
def test___func__(self):
self.assertEqual(self.fi.a.__func__, self.F.a)
self.cannot_set_attr(self.fi.a, "__func__", self.F.a, AttributeError)
def test___self__(self):
self.assertEqual(self.fi.a.__self__, self.fi)
self.cannot_set_attr(self.fi.a, "__self__", self.fi, AttributeError)
def test___func___non_method(self):
# Behavior should be the same when a method is added via an attr
# assignment
self.fi.id = types.MethodType(id, self.fi)
self.assertEqual(self.fi.id(), id(self.fi))
# Test usage
try:
self.fi.id.unknown_attr
except AttributeError:
pass
else:
self.fail("using unknown attributes should raise AttributeError")
# Test assignment and deletion
self.cannot_set_attr(self.fi.id, 'unknown_attr', 2, AttributeError)
class ArbitraryFunctionAttrTest(FuncAttrsTest):
def test_set_attr(self):
self.b.known_attr = 7
self.assertEqual(self.b.known_attr, 7)
try:
self.fi.a.known_attr = 7
except AttributeError:
pass
else:
self.fail("setting attributes on methods should raise error")
def test_delete_unknown_attr(self):
try:
del self.b.unknown_attr
except AttributeError:
pass
else:
self.fail("deleting unknown attribute should raise TypeError")
def test_unset_attr(self):
for func in [self.b, self.fi.a]:
try:
func.non_existent_attr
except AttributeError:
pass
else:
self.fail("using unknown attributes should raise "
"AttributeError")
class FunctionDictsTest(FuncAttrsTest):
def test_setting_dict_to_invalid(self):
self.cannot_set_attr(self.b, '__dict__', None, TypeError)
from collections import UserDict
d = UserDict({'known_attr': 7})
self.cannot_set_attr(self.fi.a.__func__, '__dict__', d, TypeError)
def test_setting_dict_to_valid(self):
d = {'known_attr': 7}
self.b.__dict__ = d
# Test assignment
self.assertIs(d, self.b.__dict__)
# ... and on all the different ways of referencing the method's func
self.F.a.__dict__ = d
self.assertIs(d, self.fi.a.__func__.__dict__)
self.assertIs(d, self.fi.a.__dict__)
# Test value
self.assertEqual(self.b.known_attr, 7)
self.assertEqual(self.b.__dict__['known_attr'], 7)
# ... and again, on all the different method's names
self.assertEqual(self.fi.a.__func__.known_attr, 7)
self.assertEqual(self.fi.a.known_attr, 7)
def test_delete___dict__(self):
try:
del self.b.__dict__
except TypeError:
pass
else:
self.fail("deleting function dictionary should raise TypeError")
def test_unassigned_dict(self):
self.assertEqual(self.b.__dict__, {})
def test_func_as_dict_key(self):
value = "Some string"
d = {}
d[self.b] = value
self.assertEqual(d[self.b], value)
class FunctionDocstringTest(FuncAttrsTest):
def test_set_docstring_attr(self):
self.assertEqual(self.b.__doc__, None)
docstr = "A test method that does nothing"
self.b.__doc__ = docstr
self.F.a.__doc__ = docstr
self.assertEqual(self.b.__doc__, docstr)
self.assertEqual(self.fi.a.__doc__, docstr)
self.cannot_set_attr(self.fi.a, "__doc__", docstr, AttributeError)
def test_delete_docstring(self):
self.b.__doc__ = "The docstring"
del self.b.__doc__
self.assertEqual(self.b.__doc__, None)
def cell(value):
"""Create a cell containing the given value."""
def f():
print(a)
a = value
return f.__closure__[0]
def empty_cell(empty=True):
"""Create an empty cell."""
def f():
print(a)
# the intent of the following line is simply "if False:"; it's
# spelt this way to avoid the danger that a future optimization
# might simply remove an "if False:" code block.
if not empty:
a = 1729
return f.__closure__[0]
class CellTest(unittest.TestCase):
def test_comparison(self):
# These tests are here simply to exercise the comparison code;
# their presence should not be interpreted as providing any
# guarantees about the semantics (or even existence) of cell
# comparisons in future versions of CPython.
self.assertTrue(cell(2) < cell(3))
self.assertTrue(empty_cell() < cell('saturday'))
self.assertTrue(empty_cell() == empty_cell())
self.assertTrue(cell(-36) == cell(-36.0))
self.assertTrue(cell(True) > empty_cell())
class StaticMethodAttrsTest(unittest.TestCase):
def test_func_attribute(self):
def f():
pass
c = classmethod(f)
self.assertTrue(c.__func__ is f)
s = staticmethod(f)
self.assertTrue(s.__func__ is f)
class BuiltinFunctionPropertiesTest(unittest.TestCase):
# XXX Not sure where this should really go since I can't find a
# test module specifically for builtin_function_or_method.
def test_builtin__qualname__(self):
import time
# builtin function:
self.assertEqual(len.__qualname__, 'len')
self.assertEqual(time.time.__qualname__, 'time')
# builtin classmethod:
self.assertEqual(dict.fromkeys.__qualname__, 'dict.fromkeys')
self.assertEqual(float.__getformat__.__qualname__,
'float.__getformat__')
# builtin staticmethod:
self.assertEqual(str.maketrans.__qualname__, 'str.maketrans')
self.assertEqual(bytes.maketrans.__qualname__, 'bytes.maketrans')
# builtin bound instance method:
self.assertEqual([1, 2, 3].append.__qualname__, 'list.append')
self.assertEqual({'foo': 'bar'}.pop.__qualname__, 'dict.pop')
def test_main():
support.run_unittest(FunctionPropertiesTest, InstancemethodAttrTest,
ArbitraryFunctionAttrTest, FunctionDictsTest,
FunctionDocstringTest, CellTest,
StaticMethodAttrsTest,
BuiltinFunctionPropertiesTest)
if __name__ == "__main__":
test_main()
|
|
"""
Test for "region" module.
"""
# Author: Ph. Gervais
# License: simplified BSD
import numpy as np
from nose.tools import assert_raises, assert_true
import nibabel
from .. import region
from .._utils.testing import generate_timeseries, generate_regions_ts
from .._utils.testing import generate_labeled_regions, generate_maps
from .._utils.testing import generate_fake_fmri
from .._utils.testing import write_tmp_imgs
def test_generate_regions_ts():
"""Minimal testing of generate_regions_ts()"""
# Check that no regions overlap
n_voxels = 50
n_regions = 10
regions = generate_regions_ts(n_voxels, n_regions, overlap=0)
assert_true(regions.shape == (n_regions, n_voxels))
# check: no overlap
np.testing.assert_array_less((regions > 0).sum(axis=0) - 0.1,
np.ones(regions.shape[1]))
# check: a region everywhere
np.testing.assert_array_less(np.zeros(regions.shape[1]),
(regions > 0).sum(axis=0))
regions = generate_regions_ts(n_voxels, n_regions, overlap=0,
window="hamming")
assert_true(regions.shape == (n_regions, n_voxels))
# check: no overlap
np.testing.assert_array_less((regions > 0).sum(axis=0) - 0.1,
np.ones(regions.shape[1]))
# check: a region everywhere
np.testing.assert_array_less(np.zeros(regions.shape[1]),
(regions > 0).sum(axis=0))
# Check that some regions overlap
regions = generate_regions_ts(n_voxels, n_regions, overlap=1)
assert_true(regions.shape == (n_regions, n_voxels))
assert(np.any((regions > 0).sum(axis=-1) > 1.9))
regions = generate_regions_ts(n_voxels, n_regions, overlap=1,
window="hamming")
assert(np.any((regions > 0).sum(axis=-1) > 1.9))
def test_generate_labeled_regions():
"""Minimal testing of generate_labeled_regions"""
shape = (3, 4, 5)
n_regions = 10
regions = generate_labeled_regions(shape, n_regions)
assert_true(regions.shape == shape)
assert (len(np.unique(regions.get_data())) == n_regions + 1)
def test_signals_extraction_with_labels():
"""Test conversion between signals and images using regions defined
by labels."""
shape = (8, 9, 10)
n_instants = 11
n_regions = 8 # must be 8
eps = np.finfo(np.float).eps
# data
affine = np.eye(4)
signals = generate_timeseries(n_instants, n_regions)
# mask
mask_data = np.zeros(shape)
mask_data[1:-1, 1:-1, 1:-1] = 1
mask_img = nibabel.Nifti1Image(mask_data, affine)
# labels
labels_data = np.zeros(shape, dtype=np.int)
h0 = shape[0] / 2
h1 = shape[1] / 2
h2 = shape[2] / 2
labels_data[:h0, :h1, :h2] = 1
labels_data[:h0, :h1, h2:] = 2
labels_data[:h0, h1:, :h2] = 3
labels_data[:h0, h1:, h2:] = 4
labels_data[h0:, :h1, :h2] = 5
labels_data[h0:, :h1, h2:] = 6
labels_data[h0:, h1:, :h2] = 7
labels_data[h0:, h1:, h2:] = 8
labels_img = nibabel.Nifti1Image(labels_data, affine)
## Without mask
# from labels
data_img = region.signals_to_img_labels(signals, labels_img)
data = data_img.get_data()
assert_true(data_img.shape == (shape + (n_instants,)))
assert_true(np.all(data.std(axis=-1) > 0))
# There must be non-zero data (safety net)
assert_true(abs(data).max() > 1e-9)
# Check that signals in each region are identical in each voxel
for n in xrange(1, n_regions + 1):
sigs = data[labels_data == n, :]
np.testing.assert_almost_equal(sigs[0, :], signals[:, n - 1])
assert_true(abs(sigs - sigs[0, :]).max() < eps)
# and back
signals_r, labels_r = region.img_to_signals_labels(data_img, labels_img)
np.testing.assert_almost_equal(signals_r, signals)
assert_true(labels_r == range(1, 9))
with write_tmp_imgs(data_img) as fname_img:
signals_r, labels_r = region.img_to_signals_labels(fname_img,
labels_img)
np.testing.assert_almost_equal(signals_r, signals)
assert_true(labels_r == range(1, 9))
## Same thing, with mask.
data_img = region.signals_to_img_labels(signals, labels_img,
mask_img=mask_img)
assert_true(data_img.shape == (shape + (n_instants,)))
data = data_img.get_data()
assert_true(abs(data).max() > 1e-9)
# Zero outside of the mask
assert_true(np.all(data[np.logical_not(mask_img.get_data())
].std(axis=-1) < eps)
)
with write_tmp_imgs(labels_img, mask_img) as filenames:
data_img = region.signals_to_img_labels(signals, filenames[0],
mask_img=filenames[1])
assert_true(data_img.shape == (shape + (n_instants,)))
data = data_img.get_data()
assert_true(abs(data).max() > 1e-9)
# Zero outside of the mask
assert_true(np.all(data[np.logical_not(mask_img.get_data())
].std(axis=-1) < eps)
)
# mask labels before checking
masked_labels_data = labels_data.copy()
masked_labels_data[np.logical_not(mask_img.get_data())] = 0
for n in xrange(1, n_regions + 1):
sigs = data[masked_labels_data == n, :]
np.testing.assert_almost_equal(sigs[0, :], signals[:, n - 1])
assert_true(abs(sigs - sigs[0, :]).max() < eps)
# and back
signals_r, labels_r = region.img_to_signals_labels(data_img, labels_img,
mask_img=mask_img)
np.testing.assert_almost_equal(signals_r, signals)
assert_true(labels_r == range(1, 9))
# Test input validation
data_img = nibabel.Nifti1Image(np.zeros((2, 3, 4, 5)), np.eye(4))
good_labels_img = nibabel.Nifti1Image(np.zeros((2, 3, 4)), np.eye(4))
bad_labels1_img = nibabel.Nifti1Image(np.zeros((2, 3, 5)), np.eye(4))
bad_labels2_img = nibabel.Nifti1Image(np.zeros((2, 3, 4)), 2 * np.eye(4))
good_mask_img = nibabel.Nifti1Image(np.zeros((2, 3, 4)), np.eye(4))
bad_mask1_img = nibabel.Nifti1Image(np.zeros((2, 3, 5)), np.eye(4))
bad_mask2_img = nibabel.Nifti1Image(np.zeros((2, 3, 4)), 2 * np.eye(4))
assert_raises(ValueError, region.img_to_signals_labels, data_img,
bad_labels1_img)
assert_raises(ValueError, region.img_to_signals_labels, data_img,
bad_labels2_img)
assert_raises(ValueError, region.img_to_signals_labels, data_img,
bad_labels1_img, mask_img=good_mask_img)
assert_raises(ValueError, region.img_to_signals_labels, data_img,
bad_labels2_img, mask_img=good_mask_img)
assert_raises(ValueError, region.img_to_signals_labels, data_img,
good_labels_img, mask_img=bad_mask1_img)
assert_raises(ValueError, region.img_to_signals_labels, data_img,
good_labels_img, mask_img=bad_mask2_img)
def test_signal_extraction_with_maps():
shape = (10, 11, 12)
n_regions = 9
n_instants = 13
# Generate signals
rand_gen = np.random.RandomState(0)
maps_img, mask_img = generate_maps(shape, n_regions, border=1)
maps_data = maps_img.get_data()
data = np.zeros(shape + (n_instants,), dtype=np.float32)
signals = np.zeros((n_instants, maps_data.shape[-1]))
for n in xrange(maps_data.shape[-1]):
signals[:, n] = rand_gen.randn(n_instants)
data[maps_data[..., n] > 0, :] = signals[:, n]
img = nibabel.Nifti1Image(data, np.eye(4))
## Get signals
signals_r, labels = region.img_to_signals_maps(img, maps_img,
mask_img=mask_img)
# The output must be identical to the input signals, because every region
# is homogeneous: there is the same signal in all voxels of one given
# region (and all maps are uniform).
np.testing.assert_almost_equal(signals, signals_r)
    # Same thing without a mask (makes no difference in this case)
signals_r, labels = region.img_to_signals_maps(img, maps_img)
np.testing.assert_almost_equal(signals, signals_r)
## Recover image
img_r = region.signals_to_img_maps(signals, maps_img, mask_img=mask_img)
np.testing.assert_almost_equal(img_r.get_data(), img.get_data())
img_r = region.signals_to_img_maps(signals, maps_img)
np.testing.assert_almost_equal(img_r.get_data(), img.get_data())
# Test input validation
data_img = nibabel.Nifti1Image(np.zeros((2, 3, 4, 5)), np.eye(4))
good_maps_img = nibabel.Nifti1Image(np.zeros((2, 3, 4, 7)), np.eye(4))
bad_maps1_img = nibabel.Nifti1Image(np.zeros((2, 3, 5, 7)), np.eye(4))
bad_maps2_img = nibabel.Nifti1Image(np.zeros((2, 3, 4, 7)), 2 * np.eye(4))
good_mask_img = nibabel.Nifti1Image(np.zeros((2, 3, 4)), np.eye(4))
bad_mask1_img = nibabel.Nifti1Image(np.zeros((2, 3, 5)), np.eye(4))
bad_mask2_img = nibabel.Nifti1Image(np.zeros((2, 3, 4)), 2 * np.eye(4))
assert_raises(ValueError, region.img_to_signals_maps, data_img,
bad_maps1_img)
assert_raises(ValueError, region.img_to_signals_maps, data_img,
bad_maps2_img)
assert_raises(ValueError, region.img_to_signals_maps, data_img,
bad_maps1_img, mask_img=good_mask_img)
assert_raises(ValueError, region.img_to_signals_maps, data_img,
bad_maps2_img, mask_img=good_mask_img)
assert_raises(ValueError, region.img_to_signals_maps, data_img,
good_maps_img, mask_img=bad_mask1_img)
assert_raises(ValueError, region.img_to_signals_maps, data_img,
good_maps_img, mask_img=bad_mask2_img)
def test_signal_extraction_with_maps_and_labels():
shape = (4, 5, 6)
n_regions = 7
length = 8
# Generate labels
labels = range(n_regions + 1) # 0 is background
labels_img = generate_labeled_regions(shape, n_regions, labels=labels)
labels_data = labels_img.get_data()
# Convert to maps
maps_data = np.zeros(shape + (n_regions,))
for n, l in enumerate(labels):
if n == 0:
continue
maps_data[labels_data == l, n - 1] = 1
maps_img = nibabel.Nifti1Image(maps_data, labels_img.get_affine())
# Generate fake data
fmri_img, _ = generate_fake_fmri(shape=shape, length=length,
affine=labels_img.get_affine())
# Extract signals from maps and labels: results must be identical.
maps_signals, maps_labels = region.img_to_signals_maps(fmri_img, maps_img)
labels_signals, labels_labels =\
region.img_to_signals_labels(fmri_img, labels_img)
np.testing.assert_almost_equal(maps_signals, labels_signals)
## Same thing with a mask, containing only 3 regions.
mask_data = (labels_data == 1) + (labels_data == 2) + (labels_data == 5)
mask_img = nibabel.Nifti1Image(mask_data.astype(np.int8),
labels_img.get_affine())
labels_signals, labels_labels =\
region.img_to_signals_labels(fmri_img, labels_img,
mask_img=mask_img)
maps_signals, maps_labels = \
region.img_to_signals_maps(fmri_img, maps_img,
mask_img=mask_img)
np.testing.assert_almost_equal(maps_signals, labels_signals)
assert_true(maps_signals.shape[1] == n_regions)
assert_true(maps_labels == range(len(maps_labels)))
assert_true(labels_signals.shape == (length, n_regions))
assert_true(labels_labels == labels[1:])
# Inverse operation (mostly smoke test)
labels_img_r = region.signals_to_img_labels(labels_signals, labels_img,
mask_img=mask_img)
assert_true(labels_img_r.shape == shape + (length,))
maps_img_r = region.signals_to_img_maps(maps_signals, maps_img,
mask_img=mask_img)
assert_true(maps_img_r.shape == shape + (length,))
## Check that NaNs in regions inside mask are preserved
region1 = labels_data == 2
indices = [ind[:1] for ind in np.where(region1)]
fmri_img.get_data()[indices + [slice(None)]] = float('nan')
labels_signals, labels_labels =\
region.img_to_signals_labels(fmri_img, labels_img,
mask_img=mask_img)
assert_true(np.all(np.isnan(labels_signals[:, labels_labels.index(2)])))
def test_generate_maps():
# Basic testing of generate_maps()
shape = (10, 11, 12)
n_regions = 9
maps_img, _ = generate_maps(shape, n_regions, border=1)
maps = maps_img.get_data()
assert_true(maps.shape == shape + (n_regions,))
# no empty map
assert_true(np.all(abs(maps).sum(axis=0).sum(axis=0).sum(axis=0) > 0))
# check border
assert_true(np.all(maps[0, ...] == 0))
assert_true(np.all(maps[:, 0, ...] == 0))
assert_true(np.all(maps[:, :, 0, :] == 0))
def test__trim_maps():
shape = (7, 9, 10)
n_regions = 8
# maps
maps_data = np.zeros(shape + (n_regions,), dtype=np.float32)
h0 = shape[0] / 2
h1 = shape[1] / 2
h2 = shape[2] / 2
maps_data[:h0, :h1, :h2, 0] = 1
maps_data[:h0, :h1, h2:, 1] = 1.1
maps_data[:h0, h1:, :h2, 2] = 1
maps_data[:h0, h1:, h2:, 3] = 0.5
maps_data[h0:, :h1, :h2, 4] = 1
maps_data[h0:, :h1, h2:, 5] = 1.4
maps_data[h0:, h1:, :h2, 6] = 1
maps_data[h0:, h1:, h2:, 7] = 1
# mask intersecting all regions
mask_data = np.zeros(shape, dtype=np.int8)
mask_data[1:-1, 1:-1, 1:-1] = 1
maps_i, maps_i_mask, maps_i_indices = region._trim_maps(maps_data,
mask_data)
assert_true(maps_i.flags["F_CONTIGUOUS"])
assert_true(len(maps_i_indices) == maps_i.shape[-1])
assert_true(maps_i.shape == maps_data.shape)
maps_i_correct = maps_data.copy()
maps_i_correct[np.logical_not(mask_data), :] = 0
np.testing.assert_almost_equal(maps_i_correct, maps_i)
np.testing.assert_equal(mask_data, maps_i_mask)
np.testing.assert_equal(np.asarray(range(8)), maps_i_indices)
# mask intersecting half of the regions
mask_data = np.zeros(shape, dtype=np.int8)
mask_data[1:2, 1:-1, 1:-1] = 1
maps_data[1, 1, 1, 0] = 0 # remove one point inside mask
maps_i, maps_i_mask, maps_i_indices = region._trim_maps(maps_data,
mask_data)
assert_true(maps_i.flags["F_CONTIGUOUS"])
assert_true(len(maps_i_indices) == maps_i.shape[-1])
assert_true(maps_i.shape == (maps_data.shape[:3] + (4,)))
maps_i_correct = maps_data[..., :4].copy()
maps_i_correct[np.logical_not(mask_data), :] = 0
np.testing.assert_almost_equal(maps_i_correct, maps_i)
mask_data[1, 1, 1] = 0 # for test to succeed
np.testing.assert_equal(mask_data, maps_i_mask)
mask_data[1, 1, 1] = 1 # reset, just in case.
np.testing.assert_equal(np.asarray(range(4)), maps_i_indices)
|
|
# Copyright 2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Represent the topology of servers."""
from collections import namedtuple
from pymongo import common
from pymongo.server_type import SERVER_TYPE
from pymongo.errors import ConfigurationError
from pymongo.server_description import ServerDescription
TOPOLOGY_TYPE = namedtuple('TopologyType', ['Single', 'ReplicaSetNoPrimary',
'ReplicaSetWithPrimary', 'Sharded',
'Unknown'])(*range(5))
class TopologyDescription(object):
def __init__(self, topology_type, server_descriptions, replica_set_name):
"""Represent a topology of servers.
:Parameters:
- `topology_type`: initial type
- `server_descriptions`: dict of (address, ServerDescription) for
all seeds
- `replica_set_name`: replica set name or None
"""
self._topology_type = topology_type
self._replica_set_name = replica_set_name
self._server_descriptions = server_descriptions
# Is PyMongo compatible with all servers' wire protocols?
self._incompatible_err = None
for s in self._server_descriptions.values():
# s.min/max_wire_version is the server's wire protocol.
# MIN/MAX_SUPPORTED_WIRE_VERSION is what PyMongo supports.
server_too_new = (
# Server too new.
s.min_wire_version is not None
and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION)
server_too_old = (
# Server too old.
s.max_wire_version is not None
and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION)
if server_too_new or server_too_old:
self._incompatible_err = (
"Server at %s:%d "
"uses wire protocol versions %d through %d, "
"but PyMongo only supports %d through %d"
% (s.address[0], s.address[1],
s.min_wire_version, s.max_wire_version,
common.MIN_SUPPORTED_WIRE_VERSION,
common.MAX_SUPPORTED_WIRE_VERSION))
break
def check_compatible(self):
"""Raise ConfigurationError if any server is incompatible.
A server is incompatible if its wire protocol version range does not
overlap with PyMongo's.
"""
if self._incompatible_err:
raise ConfigurationError(self._incompatible_err)
def has_server(self, address):
return address in self._server_descriptions
def reset_server(self, address):
"""A copy of this description, with one server marked Unknown."""
return updated_topology_description(self, ServerDescription(address))
def reset(self):
"""A copy of this description, with all servers marked Unknown."""
if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary
else:
topology_type = self._topology_type
# The default ServerDescription's type is Unknown.
sds = dict((address, ServerDescription(address))
for address in self._server_descriptions)
return TopologyDescription(topology_type, sds, self._replica_set_name)
def server_descriptions(self):
"""Dict of (address, ServerDescription)."""
return self._server_descriptions.copy()
@property
def topology_type(self):
return self._topology_type
@property
def replica_set_name(self):
"""The replica set name."""
return self._replica_set_name
@property
def known_servers(self):
"""List of Servers of types besides Unknown."""
return [s for s in self._server_descriptions.values()
if s.is_server_type_known]
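# Illustrative sketch, not part of the original module: building a
# TopologyDescription by hand from two seed addresses. The host names are
# made up for the example; ServerDescription(address) defaults to the
# Unknown server type, so nothing is "known" until an ismaster response
# has been processed.
def _example_topology_description():
    seeds = [('alpha.example.com', 27017), ('beta.example.com', 27017)]
    sds = dict((address, ServerDescription(address)) for address in seeds)
    description = TopologyDescription(
        TOPOLOGY_TYPE.ReplicaSetNoPrimary, sds, 'replset0')
    assert description.has_server(seeds[0])
    assert description.known_servers == []  # every seed is still Unknown
    # reset() keeps the topology type here, since there was no primary.
    assert (description.reset().topology_type ==
            TOPOLOGY_TYPE.ReplicaSetNoPrimary)
    return description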
# If topology type is Unknown and we receive an ismaster response, what should
# the new topology type be?
_SERVER_TYPE_TO_TOPOLOGY_TYPE = {
SERVER_TYPE.Mongos: TOPOLOGY_TYPE.Sharded,
SERVER_TYPE.RSPrimary: TOPOLOGY_TYPE.ReplicaSetWithPrimary,
SERVER_TYPE.RSSecondary: TOPOLOGY_TYPE.ReplicaSetNoPrimary,
SERVER_TYPE.RSArbiter: TOPOLOGY_TYPE.ReplicaSetNoPrimary,
SERVER_TYPE.RSOther: TOPOLOGY_TYPE.ReplicaSetNoPrimary,
}
def updated_topology_description(topology_description, server_description):
"""Return an updated copy of a TopologyDescription.
:Parameters:
- `topology_description`: the current TopologyDescription
- `server_description`: a new ServerDescription that resulted from
an ismaster call
Called after attempting (successfully or not) to call ismaster on the
server at server_description.address. Does not modify topology_description.
"""
address = server_description.address
# These values will be updated, if necessary, to form the new
# TopologyDescription.
topology_type = topology_description.topology_type
set_name = topology_description.replica_set_name
server_type = server_description.server_type
# Don't mutate the original dict of server descriptions; copy it.
sds = topology_description.server_descriptions()
# Replace this server's description with the new one.
sds[address] = server_description
if topology_type == TOPOLOGY_TYPE.Single:
# Single type never changes.
return TopologyDescription(TOPOLOGY_TYPE.Single, sds, set_name)
if topology_type == TOPOLOGY_TYPE.Unknown:
if server_type == SERVER_TYPE.Standalone:
sds.pop(address)
elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost):
topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type]
if topology_type == TOPOLOGY_TYPE.Sharded:
if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown):
sds.pop(address)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
elif server_type == SERVER_TYPE.RSPrimary:
topology_type, set_name = _update_rs_from_primary(
sds, set_name, server_description)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type, set_name = _update_rs_no_primary_from_member(
sds, set_name, server_description)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
topology_type = _check_has_primary(sds)
elif server_type == SERVER_TYPE.RSPrimary:
topology_type, set_name = _update_rs_from_primary(
sds, set_name, server_description)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type = _update_rs_with_primary_from_member(
sds, set_name, server_description)
else:
# Server type is Unknown or RSGhost: did we just lose the primary?
topology_type = _check_has_primary(sds)
# Return updated copy.
return TopologyDescription(topology_type, sds, set_name)
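# Illustrative sketch, not part of the original module: passing a fresh
# (Unknown) ServerDescription through updated_topology_description, which
# is exactly what TopologyDescription.reset_server does. It reuses the
# _example_topology_description sketch above.
def _example_updated_topology_description():
    description = _example_topology_description()
    address = ('alpha.example.com', 27017)
    updated = updated_topology_description(
        description, ServerDescription(address))
    assert updated is not description  # the input is never mutated
    assert updated.topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary
    return updated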
def _update_rs_from_primary(sds, replica_set_name, server_description):
"""Update topology description from a primary's ismaster response.
Pass in a dict of ServerDescriptions, current replica set name, and the
ServerDescription we are processing.
Returns (new topology type, new replica_set_name).
"""
if replica_set_name is None:
replica_set_name = server_description.replica_set_name
elif replica_set_name != server_description.replica_set_name:
# We found a primary but it doesn't have the replica_set_name
# provided by the user.
sds.pop(server_description.address)
return _check_has_primary(sds), replica_set_name
# We've heard from the primary. Is it the same primary as before?
for server in sds.values():
if (server.server_type is SERVER_TYPE.RSPrimary
and server.address != server_description.address):
# Reset old primary's type to Unknown.
sds[server.address] = ServerDescription(server.address)
# There can be only one prior primary.
break
# Discover new hosts from this primary's response.
for new_address in server_description.all_hosts:
if new_address not in sds:
sds[new_address] = ServerDescription(new_address)
# Remove hosts not in the response.
for addr in set(sds) - server_description.all_hosts:
sds.pop(addr)
# If the host list differs from the seed list, we may not have a primary
# after all.
return _check_has_primary(sds), replica_set_name
def _update_rs_with_primary_from_member(
sds,
replica_set_name,
server_description):
"""RS with known primary. Process a response from a non-primary.
Pass in a dict of ServerDescriptions, current replica set name, and the
ServerDescription we are processing.
Returns new topology type.
"""
assert replica_set_name is not None
if replica_set_name != server_description.replica_set_name:
sds.pop(server_description.address)
# Had this member been the primary?
return _check_has_primary(sds)
def _update_rs_no_primary_from_member(
sds,
replica_set_name,
server_description):
"""RS without known primary. Update from a non-primary's response.
Pass in a dict of ServerDescriptions, current replica set name, and the
ServerDescription we are processing.
Returns (new topology type, new replica_set_name).
"""
topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary
if replica_set_name is None:
replica_set_name = server_description.replica_set_name
elif replica_set_name != server_description.replica_set_name:
sds.pop(server_description.address)
return topology_type, replica_set_name
# This isn't the primary's response, so don't remove any servers
# it doesn't report. Only add new servers.
for address in server_description.all_hosts:
if address not in sds:
sds[address] = ServerDescription(address)
return topology_type, replica_set_name
def _check_has_primary(sds):
"""Current topology type is ReplicaSetWithPrimary. Is primary still known?
Pass in a dict of ServerDescriptions.
Returns new topology type.
"""
for s in sds.values():
if s.server_type == SERVER_TYPE.RSPrimary:
return TOPOLOGY_TYPE.ReplicaSetWithPrimary
else:
return TOPOLOGY_TYPE.ReplicaSetNoPrimary
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Note that files generated by lex/yacc are not always fully py 2/3
# compatible. Hence, the ``clean_parse_tables.py`` tool in the astropy-tools
# (https://github.com/astropy/astropy-tools) repository should be used to fix
# this when/if lextab/parsetab files are re-generated.
"""
Handles a "generic" string format for units
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...extern import six
import os
import re
import warnings
from . import core, utils
from .base import Base
from ...utils import classproperty
from ...utils.misc import did_you_mean
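# Shared implementation of ``to_string`` for Generic and its subclasses:
# it renders the (optional) scale, then the positive-power bases, then the
# negative-power bases after a '/', e.g. "10 km / s".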
def _to_string(cls, unit):
if isinstance(unit, core.CompositeUnit):
parts = []
if cls._show_scale and unit.scale != 1:
parts.append('{0:g}'.format(unit.scale))
if len(unit.bases):
positives, negatives = utils.get_grouped_by_powers(
unit.bases, unit.powers)
if len(positives):
parts.append(cls._format_unit_list(positives))
elif len(parts) == 0:
parts.append('1')
if len(negatives):
parts.append('/')
unit_list = cls._format_unit_list(negatives)
if len(negatives) == 1:
parts.append('{0}'.format(unit_list))
else:
parts.append('({0})'.format(unit_list))
return ' '.join(parts)
elif isinstance(unit, core.NamedUnit):
return cls._get_unit_name(unit)
class Generic(Base):
"""
A "generic" format.
The syntax of the format is based directly on the FITS standard,
but instead of only supporting the units that FITS knows about, it
supports any unit available in the `astropy.units` namespace.
"""
_show_scale = True
_tokens = (
'DOUBLE_STAR',
'STAR',
'PERIOD',
'SOLIDUS',
'CARET',
'OPEN_PAREN',
'CLOSE_PAREN',
'FUNCNAME',
'UNIT',
'SIGN',
'UINT',
'UFLOAT'
)
@classproperty(lazy=True)
def _all_units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _units(cls):
return cls._all_units[0]
@classproperty(lazy=True)
def _deprecated_units(cls):
return cls._all_units[1]
@classproperty(lazy=True)
def _functions(cls):
return cls._all_units[2]
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@classmethod
def _make_lexer(cls):
from ...extern.ply import lex
tokens = cls._tokens
t_STAR = r'\*'
t_PERIOD = r'\.'
t_SOLIDUS = r'/'
t_DOUBLE_STAR = r'\*\*'
t_CARET = r'\^'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+)?'
if not re.search(r'[eE\.]', t.value):
t.type = 'UINT'
t.value = int(t.value)
elif t.value.endswith('.'):
t.type = 'UINT'
t.value = int(t.value[:-1])
else:
t.value = float(t.value)
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = float(t.value + '1')
return t
# This needs to be a function so we can force it to happen
# before t_UNIT
def t_FUNCNAME(t):
r'((sqrt)|(ln)|(exp)|(log)|(mag)|(dB)|(dex))(?=\ *\()'
return t
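        # A UNIT token is '%', a single-quoted unit name optionally
        # preceded by a one-letter SI prefix, or a plain word; neither
        # form may start with a digit. The docstring below is the ply
        # token regex.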
def t_UNIT(t):
r"%|([YZEPTGMkhdcmunpfazy]?'((?!\d)\w)+')|((?!\d)\w)+"
t.value = cls._get_unit(t)
return t
t_ignore = ' '
# Error handling rule
def t_error(t):
raise ValueError(
"Invalid character at col {0}".format(t.lexpos))
# PY2: need str() to ensure we do not pass on a unicode object.
lexer = lex.lex(optimize=True, lextab=str('generic_lextab'),
outputdir=os.path.dirname(__file__),
reflags=re.UNICODE)
return lexer
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `FITS
standard
<http://fits.gsfc.nasa.gov/standard30/fits_standard30aa.pdf>`_,
Section 4.3, which is not terribly precise. The exact grammar
is here is based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
This same grammar is used by the `"fits"` and `"vounit"`
formats, the only difference being the set of available unit
strings.
"""
from ...extern.ply import yacc
tokens = cls._tokens
def p_main(p):
'''
main : product_of_units
| factor product_of_units
| factor product product_of_units
| division_product_of_units
| factor division_product_of_units
| factor product division_product_of_units
| inverse_unit
| factor inverse_unit
| factor product inverse_unit
| factor
'''
from ..core import Unit
if len(p) == 2:
p[0] = Unit(p[1])
elif len(p) == 3:
p[0] = Unit(p[1] * p[2])
elif len(p) == 4:
p[0] = Unit(p[1] * p[3])
def p_division_product_of_units(p):
'''
division_product_of_units : division_product_of_units division product_of_units
| product_of_units
'''
from ..core import Unit
if len(p) == 4:
p[0] = Unit(p[1] / p[3])
else:
p[0] = p[1]
def p_inverse_unit(p):
'''
inverse_unit : division unit_expression
'''
p[0] = p[2] ** -1
def p_factor(p):
'''
factor : factor_fits
| factor_float
| factor_int
'''
p[0] = p[1]
def p_factor_float(p):
'''
factor_float : signed_float
| signed_float UINT signed_int
| signed_float UINT power numeric_power
'''
if cls.name == 'fits':
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 4:
p[0] = p[1] * p[2] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** float(p[4])
elif len(p) == 2:
p[0] = p[1]
def p_factor_int(p):
'''
factor_int : UINT
| UINT signed_int
| UINT power numeric_power
| UINT UINT signed_int
| UINT UINT power numeric_power
'''
if cls.name == 'fits':
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** float(p[2])
elif len(p) == 4:
if isinstance(p[2], int):
p[0] = p[1] * p[2] ** float(p[3])
else:
p[0] = p[1] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** p[4]
def p_factor_fits(p):
'''
factor_fits : UINT power OPEN_PAREN signed_int CLOSE_PAREN
| UINT power signed_int
| UINT SIGN UINT
| UINT OPEN_PAREN signed_int CLOSE_PAREN
'''
if p[1] != 10:
if cls.name == 'fits':
raise ValueError("Base must be 10")
else:
return
if len(p) == 4:
if p[2] in ('**', '^'):
p[0] = 10 ** p[3]
else:
p[0] = 10 ** (p[2] * p[3])
elif len(p) == 5:
p[0] = 10 ** p[3]
elif len(p) == 6:
p[0] = 10 ** p[4]
def p_product_of_units(p):
'''
product_of_units : unit_expression product product_of_units
| unit_expression product_of_units
| unit_expression
'''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1] * p[3]
def p_unit_expression(p):
'''
unit_expression : function
| unit_with_power
| OPEN_PAREN product_of_units CLOSE_PAREN
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_unit_with_power(p):
'''
unit_with_power : UNIT power numeric_power
| UNIT numeric_power
| UNIT
'''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** p[2]
else:
p[0] = p[1] ** p[3]
def p_numeric_power(p):
'''
numeric_power : sign UINT
| OPEN_PAREN paren_expr CLOSE_PAREN
'''
if len(p) == 3:
p[0] = p[1] * p[2]
elif len(p) == 4:
p[0] = p[2]
def p_paren_expr(p):
'''
paren_expr : sign UINT
| signed_float
| frac
'''
if len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_frac(p):
'''
frac : sign UINT division sign UINT
'''
p[0] = (p[1] * p[2]) / (p[4] * p[5])
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_product(p):
'''
product : STAR
| PERIOD
'''
pass
def p_division(p):
'''
division : SOLIDUS
'''
pass
def p_power(p):
'''
power : DOUBLE_STAR
| CARET
'''
p[0] = p[1]
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_function_name(p):
'''
function_name : FUNCNAME
'''
p[0] = p[1]
def p_function(p):
'''
function : function_name OPEN_PAREN main CLOSE_PAREN
'''
if p[1] == 'sqrt':
p[0] = p[3] ** 0.5
return
elif p[1] in ('mag', 'dB', 'dex'):
function_unit = cls._parse_unit(p[1])
# In Generic, this is callable, but that does not have to
# be the case in subclasses (e.g., in VOUnit it is not).
if callable(function_unit):
p[0] = function_unit(p[3])
return
raise ValueError("'{0}' is not a recognized function".format(p[1]))
def p_error(p):
raise ValueError()
# PY2: need str() to ensure we do not pass on a unicode object.
parser = yacc.yacc(debug=False, tabmodule=str('generic_parsetab'),
outputdir=os.path.dirname(__file__))
return parser
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
raise ValueError(
"At col {0}, {1}".format(
t.lexpos, six.text_type(e)))
@classmethod
def _parse_unit(cls, s, detailed_exception=True):
registry = core.get_current_unit_registry().registry
if s == '%':
return registry['percent']
elif s in registry:
return registry[s]
if detailed_exception:
raise ValueError(
'{0} is not a valid unit. {1}'.format(
s, did_you_mean(s, registry)))
else:
raise ValueError()
@classmethod
def parse(cls, s, debug=False):
if not isinstance(s, six.text_type):
s = s.decode('ascii')
result = cls._do_parse(s, debug=debug)
if s.count('/') > 1:
warnings.warn(
"'{0}' contains multiple slashes, which is "
"discouraged by the FITS standard".format(s),
core.UnitsWarning)
return result
@classmethod
def _do_parse(cls, s, debug=False):
try:
# This is a short circuit for the case where the string
# is just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError as e:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if six.text_type(e):
raise
else:
raise ValueError(
"Syntax error parsing unit '{0}'".format(s))
@classmethod
def _get_unit_name(cls, unit):
return unit.get_format_name('generic')
@classmethod
def _format_unit_list(cls, units):
out = []
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
power = utils.format_power(power)
if '/' in power:
out.append('{0}({1})'.format(
cls._get_unit_name(base), power))
else:
out.append('{0}{1}'.format(
cls._get_unit_name(base), power))
return ' '.join(out)
@classmethod
def to_string(cls, unit):
return _to_string(cls, unit)
class Unscaled(Generic):
"""
    A format that doesn't display the scale part of the unit; other
    than that, it is identical to the `Generic` format.
This is used in some error messages where the scale is irrelevant.
"""
_show_scale = False
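# Illustrative sketch, not part of the original module: parsing a unit
# string with the Generic format and rendering it back. The unit names
# are assumptions about the active astropy unit registry; in a stock
# installation 'km' and 's' resolve to kilometre and second, and the
# result is expected to round-trip as '10 km / s' (or 'km / s' for
# Unscaled, which drops the numeric factor).
def _example_generic_roundtrip():
    unit = Generic.parse('10 km / s')
    return Generic.to_string(unit), Unscaled.to_string(unit)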
|
|
from ._compat import PY2, filename_to_ui, get_text_stderr
from .utils import echo
class ClickException(Exception):
"""An exception that Click can handle and show to the user."""
#: The exit code for this exception
exit_code = 1
def __init__(self, message):
if PY2:
if message is not None:
message = message.encode('utf-8')
Exception.__init__(self, message)
self.message = message
def format_message(self):
return self.message
def show(self, file=None):
if file is None:
file = get_text_stderr()
echo('Error: %s' % self.format_message(), file=file)
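# Illustrative sketch, not part of the original module: a custom error
# type built on ClickException. The class name and message are made up
# for the example; show() prints "Error: <format_message()>" to stderr,
# and exit_code is what Click exits with when the exception escapes a
# command.
class _ExampleConfigError(ClickException):
    exit_code = 3
    def __init__(self, path):
        ClickException.__init__(
            self, 'could not load config file %r' % path)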
class UsageError(ClickException):
"""An internal exception that signals a usage error. This typically
aborts any further handling.
:param message: the error message to display.
:param ctx: optionally the context that caused this error. Click will
fill in the context automatically in some situations.
"""
exit_code = 2
def __init__(self, message, ctx=None):
ClickException.__init__(self, message)
self.ctx = ctx
def show(self, file=None):
if file is None:
file = get_text_stderr()
color = None
if self.ctx is not None:
color = self.ctx.color
echo(self.ctx.get_usage() + '\n', file=file, color=color)
echo('Error: %s' % self.format_message(), file=file, color=color)
class BadParameter(UsageError):
"""An exception that formats out a standardized error message for a
bad parameter. This is useful when thrown from a callback or type as
Click will attach contextual information to it (for instance, which
parameter it is).
.. versionadded:: 2.0
:param param: the parameter object that caused this error. This can
be left out, and Click will attach this info itself
if possible.
:param param_hint: a string that shows up as parameter name. This
can be used as alternative to `param` in cases
where custom validation should happen. If it is
a string it's used as such, if it's a list then
each item is quoted and separated.
"""
def __init__(self, message, ctx=None, param=None,
param_hint=None):
UsageError.__init__(self, message, ctx)
self.param = param
self.param_hint = param_hint
def format_message(self):
if self.param_hint is not None:
param_hint = self.param_hint
elif self.param is not None:
param_hint = self.param.opts or [self.param.human_readable_name]
else:
return 'Invalid value: %s' % self.message
if isinstance(param_hint, (tuple, list)):
param_hint = ' / '.join('"%s"' % x for x in param_hint)
return 'Invalid value for %s: %s' % (param_hint, self.message)
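# Illustrative sketch, not part of the original module: how BadParameter
# renders its message with and without a hint. The option names and the
# message text are made up for the example.
def _example_bad_parameter_messages():
    plain = BadParameter('must be a positive integer')
    hinted = BadParameter('must be a positive integer',
                          param_hint=['-c', '--count'])
    # -> 'Invalid value: must be a positive integer'
    # -> 'Invalid value for "-c" / "--count": must be a positive integer'
    return plain.format_message(), hinted.format_message()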
class MissingParameter(BadParameter):
"""Raised if click required an option or argument but it was not
provided when invoking the script.
.. versionadded:: 4.0
:param param_type: a string that indicates the type of the parameter.
The default is to inherit the parameter type from
the given `param`. Valid values are ``'parameter'``,
``'option'`` or ``'argument'``.
"""
def __init__(self, message=None, ctx=None, param=None,
param_hint=None, param_type=None):
BadParameter.__init__(self, message, ctx, param, param_hint)
self.param_type = param_type
def format_message(self):
if self.param_hint is not None:
param_hint = self.param_hint
elif self.param is not None:
param_hint = self.param.opts or [self.param.human_readable_name]
else:
param_hint = None
if isinstance(param_hint, (tuple, list)):
param_hint = ' / '.join('"%s"' % x for x in param_hint)
param_type = self.param_type
if param_type is None and self.param is not None:
param_type = self.param.param_type_name
msg = self.message
if self.param is not None:
msg_extra = self.param.type.get_missing_message(self.param)
if msg_extra:
if msg:
msg += '. ' + msg_extra
else:
msg = msg_extra
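        # Assembles e.g. 'Missing option "--count".': the parameter type,
        # an optional hint, then '.' (or '. ' plus the type's extra
        # message).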
return 'Missing %s%s%s%s' % (
param_type,
param_hint and ' %s' % param_hint or '',
msg and '. ' or '.',
msg or '',
)
class NoSuchOption(UsageError):
"""Raised if click attempted to handle an option that does not
exist.
.. versionadded:: 4.0
"""
def __init__(self, option_name, message=None, possibilities=None,
ctx=None):
if message is None:
message = 'no such option: %s' % option_name
UsageError.__init__(self, message, ctx)
self.option_name = option_name
self.possibilities = possibilities
def format_message(self):
bits = [self.message]
if self.possibilities:
if len(self.possibilities) == 1:
bits.append('Did you mean %s?' % self.possibilities[0])
else:
possibilities = sorted(self.possibilities)
bits.append('(Possible options: %s)' % ', '.join(possibilities))
return ' '.join(bits)
class BadOptionUsage(UsageError):
"""Raised if an option is generally supplied but the use of the option
was incorrect. This is for instance raised if the number of arguments
for an option is not correct.
.. versionadded:: 4.0
:param option_name: the name of the option being used incorrectly.
"""
def __init__(self, option_name, message, ctx=None):
UsageError.__init__(self, message, ctx)
self.option_name = option_name
class BadArgumentUsage(UsageError):
"""Raised if an argument is generally supplied but the use of the argument
was incorrect. This is for instance raised if the number of values
for an argument is not correct.
.. versionadded:: 6.0
"""
def __init__(self, message, ctx=None):
UsageError.__init__(self, message, ctx)
class FileError(ClickException):
"""Raised if a file cannot be opened."""
def __init__(self, filename, hint=None):
ui_filename = filename_to_ui(filename)
if hint is None:
hint = 'unknown error'
ClickException.__init__(self, hint)
self.ui_filename = ui_filename
self.filename = filename
def format_message(self):
return 'Could not open file %s: %s' % (self.ui_filename, self.message)
class Abort(RuntimeError):
"""An internal signalling exception that signals Click to abort."""
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses options for the instrumentation tests."""
#TODO(craigdh): pylib/utils/ should not depend on pylib/.
from pylib import constants
import optparse
import os
import sys
_SDK_OUT_DIR = os.path.join(constants.CHROME_DIR, 'out')
def AddBuildTypeOption(option_parser):
"""Decorates OptionParser with build type option."""
default_build_type = 'Debug'
if 'BUILDTYPE' in os.environ:
default_build_type = os.environ['BUILDTYPE']
option_parser.add_option('--debug', action='store_const', const='Debug',
dest='build_type', default=default_build_type,
help='If set, run test suites under out/Debug. '
'Default is env var BUILDTYPE or Debug')
option_parser.add_option('--release', action='store_const', const='Release',
dest='build_type',
help='If set, run test suites under out/Release. '
'Default is env var BUILDTYPE or Debug.')
def AddInstallAPKOption(option_parser):
"""Decorates OptionParser with apk option used to install the APK."""
AddBuildTypeOption(option_parser)
option_parser.add_option('--apk',
help=('The name of the apk containing the '
' application (with the .apk extension).'))
option_parser.add_option('--apk_package',
help=('The package name used by the apk containing '
'the application.'))
option_parser.add_option('--keep_data',
action='store_true',
default=False,
help=('Keep the package data when installing '
'the application.'))
def ValidateInstallAPKOption(option_parser, options):
if not options.apk:
option_parser.error('--apk is mandatory.')
if not os.path.exists(options.apk):
options.apk = os.path.join(constants.CHROME_DIR,
'out', options.build_type,
'apks', options.apk)
def AddTestRunnerOptions(option_parser, default_timeout=60):
"""Decorates OptionParser with options applicable to all tests."""
option_parser.add_option('-t', dest='timeout',
help='Timeout to wait for each test',
type='int',
default=default_timeout)
option_parser.add_option('-c', dest='cleanup_test_files',
help='Cleanup test files on the device after run',
action='store_true')
option_parser.add_option('-v',
'--verbose',
dest='verbose_count',
default=0,
action='count',
help='Verbose level (multiple times for more)')
profilers = ['devicestatsmonitor', 'chrometrace', 'dumpheap', 'smaps',
'traceview']
option_parser.add_option('--profiler', dest='profilers', action='append',
choices=profilers,
help='Profiling tool to run during test. '
'Pass multiple times to run multiple profilers. '
'Available profilers: %s' % profilers)
option_parser.add_option('--tool',
dest='tool',
help='Run the test under a tool '
'(use --tool help to list them)')
option_parser.add_option('--flakiness-dashboard-server',
dest='flakiness_dashboard_server',
help=('Address of the server that is hosting the '
'Chrome for Android flakiness dashboard.'))
AddBuildTypeOption(option_parser)
def AddGTestOptions(option_parser):
"""Decorates OptionParser with GTest tests options."""
AddTestRunnerOptions(option_parser, default_timeout=0)
option_parser.add_option('-s', '--suite', dest='test_suite',
help='Executable name of the test suite to run '
'(use -s help to list them).')
option_parser.add_option('--out-directory', dest='out_directory',
help='Path to the out/ directory, irrespective of '
'the build type. Only for non-Chromium uses.')
option_parser.add_option('-d', '--device', dest='test_device',
help='Target device for the test suite to run on.')
option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
help='gtest filter.')
#TODO(craigdh): Replace _ with - in arguments for consistency.
option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
help='Additional arguments to pass to the test.')
option_parser.add_option('-e', '--emulator', dest='use_emulator',
action='store_true',
help='Run tests in a new instance of emulator.')
option_parser.add_option('-n', '--emulator_count',
type='int', default=1,
help='Number of emulators to launch for running the '
'tests.')
option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
action='store_true',
help='Use Xvfb around tests (ignored if not Linux).')
option_parser.add_option('--webkit', action='store_true',
help='Run the tests from a WebKit checkout.')
option_parser.add_option('--repeat', dest='repeat', type='int',
default=2,
help='Repeat count on test timeout.')
option_parser.add_option('--exit_code', action='store_true',
help='If set, the exit code will be total number '
'of failures.')
option_parser.add_option('--exe', action='store_true',
help='If set, use the exe test runner instead of '
'the APK.')
def AddCommonInstrumentationOptions(option_parser):
"""Decorates OptionParser with base instrumentation tests options."""
AddTestRunnerOptions(option_parser)
option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
action='store_true', help='Wait for debugger.')
option_parser.add_option('-f', '--test_filter',
help='Test filter (if not fully qualified, '
'will run all matches).')
option_parser.add_option('-A', '--annotation', dest='annotation_str',
help=('Run only tests with any of the given '
'annotations. '
'An annotation can be either a key or a '
'key-values pair. '
'A test that has no annotation is '
'considered "SmallTest".'))
option_parser.add_option('-j', '--java_only', action='store_true',
help='Run only the Java tests.')
option_parser.add_option('-p', '--python_only', action='store_true',
help='Run only the Python tests.')
option_parser.add_option('-n', '--run_count', type='int',
dest='number_of_runs', default=1,
help=('How many times to run each test, regardless '
'of the result. (Default is 1)'))
option_parser.add_option('--screenshot', dest='screenshot_failures',
action='store_true',
help='Capture screenshots of test failures')
option_parser.add_option('--save-perf-json', action='store_true',
help='Saves the JSON file for each UI Perf test.')
option_parser.add_option('--shard_retries', type=int, default=1,
help=('Number of times to retry each failure when '
'sharding.'))
option_parser.add_option('--official-build', help='Run official build tests.')
option_parser.add_option('--device',
help='Serial number of device we should use.')
option_parser.add_option('--python_test_root',
help='Root of the python-driven tests.')
option_parser.add_option('--keep_test_server_ports',
action='store_true',
help='Indicates the test server ports must be '
'kept. When this is run via a sharder '
'the test server ports should be kept and '
'should not be reset.')
option_parser.add_option('--buildbot-step-failure',
action='store_true',
help=('If present, will set the buildbot status '
'as STEP_FAILURE, otherwise as STEP_WARNINGS '
'when test(s) fail.'))
option_parser.add_option('--disable_assertions', action='store_true',
help='Run with java assertions disabled.')
option_parser.add_option('--test_data', action='append', default=[],
help=('Each instance defines a directory of test '
'data that should be copied to the target(s) '
'before running the tests. The argument '
'should be of the form <target>:<source>, '
                                 '<target> is relative to the device data '
'directory, and <source> is relative to the '
'chromium build directory.'))
def AddInstrumentationOptions(option_parser):
"""Decorates OptionParser with instrumentation tests options."""
AddCommonInstrumentationOptions(option_parser)
option_parser.add_option('-I', dest='install_apk',
help='Install APK.', action='store_true')
option_parser.add_option('--test-apk', dest='test_apk',
help=('The name of the apk containing the tests '
'(without the .apk extension). For SDK '
'builds, the apk name without the debug '
                                 'suffix (for example, ContentShellTest).'))
def AddUIAutomatorOptions(option_parser):
"""Decorates OptionParser with uiautomator tests options."""
AddCommonInstrumentationOptions(option_parser)
option_parser.add_option(
'--package-name',
help=('The package name used by the apk containing the application.'))
option_parser.add_option(
'--uiautomator-jar',
help=('Path to the uiautomator jar to be installed on the device.'))
option_parser.add_option(
'--uiautomator-info-jar',
help=('Path to the uiautomator jar for use by proguard.'))
def ValidateCommonInstrumentationOptions(option_parser, options, args):
"""Validate common options/arguments and populate options with defaults."""
if len(args) > 1:
option_parser.print_help(sys.stderr)
option_parser.error('Unknown arguments: %s' % args[1:])
if options.java_only and options.python_only:
option_parser.error('Options java_only (-j) and python_only (-p) '
'are mutually exclusive.')
options.run_java_tests = True
options.run_python_tests = True
if options.java_only:
options.run_python_tests = False
elif options.python_only:
options.run_java_tests = False
if options.annotation_str:
options.annotation = options.annotation_str.split()
elif options.test_filter:
options.annotation = []
else:
options.annotation = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest']
def ValidateInstrumentationOptions(option_parser, options, args):
"""Validate options/arguments and populate options with defaults."""
ValidateCommonInstrumentationOptions(option_parser, options, args)
if not options.test_apk:
option_parser.error('--test-apk must be specified.')
if os.path.exists(options.test_apk):
# The APK is fully qualified, assume the JAR lives along side.
options.test_apk_path = options.test_apk
options.test_apk_jar_path = (os.path.splitext(options.test_apk_path)[0] +
'.jar')
else:
options.test_apk_path = os.path.join(_SDK_OUT_DIR,
options.build_type,
constants.SDK_BUILD_APKS_DIR,
'%s.apk' % options.test_apk)
options.test_apk_jar_path = os.path.join(
_SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_TEST_JAVALIB_DIR,
'%s.jar' % options.test_apk)
def ValidateUIAutomatorOptions(option_parser, options, args):
"""Validate uiautomator options/arguments."""
ValidateCommonInstrumentationOptions(option_parser, options, args)
if not options.package_name:
option_parser.error('--package-name must be specified.')
if not options.uiautomator_jar:
option_parser.error('--uiautomator-jar must be specified.')
if not options.uiautomator_info_jar:
option_parser.error('--uiautomator-info-jar must be specified.')
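# A minimal sketch (not part of the original helpers) of how a test script is
# expected to combine the Add*Options decorators and Validate* helpers above.
# The function name below is hypothetical and argv is assumed to be sys.argv.
def _example_parse_and_validate(argv):
  import optparse
  option_parser = optparse.OptionParser()
  AddInstrumentationOptions(option_parser)
  options, args = option_parser.parse_args(argv)
  ValidateInstrumentationOptions(option_parser, options, args)
  return options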
|
|
import json
import logging
from os import path
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
from pylons import response
from ckan.lib.base import BaseController
from ckan.config.environment import config as ckan_config
from mappers import *
from builders import Catalog, Dataset, ThemeTaxonomy
logger = logging.getLogger('jsoncatalog.controller')
class JsonCatalogController(BaseController):
"""
    Main controller of the plugin.
"""
_errors_json = []
def __init__(self):
plugin_folder = path.dirname(__file__)
self.mappers_folder = path.join(plugin_folder, 'mappers')
mapper = ckan_config.get('ckanext.json_catalog.schema', 'default')
mapper_version = ckan_config.get('ckanext.json_catalog.version', '1.0')
self.mappers = Mappers(schema=mapper, version=mapper_version)
self.wildcards = WildCards()
def generate_catalog(self):
"""
        Generates catalog.json.
        The catalog is generated from the fields of the datasets stored in
        CKAN and the mapping rules defined in the mapper.
Args:
- None.
Returns:
        - JSON response. The catalog in JSON format.
"""
err_response = {
'status': 404,
'message': ''
}
try:
return self.build_response(self.map_catalog(self.get_catalog()))
except KeyError:
err_response.update({'message': 'Faltan Parametros requerido.'})
except ValueError:
err_response.update({'message': 'Formato no esperado.'})
return self.build_response(err_response)
def generate_theme_taxonomy(self):
"""
        Generates the theme taxonomy.
Args:
- None.
Returns:
        - JSON response. The ThemeTaxonomy in JSON format.
"""
_response = {
'status': 404,
'message': ''
}
thm_txnm = []
try:
thm_txnm = self.map_themes(self.get_themes())
return self.build_response(thm_txnm)
except KeyError as e:
_response['message'] = 'Falta parametro {} requerido.'.format(e)
except ValueError as e:
_response['message'] = 'La clave {} no existe dentro de CKAN.'.format(e)
finally:
            if len(_response['message']) == 0:
_response = thm_txnm
return self.build_response(_response)
def get_catalog(self):
"""
        Gets the catalog information.
        Returns:
- Dict.
"""
return self.get_ckan_data(_content_of='catalog')
def map_catalog(self, _catalog):
"""
Returns:
Dict():
            - {} (empty) on failure.
            - {catalog} on success.
"""
mapped_catalogs = {}
try:
mapped_catalogs = self.mappers.apply(data=_catalog, _mapper='catalog')
for k, v in mapped_catalogs.items():
if u'@datasets' == unicode(v):
mapped_catalogs.update({k: self.map_dataset(self.get_datasets())})
if u'@themeTaxonomy' == unicode(v):
mapped_catalogs.update({k: self.map_themes(self.get_themes())})
except (AttributeError, TypeError, KeyError) as e:
logger.error('>> {}'.format(e))
return mapped_catalogs
@staticmethod
def get_ckan_data(_content_of='catalog', dataset_id=None):
if _content_of.lower() == 'catalog':
datadict = {'sort': 'metadata_modified desc',
'rows': 5000}
action = u'package_search'
return toolkit.get_action(action)(data_dict=datadict)
elif _content_of.lower() == 'distributions':
datadict = {'sort': 'metadata_modified desc',
'rows': 5000}
action = u'package_search'
return toolkit.get_action(action)(data_dict=datadict)['results']
elif _content_of.lower() == 'datasets':
datadict = {'sort': 'metadata_modified desc',
'rows': 5000}
action = u'package_search'
return toolkit.get_action(action)(data_dict=datadict)
elif _content_of.lower() == 'groups':
datadict = {'all_fields': True}
action = u'group_list'
return toolkit.get_action(action)(data_dict=datadict)
else:
raise AttributeError
def get_datasets(self):
"""
        Gets the list of datasets stored in CKAN.
Returns:
        - List(). Len(list) == n: list of the n datasets existing in CKAN.
        - List(). Len(list) == 0: an error occurred or no datasets have been loaded.
"""
return self.get_ckan_data(_content_of='datasets')
def map_dataset(self, _datasets):
mapped_datasets = []
try:
mapped_datasets = self.mappers.apply(_datasets, _mapper='dataset')
for mapped_dataset in mapped_datasets:
for k, v in mapped_dataset.items():
if u'@distributions' == unicode(v):
mapped_dataset.update({k: self.map_distribution(self.get_themes())})
except (AttributeError, TypeError, KeyError) as e:
logger.error('++ {}'.format(e))
return mapped_datasets
def exists(self, _obj='dataset', _key=None, _value=None):
"""
        Searches within the CKAN data.
Args:
- _obj:
- key_to_search:
Returns:
- bool():
            - True: the key _key exists in _obj and holds the value _value.
            - False: the key _key does not exist in _obj or does not hold the value _value.
"""
def search_in_dict(d, _k, _v):
r = False
try:
if d[_k] == _v:
r = True
            except (KeyError, IndexError):
pass
return r
        # If _key or _value is None, return False.
results = False
if None in [_key, _value]:
return results
data = self.get_ckan_data(_obj)
if isinstance(data, list):
for elem in data:
results = search_in_dict(elem, _key, _value)
if results:
break
elif isinstance(data, dict):
results = search_in_dict(data, _key, _value)
else:
return results
return results
def map_distribution(self, _dataset):
mapped_distributions = []
try:
mapped_distributions = self.mappers.apply(_dataset, _mapper='distributions')
except (AttributeError, TypeError, KeyError) as e:
logger.error('[mapper.distributions] {}'.format(e))
return mapped_distributions
def get_dataset(self, dataset_id=None):
"""
        Gets a dictionary with the contents of the dataset.
Returns:
        - dict(). Len(dict) == n: the contents of the dataset stored in CKAN.
        - dict(). Len(dict) == 0: an error occurred or the dataset has not been loaded.
"""
_dataset = {}
        if dataset_id is None:
return _dataset
return self.get_ckan_data(_content_of='distributions')
def map_themes(self, _themes):
mapped_themes = []
try:
mapped_themes = self.mappers.apply(_themes, _mapper='themeTaxonomy')
except (AttributeError, TypeError, KeyError) as e:
logger.error('-- {}'.format(e))
return mapped_themes
def get_themes(self):
"""
        Gets the list of groups stored in CKAN.
Returns:
        - List(). Len(list) == n: list of the n groups existing in CKAN.
        - List(). Len(list) == 0: an error occurred or no groups have been loaded.
"""
return self.get_ckan_data(_content_of='groups')
@staticmethod
def build_response(_json_data):
data = {}
if isinstance(_json_data, (dict, list)):
data = _json_data
response.content_type = 'application/json; charset=UTF-8'
del response.headers["Cache-Control"]
del response.headers["Pragma"]
return plugins.toolkit.literal(json.dumps(data))
def test_responses(self):
c = Catalog()
return self.build_response(c.render())
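# How this controller is exposed lives outside this file; a hedged sketch of
# the usual CKAN 2.x (Pylons) wiring via an IRoutes plugin is shown below. The
# route names, URL paths and the dotted controller path are assumptions for
# illustration only:
#
#   class JsonCatalogPlugin(plugins.SingletonPlugin):
#       plugins.implements(plugins.IRoutes, inherit=True)
#
#       def before_map(self, route_map):
#           ctrl = 'ckanext.jsoncatalog.controller:JsonCatalogController'
#           route_map.connect('json_catalog', '/catalog.json',
#                             controller=ctrl, action='generate_catalog')
#           route_map.connect('theme_taxonomy', '/theme_taxonomy.json',
#                             controller=ctrl, action='generate_theme_taxonomy')
#           return route_map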
|
|
# Copyright (c) 2011 Intel Corporation
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The FilterScheduler is for creating shares.
You can customize this scheduler by specifying your own share Filters and
Weighing Functions.
"""
from oslo_config import cfg
from oslo_log import log
from manila import exception
from manila.i18n import _
from manila.i18n import _LE, _LI
from manila.scheduler.drivers import base
from manila.scheduler import scheduler_options
from manila.share import share_types
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class FilterScheduler(base.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
self.cost_function_cache = None
self.options = scheduler_options.SchedulerOptions()
self.max_attempts = self._max_attempts()
def _get_configuration_options(self):
"""Fetch options dictionary. Broken out for testing."""
return self.options.get_configuration()
def get_pools(self, context, filters):
return self.host_manager.get_pools(context, filters)
def _post_select_populate_filter_properties(self, filter_properties,
host_state):
"""Add additional information to filter properties.
Add additional information to the filter properties after a host has
been selected by the scheduling process.
"""
# Add a retry entry for the selected volume backend:
self._add_retry_host(filter_properties, host_state.host)
def _add_retry_host(self, filter_properties, host):
"""Add retry entry for the selected volume backend.
In the event that the request gets re-scheduled, this entry
will signal that the given backend has already been tried.
"""
retry = filter_properties.get('retry')
if not retry:
return
hosts = retry['hosts']
hosts.append(host)
def _max_attempts(self):
max_attempts = CONF.scheduler_max_attempts
if max_attempts < 1:
msg = _("Invalid value for 'scheduler_max_attempts', "
"must be >=1")
raise exception.InvalidParameterValue(err=msg)
return max_attempts
def schedule_create_share(self, context, request_spec, filter_properties):
weighed_host = self._schedule_share(context,
request_spec,
filter_properties)
if not weighed_host:
raise exception.NoValidHost(reason="")
host = weighed_host.obj.host
share_id = request_spec['share_id']
snapshot_id = request_spec['snapshot_id']
updated_share = base.share_update_db(context, share_id, host)
self._post_select_populate_filter_properties(filter_properties,
weighed_host.obj)
# context is not serializable
filter_properties.pop('context', None)
self.share_rpcapi.create_share_instance(
context, updated_share.instance, host,
request_spec=request_spec,
filter_properties=filter_properties,
snapshot_id=snapshot_id
)
def schedule_create_replica(self, context, request_spec,
filter_properties):
share_replica_id = request_spec['share_instance_properties'].get('id')
weighed_host = self._schedule_share(
context, request_spec, filter_properties)
if not weighed_host:
msg = _('Failed to find a weighted host for scheduling share '
'replica %s.')
raise exception.NoValidHost(reason=msg % share_replica_id)
host = weighed_host.obj.host
updated_share_replica = base.share_replica_update_db(
context, share_replica_id, host)
self._post_select_populate_filter_properties(filter_properties,
weighed_host.obj)
# context is not serializable
filter_properties.pop('context', None)
self.share_rpcapi.create_share_replica(
context, updated_share_replica, host, request_spec=request_spec,
filter_properties=filter_properties)
def _format_filter_properties(self, context, filter_properties,
request_spec):
elevated = context.elevated()
share_properties = request_spec['share_properties']
share_instance_properties = (request_spec.get(
'share_instance_properties', {}))
        # Since Manila is using mixed filters from Oslo and its own, which
# takes 'resource_XX' and 'volume_XX' as input respectively, copying
# 'volume_XX' to 'resource_XX' will make both filters happy.
resource_properties = share_properties.copy()
resource_properties.update(share_instance_properties.copy())
share_type = request_spec.get("share_type", {})
if not share_type:
msg = _("You must create a share type in advance,"
" and specify in request body or"
" set default_share_type in manila.conf.")
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
extra_specs = share_type.get('extra_specs', {})
if extra_specs:
for extra_spec_name in share_types.get_boolean_extra_specs():
extra_spec = extra_specs.get(extra_spec_name)
if extra_spec is not None:
if not extra_spec.startswith("<is>"):
extra_spec = "<is> %s" % extra_spec
share_type['extra_specs'][extra_spec_name] = extra_spec
resource_type = request_spec.get("share_type") or {}
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
share_group = request_spec.get('share_group')
# NOTE(gouthamr): If 'active_replica_host' is present in the request
# spec, pass that host's 'replication_domain' to the
# ShareReplication filter.
active_replica_host = request_spec.get('active_replica_host')
replication_domain = None
if active_replica_host:
temp_hosts = self.host_manager.get_all_host_states_share(elevated)
ar_host = next((host for host in temp_hosts
if host.host == active_replica_host), None)
if ar_host:
replication_domain = ar_host.replication_domain
if filter_properties is None:
filter_properties = {}
self._populate_retry_share(filter_properties, resource_properties)
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'share_type': share_type,
'resource_type': resource_type,
'share_group': share_group,
'replication_domain': replication_domain,
})
self.populate_filter_properties_share(request_spec, filter_properties)
return filter_properties, share_properties
def _schedule_share(self, context, request_spec, filter_properties=None):
"""Returns a list of hosts that meet the required specs.
The list is ordered by their fitness.
"""
elevated = context.elevated()
filter_properties, share_properties = self._format_filter_properties(
context, filter_properties, request_spec)
# Find our local list of acceptable hosts by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
hosts = self.host_manager.get_all_host_states_share(elevated)
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties)
if not hosts:
return None
LOG.debug("Filtered share %(hosts)s", {"hosts": hosts})
# weighted_host = WeightedHost() ... the best
# host for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
filter_properties)
best_host = weighed_hosts[0]
LOG.debug("Choosing for share: %(best_host)s",
{"best_host": best_host})
# NOTE(rushiagr): updating the available space parameters at same place
best_host.obj.consume_from_share(share_properties)
return best_host
def _populate_retry_share(self, filter_properties, properties):
"""Populate filter properties with retry history.
Populate filter properties with history of retries for this
request. If maximum retries is exceeded, raise NoValidHost.
"""
max_attempts = self.max_attempts
retry = filter_properties.pop('retry', {})
if max_attempts == 1:
# re-scheduling is disabled.
return
# retry is enabled, update attempt count:
if retry:
retry['num_attempts'] += 1
else:
retry = {
'num_attempts': 1,
'hosts': [] # list of share service hosts tried
}
filter_properties['retry'] = retry
share_id = properties.get('share_id')
self._log_share_error(share_id, retry)
if retry['num_attempts'] > max_attempts:
msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
"share %(share_id)s") % {
"max_attempts": max_attempts,
"share_id": share_id
}
raise exception.NoValidHost(reason=msg)
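    # Illustrative (not executed) example of how the retry entry above evolves
    # when scheduler_max_attempts > 1. The host string uses the usual manila
    # 'host@backend#pool' form and is a placeholder:
    #
    #   1st pass:  filter_properties['retry'] == {'num_attempts': 1, 'hosts': []}
    #   after _add_retry_host(filter_properties, 'hostA@backend1#pool1'):
    #              {'num_attempts': 1, 'hosts': ['hostA@backend1#pool1']}
    #   rescheduled pass for the same request:
    #              {'num_attempts': 2, 'hosts': ['hostA@backend1#pool1']}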
def _log_share_error(self, share_id, retry):
"""Log any exceptions from a previous share create operation.
If the request contained an exception from a previous share
create operation, log it to aid debugging.
"""
exc = retry.pop('exc', None) # string-ified exception from share
if not exc:
return # no exception info from a previous attempt, skip
hosts = retry.get('hosts')
if not hosts:
return # no previously attempted hosts, skip
last_host = hosts[-1]
        LOG.error(_LE("Error scheduling %(share_id)s from last share-service: "
                      "%(last_host)s : %(exc)s"), {
                          "share_id": share_id,
                          "last_host": last_host,
                          "exc": exc
                      })
def populate_filter_properties_share(self, request_spec,
filter_properties):
"""Stuff things into filter_properties.
Can be overridden in a subclass to add more data.
"""
shr = request_spec['share_properties']
inst = request_spec['share_instance_properties']
filter_properties['size'] = shr['size']
filter_properties['availability_zone_id'] = (
inst.get('availability_zone_id')
)
filter_properties['user_id'] = shr.get('user_id')
filter_properties['metadata'] = shr.get('metadata')
def schedule_create_share_group(self, context, share_group_id,
request_spec, filter_properties):
LOG.info(_LI("Scheduling share group %s.") % share_group_id)
host = self._get_best_host_for_share_group(context, request_spec)
if not host:
msg = _("No hosts available for share group %s.") % share_group_id
raise exception.NoValidHost(reason=msg)
msg = _LI("Chose host %(host)s for create_share_group %(group)s.")
LOG.info(msg % {'host': host, 'group': share_group_id})
updated_share_group = base.share_group_update_db(
context, share_group_id, host)
self.share_rpcapi.create_share_group(
context, updated_share_group, host)
def _get_weighted_hosts_for_share_type(self, context, request_spec,
share_type):
config_options = self._get_configuration_options()
# NOTE(ameade): Find our local list of acceptable hosts by
# filtering and weighing our options. We virtually consume
# resources on it so subsequent selections can adjust accordingly.
# NOTE(ameade): Remember, we are using an iterator here. So only
# traverse this list once.
all_hosts = self.host_manager.get_all_host_states_share(context)
if not all_hosts:
return []
share_type['extra_specs'] = share_type.get('extra_specs', {})
if share_type['extra_specs']:
for spec_name in share_types.get_required_extra_specs():
extra_spec = share_type['extra_specs'].get(spec_name)
if extra_spec is not None:
share_type['extra_specs'][spec_name] = (
"<is> %s" % extra_spec)
filter_properties = {
'context': context,
'request_spec': request_spec,
'config_options': config_options,
'share_type': share_type,
'resource_type': share_type,
'size': 0,
}
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(all_hosts,
filter_properties)
if not hosts:
return []
LOG.debug("Filtered %s" % hosts)
# weighted_host = WeightedHost() ... the best host for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(
hosts,
filter_properties)
if not weighed_hosts:
return []
return weighed_hosts
def _get_weighted_hosts_for_share_group_type(self, context, request_spec,
share_group_type):
config_options = self._get_configuration_options()
all_hosts = self.host_manager.get_all_host_states_share(context)
if not all_hosts:
return []
filter_properties = {
'context': context,
'request_spec': request_spec,
'config_options': config_options,
'share_group_type': share_group_type,
'resource_type': share_group_type,
}
hosts = self.host_manager.get_filtered_hosts(
all_hosts, filter_properties)
if not hosts:
return []
LOG.debug("Filtered %s" % hosts)
weighed_hosts = self.host_manager.get_weighed_hosts(
hosts,
filter_properties)
if not weighed_hosts:
return []
return weighed_hosts
def _get_weighted_candidates_share_group(self, context, request_spec):
"""Finds hosts that support the share group.
Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
shr_types = request_spec.get("share_types")
weighed_hosts = []
for iteration_count, share_type in enumerate(shr_types):
temp_weighed_hosts = self._get_weighted_hosts_for_share_type(
elevated, request_spec, share_type)
# NOTE(ameade): Take the intersection of hosts so we have one that
# can support all share types of the share group
if iteration_count == 0:
weighed_hosts = temp_weighed_hosts
else:
new_weighed_hosts = []
for host1 in weighed_hosts:
for host2 in temp_weighed_hosts:
if host1.obj.host == host2.obj.host:
new_weighed_hosts.append(host1)
weighed_hosts = new_weighed_hosts
if not weighed_hosts:
return []
# NOTE(ameade): Ensure the hosts support the share group type
share_group_type = request_spec.get("resource_type", {})
temp_weighed_group_hosts = (
self._get_weighted_hosts_for_share_group_type(
elevated, request_spec, share_group_type))
new_weighed_hosts = []
for host1 in weighed_hosts:
for host2 in temp_weighed_group_hosts:
if host1.obj.host == host2.obj.host:
new_weighed_hosts.append(host1)
weighed_hosts = new_weighed_hosts
return weighed_hosts
def _get_best_host_for_share_group(self, context, request_spec):
weighed_hosts = self._get_weighted_candidates_share_group(
context,
request_spec)
if not weighed_hosts:
return None
return weighed_hosts[0].obj.host
def host_passes_filters(self, context, host, request_spec,
filter_properties):
elevated = context.elevated()
filter_properties, share_properties = self._format_filter_properties(
context, filter_properties, request_spec)
hosts = self.host_manager.get_all_host_states_share(elevated)
hosts = self.host_manager.get_filtered_hosts(hosts, filter_properties)
hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties)
for tgt_host in hosts:
if tgt_host.obj.host == host:
return tgt_host.obj
msg = (_('Cannot place share %(id)s on %(host)s')
% {'id': request_spec['share_id'], 'host': host})
raise exception.NoValidHost(reason=msg)
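# A rough, hedged sketch of the request_spec shape this scheduler reads. Only
# the keys referenced in the methods above are listed; a real request built by
# the share API layer carries more fields, and the values here are placeholders:
#
#   request_spec = {
#       'share_id': 'share-uuid',
#       'snapshot_id': None,
#       'share_properties': {'size': 1, 'user_id': 'user-uuid', 'metadata': {}},
#       'share_instance_properties': {'id': 'instance-uuid',
#                                     'availability_zone_id': 'az-uuid'},
#       'share_type': {'extra_specs': {}},
#   }
#   # scheduler.schedule_create_share(ctxt, request_spec, filter_properties={})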
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Curses-Based Command-Line Interface of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import curses
from curses import textpad
import signal
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
class CursesUI(object):
"""Curses-based Command-line UI.
In this class, the methods with the prefix "_screen_" are the methods that
interact with the actual terminal using the curses library.
"""
CLI_PROMPT = "tfdbg> "
CLI_EXIT_COMMANDS = ["exit", "quit"]
CLI_TERMINATOR_KEY = 7 # Terminator key for input text box.
CLI_TAB_KEY = ord("\t")
REGEX_SEARCH_PREFIX = "/"
TENSOR_INDICES_NAVIGATION_PREFIX = "@"
ERROR_MESSAGE_PREFIX = "ERROR: "
INFO_MESSAGE_PREFIX = "INFO: "
# Possible Enter keys. 343 is curses key code for the num-pad Enter key when
# num lock is off.
CLI_CR_KEYS = [ord("\n"), ord("\r"), 343]
_SCROLL_REFRESH = "refresh"
_SCROLL_UP = "up"
_SCROLL_DOWN = "down"
_SCROLL_HOME = "home"
_SCROLL_END = "end"
_SCROLL_TO_LINE_INDEX = "scroll_to_line_index"
_FOREGROUND_COLORS = {
"white": curses.COLOR_WHITE,
"red": curses.COLOR_RED,
"green": curses.COLOR_GREEN,
"yellow": curses.COLOR_YELLOW,
"blue": curses.COLOR_BLUE,
"cyan": curses.COLOR_CYAN,
"magenta": curses.COLOR_MAGENTA,
"black": curses.COLOR_BLACK,
}
_BACKGROUND_COLORS = {
"white": curses.COLOR_WHITE,
"black": curses.COLOR_BLACK,
}
# Font attribute for search and highlighting.
_SEARCH_HIGHLIGHT_FONT_ATTR = "black_on_white"
_ARRAY_INDICES_COLOR_PAIR = "black_on_white"
_ERROR_TOAST_COLOR_PAIR = "red_on_white"
_INFO_TOAST_COLOR_PAIR = "blue_on_white"
_STATUS_BAR_COLOR_PAIR = "black_on_white"
def __init__(self, on_ui_exit=None):
"""Constructor of CursesUI.
Args:
on_ui_exit: (Callable) Callback invoked when the UI exits.
"""
self._screen_init()
self._screen_refresh_size()
# TODO(cais): Error out if the size of the screen is too small.
# Initialize some UI component size and locations.
self._init_layout()
self._command_handler_registry = (
debugger_cli_common.CommandHandlerRegistry())
# Create tab completion registry and register the empty-str (top-level)
# tab-completion context with it.
self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()
# Create top-level tab-completion context and register the exit and help
# commands.
self._tab_completion_registry.register_tab_comp_context(
[""], self.CLI_EXIT_COMMANDS +
[debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] +
debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)
self._command_history_store = debugger_cli_common.CommandHistory()
# Active list of command history, used in history navigation.
    # _command_history_store holds all the history commands the CLI has
    # received, up to a size limit. _active_command_history is the history
    # currently being navigated in, e.g., using the Up/Down keys. The latter
    # can be different from the former during prefixed or regex-based history
    # navigation, e.g., when the user enters the beginning of a command and
    # hits Up.
self._active_command_history = []
# Pointer to the current position in the history sequence.
# 0 means it is a new command being keyed in.
self._command_pointer = 0
self._command_history_limit = 100
self._pending_command = ""
# State related to screen output.
self._output_pad = None
self._output_pad_row = 0
self._output_array_pointer_indices = None
self._curr_unwrapped_output = None
self._curr_wrapped_output = None
# Register signal handler for SIGINT.
signal.signal(signal.SIGINT, self._interrupt_handler)
# Configurable callbacks.
self._on_ui_exit = on_ui_exit
def _init_layout(self):
"""Initialize the layout of UI components.
Initialize the location and size of UI components such as command textbox
and output region according to the terminal size.
"""
# NamedTuple for rectangular locations on screen
self.rectangle = collections.namedtuple("rectangle",
"top left bottom right")
# Height of command text box
self._command_textbox_height = 2
self._title_row = 0
# Top row index of the output pad.
# A "pad" is a curses object that holds lines of text and not limited to
# screen size. It can be rendered on the screen partially with scroll
# parameters specified.
self._output_top_row = 1
# Number of rows that the output pad has.
self._output_num_rows = (
self._max_y - self._output_top_row - self._command_textbox_height - 1)
# Row index of scroll information line: Taking into account the zero-based
# row indexing and the command textbox area under the scroll information
# row.
self._output_scroll_row = self._max_y - 1 - self._command_textbox_height
# Tab completion bottom row.
self._candidates_top_row = self._output_scroll_row - 4
self._candidates_bottom_row = self._output_scroll_row - 1
# Maximum number of lines the candidates display can have.
self._candidates_max_lines = int(self._output_num_rows / 2)
self.max_output_lines = 10000
# Regex search state.
self._curr_search_regex = None
self._unwrapped_regex_match_lines = []
# Size of view port on screen, which is always smaller or equal to the
# screen size.
self._output_pad_screen_height = self._output_num_rows - 1
self._output_pad_screen_width = self._max_x - 1
self._output_pad_screen_location = self.rectangle(
top=self._output_top_row,
left=0,
bottom=self._output_top_row + self._output_num_rows,
right=self._output_pad_screen_width)
def _screen_init(self):
"""Screen initialization.
Creates curses stdscr and initialize the color pairs for display.
"""
self._stdscr = curses.initscr()
self._command_window = None
# Prepare color pairs.
curses.start_color()
self._color_pairs = {}
color_index = 0
for fg_color in self._FOREGROUND_COLORS:
for bg_color in self._BACKGROUND_COLORS:
color_index += 1
curses.init_pair(color_index, self._FOREGROUND_COLORS[fg_color],
self._BACKGROUND_COLORS[bg_color])
color_name = fg_color
if bg_color != "black":
color_name += "_on_" + bg_color
self._color_pairs[color_name] = curses.color_pair(color_index)
# A_BOLD or A_BLINK is not really a "color". But place it here for
# convenience.
self._color_pairs["bold"] = curses.A_BOLD
self._color_pairs["blink"] = curses.A_BLINK
# Default color pair to use when a specified color pair does not exist.
self._default_color_pair = self._color_pairs["white"]
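  # For reference, the loop above registers 16 color pairs keyed by names such
  # as "red" (red on the default black background) or "red_on_white", plus the
  # pseudo-entries "bold" and "blink"; this is how constants like
  # _ERROR_TOAST_COLOR_PAIR = "red_on_white" resolve to curses attributes.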
def _screen_launch(self):
"""Launch the curses screen."""
curses.noecho()
curses.cbreak()
self._stdscr.keypad(1)
self._screen_create_command_window()
def _screen_create_command_window(self):
"""Create command window according to screen size."""
if self._command_window:
del self._command_window
self._command_window = curses.newwin(
self._command_textbox_height, self._max_x - len(self.CLI_PROMPT),
self._max_y - self._command_textbox_height, len(self.CLI_PROMPT))
def _screen_refresh(self):
self._stdscr.refresh()
def _screen_terminate(self):
"""Terminate the curses screen."""
self._stdscr.keypad(0)
curses.nocbreak()
curses.echo()
curses.endwin()
# Remove SIGINT handler.
signal.signal(signal.SIGINT, signal.SIG_DFL)
def run_ui(self, init_command=None, title=None, title_color=None):
"""Run the Curses CLI.
Args:
init_command: (str) Optional command to run on CLI start up.
title: (str) Optional title to display in the CLI.
title_color: (str) Optional color of the title, e.g., "yellow".
Returns:
An exit token of arbitrary type. Can be None.
"""
self._screen_launch()
# Optional initial command.
if init_command is not None:
self._dispatch_command(init_command)
if title is not None:
self._title(title, title_color=title_color)
# CLI main loop.
exit_token = self._ui_loop()
if self._on_ui_exit:
self._on_ui_exit()
self._screen_terminate()
return exit_token
def register_command_handler(self,
prefix,
handler,
help_info,
prefix_aliases=None):
"""A wrapper around CommandHandlerRegistry.register_command_handler().
In addition to calling the wrapped register_command_handler() method, this
method also registers the top-level tab-completion context based on the
command prefixes and their aliases.
See the doc string of the wrapped method for more details on the args.
Args:
prefix: (str) command prefix.
handler: (callable) command handler.
help_info: (str) help information.
prefix_aliases: (list of str) aliases of the command prefix.
"""
self._command_handler_registry.register_command_handler(
prefix, handler, help_info, prefix_aliases=prefix_aliases)
self._tab_completion_registry.extend_comp_items("", [prefix])
if prefix_aliases:
self._tab_completion_registry.extend_comp_items("", prefix_aliases)
def register_tab_comp_context(self, *args, **kwargs):
"""Wrapper around TabCompletionRegistry.register_tab_comp_context()."""
self._tab_completion_registry.register_tab_comp_context(*args, **kwargs)
def set_help_intro(self, help_intro):
"""Set an introductory message to the help output of the command registry.
Args:
help_intro: (RichTextLines) Rich text lines appended to the beginning of
the output of the command "help", as introductory information.
"""
self._command_handler_registry.set_help_intro(help_intro=help_intro)
def get_help(self):
return self._command_handler_registry.get_help()
def _screen_create_command_textbox(self, existing_command):
"""Create command textbox on screen.
Args:
existing_command: (str) A command string to put in the textbox right
after its creation.
"""
# Display the tfdbg prompt.
self._stdscr.addstr(self._max_y - self._command_textbox_height, 0,
self.CLI_PROMPT, curses.A_BOLD)
self._stdscr.refresh()
self._command_window.clear()
# Command text box.
self._command_textbox = textpad.Textbox(
self._command_window, insert_mode=True)
# Enter existing command.
self._auto_key_in(existing_command)
def _ui_loop(self):
"""Command-line UI loop.
Returns:
An exit token of arbitrary type. The token can be None.
"""
while True:
# Enter history command if pointer is in history (> 0):
if self._command_pointer > 0:
existing_command = self._active_command_history[-self._command_pointer]
else:
existing_command = self._pending_command
self._screen_create_command_textbox(existing_command)
command, terminator, pending_command_changed = self._get_user_command()
if terminator in self.CLI_CR_KEYS:
exit_token = self._dispatch_command(command)
if exit_token is not None:
return exit_token
elif terminator == self.CLI_TAB_KEY:
tab_completed = self._tab_complete(command)
self._pending_command = tab_completed
        self._command_pointer = 0
elif pending_command_changed:
self._pending_command = command
return
def _get_user_command(self):
"""Get user command from UI.
Returns:
command: (str) The user-entered command.
terminator: (str) Terminator type for the command.
If command is a normal command entered with the Enter key, the value
will be the key itself. If this is a tab completion call (using the
Tab key), the value will reflect that as well.
pending_command_changed: (bool) If the pending command has changed.
Used during command history navigation.
"""
# First, reset textbox state variables.
self._textbox_curr_terminator = None
self._textbox_pending_command_changed = False
command = self._screen_get_user_command()
command = self._strip_terminator(command)
return (command, self._textbox_curr_terminator,
self._textbox_pending_command_changed)
def _screen_get_user_command(self):
return self._command_textbox.edit(validate=self._on_textbox_keypress)
def _strip_terminator(self, command):
for v in self.CLI_CR_KEYS:
if v < 256:
command = command.replace(chr(v), "")
return command.strip()
def _screen_refresh_size(self):
self._max_y, self._max_x = self._stdscr.getmaxyx()
def _dispatch_command(self, command):
"""Dispatch user command.
Args:
command: (str) Command to dispatch.
Returns:
An exit token object. None value means that the UI loop should not exit.
A non-None value means the UI loop should exit.
"""
if command in self.CLI_EXIT_COMMANDS:
# Explicit user command-triggered exit: EXPLICIT_USER_EXIT as the exit
# token.
return debugger_cli_common.EXPLICIT_USER_EXIT
if command:
self._command_history_store.add_command(command)
if (command.startswith(self.REGEX_SEARCH_PREFIX) and
self._curr_unwrapped_output):
if len(command) > len(self.REGEX_SEARCH_PREFIX):
# Command is like "/regex". Perform regex search.
regex = command[len(self.REGEX_SEARCH_PREFIX):]
self._curr_search_regex = regex
self._display_output(self._curr_unwrapped_output, highlight_regex=regex)
elif self._unwrapped_regex_match_lines:
# Command is "/". Continue scrolling down matching lines.
self._display_output(
self._curr_unwrapped_output,
is_refresh=True,
highlight_regex=self._curr_search_regex)
self._command_pointer = 0
self._pending_command = ""
return
elif command.startswith(self.TENSOR_INDICES_NAVIGATION_PREFIX):
indices_str = command[1:].strip()
if indices_str:
try:
indices = command_parser.parse_indices(indices_str)
omitted, line_index, _, _ = tensor_format.locate_tensor_element(
self._curr_wrapped_output, indices)
if not omitted:
self._scroll_output(
self._SCROLL_TO_LINE_INDEX, line_index=line_index)
except Exception as e: # pylint: disable=broad-except
self._error_toast(str(e))
else:
self._error_toast("Empty indices.")
return
try:
prefix, args, output_file_path = self._parse_command(command)
except SyntaxError as e:
self._error_toast(str(e))
return
if not prefix:
# Empty command: take no action. Should not exit.
return
screen_info = {"cols": self._max_x}
exit_token = None
if self._command_handler_registry.is_registered(prefix):
try:
screen_output = self._command_handler_registry.dispatch_command(
prefix, args, screen_info=screen_info)
except debugger_cli_common.CommandLineExit as e:
exit_token = e.exit_token
else:
screen_output = debugger_cli_common.RichTextLines([
self.ERROR_MESSAGE_PREFIX + "Invalid command prefix \"%s\"" % prefix
])
# Clear active command history. Until next up/down history navigation
# occurs, it will stay empty.
self._active_command_history = []
if exit_token is not None:
return exit_token
self._display_output(screen_output)
if output_file_path:
try:
screen_output.write_to_file(output_file_path)
self._info_toast("Wrote output to %s" % output_file_path)
except Exception: # pylint: disable=broad-except
self._error_toast("Failed to write output to %s" % output_file_path)
self._command_pointer = 0
self._pending_command = ""
def _parse_command(self, command):
"""Parse a command string into prefix and arguments.
Args:
command: (str) Command string to be parsed.
Returns:
prefix: (str) The command prefix.
args: (list of str) The command arguments (i.e., not including the
prefix).
output_file_path: (str or None) The path to save the screen output
to (if any).
"""
command = command.strip()
if not command:
return "", [], None
command_items = command_parser.parse_command(command)
command_items, output_file_path = command_parser.extract_output_file_path(
command_items)
return command_items[0], command_items[1:], output_file_path
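  # Illustrative example of the parse above (the command name, tensor name and
  # path are hypothetical): parsing
  #   "pt hidden/Relu:0 -a > /tmp/relu.txt"
  # would yield prefix "pt", args ["hidden/Relu:0", "-a"] and, assuming
  # extract_output_file_path() strips a trailing "> path" redirection,
  # output_file_path "/tmp/relu.txt".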
def _screen_gather_textbox_str(self):
"""Gather the text string in the command text box.
Returns:
(str) the current text string in the command textbox, excluding any
return keys.
"""
txt = self._command_textbox.gather()
return txt.strip()
def _on_textbox_keypress(self, x):
"""Text box key validator: Callback of key strokes.
Handles a user's keypress in the input text box. Translates certain keys to
terminator keys for the textbox to allow its edit() method to return.
Also handles special key-triggered events such as PgUp/PgDown scrolling of
the screen output.
Args:
x: (int) Key code.
Returns:
(int) A translated key code. In most cases, this is identical to the
input x. However, if x is a Return key, the return value will be
CLI_TERMINATOR_KEY, so that the text box's edit() method can return.
Raises:
TypeError: If the input x is not of type int.
"""
if not isinstance(x, int):
raise TypeError("Key validator expected type int, received type %s" %
type(x))
if x in self.CLI_CR_KEYS:
# Make Enter key the terminator
self._textbox_curr_terminator = x
return self.CLI_TERMINATOR_KEY
elif x == self.CLI_TAB_KEY:
self._textbox_curr_terminator = self.CLI_TAB_KEY
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_PPAGE:
self._scroll_output(self._SCROLL_UP)
return x
elif x == curses.KEY_NPAGE:
self._scroll_output(self._SCROLL_DOWN)
return x
elif x == curses.KEY_HOME:
self._scroll_output(self._SCROLL_HOME)
return x
elif x == curses.KEY_END:
self._scroll_output(self._SCROLL_END)
return x
elif x in [curses.KEY_UP, curses.KEY_DOWN]:
# Command history navigation.
if not self._active_command_history:
hist_prefix = self._screen_gather_textbox_str()
self._active_command_history = (
self._command_history_store.lookup_prefix(
hist_prefix, self._command_history_limit))
if self._active_command_history:
if x == curses.KEY_UP:
if self._command_pointer < len(self._active_command_history):
self._command_pointer += 1
elif x == curses.KEY_DOWN:
if self._command_pointer > 0:
self._command_pointer -= 1
else:
self._command_pointer = 0
self._textbox_curr_terminator = x
# Force return from the textbox edit(), so that the textbox can be
# redrawn with a history command entered.
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_RESIZE:
# Respond to terminal resize.
self._screen_refresh_size()
self._init_layout()
self._screen_create_command_window()
if self._curr_unwrapped_output is not None:
# Force render screen output again, under new screen size.
self._output_pad = self._display_output(
self._curr_unwrapped_output, is_refresh=True)
# Force return from the textbox edit(), so that the textbox can be
# redrawn.
return self.CLI_TERMINATOR_KEY
else:
# Mark the pending command as modified.
self._textbox_pending_command_changed = True
# Invalidate active command history.
self._command_pointer = 0
self._active_command_history = []
return x
def _title(self, title, title_color=None):
"""Display title.
Args:
title: (str) The title to display.
title_color: (str) Color of the title, e.g., "yellow".
"""
# Pad input title str with "-" and space characters to make it pretty.
self._title_line = "--- %s " % title
if len(self._title_line) < self._max_x:
self._title_line += "-" * (self._max_x - len(self._title_line))
self._screen_draw_text_line(
self._title_row, self._title_line, color=title_color)
def _auto_key_in(self, command):
"""Automatically key in a command to the command Textbox.
Args:
command: The command, as a string.
"""
for c in command:
self._command_textbox.do_command(ord(c))
def _screen_draw_text_line(self, row, line, attr=curses.A_NORMAL, color=None):
"""Render a line of text on the screen.
Args:
row: (int) Row index.
line: (str) The line content.
attr: curses font attribute.
color: (str) font foreground color name.
Raises:
TypeError: If row is not of type int.
"""
if not isinstance(row, int):
raise TypeError("Invalid type in row")
if len(line) > self._max_x:
line = line[:self._max_x]
if color is None:
self._stdscr.addstr(row, 0, line, attr)
else:
self._stdscr.addstr(row, 0, line, self._color_pairs[color])
self._screen_refresh()
def _screen_new_output_pad(self, rows, cols):
"""Generate a new pad on the screen.
Args:
rows: (int) Number of rows the pad will have: not limited to screen size.
cols: (int) Number of columns the pad will have: not limited to screen
size.
Returns:
A curses textpad object.
"""
return curses.newpad(rows, cols)
def _screen_display_output(self, output):
"""Actually render text output on the screen.
Wraps the lines according to screen width. Pad lines below according to
screen height so that the user can scroll the output to a state where
the last non-empty line is on the top of the screen. Then renders the
lines on the screen.
Args:
output: (RichTextLines) text lines to display on the screen. These lines
may have widths exceeding the screen width. This method will take care
of the wrapping.
Returns:
(List of int) A list of line indices, in the wrapped output, where there
are regex matches.
"""
# Wrap the output lines according to screen width.
self._curr_wrapped_output, wrapped_line_indices = (
debugger_cli_common.wrap_rich_text_lines(output, self._max_x - 1))
# Append lines to curr_wrapped_output so that the user can scroll to a
# state where the last text line is on the top of the output area.
self._curr_wrapped_output.lines.extend([""] * (self._output_num_rows - 1))
# Limit number of lines displayed to avoid curses overflow problems.
if self._curr_wrapped_output.num_lines() > self.max_output_lines:
self._curr_wrapped_output = self._curr_wrapped_output.slice(
0, self.max_output_lines)
self._curr_wrapped_output.lines.append("Output cut off at %d lines!" %
self.max_output_lines)
self._curr_wrapped_output.font_attr_segs[self.max_output_lines] = [
(0, len(output.lines[-1]), "magenta")
]
(self._output_pad, self._output_pad_height,
self._output_pad_width) = self._display_lines(self._curr_wrapped_output,
self._output_num_rows)
# The indices of lines with regex matches (if any) need to be mapped to
# indices of wrapped lines.
return [
wrapped_line_indices[line]
for line in self._unwrapped_regex_match_lines
]
def _display_output(self, output, is_refresh=False, highlight_regex=None):
"""Display text output in a scrollable text pad.
This method does some preprocessing on the text lines, render them on the
screen and scroll to the appropriate line. These are done according to regex
highlighting requests (if any), scroll-to-next-match requests (if any),
and screen refresh requests (if any).
    TODO(cais): Separate these unrelated requests to increase clarity and
maintainability.
Args:
output: A RichTextLines object that is the screen output text.
is_refresh: (bool) Is this a refreshing display with existing output.
highlight_regex: (str) Optional string representing the regex used to
search and highlight in the current screen output.
"""
if highlight_regex:
try:
output = debugger_cli_common.regex_find(
output, highlight_regex, font_attr=self._SEARCH_HIGHLIGHT_FONT_ATTR)
except ValueError as e:
self._error_toast(str(e))
return
if not is_refresh:
# Perform new regex search on the current output.
self._unwrapped_regex_match_lines = output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY]
else:
# Continue scrolling down.
self._output_pad_row += 1
else:
self._curr_unwrapped_output = output
self._unwrapped_regex_match_lines = []
# Display output on the screen.
wrapped_regex_match_lines = self._screen_display_output(output)
# Now that the text lines are displayed on the screen scroll to the
# appropriate line according to previous scrolling state and regex search
# and highlighting state.
if highlight_regex:
next_match_line = -1
for match_line in wrapped_regex_match_lines:
if match_line >= self._output_pad_row:
next_match_line = match_line
break
if next_match_line >= 0:
self._scroll_output(
self._SCROLL_TO_LINE_INDEX, line_index=next_match_line)
else:
# Regex search found no match >= current line number. Display message
# stating as such.
self._toast("Pattern not found", color=self._ERROR_TOAST_COLOR_PAIR)
elif is_refresh:
self._scroll_output(self._SCROLL_REFRESH)
else:
self._output_pad_row = 0
self._scroll_output(self._SCROLL_HOME)
def _display_lines(self, output, min_num_rows):
"""Display RichTextLines object on screen.
Args:
output: A RichTextLines object.
min_num_rows: (int) Minimum number of output rows.
Returns:
1) The text pad object used to display the text.
2) (int) number of rows of the text pad, which may exceed screen size.
3) (int) number of columns of the text pad.
Raises:
ValueError: If input argument "output" is invalid.
"""
if not isinstance(output, debugger_cli_common.RichTextLines):
raise ValueError(
"Output is required to be an instance of RichTextLines, but is not.")
self._screen_refresh()
# Number of rows the output area will have.
rows = max(min_num_rows, len(output.lines))
# Size of the output pad, which may exceed screen size and require
# scrolling.
cols = self._max_x - 1
# Create new output pad.
pad = self._screen_new_output_pad(rows, cols)
for i in xrange(len(output.lines)):
if i in output.font_attr_segs:
self._screen_add_line_to_output_pad(
pad, i, output.lines[i], color_segments=output.font_attr_segs[i])
else:
self._screen_add_line_to_output_pad(pad, i, output.lines[i])
return pad, rows, cols
def _screen_add_line_to_output_pad(self, pad, row, txt, color_segments=None):
"""Render a line in a text pad.
Assumes: segments in color_segments are sorted in ascending order of the
beginning index.
    Note: Gaps between the segments are allowed and will be filled in with a
default color.
Args:
pad: The text pad to render the line in.
row: Row index, as an int.
txt: The text to be displayed on the specified row, as a str.
color_segments: A list of 3-tuples. Each tuple represents the beginning
and the end of a color segment, in the form of a right-open interval:
[start, end). The last element of the tuple is a color string, e.g.,
"red".
    Raises:
TypeError: If color_segments is not of type list.
"""
if not color_segments:
pad.addstr(row, 0, txt, self._default_color_pair)
return
if not isinstance(color_segments, list):
raise TypeError("Input color_segments needs to be a list, but is not.")
all_segments = []
all_color_pairs = []
# Process the beginning.
if color_segments[0][0] == 0:
pass
else:
all_segments.append((0, color_segments[0][0]))
all_color_pairs.append(self._default_color_pair)
for (curr_start, curr_end, curr_color), (next_start, _, _) in zip(
color_segments, color_segments[1:] + [(len(txt), None, None)]):
all_segments.append((curr_start, curr_end))
all_color_pairs.append(
self._color_pairs.get(curr_color, self._default_color_pair))
if curr_end < next_start:
# Fill in the gap with the default color.
all_segments.append((curr_end, next_start))
all_color_pairs.append(self._default_color_pair)
# Finally, draw all the segments.
for segment, color_pair in zip(all_segments, all_color_pairs):
pad.addstr(row, segment[0], txt[segment[0]:segment[1]], color_pair)
def _screen_scroll_output_pad(self, pad, viewport_top, viewport_left,
screen_location_top, screen_location_left,
screen_location_bottom, screen_location_right):
pad.refresh(viewport_top, viewport_left, screen_location_top,
screen_location_left, screen_location_bottom,
screen_location_right)
def _scroll_output(self, direction, line_index=None):
"""Scroll the output pad.
Args:
direction: _SCROLL_REFRESH, _SCROLL_UP, _SCROLL_DOWN, _SCROLL_HOME or
_SCROLL_END, _SCROLL_TO_LINE_INDEX
line_index: (int) Specifies the zero-based line index to scroll to.
Applicable only if direction is _SCROLL_TO_LINE_INDEX.
Raises:
ValueError: On invalid scroll direction.
TypeError: If line_index is not int and direction is
_SCROLL_TO_LINE_INDEX.
"""
if not self._output_pad:
# No output pad is present. Do nothing.
return
if direction == self._SCROLL_REFRESH:
pass
elif direction == self._SCROLL_UP:
# Scroll up
if self._output_pad_row - 1 >= 0:
self._output_pad_row -= 1
elif direction == self._SCROLL_DOWN:
# Scroll down
if self._output_pad_row + 1 < (
self._output_pad_height - self._output_pad_screen_height):
self._output_pad_row += 1
elif direction == self._SCROLL_HOME:
# Scroll to top
self._output_pad_row = 0
elif direction == self._SCROLL_END:
# Scroll to bottom
self._output_pad_row = (
self._output_pad_height - self._output_pad_screen_height - 1)
elif direction == self._SCROLL_TO_LINE_INDEX:
if not isinstance(line_index, int):
raise TypeError("Invalid line_index type (%s) under mode %s" %
(type(line_index), self._SCROLL_TO_LINE_INDEX))
self._output_pad_row = line_index
else:
raise ValueError("Unsupported scroll mode: %s" % direction)
# Actually scroll the output pad: refresh with new location.
self._screen_scroll_output_pad(self._output_pad, self._output_pad_row, 0,
self._output_pad_screen_location.top,
self._output_pad_screen_location.left,
self._output_pad_screen_location.bottom,
self._output_pad_screen_location.right)
if self._output_pad_height > self._output_pad_screen_height + 1:
# Display information about the scrolling of tall screen output.
scroll_percentage = 100.0 * (min(
1.0,
float(self._output_pad_row) /
(self._output_pad_height - self._output_pad_screen_height - 1)))
if self._output_pad_row == 0:
scroll_directions = " (PgDn)"
elif self._output_pad_row >= (
self._output_pad_height - self._output_pad_screen_height - 1):
scroll_directions = " (PgUp)"
else:
scroll_directions = " (PgDn/PgUp)"
self._scroll_info = "--- Scroll%s: %.2f%% " % (scroll_directions,
scroll_percentage)
self._output_array_pointer_indices = self._show_array_indices()
# Add array indices information to scroll message.
if self._output_array_pointer_indices:
if self._output_array_pointer_indices[0]:
self._scroll_info += self._format_indices(
self._output_array_pointer_indices[0])
self._scroll_info += "-"
if self._output_array_pointer_indices[-1]:
self._scroll_info += self._format_indices(
self._output_array_pointer_indices[-1])
self._scroll_info += " "
if len(self._scroll_info) < self._max_x:
self._scroll_info += "-" * (self._max_x - len(self._scroll_info))
self._screen_draw_text_line(
self._output_scroll_row,
self._scroll_info,
color=self._STATUS_BAR_COLOR_PAIR)
else:
# Screen output is not tall enough to cause scrolling.
self._scroll_info = "-" * self._max_x
self._screen_draw_text_line(
self._output_scroll_row,
self._scroll_info,
color=self._STATUS_BAR_COLOR_PAIR)
def _format_indices(self, indices):
# Remove the spaces to make it compact.
return repr(indices).replace(" ", "")
def _show_array_indices(self):
"""Show array indices for the lines at the top and bottom of the output.
For the top line and bottom line of the output display area, show the
element indices of the array being displayed.
Returns:
      If either the top or the bottom row has any matching array indices,
a dict from line index (0 being the top of the display area, -1
being the bottom of the display area) to array element indices. For
example:
{0: [0, 0], -1: [10, 0]}
Otherwise, None.
"""
indices_top = self._show_array_index_at_line(0)
bottom_line_index = (self._output_pad_screen_location.bottom -
self._output_pad_screen_location.top - 1)
indices_bottom = self._show_array_index_at_line(bottom_line_index)
if indices_top or indices_bottom:
return {0: indices_top, -1: indices_bottom}
else:
return None
def _show_array_index_at_line(self, line_index):
"""Show array indices for the specified line in the display area.
Uses the line number to array indices map in the annotations field of the
RichTextLines object being displayed.
If the displayed RichTextLines object does not contain such a mapping,
will do nothing.
Args:
line_index: (int) 0-based line index from the top of the display area.
        For example, if line_index == 0, this method will display the array
indices for the line currently at the top of the display area.
Returns:
(list) The array indices at the specified line, if available. None, if
not available.
"""
# Examine whether the index information is available for the specified line
# number.
pointer = self._output_pad_row + line_index
if (pointer in self._curr_wrapped_output.annotations and
"i0" in self._curr_wrapped_output.annotations[pointer]):
indices = self._curr_wrapped_output.annotations[pointer]["i0"]
array_indices_str = self._format_indices(indices)
array_indices_info = "@" + array_indices_str
self._toast(
array_indices_info,
color=self._ARRAY_INDICES_COLOR_PAIR,
line_index=self._output_pad_screen_location.top + line_index)
return indices
else:
return None
def _tab_complete(self, command_str):
"""Perform tab completion.
Obtains tab completion candidates.
If there are no candidates, return command_str and take no other actions.
If there are candidates, display the candidates on screen and return
command_str + (common prefix of the candidates).
Args:
command_str: (str) The str in the command input textbox when Tab key is
hit.
Returns:
(str) Completed string. Could be the same as command_str if no completion
candidate is available. If candidate(s) are available, return command_str
appended by the common prefix of the candidates.
"""
command_str = command_str.lstrip()
if not command_str:
# Empty (top-level) context.
context = ""
prefix = ""
items = []
else:
items = command_str.split(" ")
if len(items) == 1:
# Single word: top-level context.
context = ""
prefix = items[0]
else:
# Multiple words.
context = items[0]
prefix = items[-1]
candidates, common_prefix = self._tab_completion_registry.get_completions(
context, prefix)
if candidates and len(candidates) > 1:
self._display_candidates(candidates)
else:
# In the case of len(candidates) == 1, the single completion will be
# entered to the textbox automatically. So there is no need to show any
# candidates.
self._display_candidates([])
if common_prefix:
# Common prefix is not None and non-empty. The completed string will
# incorporate the common prefix.
return " ".join(items[:-1] + [common_prefix])
else:
return " ".join(items)
def _display_candidates(self, candidates):
"""Show candidates (e.g., tab-completion candidates) on multiple lines.
Args:
candidates: (list of str) candidates.
"""
if self._curr_unwrapped_output:
# Force refresh screen output.
self._scroll_output(self._SCROLL_REFRESH)
if not candidates:
return
candidates_prefix = "Candidates: "
candidates_line = candidates_prefix + " ".join(candidates)
candidates_output = debugger_cli_common.RichTextLines(
candidates_line,
font_attr_segs={
0: [(len(candidates_prefix), len(candidates_line), "yellow")]
})
candidates_output, _ = debugger_cli_common.wrap_rich_text_lines(
candidates_output, self._max_x - 2)
# Calculate how many lines the candidate text should occupy. Limit it to
# a maximum value.
candidates_num_rows = min(
len(candidates_output.lines), self._candidates_max_lines)
self._candidates_top_row = (
self._candidates_bottom_row - candidates_num_rows + 1)
# Render the candidate text on screen.
pad, _, _ = self._display_lines(candidates_output, 0)
self._screen_scroll_output_pad(
pad, 0, 0, self._candidates_top_row, 0,
self._candidates_top_row + candidates_num_rows - 1, self._max_x - 1)
def _toast(self, message, color=None, line_index=None):
"""Display a one-line message on the screen.
By default, the toast is displayed in the line right above the scroll bar.
But the line location can be overridden with the line_index arg.
Args:
message: (str) the message to display.
color: (str) optional color attribute for the message.
line_index: (int) line index.
"""
pad, _, _ = self._display_lines(
debugger_cli_common.RichTextLines(
message,
font_attr_segs={0: [(0, len(message), color or "white")]}),
0)
right_end = min(len(message), self._max_x - 1)
if line_index is None:
line_index = self._output_scroll_row - 1
self._screen_scroll_output_pad(pad, 0, 0, line_index, 0, line_index,
right_end)
def _error_toast(self, message):
"""Display a one-line error message on screen.
Args:
message: The error message, without the preceding "ERROR: " substring.
"""
self._toast(
self.ERROR_MESSAGE_PREFIX + message, color=self._ERROR_TOAST_COLOR_PAIR)
def _info_toast(self, message):
"""Display a one-line informational message on screen.
Args:
message: The informational message.
"""
self._toast(
self.INFO_MESSAGE_PREFIX + message, color=self._INFO_TOAST_COLOR_PAIR)
def _interrupt_handler(self, signal_num, frame):
_ = signal_num # Unused.
_ = frame # Unused.
self._screen_terminate()
print("\ntfdbg: caught SIGINT; calling sys.exit(1).", file=sys.stderr)
sys.exit(1)
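# Illustrative sketch (standalone, hypothetical helper, not part of the class
# above): the context/prefix split used by _tab_complete treats the first word
# of the command as the completion context and the last word as the prefix to
# complete.
def _split_for_completion(command_str):
  items = command_str.lstrip().split(" ")
  if len(items) <= 1:
    return "", (items[0] if items else "")
  return items[0], items[-1]
assert _split_for_completion("lt") == ("", "lt")
assert _split_for_completion("print_tensor hidden/weights") == (
    "print_tensor", "hidden/weights")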
|
|
"""
The MIT License (MIT)
Copyright (c) 2016 Daniele Linguaglossa <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import sys
import json
import socket
from ast import literal_eval
from .conf import CONF_PATH
from argparse import Namespace
from .pjf_version import PYJFUZZ_LOGO
from .pjf_grammar import generate_json
from . import GRAMMAR_PATH
from .errors import PJFInvalidType
class PJFConfiguration(Namespace):
"""
    A class that represents the PyJFuzz startup configuration; it performs the standard checks
"""
def __init__(self, arguments):
"""
        Initialize the configuration from the parsed command-line arguments
"""
super(PJFConfiguration, self).__init__(**arguments.__dict__)
setattr(self, "generate_json", generate_json)
setattr(self, "grammar_path", GRAMMAR_PATH)
if self.json:
if type(self.json) != dict:
if type(self.json) != list:
raise PJFInvalidType(self.json, dict)
if self.level:
if type(self.level) != int:
raise PJFInvalidType(self.level, int)
if self.techniques:
if type(self.techniques) != str:
raise PJFInvalidType(self.techniques, str)
if self.command:
if type(self.command) != list:
raise PJFInvalidType(self.command, list)
if self.parameters:
if type(self.parameters) != str:
raise PJFInvalidType(self.parameters, str)
if not self.nologo:
sys.stderr.write("{0}\n".format(PYJFUZZ_LOGO))
if self.recheck_ports:
if self.fuzz_web or self.web_server or self.browser_auto:
with open(CONF_PATH, "rb") as config:
setattr(self, "ports", self.check_ports(json.loads(config.read())))
config.close()
if self.parameters:
self.parameters = str(self.parameters).split(",")
if self.techniques:
techniques = {
"C": [10, 5, 13],
"H": [9],
"P": [6, 2, 8],
"T": [11, 12],
"R": [14],
"S": [3, 1],
"X": [0, 4, 7]
}
temp = []
for technique in self.techniques:
if technique in techniques:
temp += techniques[str(technique)]
self.techniques = temp
else:
self.techniques = list(range(0, 14))
if not self.utf8:
self.utf8 = False
if not self.command:
self.command = ["echo"]
self.stdin = True
else:
if "@@" in self.command:
self.stdin = False
else:
self.stdin = True
if not self.parameters:
self.parameters = []
if self.auto:
self.json = self.generate_json(self.grammar_path)
def __contains__(self, items):
if type(items) != list:
raise PJFInvalidType(type(items), list)
for element in items:
try:
getattr(self, element)
except AttributeError:
return False
return True
def __getattr__(self, item):
"""
Get a parameter from configuration, return False if parameter was not found
"""
if item in self.__dict__:
return self.__dict__[item]
else:
if item == "recheck_ports":
return True
return False
def start(self):
"""
Parse the command line and start PyJFuzz
"""
from .pjf_worker import PJFWorker
worker = PJFWorker(self)
if self.update_pjf:
worker.update_library()
elif self.browser_auto:
worker.browser_autopwn()
elif self.fuzz_web:
worker.web_fuzzer()
elif self.json:
if not self.web_server and not self.ext_fuzz and not self.cmd_fuzz:
worker.fuzz()
elif self.ext_fuzz:
if self.stdin:
worker.fuzz_stdin()
else:
worker.fuzz_command_line()
elif self.cmd_fuzz:
if self.stdin:
worker.fuzz_external(True)
else:
worker.fuzz_external()
else:
worker.start_http_server()
elif self.json_file:
worker.start_file_fuzz()
elif self.process_to_monitor:
worker.start_process_monitor()
def check_ports(self, ports):
for p in ports["servers"]:
try:
p_checker = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p_checker.bind(("127.0.0.1", ports["servers"][p]))
p_checker.close()
except socket.error as e:
                if e.errno == 48:  # EADDRINUSE as reported on BSD/macOS
                    print("[\033[92mINFO\033[0m] Port %s is already in use, switching to a different port" %
ports["servers"][p])
ports["servers"][p] = self.get_free_port()
return ports
def get_free_port(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 0))
port = s.getsockname()[1]
s.close()
return port
@staticmethod
def valid_dir(value):
import argparse
parser = argparse.ArgumentParser()
if os.path.isdir(value):
return value
else:
raise parser.error("Directory does not exists!")
@staticmethod
def valid_file(value):
import argparse
parser = argparse.ArgumentParser()
try:
with open(value, "rb") as html_file:
html_file.close()
return value
except IOError:
raise parser.error("File does not exists!")
@staticmethod
def valid_json(value):
import argparse
parser = argparse.ArgumentParser()
try:
try:
value = literal_eval(value)
except:
value = json.loads(value)
if type(value) not in (dict, list):
raise SyntaxError
except SyntaxError:
raise parser.error("Please insert a valid JSON value!")
return value
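# Illustrative sketch (standalone, mirrors PJFConfiguration.get_free_port
# above; the helper name is hypothetical): binding a socket to port 0 asks the
# OS for an ephemeral port, which is then read back with getsockname(). The
# port is only reserved until the socket is closed, so a race with other
# processes is possible.
def _free_port_example():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("127.0.0.1", 0))
    port = s.getsockname()[1]
    s.close()
    return port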
|
|
"""Exposes a set of layout and utility functions to be invoked from 164. These
These functions have access to the global browser instance.
"""
import json
from urllib import urlencode, urlopen
from urlparse import urlparse, urlunparse, parse_qsl
from warnings import warn
import sys
from PyQt4.QtNetwork import QNetworkReply
from browser import Browser, Renderer
import grammar_parser
import parser_generator
import interpreter
tmlGrm = './tml.grm'
cs164Grm = 'cs164c.grm'
cs164Lib = 'library.164'
def initialize():
global _browser
# Initialize reusable parts of the browser infrastructure.
tmlParser = parser_generator.makeParser(grammar_parser.parseFile(tmlGrm))
print >> sys.stderr, 'TML parser loaded.'
cs164Parser = parser_generator.makeParser(grammar_parser.parseFile(cs164Grm))
print >> sys.stderr, 'CS164 parser loaded.'
# TODO: Currently, the interpreter "object" is the module itself. Later,
# expose an API for constructing interpreters.
renderer = Renderer(interpreter)
print >> sys.stderr, 'Renderer initialized.'
_browser = Browser(tmlParser, cs164Parser, interpreter, renderer)
interpreter.ExecGlobal(cs164Parser.parse(open(cs164Lib).read()))
print >> sys.stderr, 'Browser initialized.'
print >> sys.stderr, 'Loading the layout engine modules...'
# Load the layout engine, which is implemented in 164.
dependencies = ['object', 'node', 'window', 'box',
'hbox', 'vbox', 'link', 'word', 'img',
'script', 'bquery', 'rx', 'textbox',
#TODO: Uncomment for PA8 'pbar', 'ibox', 'script',
#TODO: Uncomment for PA9 'rx',
#TODO: Uncomment for PA8 'bquery',
'layout', 'browser']
for dependency in dependencies:
print >> sys.stderr, '\tloading ' + dependency
ast = cs164Parser.parse(open('./browser/{0}.164'.format(dependency)).read())
interpreter.ExecGlobal(ast)
print >> sys.stderr, 'Done.'
def clear():
_browser.clear(_browser.window)
def drawBox(x, y, width, height, properties, widget=None):
return _browser.renderer.drawBox(x, y, width, height, properties,
_browser.window.canvas, widget)
def drawWord(word, x, y, properties, widget=None):
return _browser.renderer.drawWord(word, x, y, properties,
_browser.window.canvas, widget)
def drawInput(x, y, width, height, text, widget=None):
inp = _browser.renderer.drawInput(x, y, width, height, '',
_browser.window.canvas, widget)
inp.setPlaceholderText(text)
return inp
def drawPBar(x, y, width, height, value, widget=None):
return _browser.renderer.drawPBar(x, y, width, height, value,
_browser.window.canvas, widget)
def drawImg(x, y, width, height, rawdata, widget=None):
return _browser.renderer.drawImg(x, y, width, height, rawdata,
_browser.window.canvas, widget)
def getWordDimensions(word, properties):
return _browser.renderer.getWordDimensions(word, properties)
def setDOMWindowSize(width, height):
"""Sets the size of the window in the DOM. This is not the application
window.
"""
_browser.window.canvas.setMinimumSize(width, height)
_browser.window.canvas.resize(width, height)
def parseTML(tml):
"""Returns the raw DOM object constructed by the TML parser."""
parsed = _browser.tmlParser.parse(tml)
return parsed
def evaluateScript(url):
"""Evaluates the script at the given URL."""
connection = urlopen(url)
code = connection.read()
connection.close()
_browser.execScript(code)
def load(url):
print >> sys.stderr, 'Loading', url
_browser.load(url)
def create164Callback(node, code):
"""Run the specified string of 164 code when an event is fired on the
target node. The 'code' argument may be a 164 function instead of a string
of code to allow the 164 programmer to pass in lambdas as callbacks.
"""
assert (isinstance(code, basestring) or
isinstance(code, interpreter.FunVal)), \
'callback must be a string or closure'
def callback(ev = None, relayout = False):
if isinstance(code, basestring):
_browser.execScript(code, {'self': node, 'event': ev})
else:
_browser.interpreter.ExecFun(code, [ev])
if (relayout):
_browser.relayout()
return callback
def removeHandlers(node):
node['__qt'].clicked.disconnect()
def addEventListener(node, event, code, context=None):
"""Subscribes to an event on the given node, executing the specified code
when such an event is triggered. A "context" node may be specified to
indicate the node that is the "self" object for the callback.
"""
callback = create164Callback(node if context is None else context, code)
# For each event type, we need to define custom handlers (Qt slots) that
# convert Qt events into 164 event objects.
if event == 'click':
def clickHandler(ev):
event = {'x': ev.x(), 'y': ev.y(),
'screenX': ev.globalX(), 'screenY': ev.globalY()}
callback(event, True)
node['__qt'].clicked.connect(clickHandler)
elif event == 'textChanged':
def changeHandler(text):
event = {'text': text}
callback(event, False)
node['__qt'].textChanged.connect(changeHandler)
elif event == 'returnPressed':
def returnHandler():
event = {'text': str(node['__qt'].text())}
node['__qt'].setText("")
callback(event, False)
node['__qt'].returnPressed.connect(returnHandler)
elif event == 'focusChanged':
def focusHandler(ev):
event = {'gotFocus': ev.gotFocus(), 'lostFocus': ev.lostFocus()}
callback(event, False)
node['__qt'].focused.connect(focusHandler)
elif event == 'editingFinished':
def editHandler():
callback({}, False)
node['__qt'].editingFinished.connect(editHandler)
else:
raise TypeError("Unknown event " + str(event))
def addAnchorTarget(node, url):
node['__qt'].clicked.connect(lambda _: load(url))
def addTimerCallback(window, ms, code, repeat=False):
callback = create164Callback(window, code)
timer = _browser.createTimer()
timer.setInterval(ms)
timer.setSingleShot(not repeat)
timer.timeout.connect(callback)
return timer.timerId()
def sendHttp(window, uri, code):
reply = _browser.createNetworkReply(uri)
callback = create164Callback(window, code)
def responseHandler():
if reply.error() == QNetworkReply.NoError:
data = reply.readAll().data()
try:
data = json.loads(data)
except ValueError:
# The data is not JSON.
pass
else:
data = jsonTo164Object(data)
response = {'status': 'ok', 'response': data}
else:
response = {'status': 'error', 'response': reply.errorString()}
callback(response, False)
reply.finished.connect(responseHandler)
# Create an object that 164 can handle.
return {'__qt': reply}
def keys(obj):
ks = obj.keys()
result = {}
for i,v in enumerate(ks):
result[i] = ks[i]
return result
def objToJson(obj):
if isinstance(obj, dict):
keys = obj.keys()
if len(keys) > 0 and isinstance(keys[0], int):
res = []
keys.sort()
for v in keys:
res.append(objToJson(obj[v]))
obj = res
else:
obj = {k: objToJson(v) for k, v in obj.items()}
return obj
def jsondumps(obj):
obj = objToJson(obj)
return json.dumps(obj, sort_keys=True)
def setFocus(node):
"""Set the focus onto the Qt widget underneath "node"."""
_browser.setFocus(node)
# Basically, convert all lists seen.
def jsonTo164Object(json):
"""Convert the JSON string "json" into a 164 object."""
if isinstance(json, dict):
json = dict((key, jsonTo164Object(json[key])) for key in json)
elif isinstance(json, list):
json = dict(enumerate(map(jsonTo164Object, json)))
return json
def doQuote(str):
"""Add quotes around "str"."""
return "\"" + str + "\""
|
|
import random
import time
import datetime
import Queue
import collections
import itertools
import os
import os.path
# import array
import numpy as np
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
if self.name:
print '[%s]' % self.name,
print 'Elapsed: %s' % (time.time() - self.tstart)
def get_all_from_queue(Q):
""" Generator to yield one after the others all items currently
in the queue Q, without any waiting.
"""
try:
while True:
yield Q.get_nowait()
except Queue.Empty:
raise StopIteration
def get_item_from_queue(Q, timeout=0.01):
""" Attempts to retrieve an item from the queue Q. If Q is
empty, None is returned.
Blocks for 'timeout' seconds in case the queue is empty,
so don't use this method for speedy retrieval of multiple
items (use get_all_from_queue for that).
"""
try:
        item = Q.get(True, timeout)
except Queue.Empty:
return None
return item
def flatten(iterables):
""" Flatten an iterable of iterables. Returns a generator.
list(flatten([[2, 3], [5, 6]])) => [2, 3, 5, 6]
"""
return (elem for iterable in iterables for elem in iterable)
def argmin_list(seq, func):
""" Return a list of elements of seq[i] with the lowest
func(seq[i]) scores.
>>> argmin_list(['one', 'to', 'three', 'or'], len)
['to', 'or']
"""
best_score, best = func(seq[0]), []
for x in seq:
x_score = func(x)
if x_score < best_score:
best, best_score = [x], x_score
elif x_score == best_score:
best.append(x)
return best
def argmin_random_tie(seq, func):
""" Return an element with lowest func(seq[i]) score; break
ties at random.
"""
return random.choice(argmin_list(seq, func))
def argmin(seq, func):
""" Return an element with lowest func(seq[i]) score; tie goes
to first one.
>>> argmin(['one', 'to', 'three'], len)
'to'
"""
return min(seq, key=func)
def argmax_list(seq, func):
""" Return a list of elements of seq[i] with the highest
func(seq[i]) scores.
>>> argmax_list(['one', 'three', 'seven'], len)
['three', 'seven']
"""
return argmin_list(seq, lambda x: -func(x))
def argmax_random_tie(seq, func):
""" Return an element with highest func(seq[i]) score; break
ties at random.
"""
return random.choice(argmax_list(seq, func))
def argmax(seq, func):
""" Return an element with highest func(seq[i]) score; tie
goes to first one.
>>> argmax(['one', 'to', 'three'], len)
'three'
"""
return max(seq, key=func)
def convert_to_int(n):
try:
return int(n)
except ValueError:
return None
def convert_to_float(n):
try:
return float(n)
except ValueError:
return None
def find_key(dictionary, val):
return [k for k, v in dictionary.iteritems() if v == val][0]
def find_keys(dictionary, val):
return [k for k, v in dictionary.iteritems() if v == val]
def find_key_with_match(dictionary, val):
return [k for k, v in dictionary.iteritems() if v in val][0]
def int_to_bin(n):
return [int(digit) for digit in bin(n)[2:]] # [2:] to chop off the "0b" part
def string_is_binary(string):
try:
int(string, 2)
return True
except (TypeError, ValueError):
return False
def bitvector_to_bytearray(bitvector, pad_to_n_bytes=4):
pieces = []
pad_to_n_bits = 8 * pad_to_n_bytes
bit_string = str(bitvector).ljust(((bitvector.length() + (pad_to_n_bits - 1)) / pad_to_n_bits) * pad_to_n_bits, "0") # right padding zeroes
# bitvector.pad_from_right(pad_to_n_bits-bitvector.length()%pad_to_n_bits)
# bit_string = str(bitvector)
for i in range(0, len(bit_string), 8):
byte = int(bit_string[i: i + 8], 2)
pieces.append(byte)
# array.array('B', [17, 24, 121, 1, 12, 222, 34, 76])
# struct.pack('B' * len(integers), *integers)
return bytearray(pieces)
def bitvector_to_array(bitvec):
bs = np.fromstring(bitvec.vector, dtype=np.uint8)
bs = (bs * 0x0202020202 & 0x010884422010) % 1023
return bs.astype(np.uint8).tostring()
# bs = array.array('B', bitvec.vector.tostring()) # no padding needed here, replaces bitvector.getTextFromBitVector()
# bitstream_swap = ''
# lsbits = lambda b: (b * 0x0202020202 & 0x010884422010) % 1023
# for b in bs:
# bitstream_swap += chr(lsbits(b))
# return bitstream_swap
def bitarray_to_array(bitarr):
bs = np.fromstring(bitarr.tobytes(), dtype=np.uint8) # byte padding happens here, bitarray.tobytes()
bs = (bs * 0x0202020202 & 0x010884422010) % 1023
return bs.astype(np.uint8).tostring()
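# Illustrative self-check (not part of the original helpers; the function
# names are hypothetical): the expression (b * 0x0202020202 & 0x010884422010)
# % 1023 used above is the classic bit-twiddling trick that reverses the bit
# order of a single byte.
def _reverse_bits_naive(byte):
    return int('{0:08b}'.format(byte)[::-1], 2)
def _reverse_bits_tricky(byte):
    return (byte * 0x0202020202 & 0x010884422010) % 1023
assert all(_reverse_bits_naive(b) == _reverse_bits_tricky(b) for b in range(256))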
def list_intersection(list_1, list_2):
"""intersection of lists
equivalent to set.intersection
"""
return [i for i in list_1 if i in list_2]
def flatten_iterable(iterable):
"""flatten iterable, but leaves out strings
[[[1, 2, 3], [4, 5]], 6] -> [1, 2, 3, 4, 5, 6]
"""
for item in iterable:
if isinstance(item, collections.Iterable) and not isinstance(item, basestring):
for sub in flatten_iterable(item):
yield sub
else:
yield item
def iterable(item):
"""generate iterable from item, but leaves out strings
"""
if isinstance(item, collections.Iterable) and not isinstance(item, basestring):
return item
else:
return [item]
# # {{{ http://code.activestate.com/recipes/285264/ (r1)
# ---------------------------------------------------------
# natsort.py: Natural string sorting.
# ---------------------------------------------------------
# By Seo Sanghyeon. Some changes by Connelly Barnes.
def try_int(s):
"Convert to integer if possible."
try:
return int(s)
except Exception:
return s
def natsort_key(s):
"Used internally to get a tuple by which s is sorted."
import re
return map(try_int, re.findall(r'(\d+|\D+)', s))
def natcmp(a, b):
"Natural string comparison, case sensitive."
return cmp(natsort_key(a), natsort_key(b))
def natcasecmp(a, b):
"Natural string comparison, ignores case."
return natcmp(a.lower(), b.lower())
def natsort(seq, cmp=natcmp):
"In-place natural string sort."
seq.sort(cmp)
def natsorted(seq, cmp=natcmp):
"Returns a copy of seq, sorted by natural string sort."
import copy
temp = copy.copy(seq)
natsort(temp, cmp)
return temp
# # end of http://code.activestate.com/recipes/285264/ }}}
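# Illustrative sketch (Python 2, like the rest of this module): natural sort
# orders embedded numbers numerically instead of lexicographically.
assert natsorted(["item10", "item2", "item1"]) == ["item1", "item2", "item10"]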
def get_iso_time():
    '''returns the current time as a datetime (convert to an ISO string with
    str()), mapping to and from its string form in an ugly way
'''
t1 = time.time()
t2 = datetime.datetime.fromtimestamp(t1)
t4 = t2.__str__()
try:
t4a, t4b = t4.split(".", 1)
except ValueError:
t4a = t4
t4b = '000000'
t5 = datetime.datetime.strptime(t4a, "%Y-%m-%d %H:%M:%S")
ms = int(t4b.ljust(6, '0')[:6])
return t5.replace(microsecond=ms)
def get_float_time():
    '''returns time as a double precision float (Time64 in pytables), mapping to and from Python datetimes
'''
t1 = time.time()
t2 = datetime.datetime.fromtimestamp(t1)
return time.mktime(t2.timetuple()) + 1e-6 * t2.microsecond
def split_seq(iterable, size):
it = iter(iterable)
item = list(itertools.islice(it, size))
while item:
yield item
item = list(itertools.islice(it, size))
def str2bool(value):
try:
if value.lower() in ("yes", "y", "true", "t", "1"):
return True
elif value.lower() in ("no", "n", "false", "f", "0"):
return False
raise ValueError('Cannot convert to boolean: unknown string %s' % value)
except AttributeError: # not a string
return bool(value)
def groupby_dict(dictionary, key):
''' Group dict of dicts by key.
'''
return dict((k, list(g)) for k, g in itertools.groupby(sorted(dictionary.keys(), key=lambda name: dictionary[name][key]), key=lambda name: dictionary[name][key]))
def dict_compare(d1, d2):
'''Comparing two dictionaries.
Note: https://stackoverflow.com/questions/4527942/comparing-two-dictionaries-in-python
'''
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_keys = d1_keys.intersection(d2_keys)
added = d1_keys - d2_keys
removed = d2_keys - d1_keys
modified = {o: (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]}
same = set(o for o in intersect_keys if d1[o] == d2[o])
return added, removed, modified, same
def zip_nofill(*iterables):
'''Zipping iterables without fillvalue.
Note: https://stackoverflow.com/questions/38054593/zip-longest-without-fillvalue
'''
return (tuple([entry for entry in iterable if entry is not None]) for iterable in itertools.izip_longest(*iterables, fillvalue=None))
def find_file_dir_up(filename, path=None, n=None):
'''Finding file in directory upwards.
'''
if path is None:
path = os.getcwd()
i = 0
while True:
current_path = path
for _ in range(i):
current_path = os.path.split(current_path)[0]
if os.path.isfile(os.path.join(current_path, filename)): # found file and return
return os.path.join(current_path, filename)
elif os.path.dirname(current_path) == current_path: # root of filesystem
return
elif n is not None and i == n:
return
else: # file not found
i += 1
continue
|
|
from __future__ import division, absolute_import, print_function
import os
import re
import sys
import types
from copy import copy
from distutils import ccompiler
from distutils.ccompiler import *
from distutils.errors import DistutilsExecError, DistutilsModuleError, \
DistutilsPlatformError
from distutils.sysconfig import customize_compiler
from distutils.version import LooseVersion
from numpy.distutils import log
from numpy.distutils.compat import get_exception
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
quote_args, get_num_build_jobs
def replace_method(klass, method_name, func):
if sys.version_info[0] < 3:
m = types.MethodType(func, None, klass)
else:
# Py3k does not have unbound method anymore, MethodType does not work
m = lambda self, *args, **kw: func(self, *args, **kw)
setattr(klass, method_name, m)
# Using customized CCompiler.spawn.
def CCompiler_spawn(self, cmd, display=None):
"""
Execute a command in a sub-process.
Parameters
----------
cmd : str
The command to execute.
display : str or sequence of str, optional
The text to add to the log file kept by `numpy.distutils`.
If not given, `display` is equal to `cmd`.
Returns
-------
None
Raises
------
DistutilsExecError
If the command failed, i.e. the exit status was not 0.
"""
if display is None:
display = cmd
if is_sequence(display):
display = ' '.join(list(display))
log.info(display)
s, o = exec_command(cmd)
if s:
if is_sequence(cmd):
cmd = ' '.join(list(cmd))
try:
print(o)
except UnicodeError:
# When installing through pip, `o` can contain non-ascii chars
pass
if re.search('Too many open files', o):
msg = '\nTry rerunning setup command until build succeeds.'
else:
msg = ''
raise DistutilsExecError('Command "%s" failed with exit status %d%s' % (cmd, s, msg))
replace_method(CCompiler, 'spawn', CCompiler_spawn)
def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
"""
Return the name of the object files for the given source files.
Parameters
----------
source_filenames : list of str
The list of paths to source files. Paths can be either relative or
absolute, this is handled transparently.
strip_dir : bool, optional
Whether to strip the directory from the returned paths. If True,
the file name prepended by `output_dir` is returned. Default is False.
output_dir : str, optional
If given, this path is prepended to the returned paths to the
object files.
Returns
-------
obj_names : list of str
The list of paths to the object files corresponding to the source
files in `source_filenames`.
"""
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(os.path.normpath(src_name))
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if base.startswith('..'):
# Resolve starting relative path components, middle ones
# (if any) have been handled by os.path.normpath above.
i = base.rfind('..') + 2
d = base[:i]
d = os.path.basename(os.path.abspath(d))
base = d + base[i:]
if ext not in self.src_extensions:
raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name))
if strip_dir:
base = os.path.basename(base)
obj_name = os.path.join(output_dir, base + self.obj_extension)
obj_names.append(obj_name)
return obj_names
replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)
def CCompiler_compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""
Compile one or more source files.
Please refer to the Python distutils API reference for more details.
Parameters
----------
sources : list of str
A list of filenames
output_dir : str, optional
Path to the output directory.
macros : list of tuples
A list of macro definitions.
include_dirs : list of str, optional
The directories to add to the default include file search path for
this compilation only.
debug : bool, optional
Whether or not to output debug symbols in or alongside the object
file(s).
extra_preargs, extra_postargs : ?
Extra pre- and post-arguments.
depends : list of str, optional
A list of file names that all targets depend on.
Returns
-------
objects : list of str
A list of object file names, one per source file `sources`.
Raises
------
CompileError
If compilation fails.
"""
# This method is effective only with Python >=2.3 distutils.
# Any changes here should be applied also to fcompiler.compile
# method to support pre Python 2.3 distutils.
if not sources:
return []
# FIXME:RELATIVE_IMPORT
if sys.version_info[0] < 3:
from .fcompiler import FCompiler, is_f_file, has_f90_header
else:
from numpy.distutils.fcompiler import (FCompiler, is_f_file,
has_f90_header)
if isinstance(self, FCompiler):
display = []
for fc in ['f77', 'f90', 'fix']:
fcomp = getattr(self, 'compiler_' + fc)
if fcomp is None:
continue
display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp)))
display = '\n'.join(display)
else:
ccomp = self.compiler_so
display = "C compiler: %s\n" % (' '.join(ccomp),)
log.info(display)
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
display = "compile options: '%s'" % (' '.join(cc_args))
if extra_postargs:
display += "\nextra options: '%s'" % (' '.join(extra_postargs))
log.info(display)
def single_compile(args):
obj, (src, ext) = args
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
if isinstance(self, FCompiler):
objects_to_build = list(build.keys())
f77_objects, other_objects = [], []
for obj in objects:
if obj in objects_to_build:
src, ext = build[obj]
if self.compiler_type == 'absoft':
obj = cyg2win32(obj)
src = cyg2win32(src)
if is_f_file(src) and not has_f90_header(src):
f77_objects.append((obj, (src, ext)))
else:
other_objects.append((obj, (src, ext)))
# f77 objects can be built in parallel
build_items = f77_objects
# build f90 modules serial, module files are generated during
# compilation and may be used by files later in the list so the
# ordering is important
for o in other_objects:
single_compile(o)
else:
build_items = build.items()
jobs = get_num_build_jobs()
if len(build) > 1 and jobs > 1:
# build parallel
import multiprocessing.pool
pool = multiprocessing.pool.ThreadPool(jobs)
pool.map(single_compile, build_items)
pool.close()
else:
# build serial
for o in build_items:
single_compile(o)
# Return *all* object filenames, not just the ones we just built.
return objects
replace_method(CCompiler, 'compile', CCompiler_compile)
def CCompiler_customize_cmd(self, cmd, ignore=()):
"""
Customize compiler using distutils command.
Parameters
----------
cmd : class instance
An instance inheriting from `distutils.cmd.Command`.
ignore : sequence of str, optional
List of `CCompiler` commands (without ``'set_'``) that should not be
altered. Strings that are checked for are:
``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',
'rpath', 'link_objects')``.
Returns
-------
None
"""
log.info('customize %s using %s' % (self.__class__.__name__,
cmd.__class__.__name__))
def allow(attr):
return getattr(cmd, attr, None) is not None and attr not in ignore
if allow('include_dirs'):
self.set_include_dirs(cmd.include_dirs)
if allow('define'):
for (name, value) in cmd.define:
self.define_macro(name, value)
if allow('undef'):
for macro in cmd.undef:
self.undefine_macro(macro)
if allow('libraries'):
self.set_libraries(self.libraries + cmd.libraries)
if allow('library_dirs'):
self.set_library_dirs(self.library_dirs + cmd.library_dirs)
if allow('rpath'):
self.set_runtime_library_dirs(cmd.rpath)
if allow('link_objects'):
self.set_link_objects(cmd.link_objects)
replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)
def _compiler_to_string(compiler):
props = []
mx = 0
keys = list(compiler.executables.keys())
for key in ['version', 'libraries', 'library_dirs',
'object_switch', 'compile_switch',
'include_dirs', 'define', 'undef', 'rpath', 'link_objects']:
if key not in keys:
keys.append(key)
for key in keys:
if hasattr(compiler, key):
v = getattr(compiler, key)
mx = max(mx, len(key))
props.append((key, repr(v)))
lines = []
format = '%-' + repr(mx + 1) + 's = %s'
for prop in props:
lines.append(format % prop)
return '\n'.join(lines)
def CCompiler_show_customization(self):
"""
Print the compiler customizations to stdout.
Parameters
----------
None
Returns
-------
None
Notes
-----
Printing is only done if the distutils log threshold is < 2.
"""
if 0:
for attrname in ['include_dirs', 'define', 'undef',
'libraries', 'library_dirs',
'rpath', 'link_objects']:
attr = getattr(self, attrname, None)
if not attr:
continue
log.info("compiler '%s' is set to %s" % (attrname, attr))
try:
self.get_version()
except:
pass
if log._global_log.threshold < 2:
print('*' * 80)
print(self.__class__)
print(_compiler_to_string(self))
print('*' * 80)
replace_method(CCompiler, 'show_customization', CCompiler_show_customization)
def CCompiler_customize(self, dist, need_cxx=0):
"""
Do any platform-specific customization of a compiler instance.
This method calls `distutils.sysconfig.customize_compiler` for
platform-specific customization, as well as optionally remove a flag
to suppress spurious warnings in case C++ code is being compiled.
Parameters
----------
dist : object
This parameter is not used for anything.
need_cxx : bool, optional
Whether or not C++ has to be compiled. If so (True), the
``"-Wstrict-prototypes"`` option is removed to prevent spurious
warnings. Default is False.
Returns
-------
None
Notes
-----
All the default options used by distutils can be extracted with::
from distutils import sysconfig
sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
'CCSHARED', 'LDSHARED', 'SO')
"""
# See FCompiler.customize for suggested usage.
log.info('customize %s' % (self.__class__.__name__))
customize_compiler(self)
if need_cxx:
# In general, distutils uses -Wstrict-prototypes, but this option is
# not valid for C++ code, only for C. Remove it if it's there to
# avoid a spurious warning on every compilation.
try:
self.compiler_so.remove('-Wstrict-prototypes')
except (AttributeError, ValueError):
pass
if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:
if not self.compiler_cxx:
if self.compiler[0].startswith('gcc'):
a, b = 'gcc', 'g++'
else:
a, b = 'cc', 'c++'
self.compiler_cxx = [self.compiler[0].replace(a, b)] \
+ self.compiler[1:]
else:
if hasattr(self, 'compiler'):
log.warn("#### %s #######" % (self.compiler,))
if not hasattr(self, 'compiler_cxx'):
log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)
return
replace_method(CCompiler, 'customize', CCompiler_customize)
def simple_version_match(pat=r'[-.\d]+', ignore='', start=''):
"""
Simple matching of version numbers, for use in CCompiler and FCompiler.
Parameters
----------
pat : str, optional
A regular expression matching version numbers.
Default is ``r'[-.\\d]+'``.
ignore : str, optional
A regular expression matching patterns to skip.
Default is ``''``, in which case nothing is skipped.
start : str, optional
A regular expression matching the start of where to start looking
for version numbers.
Default is ``''``, in which case searching is started at the
beginning of the version string given to `matcher`.
Returns
-------
matcher : callable
A function that is appropriate to use as the ``.version_match``
attribute of a `CCompiler` class. `matcher` takes a single parameter,
a version string.
"""
def matcher(self, version_string):
# version string may appear in the second line, so getting rid
# of new lines:
version_string = version_string.replace('\n', ' ')
pos = 0
if start:
m = re.match(start, version_string)
if not m:
return None
pos = m.end()
while True:
m = re.search(pat, version_string[pos:])
if not m:
return None
if ignore and re.match(ignore, m.group(0)):
pos = m.end()
continue
break
return m.group(0)
return matcher
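# Illustrative sketch (not part of numpy.distutils): the returned matcher is
# meant to be bound as a ``.version_match`` attribute; called unbound, the
# first argument takes the place of ``self`` and is unused.
assert simple_version_match(start=r'gcc')(None, 'gcc version 4.8.5') == '4.8.5'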
def CCompiler_get_version(self, force=False, ok_status=[0]):
"""
Return compiler version, or None if compiler is not available.
Parameters
----------
force : bool, optional
If True, force a new determination of the version, even if the
compiler already has a version attribute. Default is False.
ok_status : list of int, optional
The list of status values returned by the version look-up process
for which a version string is returned. If the status value is not
in `ok_status`, None is returned. Default is ``[0]``.
Returns
-------
version : str or None
Version string, in the format of `distutils.version.LooseVersion`.
"""
if not force and hasattr(self, 'version'):
return self.version
self.find_executables()
try:
version_cmd = self.version_cmd
except AttributeError:
return None
if not version_cmd or not version_cmd[0]:
return None
try:
matcher = self.version_match
except AttributeError:
try:
pat = self.version_pattern
except AttributeError:
return None
def matcher(version_string):
m = re.match(pat, version_string)
if not m:
return None
version = m.group('version')
return version
status, output = exec_command(version_cmd, use_tee=0)
version = None
if status in ok_status:
version = matcher(output)
if version:
version = LooseVersion(version)
self.version = version
return version
replace_method(CCompiler, 'get_version', CCompiler_get_version)
def CCompiler_cxx_compiler(self):
"""
Return the C++ compiler.
Parameters
----------
None
Returns
-------
cxx : class instance
The C++ compiler, as a `CCompiler` instance.
"""
if self.compiler_type in ('msvc', 'intelw', 'intelemw'):
return self
cxx = copy(self)
cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:]
if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]:
# AIX needs the ld_so_aix script included with Python
cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \
+ cxx.linker_so[2:]
else:
cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]
return cxx
replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)
compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',
"Intel C Compiler for 32-bit applications")
compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',
"Intel C Itanium Compiler for Itanium-based applications")
compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',
"Intel C Compiler for 64-bit applications")
compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',
"Intel C Compiler for 32-bit applications on Windows")
compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',
"Intel C Compiler for 64-bit applications on Windows")
compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
"PathScale Compiler for SiCortex-based applications")
ccompiler._default_compilers += (('linux.*', 'intel'),
('linux.*', 'intele'),
('linux.*', 'intelem'),
('linux.*', 'pathcc'),
('nt', 'intelw'),
('nt', 'intelemw'))
if sys.platform == 'win32':
compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32" \
"(for MSC built Python)")
if mingw32():
# On windows platforms, we want to default to mingw32 (gcc)
# because msvc can't build blitz stuff.
log.info('Setting mingw32 as default compiler for nt.')
ccompiler._default_compilers = (('nt', 'mingw32'),) \
+ ccompiler._default_compilers
_distutils_new_compiler = new_compiler
def new_compiler(plat=None,
compiler=None,
verbose=0,
dry_run=0,
force=0):
# Try first C compilers from numpy.distutils.
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError(msg)
module_name = "numpy.distutils." + module_name
try:
__import__(module_name)
except ImportError:
msg = str(get_exception())
log.info('%s in numpy.distutils; trying from distutils',
str(msg))
module_name = module_name[6:]
try:
__import__(module_name)
except ImportError:
msg = str(get_exception())
raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
module_name)
try:
module = sys.modules[module_name]
klass = vars(module)[class_name]
except KeyError:
raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name))
compiler = klass(None, dry_run, force)
log.debug('new_compiler returns %s' % (klass))
return compiler
ccompiler.new_compiler = new_compiler
_distutils_gen_lib_options = gen_lib_options
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
library_dirs = quote_args(library_dirs)
runtime_library_dirs = quote_args(runtime_library_dirs)
r = _distutils_gen_lib_options(compiler, library_dirs,
runtime_library_dirs, libraries)
lib_opts = []
for i in r:
if is_sequence(i):
lib_opts.extend(list(i))
else:
lib_opts.append(i)
return lib_opts
ccompiler.gen_lib_options = gen_lib_options
# Also fix up the various compiler modules, which do
# from distutils.ccompiler import gen_lib_options
# Don't bother with mwerks, as we don't support Classic Mac.
for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
_m = sys.modules.get('distutils.' + _cc + 'compiler')
if _m is not None:
setattr(_m, 'gen_lib_options', gen_lib_options)
_distutils_gen_preprocess_options = gen_preprocess_options
def gen_preprocess_options(macros, include_dirs):
include_dirs = quote_args(include_dirs)
return _distutils_gen_preprocess_options(macros, include_dirs)
ccompiler.gen_preprocess_options = gen_preprocess_options
##Fix distutils.util.split_quoted:
# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears
# that removing this fix causes f2py problems on Windows XP (see ticket #723).
# Specifically, on WinXP when gfortran is installed in a directory path, which
# contains spaces, then f2py is unable to find it.
import string
_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
_has_white_re = re.compile(r'\s')
def split_quoted(s):
s = s.strip()
words = []
pos = 0
while s:
m = _wordchars_re.match(s, pos)
end = m.end()
if end == len(s):
words.append(s[:end])
break
if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
words.append(s[:end]) # we definitely have a word delimiter
s = s[end:].lstrip()
pos = 0
elif s[end] == '\\': # preserve whatever is being escaped;
# will become part of the current word
s = s[:end] + s[end + 1:]
pos = end + 1
else:
if s[end] == "'": # slurp singly-quoted string
m = _squote_re.match(s, end)
elif s[end] == '"': # slurp doubly-quoted string
m = _dquote_re.match(s, end)
else:
raise RuntimeError("this can't happen (bad char '%c')" % s[end])
if m is None:
raise ValueError("bad string (mismatched %s quotes?)" % s[end])
(beg, end) = m.span()
if _has_white_re.search(s[beg + 1:end - 1]):
s = s[:beg] + s[beg + 1:end - 1] + s[end:]
pos = m.end() - 2
else:
# Keeping quotes when a quoted word does not contain
# white-space. XXX: send a patch to distutils
pos = m.end()
if pos >= len(s):
words.append(s)
break
return words
ccompiler.split_quoted = split_quoted
##Fix distutils.util.split_quoted:
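# Illustrative sketch (not part of numpy.distutils): the patched split_quoted()
# keeps a quoted argument containing whitespace together as one word, which is
# the gfortran-in-a-path-with-spaces case the note above refers to.
assert split_quoted('gfortran -L"/path with spaces/lib" -lm') == \
    ['gfortran', '-L/path with spaces/lib', '-lm']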
|
|
import tensorflow as tf
from tensorflow.contrib.framework import arg_scope
from layers import *
class Model(object):
def __init__(self, config,
inputs, labels, enc_seq_length, dec_seq_length, mask,
reuse=False, is_critic=False):
self.task = config.task
self.debug = config.debug
self.config = config
self.input_dim = config.input_dim
self.hidden_dim = config.hidden_dim
self.num_layers = config.num_layers
self.max_enc_length = config.max_enc_length
self.max_dec_length = config.max_dec_length
self.num_glimpse = config.num_glimpse
self.init_min_val = config.init_min_val
self.init_max_val = config.init_max_val
self.initializer = \
tf.random_uniform_initializer(self.init_min_val, self.init_max_val)
self.use_terminal_symbol = config.use_terminal_symbol
self.lr_start = config.lr_start
self.lr_decay_step = config.lr_decay_step
self.lr_decay_rate = config.lr_decay_rate
self.max_grad_norm = config.max_grad_norm
self.layer_dict = {}
##############
# inputs
##############
self.is_training = tf.placeholder_with_default(
tf.constant(False, dtype=tf.bool),
shape=(), name='is_training'
)
self.enc_inputs, self.dec_targets, self.enc_seq_length, self.dec_seq_length, self.mask = \
tf.contrib.layers.utils.smart_cond(
self.is_training,
lambda: (inputs['train'], labels['train'], enc_seq_length['train'],
dec_seq_length['train'], mask['train']),
lambda: (inputs['test'], labels['test'], enc_seq_length['test'],
dec_seq_length['test'], mask['test'])
)
if self.use_terminal_symbol:
self.dec_seq_length += 1 # terminal symbol
self._build_model()
self._build_steps()
if not reuse:
self._build_optim()
self.train_summary = tf.summary.merge([
tf.summary.scalar("train/total_loss", self.total_loss),
tf.summary.scalar("train/lr", self.lr),
])
self.test_summary = tf.summary.merge([
tf.summary.scalar("test/total_loss", self.total_loss),
])
def _build_steps(self):
def run(sess, fetch, feed_dict, summary_writer, summary):
fetch['step'] = self.global_step
if summary is not None:
fetch['summary'] = summary
      result = sess.run(fetch, feed_dict=feed_dict)
if summary_writer is not None:
summary_writer.add_summary(result['summary'], result['step'])
summary_writer.flush()
return result
def train(sess, fetch, summary_writer):
return run(sess, fetch, feed_dict={},
summary_writer=summary_writer, summary=self.train_summary)
def test(sess, fetch, summary_writer=None):
return run(sess, fetch, feed_dict={self.is_training: False},
summary_writer=summary_writer, summary=self.test_summary)
self.train = train
self.test = test
def _build_model(self):
tf.logging.info("Create a model..")
self.global_step = tf.Variable(0, trainable=False)
input_embed = tf.get_variable(
"input_embed", [1, self.input_dim, self.hidden_dim],
initializer=self.initializer)
with tf.variable_scope("encoder"):
self.embeded_enc_inputs = tf.nn.conv1d(
self.enc_inputs, input_embed, 1, "VALID")
batch_size = tf.shape(self.enc_inputs)[0]
with tf.variable_scope("encoder"):
self.enc_cell = LSTMCell(
self.hidden_dim,
initializer=self.initializer)
if self.num_layers > 1:
cells = [self.enc_cell] * self.num_layers
self.enc_cell = MultiRNNCell(cells)
self.enc_init_state = trainable_initial_state(
batch_size, self.enc_cell.state_size)
# self.encoder_outputs : [None, max_time, output_size]
self.enc_outputs, self.enc_final_states = tf.nn.dynamic_rnn(
self.enc_cell, self.embeded_enc_inputs,
self.enc_seq_length, self.enc_init_state)
if self.use_terminal_symbol:
# 0 index indicates terminal
self.first_decoder_input = tf.expand_dims(trainable_initial_state(
batch_size, self.hidden_dim, name="first_decoder_input"), 1)
self.enc_outputs = tf.concat_v2(
[self.first_decoder_input, self.enc_outputs], axis=1)
with tf.variable_scope("dencoder"):
self.idx_pairs = index_matrix_to_pairs(self.dec_targets)
self.embeded_dec_inputs = tf.stop_gradient(
tf.gather_nd(self.enc_outputs, self.idx_pairs))
if self.use_terminal_symbol:
tiled_zero_idxs = tf.tile(tf.zeros(
[1, 1], dtype=tf.int32), [batch_size, 1], name="tiled_zero_idxs")
self.dec_targets = tf.concat_v2([self.dec_targets, tiled_zero_idxs], axis=1)
self.embeded_dec_inputs = tf.concat_v2(
[self.first_decoder_input, self.embeded_dec_inputs], axis=1)
self.dec_cell = LSTMCell(
self.hidden_dim,
initializer=self.initializer)
if self.num_layers > 1:
cells = [self.dec_cell] * self.num_layers
self.dec_cell = MultiRNNCell(cells)
self.dec_pred_logits, _, _ = decoder_rnn(
self.dec_cell, self.embeded_dec_inputs,
self.enc_outputs, self.enc_final_states,
self.dec_seq_length, self.hidden_dim,
self.num_glimpse, batch_size, is_train=True,
initializer=self.initializer)
self.dec_pred_prob = tf.nn.softmax(
self.dec_pred_logits, 2, name="dec_pred_prob")
self.dec_pred = tf.argmax(
self.dec_pred_logits, 2, name="dec_pred")
with tf.variable_scope("dencoder", reuse=True):
self.dec_inference_logits, _, _ = decoder_rnn(
self.dec_cell, self.first_decoder_input,
self.enc_outputs, self.enc_final_states,
self.dec_seq_length, self.hidden_dim,
self.num_glimpse, batch_size, is_train=False,
initializer=self.initializer,
max_length=self.max_dec_length + int(self.use_terminal_symbol))
self.dec_inference_prob = tf.nn.softmax(
self.dec_inference_logits, 2, name="dec_inference_logits")
self.dec_inference = tf.argmax(
self.dec_inference_logits, 2, name="dec_inference")
def _build_optim(self):
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=self.dec_targets, logits=self.dec_pred_logits)
inference_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=self.dec_targets, logits=self.dec_inference_logits)
def apply_mask(op):
length = tf.cast(op[:1], tf.int32)
loss = op[1:]
return tf.multiply(loss, tf.ones(length, dtype=tf.float32))
batch_loss = tf.div(
tf.reduce_sum(tf.multiply(losses, self.mask)),
tf.reduce_sum(self.mask), name="batch_loss")
    batch_inference_loss = tf.div(
        tf.reduce_sum(tf.multiply(inference_losses, self.mask)),
        tf.reduce_sum(self.mask), name="batch_inference_loss")
tf.losses.add_loss(batch_loss)
total_loss = tf.losses.get_total_loss()
self.total_loss = total_loss
self.target_cross_entropy_losses = losses
self.total_inference_loss = batch_inference_loss
self.lr = tf.train.exponential_decay(
self.lr_start, self.global_step, self.lr_decay_step,
self.lr_decay_rate, staircase=True, name="learning_rate")
optimizer = tf.train.AdamOptimizer(self.lr)
    if self.max_grad_norm is not None:
grads_and_vars = optimizer.compute_gradients(self.total_loss)
for idx, (grad, var) in enumerate(grads_and_vars):
if grad is not None:
grads_and_vars[idx] = (tf.clip_by_norm(grad, self.max_grad_norm), var)
self.optim = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)
else:
self.optim = optimizer.minimize(self.total_loss, global_step=self.global_step)
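# Illustrative sketch (plain NumPy, not the TensorFlow API; the function name
# is hypothetical): tf.clip_by_norm as used in _build_optim rescales a
# gradient only when its L2 norm exceeds max_grad_norm, i.e. it returns
# g * clip_norm / max(||g||, clip_norm).
import numpy as np
def _clip_by_norm_reference(grad, clip_norm):
  norm = np.linalg.norm(grad)
  return grad * clip_norm / max(norm, clip_norm)
assert np.allclose(_clip_by_norm_reference(np.array([3.0, 4.0]), 1.0),
                   np.array([0.6, 0.8]))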
|
|
import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import contextlib
import pickle
import textwrap
import builtins
import pkg_resources
from distutils.errors import DistutilsError
from pkg_resources import working_set
if sys.platform.startswith('java'):
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
__all__ = [
"AbstractSandbox",
"DirectorySandbox",
"SandboxViolation",
"run_setup",
]
def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals)
@contextlib.contextmanager
def save_argv(repl=None):
saved = sys.argv[:]
if repl is not None:
sys.argv[:] = repl
try:
yield saved
finally:
sys.argv[:] = saved
@contextlib.contextmanager
def save_path():
saved = sys.path[:]
try:
yield saved
finally:
sys.path[:] = saved
@contextlib.contextmanager
def override_temp(replacement):
"""
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
"""
os.makedirs(replacement, exist_ok=True)
saved = tempfile.tempdir
tempfile.tempdir = replacement
try:
yield
finally:
tempfile.tempdir = saved
@contextlib.contextmanager
def pushd(target):
saved = os.getcwd()
os.chdir(target)
try:
yield saved
finally:
os.chdir(saved)
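# Illustrative sketch (hypothetical helper, not part of the sandbox API):
# pushd() yields the previous working directory and always restores it on
# exit, even if the body raises.
def _pushd_example():
    before = os.getcwd()
    with pushd(tempfile.gettempdir()) as prev:
        assert prev == before
    assert os.getcwd() == before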
class UnpickleableException(Exception):
"""
An exception representing another Exception that could not be pickled.
"""
@staticmethod
def dump(type, exc):
"""
Always return a dumped (pickled) type and exc. If exc can't be pickled,
wrap it in UnpickleableException first.
"""
try:
return pickle.dumps(type), pickle.dumps(exc)
except Exception:
# get UnpickleableException inside the sandbox
from setuptools.sandbox import UnpickleableException as cls
return cls.dump(cls, cls(repr(exc)))
class ExceptionSaver:
"""
A Context Manager that will save an exception, serialized, and restore it
later.
"""
def __enter__(self):
return self
def __exit__(self, type, exc, tb):
if not exc:
return
# dump the exception
self._saved = UnpickleableException.dump(type, exc)
self._tb = tb
# suppress the exception
return True
def resume(self):
"restore and re-raise any exception"
if '_saved' not in vars(self):
return
type, exc = map(pickle.loads, self._saved)
raise exc.with_traceback(self._tb)
@contextlib.contextmanager
def save_modules():
"""
Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context.
"""
saved = sys.modules.copy()
with ExceptionSaver() as saved_exc:
yield saved
sys.modules.update(saved)
# remove any modules imported since
del_modules = (
mod_name
for mod_name in sys.modules
if mod_name not in saved
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
)
_clear_modules(del_modules)
saved_exc.resume()
def _clear_modules(module_names):
for mod_name in list(module_names):
del sys.modules[mod_name]
@contextlib.contextmanager
def save_pkg_resources_state():
saved = pkg_resources.__getstate__()
try:
yield saved
finally:
pkg_resources.__setstate__(saved)
@contextlib.contextmanager
def setup_context(setup_dir):
temp_dir = os.path.join(setup_dir, 'temp')
with save_pkg_resources_state():
with save_modules():
with save_path():
hide_setuptools()
with save_argv():
with override_temp(temp_dir):
with pushd(setup_dir):
# ensure setuptools commands are available
__import__('setuptools')
yield
_MODULES_TO_HIDE = {
'setuptools',
'distutils',
'pkg_resources',
'Cython',
'_distutils_hack',
}
def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
>>> _needs_hiding('os')
False
>>> _needs_hiding('Cython')
True
"""
base_module = mod_name.split('.', 1)[0]
return base_module in _MODULES_TO_HIDE
def hide_setuptools():
"""
Remove references to setuptools' modules from sys.modules to allow the
invocation to import the most appropriate setuptools. This technique is
necessary to avoid issues such as #315 where setuptools upgrading itself
would fail to find a function declared in the metadata.
"""
_distutils_hack = sys.modules.get('_distutils_hack', None)
if _distutils_hack is not None:
_distutils_hack.remove_shim()
modules = filter(_needs_hiding, sys.modules)
_clear_modules(modules)
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
setup_dir = os.path.abspath(os.path.dirname(setup_script))
with setup_context(setup_dir):
try:
sys.argv[:] = [setup_script] + list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist: dist.activate())
with DirectorySandbox(setup_dir):
ns = dict(__file__=setup_script, __name__='__main__')
_execfile(setup_script, ns)
except SystemExit as v:
if v.args and v.args[0]:
raise
# Normal exit, just return
class AbstractSandbox:
"""Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
_active = False
def __init__(self):
self._attrs = [
name
for name in dir(_os)
if not name.startswith('_') and hasattr(self, name)
]
def _copy(self, source):
for name in self._attrs:
setattr(os, name, getattr(source, name))
def __enter__(self):
self._copy(self)
if _file:
builtins.file = self._file
builtins.open = self._open
self._active = True
def __exit__(self, exc_type, exc_value, traceback):
self._active = False
if _file:
builtins.file = _file
builtins.open = _open
self._copy(_os)
def run(self, func):
"""Run 'func' under os sandboxing"""
with self:
return func()
def _mk_dual_path_wrapper(name):
original = getattr(_os, name)
def wrap(self, src, dst, *args, **kw):
if self._active:
src, dst = self._remap_pair(name, src, dst, *args, **kw)
return original(src, dst, *args, **kw)
return wrap
for name in ["rename", "link", "symlink"]:
if hasattr(_os, name):
locals()[name] = _mk_dual_path_wrapper(name)
def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os, name)
def wrap(self, path, *args, **kw):
if self._active:
path = self._remap_input(name, path, *args, **kw)
return original(path, *args, **kw)
return wrap
if _file:
_file = _mk_single_path_wrapper('file', _file)
_open = _mk_single_path_wrapper('open', _open)
for name in [
"stat",
"listdir",
"chdir",
"open",
"chmod",
"chown",
"mkdir",
"remove",
"unlink",
"rmdir",
"utime",
"lchown",
"chroot",
"lstat",
"startfile",
"mkfifo",
"mknod",
"pathconf",
"access",
]:
if hasattr(_os, name):
locals()[name] = _mk_single_path_wrapper(name)
def _mk_single_with_return(name):
original = getattr(_os, name)
def wrap(self, path, *args, **kw):
if self._active:
path = self._remap_input(name, path, *args, **kw)
return self._remap_output(name, original(path, *args, **kw))
return original(path, *args, **kw)
return wrap
for name in ['readlink', 'tempnam']:
if hasattr(_os, name):
locals()[name] = _mk_single_with_return(name)
def _mk_query(name):
original = getattr(_os, name)
def wrap(self, *args, **kw):
retval = original(*args, **kw)
if self._active:
return self._remap_output(name, retval)
return retval
return wrap
for name in ['getcwd', 'tmpnam']:
if hasattr(_os, name):
locals()[name] = _mk_query(name)
def _validate_path(self, path):
"""Called to remap or validate any path, whether input or output"""
return path
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
return self._validate_path(path)
def _remap_output(self, operation, path):
"""Called for path outputs"""
return self._validate_path(path)
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation + '-from', src, *args, **kw),
self._remap_input(operation + '-to', dst, *args, **kw),
)
if hasattr(os, 'devnull'):
_EXCEPTIONS = [os.devnull]
else:
_EXCEPTIONS = []
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
write_ops = dict.fromkeys(
[
"open",
"chmod",
"chown",
"mkdir",
"remove",
"unlink",
"rmdir",
"utime",
"lchown",
"chroot",
"mkfifo",
"mknod",
"tempnam",
]
)
_exception_patterns = []
"exempt writing to paths that match the pattern"
def __init__(self, sandbox, exceptions=_EXCEPTIONS):
self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox, '')
self._exceptions = [
os.path.normcase(os.path.realpath(path)) for path in exceptions
]
AbstractSandbox.__init__(self)
def _violation(self, operation, *args, **kw):
from setuptools.sandbox import SandboxViolation
raise SandboxViolation(operation, args, kw)
if _file:
def _file(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("file", path, mode, *args, **kw)
return _file(path, mode, *args, **kw)
def _open(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("open", path, mode, *args, **kw)
return _open(path, mode, *args, **kw)
def tmpnam(self):
self._violation("tmpnam")
def _ok(self, path):
active = self._active
try:
self._active = False
realpath = os.path.normcase(os.path.realpath(path))
return (
self._exempted(realpath)
or realpath == self._sandbox
or realpath.startswith(self._prefix)
)
finally:
self._active = active
def _exempted(self, filepath):
start_matches = (
filepath.startswith(exception) for exception in self._exceptions
)
pattern_matches = (
re.match(pattern, filepath) for pattern in self._exception_patterns
)
candidates = itertools.chain(start_matches, pattern_matches)
return any(candidates)
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src, dst)
def open(self, file, flags, mode=0o777, *args, **kw):
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file, flags, mode, *args, **kw)
WRITE_FLAGS = functools.reduce(
operator.or_,
[
getattr(_os, a, 0)
for a in "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()
],
)
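# Hedged usage sketch (paths are made up): a DirectorySandbox confines
# filesystem writes to one directory tree; attempts to write elsewhere raise
# SandboxViolation via _violation().
#
#     sandbox = DirectorySandbox('/tmp/build-area')
#     sandbox.run(lambda: open('/tmp/build-area/log.txt', 'w').close())  # allowed
#     sandbox.run(lambda: open('/etc/passwd', 'w'))  # raises SandboxViolation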
class SandboxViolation(DistutilsError):
"""A setup script attempted to modify the filesystem outside the sandbox"""
tmpl = textwrap.dedent(
"""
SandboxViolation: {cmd}{args!r} {kwargs}
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.
"""
).lstrip()
def __str__(self):
cmd, args, kwargs = self.args
return self.tmpl.format(**locals())
|
|
import json
from django.db.models import Q
from corehq import Domain, toggles
from corehq.apps.accounting.models import (
    Feature, SoftwareProduct, BillingAccount, SoftwarePlanVersion,
    Subscription, Subscriber, BillingContactInfo, SoftwarePlan,
)
from corehq.apps.accounting.utils import fmt_feature_rate_dict, fmt_product_rate_dict
from corehq.apps.hqwebapp.async_handler import BaseAsyncHandler, AsyncHandlerError
from corehq.apps.hqwebapp.encoders import LazyEncoder
from corehq.apps.users.models import WebUser
class BaseRateAsyncHandler(BaseAsyncHandler):
"""
Subclass this for interacting with RatesManager.
"""
allowed_actions = [
'apply',
'create',
]
@property
def name(self):
return self.data.get('name')
@property
def rate_type(self):
return self.data.get('rate_type')
@property
def rate_id(self):
return self.data.get('rate_id')
@property
def create_response(self):
raise NotImplementedError("create_response is required")
@property
def apply_response(self):
raise NotImplementedError("apply_response is required")
class FeatureRateAsyncHandler(BaseRateAsyncHandler):
slug = 'features_handler'
@property
def create_response(self):
if Feature.objects.filter(name=self.name).count() > 0:
raise AsyncHandlerError("Feature '%s' already exists, and likely already "
"in this Software Plan Version." % self.name)
new_feature, _ = Feature.objects.get_or_create(
name=self.name,
feature_type=self.rate_type,
)
return fmt_feature_rate_dict(new_feature)
@property
def apply_response(self):
try:
feature = Feature.objects.get(id=self.rate_id)
return fmt_feature_rate_dict(feature)
except Feature.DoesNotExist:
raise AsyncHandlerError("could not find an existing feature")
class SoftwareProductRateAsyncHandler(BaseRateAsyncHandler):
slug = 'products_handler'
@property
def create_response(self):
if SoftwareProduct.objects.filter(name=self.name).count() > 0:
raise AsyncHandlerError("Product '%s' already exists, and likely already "
"in this Software Plan Version." % self.name)
new_product, _ = SoftwareProduct.objects.get_or_create(
name=self.name,
product_type=self.rate_type
)
return fmt_product_rate_dict(new_product)
@property
def apply_response(self):
try:
product = SoftwareProduct.objects.get(id=self.rate_id)
return fmt_product_rate_dict(product)
except SoftwareProduct.DoesNotExist:
raise AsyncHandlerError("could not find an existing product")
class BaseSelect2AsyncHandler(BaseAsyncHandler):
@property
def search_string(self):
return self.data.get('searchString')
@property
def existing(self):
return self.data.getlist('existing[]')
def _fmt_success(self, response):
success = json.dumps({
'results': [{
'id': r[0],
'text': r[1],
} for r in response]
}, cls=LazyEncoder)
return success
class Select2RateAsyncHandler(BaseSelect2AsyncHandler):
"""
Handles the async responses for the select2 widget in the Features & Rates portion
of the SoftwarePlanVersion form.
"""
slug = 'select2_rate'
allowed_actions = [
'feature_id',
'product_id',
]
@property
def feature_id_response(self):
features = Feature.objects
if self.existing:
features = features.exclude(name__in=self.existing)
if self.search_string:
features = features.filter(name__startswith=self.search_string)
return [(f.id, f.name, f.feature_type) for f in features.all()]
@property
def product_id_response(self):
products = SoftwareProduct.objects
if self.existing:
products = products.exclude(name__in=self.existing)
if self.search_string:
products = products.filter(name__startswith=self.search_string)
return [(p.id, p.name, p.product_type) for p in products.all()]
def _fmt_success(self, response):
return json.dumps({
'results': [
{
'id': r[0],
'name': r[1],
'rate_type': r[2],
'text': '%s (%s)' % (r[1], r[2]),
'isExisting': True,
} for r in response]
})
class Select2BillingInfoHandler(BaseSelect2AsyncHandler):
slug = 'select2_billing'
allowed_actions = [
'country',
'active_accounts',
'domain',
'account',
'plan_version',
'new_plan_version',
]
@property
def country_response(self):
from django_countries.data import COUNTRIES
countries = sorted(COUNTRIES.items(), key=lambda x: x[1].encode('utf-8'))
if self.search_string:
return filter(lambda x: x[1].lower().startswith(self.search_string.lower()), countries)
return countries
@property
def active_accounts_response(self):
accounts = BillingAccount.objects.filter(is_active=True)
if self.search_string:
accounts = accounts.filter(name__contains=self.search_string)
return [(a.id, a.name) for a in accounts]
@property
def domain_response(self):
domain_names = [domain['key'] for domain in Domain.get_all(include_docs=False)]
if self.search_string:
domain_names = filter(lambda x: x.lower().startswith(self.search_string.lower()), domain_names)
return [(name, name) for name in domain_names]
@property
def account_response(self):
accounts = BillingAccount.objects
if self.search_string:
accounts = accounts.filter(name__contains=self.search_string)
return [(a.id, a.name) for a in accounts.order_by('name')]
@property
def plan_version_response(self):
edition = self.data.get('additionalData[edition]')
product = self.data.get('additionalData[product]')
plan_versions = SoftwarePlanVersion.objects.filter(
plan__edition=edition
).filter(product_rates__product__product_type=product)
if self.search_string:
plan_versions = plan_versions.filter(
plan__name__contains=self.search_string)
return [(p.id, p.__str__()) for p in plan_versions.order_by('plan__name')]
@property
def new_plan_version_response(self):
current_version = int(self.data.get('additionalData[current_version]'))
plan_versions = filter(lambda x: x[0] != current_version,
self.plan_version_response)
return plan_versions
class Select2InvoiceTriggerHandler(BaseSelect2AsyncHandler):
slug = 'select2_billing'
allowed_actions = [
'domain',
]
@property
def domain_response(self):
domain_names = [domain['key'] for domain in Domain.get_all(include_docs=False)]
if self.search_string:
domain_names = filter(lambda x: x.lower().startswith(self.search_string.lower()), domain_names)
return [(d, d) for d in domain_names]
class BaseSingleOptionFilterAsyncHandler(BaseAsyncHandler):
@property
def query(self):
raise NotImplementedError("must return a queryset")
@property
def search_string(self):
return self.data.get('q', None)
@property
def page(self):
return int(self.data.get('page', 1))
@property
def paginated_data(self):
start = (self.page - 1) * self.limit
end = self.page * self.limit
return self.query.all()[start:end]
@property
def limit(self):
return self.data.get('limit', 10)
@property
def total(self):
return self.query.count()
@staticmethod
def _fmt_select2_data(data_id, data_text):
return {
'id': data_id,
'text': data_text,
}
def _fmt_success(self, data):
return json.dumps({
'success': True,
'limit': self.limit,
'page': self.page,
'total': self.total,
'items': data,
})
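# Hedged illustration (values are made up): _fmt_success() above wraps one page
# of select2 items together with paging metadata, producing JSON shaped like
#
#     {"success": true, "limit": 10, "page": 1, "total": 42,
#      "items": [{"id": "demo-domain", "text": "demo-domain"}]}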
class SubscriberFilterAsyncHandler(BaseSingleOptionFilterAsyncHandler):
slug = 'subscriber_filter'
allowed_actions = [
'subscriber',
]
@property
def query(self):
query = Subscriber.objects.exclude(domain=None).order_by('domain')
if self.search_string:
query = query.filter(domain__istartswith=self.search_string)
return query
@property
def subscriber_response(self):
return [self._fmt_select2_data(s.domain, s.domain)
for s in self.paginated_data]
class SubscriptionFilterAsyncHandler(BaseSingleOptionFilterAsyncHandler):
slug = 'subscription_filter'
allowed_actions = [
'contract_id',
]
@property
def query(self):
query = Subscription.objects
if self.action == 'contract_id':
query = query.exclude(
salesforce_contract_id=None
).exclude(salesforce_contract_id='').order_by('salesforce_contract_id')
if self.search_string:
query = query.filter(
salesforce_contract_id__istartswith=self.search_string
)
return query
@property
def contract_id_response(self):
return [self._fmt_select2_data(
s.salesforce_contract_id, s.salesforce_contract_id)
for s in self.paginated_data]
class AccountFilterAsyncHandler(BaseSingleOptionFilterAsyncHandler):
slug = 'account_filter'
allowed_actions = [
'account_name',
'account_id',
'dimagi_contact',
]
@property
def query(self):
query = BillingAccount.objects.order_by('name')
if self.action == 'account_name' and self.search_string:
query = query.filter(name__icontains=self.search_string)
if self.action == 'account_id':
query = query.exclude(
salesforce_account_id=None
).exclude(
salesforce_account_id=''
).order_by('salesforce_account_id')
if self.search_string:
query = query.filter(
salesforce_account_id__istartswith=self.search_string)
if self.action == 'dimagi_contact':
query = query.exclude(
dimagi_contact=None
).exclude(
dimagi_contact=''
).order_by('dimagi_contact')
if self.search_string:
query = query.filter(
dimagi_contact__icontains=self.search_string)
return query
@property
def account_name_response(self):
return [self._fmt_select2_data(a.name, a.name)
for a in self.paginated_data]
@property
def account_id_response(self):
return [self._fmt_select2_data(a.salesforce_account_id,
a.salesforce_account_id)
for a in self.paginated_data]
@property
def dimagi_contact_response(self):
return [self._fmt_select2_data(a.dimagi_contact, a.dimagi_contact)
for a in self.paginated_data]
class BillingContactInfoAsyncHandler(BaseSingleOptionFilterAsyncHandler):
slug = 'billing_contact_filter'
allowed_actions = [
'contact_name'
]
@property
def query(self):
query = BillingContactInfo.objects.exclude(
first_name='', last_name='').order_by('first_name', 'last_name')
if self.search_string:
query = query.filter(
Q(first_name__istartswith=self.search_string) |
Q(last_name__istartswith=self.search_string)
)
return query
@property
def contact_name_response(self):
return [self._fmt_select2_data(c.full_name, c.full_name)
for c in self.paginated_data]
class SoftwarePlanAsyncHandler(BaseSingleOptionFilterAsyncHandler):
slug = 'software_plan_filter'
allowed_actions = [
'name',
]
@property
def query(self):
query = SoftwarePlan.objects.order_by('name')
if self.search_string:
query = query.filter(name__icontains=self.search_string)
return query
@property
def name_response(self):
return [self._fmt_select2_data(p.name, p.name)
for p in self.paginated_data]
class DomainFilterAsyncHandler(BaseSingleOptionFilterAsyncHandler):
slug = 'domain_filter'
allowed_actions = [
'domain_name',
]
@property
def query(self):
db = Domain.get_db()
startkey = self.search_string
endkey = "{}Z".format(self.search_string) if startkey else ''
query = db.view(
'domain/domains',
reduce=False,
startkey=startkey,
endkey=endkey,
limit=20,
)
return query
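    # Worked example (illustrative): a search_string of "abc" queries the view
    # with startkey="abc" and endkey="abcZ", so only keys that sort between the
    # two -- in practice, domain names beginning with "abc" -- are returned,
    # capped at 20 rows.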
@property
def domain_name_response(self):
return [self._fmt_select2_data(p['key'], p['key']) for p in self.paginated_data]
|
|
"""Allows the creation of a sensor that filters state property."""
import logging
import statistics
from collections import deque, Counter
from numbers import Number
from functools import partial
from copy import copy
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_ENTITY_ID, ATTR_UNIT_OF_MEASUREMENT, ATTR_ENTITY_ID,
ATTR_ICON, STATE_UNKNOWN, STATE_UNAVAILABLE)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.decorator import Registry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change
from homeassistant.components import history
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
FILTER_NAME_RANGE = 'range'
FILTER_NAME_LOWPASS = 'lowpass'
FILTER_NAME_OUTLIER = 'outlier'
FILTER_NAME_THROTTLE = 'throttle'
FILTER_NAME_TIME_THROTTLE = 'time_throttle'
FILTER_NAME_TIME_SMA = 'time_simple_moving_average'
FILTERS = Registry()
CONF_FILTERS = 'filters'
CONF_FILTER_NAME = 'filter'
CONF_FILTER_WINDOW_SIZE = 'window_size'
CONF_FILTER_PRECISION = 'precision'
CONF_FILTER_RADIUS = 'radius'
CONF_FILTER_TIME_CONSTANT = 'time_constant'
CONF_FILTER_LOWER_BOUND = 'lower_bound'
CONF_FILTER_UPPER_BOUND = 'upper_bound'
CONF_TIME_SMA_TYPE = 'type'
TIME_SMA_LAST = 'last'
WINDOW_SIZE_UNIT_NUMBER_EVENTS = 1
WINDOW_SIZE_UNIT_TIME = 2
DEFAULT_WINDOW_SIZE = 1
DEFAULT_PRECISION = 2
DEFAULT_FILTER_RADIUS = 2.0
DEFAULT_FILTER_TIME_CONSTANT = 10
NAME_TEMPLATE = "{} filter"
ICON = 'mdi:chart-line-variant'
FILTER_SCHEMA = vol.Schema({
vol.Optional(CONF_FILTER_PRECISION,
default=DEFAULT_PRECISION): vol.Coerce(int),
})
FILTER_OUTLIER_SCHEMA = FILTER_SCHEMA.extend({
vol.Required(CONF_FILTER_NAME): FILTER_NAME_OUTLIER,
vol.Optional(CONF_FILTER_WINDOW_SIZE,
default=DEFAULT_WINDOW_SIZE): vol.Coerce(int),
vol.Optional(CONF_FILTER_RADIUS,
default=DEFAULT_FILTER_RADIUS): vol.Coerce(float),
})
FILTER_LOWPASS_SCHEMA = FILTER_SCHEMA.extend({
vol.Required(CONF_FILTER_NAME): FILTER_NAME_LOWPASS,
vol.Optional(CONF_FILTER_WINDOW_SIZE,
default=DEFAULT_WINDOW_SIZE): vol.Coerce(int),
vol.Optional(CONF_FILTER_TIME_CONSTANT,
default=DEFAULT_FILTER_TIME_CONSTANT): vol.Coerce(int),
})
FILTER_RANGE_SCHEMA = vol.Schema({
vol.Required(CONF_FILTER_NAME): FILTER_NAME_RANGE,
vol.Optional(CONF_FILTER_LOWER_BOUND): vol.Coerce(float),
vol.Optional(CONF_FILTER_UPPER_BOUND): vol.Coerce(float),
})
FILTER_TIME_SMA_SCHEMA = FILTER_SCHEMA.extend({
vol.Required(CONF_FILTER_NAME): FILTER_NAME_TIME_SMA,
vol.Optional(CONF_TIME_SMA_TYPE,
default=TIME_SMA_LAST): vol.In(
[TIME_SMA_LAST]),
vol.Required(CONF_FILTER_WINDOW_SIZE): vol.All(cv.time_period,
cv.positive_timedelta)
})
FILTER_THROTTLE_SCHEMA = FILTER_SCHEMA.extend({
vol.Required(CONF_FILTER_NAME): FILTER_NAME_THROTTLE,
vol.Optional(CONF_FILTER_WINDOW_SIZE,
default=DEFAULT_WINDOW_SIZE): vol.Coerce(int),
})
FILTER_TIME_THROTTLE_SCHEMA = FILTER_SCHEMA.extend({
vol.Required(CONF_FILTER_NAME): FILTER_NAME_TIME_THROTTLE,
vol.Required(CONF_FILTER_WINDOW_SIZE): vol.All(cv.time_period,
cv.positive_timedelta)
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_FILTERS): vol.All(cv.ensure_list,
[vol.Any(FILTER_OUTLIER_SCHEMA,
FILTER_LOWPASS_SCHEMA,
FILTER_TIME_SMA_SCHEMA,
FILTER_THROTTLE_SCHEMA,
FILTER_TIME_THROTTLE_SCHEMA,
FILTER_RANGE_SCHEMA)])
})
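# Hedged illustration (entity id and values are made up): a configuration that
# would validate against PLATFORM_SCHEMA above (the 'platform: filter' key is
# handled by Home Assistant and omitted here), as the dict passed to
# async_setup_platform():
#
#     {
#         'entity_id': 'sensor.raw_temperature',
#         'name': 'filtered temperature',
#         'filters': [
#             {'filter': 'outlier', 'window_size': 4, 'radius': 2.0},
#             {'filter': 'lowpass', 'time_constant': 10},
#         ],
#     }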
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the template sensors."""
name = config.get(CONF_NAME)
entity_id = config.get(CONF_ENTITY_ID)
filters = [FILTERS[_filter.pop(CONF_FILTER_NAME)](
entity=entity_id, **_filter)
for _filter in config[CONF_FILTERS]]
async_add_entities([SensorFilter(name, entity_id, filters)])
class SensorFilter(Entity):
"""Representation of a Filter Sensor."""
def __init__(self, name, entity_id, filters):
"""Initialize the sensor."""
self._name = name
self._entity = entity_id
self._unit_of_measurement = None
self._state = None
self._filters = filters
self._icon = None
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def filter_sensor_state_listener(entity, old_state, new_state,
update_ha=True):
"""Handle device state changes."""
if new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
return
temp_state = new_state
try:
for filt in self._filters:
filtered_state = filt.filter_state(copy(temp_state))
_LOGGER.debug("%s(%s=%s) -> %s", filt.name,
self._entity,
temp_state.state,
"skip" if filt.skip_processing else
filtered_state.state)
if filt.skip_processing:
return
temp_state = filtered_state
except ValueError:
_LOGGER.error("Could not convert state: %s to number",
self._state)
return
self._state = temp_state.state
if self._icon is None:
self._icon = new_state.attributes.get(
ATTR_ICON, ICON)
if self._unit_of_measurement is None:
self._unit_of_measurement = new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT)
if update_ha:
self.async_schedule_update_ha_state()
if 'recorder' in self.hass.config.components:
history_list = []
largest_window_items = 0
largest_window_time = timedelta(0)
# Determine the largest window_size by type
for filt in self._filters:
if filt.window_unit == WINDOW_SIZE_UNIT_NUMBER_EVENTS\
and largest_window_items < filt.window_size:
largest_window_items = filt.window_size
elif filt.window_unit == WINDOW_SIZE_UNIT_TIME\
and largest_window_time < filt.window_size:
largest_window_time = filt.window_size
# Retrieve the largest window_size of each type
if largest_window_items > 0:
filter_history = await self.hass.async_add_job(partial(
history.get_last_state_changes, self.hass,
largest_window_items, entity_id=self._entity))
history_list.extend(
[state for state in filter_history[self._entity]])
if largest_window_time > timedelta(seconds=0):
start = dt_util.utcnow() - largest_window_time
filter_history = await self.hass.async_add_job(partial(
history.state_changes_during_period, self.hass,
start, entity_id=self._entity))
history_list.extend(
[state for state in filter_history[self._entity]
if state not in history_list])
# Sort the window states
history_list = sorted(history_list, key=lambda s: s.last_updated)
_LOGGER.debug("Loading from history: %s",
[(s.state, s.last_updated) for s in history_list])
# Replay history through the filter chain
prev_state = None
for state in history_list:
filter_sensor_state_listener(
self._entity, prev_state, state, False)
prev_state = state
async_track_state_change(
self.hass, self._entity, filter_sensor_state_listener)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
return self._unit_of_measurement
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
state_attr = {
ATTR_ENTITY_ID: self._entity
}
return state_attr
class FilterState:
"""State abstraction for filter usage."""
def __init__(self, state):
"""Initialize with HA State object."""
self.timestamp = state.last_updated
try:
self.state = float(state.state)
except ValueError:
self.state = state.state
def set_precision(self, precision):
"""Set precision of Number based states."""
if isinstance(self.state, Number):
self.state = round(float(self.state), precision)
def __str__(self):
"""Return state as the string representation of FilterState."""
return str(self.state)
def __repr__(self):
"""Return timestamp and state as the representation of FilterState."""
return "{} : {}".format(self.timestamp, self.state)
class Filter:
"""Filter skeleton.
Args:
window_size (int): size of the sliding window that holds previous
values
precision (int): round filtered value to precision value
entity (string): used for debugging only
"""
def __init__(self, name, window_size=1, precision=None, entity=None):
"""Initialize common attributes."""
if isinstance(window_size, int):
self.states = deque(maxlen=window_size)
self.window_unit = WINDOW_SIZE_UNIT_NUMBER_EVENTS
else:
self.states = deque(maxlen=0)
self.window_unit = WINDOW_SIZE_UNIT_TIME
self.precision = precision
self._name = name
self._entity = entity
self._skip_processing = False
self._window_size = window_size
self._store_raw = False
@property
def window_size(self):
"""Return window size."""
return self._window_size
@property
def name(self):
"""Return filter name."""
return self._name
@property
def skip_processing(self):
"""Return wether the current filter_state should be skipped."""
return self._skip_processing
def _filter_state(self, new_state):
"""Implement filter."""
raise NotImplementedError()
def filter_state(self, new_state):
"""Implement a common interface for filters."""
filtered = self._filter_state(FilterState(new_state))
filtered.set_precision(self.precision)
if self._store_raw:
self.states.append(copy(FilterState(new_state)))
else:
self.states.append(copy(filtered))
new_state.state = filtered.state
return new_state
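# Hedged sketch (the 'passthrough' name is hypothetical, not part of Home
# Assistant): new filters subclass Filter, implement _filter_state(), and
# register themselves in the FILTERS registry so the platform can instantiate
# them by name, as the registered filters below do.
#
#     @FILTERS.register('passthrough')
#     class PassthroughFilter(Filter):
#         """Return every state unchanged."""
#         def __init__(self, window_size=1, precision=None, entity=None):
#             super().__init__('passthrough', window_size, precision, entity)
#         def _filter_state(self, new_state):
#             return new_state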
@FILTERS.register(FILTER_NAME_RANGE)
class RangeFilter(Filter):
"""Range filter.
Determines if new state is in the range of upper_bound and lower_bound.
If not inside, lower or upper bound is returned instead.
Args:
upper_bound (float): band upper bound
lower_bound (float): band lower bound
"""
def __init__(self, entity,
lower_bound=None, upper_bound=None):
"""Initialize Filter."""
super().__init__(FILTER_NAME_RANGE, entity=entity)
self._lower_bound = lower_bound
self._upper_bound = upper_bound
self._stats_internal = Counter()
def _filter_state(self, new_state):
"""Implement the range filter."""
if (self._upper_bound is not None
and new_state.state > self._upper_bound):
self._stats_internal['erasures_up'] += 1
_LOGGER.debug("Upper outlier nr. %s in %s: %s",
self._stats_internal['erasures_up'],
self._entity, new_state)
new_state.state = self._upper_bound
elif (self._lower_bound is not None
and new_state.state < self._lower_bound):
self._stats_internal['erasures_low'] += 1
_LOGGER.debug("Lower outlier nr. %s in %s: %s",
self._stats_internal['erasures_low'],
self._entity, new_state)
new_state.state = self._lower_bound
return new_state
@FILTERS.register(FILTER_NAME_OUTLIER)
class OutlierFilter(Filter):
"""BASIC outlier filter.
Determines if new state is in a band around the median.
Args:
radius (float): band radius
"""
def __init__(self, window_size, precision, entity, radius):
"""Initialize Filter."""
super().__init__(FILTER_NAME_OUTLIER, window_size, precision, entity)
self._radius = radius
self._stats_internal = Counter()
self._store_raw = True
def _filter_state(self, new_state):
"""Implement the outlier filter."""
median = statistics.median([s.state for s in self.states]) \
if self.states else 0
if (len(self.states) == self.states.maxlen and
abs(new_state.state - median) >
self._radius):
self._stats_internal['erasures'] += 1
_LOGGER.debug("Outlier nr. %s in %s: %s",
self._stats_internal['erasures'],
self._entity, new_state)
new_state.state = median
return new_state
@FILTERS.register(FILTER_NAME_LOWPASS)
class LowPassFilter(Filter):
"""BASIC Low Pass Filter.
Args:
time_constant (int): time constant.
"""
def __init__(self, window_size, precision, entity, time_constant):
"""Initialize Filter."""
super().__init__(FILTER_NAME_LOWPASS, window_size, precision, entity)
self._time_constant = time_constant
def _filter_state(self, new_state):
"""Implement the low pass filter."""
if not self.states:
return new_state
new_weight = 1.0 / self._time_constant
prev_weight = 1.0 - new_weight
new_state.state = prev_weight * self.states[-1].state +\
new_weight * new_state.state
return new_state
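# Worked example (numbers are illustrative): with time_constant=10 the filter
# computes new = 0.9 * previous + 0.1 * incoming, so a previous output of 20.0
# and a new reading of 30.0 yield 0.9 * 20.0 + 0.1 * 30.0 = 21.0.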
@FILTERS.register(FILTER_NAME_TIME_SMA)
class TimeSMAFilter(Filter):
"""Simple Moving Average (SMA) Filter.
The window_size is determined by time, and SMA is time weighted.
Args:
type (enum): type of algorithm used to connect discrete values
"""
def __init__(self, window_size, precision, entity,
type): # pylint: disable=redefined-builtin
"""Initialize Filter."""
super().__init__(FILTER_NAME_TIME_SMA, window_size, precision, entity)
self._time_window = window_size
self.last_leak = None
self.queue = deque()
def _leak(self, left_boundary):
"""Remove timeouted elements."""
while self.queue:
if self.queue[0].timestamp + self._time_window <= left_boundary:
self.last_leak = self.queue.popleft()
else:
return
def _filter_state(self, new_state):
"""Implement the Simple Moving Average filter."""
self._leak(new_state.timestamp)
self.queue.append(copy(new_state))
moving_sum = 0
start = new_state.timestamp - self._time_window
prev_state = self.last_leak or self.queue[0]
for state in self.queue:
moving_sum += (state.timestamp-start).total_seconds()\
* prev_state.state
start = state.timestamp
prev_state = state
new_state.state = moving_sum / self._time_window.total_seconds()
return new_state
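# Worked example (values are illustrative): with a 10-minute window, a state
# that was 20.0 for the first 6 minutes and 30.0 for the last 4 minutes
# averages to roughly (6 * 20.0 + 4 * 30.0) / 10 = 24.0, since each value is
# weighted by how long it was in effect within the window.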
@FILTERS.register(FILTER_NAME_THROTTLE)
class ThrottleFilter(Filter):
"""Throttle Filter.
One sample per window.
"""
def __init__(self, window_size, precision, entity):
"""Initialize Filter."""
super().__init__(FILTER_NAME_THROTTLE, window_size, precision, entity)
def _filter_state(self, new_state):
"""Implement the throttle filter."""
if not self.states or len(self.states) == self.states.maxlen:
self.states.clear()
self._skip_processing = False
else:
self._skip_processing = True
return new_state
@FILTERS.register(FILTER_NAME_TIME_THROTTLE)
class TimeThrottleFilter(Filter):
"""Time Throttle Filter.
One sample per time period.
"""
def __init__(self, window_size, precision, entity):
"""Initialize Filter."""
super().__init__(FILTER_NAME_TIME_THROTTLE,
window_size, precision, entity)
self._time_window = window_size
self._last_emitted_at = None
def _filter_state(self, new_state):
"""Implement the filter."""
window_start = new_state.timestamp - self._time_window
if not self._last_emitted_at or self._last_emitted_at <= window_start:
self._last_emitted_at = new_state.timestamp
self._skip_processing = False
else:
self._skip_processing = True
return new_state
|
|
# -*- coding: utf-8 -*-
# This software is distributed under the two-clause BSD license.
# Copyright (c) The django-ldapdb project
import collections
import re
import ldap
from django.db.models import aggregates
from django.db.models.sql import compiler
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.db.models.sql.where import AND, OR, WhereNode
from ldapdb import escape_ldap_filter
from ldapdb.models.fields import ListField
_ORDER_BY_LIMIT_OFFSET_RE = re.compile(
r'(?:\bORDER BY\b\s+(.+?))?\s*(?:\bLIMIT\b\s+(-?\d+))?\s*(?:\bOFFSET\b\s+(\d+))?$')
class LdapDBError(Exception):
"""Base class for LDAPDB errors."""
LdapLookup = collections.namedtuple('LdapLookup', ['base', 'scope', 'filterstr'])
def query_as_ldap(query, compiler, connection):
"""Convert a django.db.models.sql.query.Query to a LdapLookup."""
if query.is_empty():
return
if query.model._meta.model_name == 'migration' and not hasattr(query.model, 'object_classes'):
# FIXME(rbarrois): Support migrations
return
# FIXME(rbarrois): this could be an extra Where clause
filterstr = ''.join(['(objectClass=%s)' % cls for cls in
query.model.object_classes])
# FIXME(rbarrois): Remove this code as part of #101
if (len(query.where.children) == 1
and not isinstance(query.where.children[0], WhereNode)
and query.where.children[0].lhs.target.column == 'dn'):
lookup = query.where.children[0]
if lookup.lookup_name != 'exact':
raise LdapDBError("Unsupported dn lookup: %s" % lookup.lookup_name)
return LdapLookup(
base=lookup.rhs,
scope=ldap.SCOPE_BASE,
filterstr='(&%s)' % filterstr,
)
sql, params = compiler.compile(query.where)
if sql:
filterstr += '(%s)' % (sql % tuple(escape_ldap_filter(param) for param in params))
return LdapLookup(
base=query.model.base_dn,
scope=query.model.search_scope,
filterstr='(&%s)' % filterstr,
)
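# Hedged illustration (model and values are hypothetical): for a model with
# object_classes = ['posixAccount'] and a queryset filtered on uid='jdoe',
# query_as_ldap() would produce roughly
#
#     LdapLookup(base=<model base_dn>, scope=<model search_scope>,
#                filterstr='(&(objectClass=posixAccount)(uid=jdoe))')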
def where_node_as_ldap(where, compiler, connection):
"""Parse a django.db.models.sql.where.WhereNode.
Returns:
(clause, [params]): the filter clause, with a list of unescaped parameters.
"""
bits, params = [], []
for item in where.children:
if isinstance(item, WhereNode):
clause, clause_params = compiler.compile(item)
else:
clause, clause_params = item.as_sql(compiler, connection)
bits.append(clause)
params.extend(clause_params)
if not bits:
return '', []
# FIXME(rbarrois): shouldn't we flatten recursive AND / OR?
if len(bits) == 1:
clause = bits[0]
elif where.connector == AND:
clause = '&' + ''.join('(%s)' % bit for bit in bits)
elif where.connector == OR:
clause = '|' + ''.join('(%s)' % bit for bit in bits)
else:
raise LdapDBError("Unhandled WHERE connector: %s" % where.connector)
if where.negated:
clause = ('!(%s)' % clause)
return clause, params
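# Hedged illustration: Django's boolean connectors map onto LDAP filter prefix
# operators -- AND becomes '&(...)(...)', OR becomes '|(...)(...)', and a
# negated node is wrapped in '!(...)'. For example, Q(uid='jdoe') | Q(uid='root')
# would compile to something like '|(uid=%s)(uid=%s)', with the two values
# returned as unescaped parameters for query_as_ldap() to escape later.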
class SQLCompiler(compiler.SQLCompiler):
"""LDAP-based SQL compiler."""
def compile(self, node, *args, **kwargs):
"""Parse a WhereNode to a LDAP filter string."""
if isinstance(node, WhereNode):
return where_node_as_ldap(node, self, self.connection)
return super().compile(node, *args, **kwargs)
def execute_sql(self, result_type=compiler.SINGLE, chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE):
if result_type != compiler.SINGLE:
raise Exception("LDAP does not support MULTI queries")
# Setup self.select, self.klass_info, self.annotation_col_map
# All expected from ModelIterable.__iter__
self.pre_sql_setup()
lookup = query_as_ldap(self.query, compiler=self, connection=self.connection)
if lookup is None:
return
try:
vals = self.connection.search_s(
base=lookup.base,
scope=lookup.scope,
filterstr=lookup.filterstr,
attrlist=['dn'],
)
# Flatten iterator
vals = list(vals)
except ldap.NO_SUCH_OBJECT:
vals = []
if not vals:
return None
output = []
self.setup_query()
for e in self.select:
if isinstance(e[0], aggregates.Count):
# Check if the SQL query has a limit value and append
# that value, else append the length of the return values
# from LDAP.
sql = self.as_sql()[0]
if hasattr(self.query, 'subquery') and self.query.subquery:
sql = self.query.subquery
m = _ORDER_BY_LIMIT_OFFSET_RE.search(sql)
limit = m.group(2)
offset = m.group(3)
if limit and int(limit) >= 0:
output.append(int(limit))
elif offset:
output.append(len(vals) - int(offset))
else:
output.append(len(vals))
else:
output.append(e[0])
return output
def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
lookup = query_as_ldap(self.query, compiler=self, connection=self.connection)
if lookup is None:
return
if len(self.query.select):
fields = [x.field for x in self.query.select]
else:
fields = self.query.model._meta.fields
attrlist = [x.db_column for x in fields if x.db_column]
try:
vals = self.connection.search_s(
base=lookup.base,
scope=lookup.scope,
filterstr=lookup.filterstr,
attrlist=attrlist,
)
except ldap.NO_SUCH_OBJECT:
return
# perform sorting
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = self.query.order_by or self.query.model._meta.ordering
for fieldname in reversed(ordering):
if fieldname.startswith('-'):
sort_field = fieldname[1:]
reverse = True
else:
sort_field = fieldname
reverse = False
if sort_field == 'pk':
sort_field = self.query.model._meta.pk.name
field = self.query.model._meta.get_field(sort_field)
if sort_field == 'dn':
vals = sorted(vals, key=lambda pair: pair[0], reverse=reverse)
else:
def get_key(obj):
attr = field.from_ldap(
obj[1].get(field.db_column, []),
connection=self.connection,
)
if hasattr(attr, 'lower'):
attr = attr.lower()
return attr
vals = sorted(vals, key=get_key, reverse=reverse)
# process results
pos = 0
results = []
for dn, attrs in vals:
            # FIXME: This is not optimal; we retrieve more results than we
            # need, but there is probably no other option since we cannot
            # perform the ordering server-side.
if (self.query.low_mark and pos < self.query.low_mark) or \
(self.query.high_mark is not None
and pos >= self.query.high_mark):
pos += 1
continue
row = []
self.setup_query()
for e in self.select:
if isinstance(e[0], aggregates.Count):
value = 0
input_field = e[0].get_source_expressions()[0].field
if input_field.attname == 'dn':
value = 1
elif hasattr(input_field, 'from_ldap'):
result = input_field.from_ldap(
attrs.get(input_field.db_column, []),
connection=self.connection)
if result:
value = 1
if isinstance(input_field, ListField):
value = len(result)
row.append(value)
else:
if e[0].field.attname == 'dn':
row.append(dn)
elif hasattr(e[0].field, 'from_ldap'):
row.append(e[0].field.from_ldap(
attrs.get(e[0].field.db_column, []),
connection=self.connection))
else:
row.append(None)
if self.query.distinct:
if row in results:
continue
else:
results.append(row)
yield row
pos += 1
def has_results(self):
import inspect
iterator = self.results_iter()
if inspect.isgenerator(iterator):
try:
next(iterator)
return True
except StopIteration:
return False
else:
return False
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
def execute_sql(self, result_type=compiler.MULTI):
lookup = query_as_ldap(self.query, compiler=self, connection=self.connection)
if not lookup:
return
try:
vals = self.connection.search_s(
base=lookup.base,
scope=lookup.scope,
filterstr=lookup.filterstr,
attrlist=['dn'],
)
except ldap.NO_SUCH_OBJECT:
return
# FIXME : there is probably a more efficient way to do this
for dn, attrs in vals:
self.connection.delete_s(dn)
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
def execute_sql(self, result_type=compiler.SINGLE):
# Return only number values through the aggregate compiler
output = super().execute_sql(result_type)
return filter(lambda a: isinstance(a, int), output)
|
|
import json
from django.contrib.postgres import lookups
from django.contrib.postgres.forms import SimpleArrayField
from django.contrib.postgres.validators import ArrayMaxLengthValidator
from django.core import checks, exceptions
from django.db.models import Field, IntegerField, Transform
from django.db.models.lookups import Exact, In
from django.utils.translation import ugettext_lazy as _
from ..utils import prefix_validation_error
from .utils import AttributeSetter
__all__ = ['ArrayField']
class ArrayField(Field):
empty_strings_allowed = False
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
'nested_array_mismatch': _('Nested arrays must have the same length.'),
}
def __init__(self, base_field, size=None, **kwargs):
self.base_field = base_field
self.size = size
if self.size:
self.default_validators = self.default_validators[:]
self.default_validators.append(ArrayMaxLengthValidator(self.size))
# For performance, only add a from_db_value() method if the base field
# implements it.
if hasattr(self.base_field, 'from_db_value'):
self.from_db_value = self._from_db_value
super(ArrayField, self).__init__(**kwargs)
@property
def model(self):
try:
return self.__dict__['model']
except KeyError:
raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__)
@model.setter
def model(self, model):
self.__dict__['model'] = model
self.base_field.model = model
def check(self, **kwargs):
errors = super(ArrayField, self).check(**kwargs)
if self.base_field.remote_field:
errors.append(
checks.Error(
'Base field for array cannot be a related field.',
obj=self,
id='postgres.E002'
)
)
else:
# Remove the field name checks as they are not needed here.
base_errors = self.base_field.check()
if base_errors:
messages = '\n '.join('%s (%s)' % (error.msg, error.id) for error in base_errors)
errors.append(
checks.Error(
'Base field for array has errors:\n %s' % messages,
obj=self,
id='postgres.E001'
)
)
return errors
def set_attributes_from_name(self, name):
super(ArrayField, self).set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
@property
def description(self):
return 'Array of %s' % self.base_field.description
def db_type(self, connection):
size = self.size or ''
return '%s[%s]' % (self.base_field.db_type(connection), size)
def get_db_prep_value(self, value, connection, prepared=False):
if isinstance(value, list) or isinstance(value, tuple):
return [self.base_field.get_db_prep_value(i, connection, prepared=False) for i in value]
return value
def deconstruct(self):
name, path, args, kwargs = super(ArrayField, self).deconstruct()
if path == 'django.contrib.postgres.fields.array.ArrayField':
path = 'django.contrib.postgres.fields.ArrayField'
kwargs.update({
'base_field': self.base_field.clone(),
'size': self.size,
})
return name, path, args, kwargs
def to_python(self, value):
if isinstance(value, str):
# Assume we're deserializing
vals = json.loads(value)
value = [self.base_field.to_python(val) for val in vals]
return value
def _from_db_value(self, value, expression, connection, context):
if value is None:
return value
return [
self.base_field.from_db_value(item, expression, connection, context)
for item in value
]
def value_to_string(self, obj):
values = []
vals = self.value_from_object(obj)
base_field = self.base_field
for val in vals:
if val is None:
values.append(None)
else:
obj = AttributeSetter(base_field.attname, val)
values.append(base_field.value_to_string(obj))
return json.dumps(values)
def get_transform(self, name):
transform = super(ArrayField, self).get_transform(name)
if transform:
return transform
if '_' not in name:
try:
index = int(name)
except ValueError:
pass
else:
index += 1 # postgres uses 1-indexing
return IndexTransformFactory(index, self.base_field)
try:
start, end = name.split('_')
start = int(start) + 1
end = int(end) # don't add one here because postgres slices are weird
except ValueError:
pass
else:
return SliceTransformFactory(start, end)
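    # Hedged illustration (model/field names are made up): a lookup such as
    # Model.objects.filter(tags__0='python') resolves here to an IndexTransform
    # rendering 'tags[1]' (PostgreSQL arrays are 1-indexed), while tags__0_2
    # resolves to a SliceTransform rendering 'tags[1:2]'.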
def validate(self, value, model_instance):
super(ArrayField, self).validate(value, model_instance)
for index, part in enumerate(value):
try:
self.base_field.validate(part, model_instance)
except exceptions.ValidationError as error:
raise prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
)
if isinstance(self.base_field, ArrayField):
if len({len(i) for i in value}) > 1:
raise exceptions.ValidationError(
self.error_messages['nested_array_mismatch'],
code='nested_array_mismatch',
)
def run_validators(self, value):
super(ArrayField, self).run_validators(value)
for index, part in enumerate(value):
try:
self.base_field.run_validators(part)
except exceptions.ValidationError as error:
raise prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
)
def formfield(self, **kwargs):
defaults = {
'form_class': SimpleArrayField,
'base_field': self.base_field.formfield(),
'max_length': self.size,
}
defaults.update(kwargs)
return super(ArrayField, self).formfield(**defaults)
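# Hedged usage sketch (model name is hypothetical): ArrayField wraps another
# field instance, which defines the element type, plus an optional size; array
# fields can also be nested.
#
#     from django.contrib.postgres.fields import ArrayField
#     from django.db import models
#
#     class ChessBoard(models.Model):
#         board = ArrayField(
#             ArrayField(models.CharField(max_length=10, blank=True), size=8),
#             size=8,
#         )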
@ArrayField.register_lookup
class ArrayContains(lookups.DataContains):
def as_sql(self, qn, connection):
sql, params = super(ArrayContains, self).as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayContainedBy(lookups.ContainedBy):
def as_sql(self, qn, connection):
sql, params = super(ArrayContainedBy, self).as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayExact(Exact):
def as_sql(self, qn, connection):
sql, params = super(ArrayExact, self).as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayOverlap(lookups.Overlap):
def as_sql(self, qn, connection):
sql, params = super(ArrayOverlap, self).as_sql(qn, connection)
sql = '%s::%s' % (sql, self.lhs.output_field.db_type(connection))
return sql, params
@ArrayField.register_lookup
class ArrayLenTransform(Transform):
lookup_name = 'len'
output_field = IntegerField()
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
# Distinguish NULL and empty arrays
return (
'CASE WHEN %(lhs)s IS NULL THEN NULL ELSE '
'coalesce(array_length(%(lhs)s, 1), 0) END'
) % {'lhs': lhs}, params
@ArrayField.register_lookup
class ArrayInLookup(In):
def get_prep_lookup(self):
values = super(ArrayInLookup, self).get_prep_lookup()
# In.process_rhs() expects values to be hashable, so convert lists
# to tuples.
prepared_values = []
for value in values:
if hasattr(value, 'resolve_expression'):
prepared_values.append(value)
else:
prepared_values.append(tuple(value))
return prepared_values
class IndexTransform(Transform):
def __init__(self, index, base_field, *args, **kwargs):
super(IndexTransform, self).__init__(*args, **kwargs)
self.index = index
self.base_field = base_field
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s]' % (lhs, self.index), params
@property
def output_field(self):
return self.base_field
class IndexTransformFactory:
def __init__(self, index, base_field):
self.index = index
self.base_field = base_field
def __call__(self, *args, **kwargs):
return IndexTransform(self.index, self.base_field, *args, **kwargs)
class SliceTransform(Transform):
def __init__(self, start, end, *args, **kwargs):
super(SliceTransform, self).__init__(*args, **kwargs)
self.start = start
self.end = end
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s:%s]' % (lhs, self.start, self.end), params
class SliceTransformFactory:
def __init__(self, start, end):
self.start = start
self.end = end
def __call__(self, *args, **kwargs):
return SliceTransform(self.start, self.end, *args, **kwargs)
|
|
import abc
from functools import partial
import numpy
from numpy.testing import (assert_array_equal, assert_array_almost_equal)
from .utils import (
create_data_container, compare_data_containers, compare_lattice_nodes)
from ..cuds.lattice import make_triclinic_lattice
from ..cuds.lattice_items import LatticeNode
from ..core import CUBA
from ..core.data_container import DataContainer
from ..cuds.primitive_cell import (BravaisLattice, PrimitiveCell)
class CheckLatticeContainer(object):
__metaclass__ = abc.ABCMeta
def setUp(self):
self.addTypeEqualityFunc(
DataContainer, partial(compare_data_containers, testcase=self))
self.addTypeEqualityFunc(
LatticeNode, partial(compare_lattice_nodes, testcase=self))
self.primitive_cell = PrimitiveCell.for_cubic_lattice(0.2)
self.size = (5, 10, 15)
self.origin = (-2.0, 0.0, 1.0)
self.container = self.container_factory(
'my_name', self.primitive_cell, self.size, self.origin)
@abc.abstractmethod
def container_factory(self, name, primitive_cell, size, origin):
""" Create and return a lattice.
"""
@abc.abstractmethod
def supported_cuba(self):
""" Return a list of CUBA keys to use for restricted containers.
"""
def test_lattice_properties(self):
container = self.container
# check values
self.assertEqual(container.primitive_cell.bravais_lattice,
BravaisLattice.CUBIC)
self.assertEqual(container.name, 'my_name')
assert_array_equal(container.size, self.size)
assert_array_equal(container.origin, self.origin)
# check read-only
with self.assertRaises(AttributeError):
container.primitive_cell.bravais_lattice = BravaisLattice.CUBIC
with self.assertRaises(AttributeError):
container.size = self.size
with self.assertRaises(AttributeError):
container.origin = self.origin
with self.assertRaises(AttributeError):
container.primitive_cell = self.primitive_cell
def test_container_name(self):
# given/when
container = self.container
# then
self.assertEqual(container.name, 'my_name')
def test_container_name_update(self):
# given
container = self.container
# when
container.name = 'new'
# then
self.assertEqual(container.name, 'new')
def test_container_data(self):
# when
container = self.container
# then
self.assertEqual(container.data, DataContainer())
def test_container_data_update(self):
# given
container = self.container
data = create_data_container(restrict=self.supported_cuba())
# when
container.data = data
# then
self.assertEqual(container.data, data)
self.assertIsNot(container.data, data)
def test_container_data_update_with_unsupported_cuba(self):
# given
container = self.container
data = create_data_container()
expected_data = create_data_container(restrict=self.supported_cuba())
# when
container.data = data
# then
self.assertEqual(container.data, expected_data)
self.assertIsNot(container.data, expected_data)
class CheckLatticeNodeOperations(object):
__metaclass__ = abc.ABCMeta
def setUp(self):
self.addTypeEqualityFunc(
DataContainer, partial(compare_data_containers, testcase=self))
self.addTypeEqualityFunc(
LatticeNode, partial(compare_lattice_nodes, testcase=self))
self.primitive_cell = PrimitiveCell.for_cubic_lattice(0.2)
self.size = (5, 10, 15)
self.origin = (-2.0, 0.0, 1.0)
self.container = self.container_factory(
'my_name', self.primitive_cell, self.size, self.origin)
@abc.abstractmethod
def container_factory(self, name, primitive_cell, size, origin):
""" Create and return a lattice.
"""
@abc.abstractmethod
def supported_cuba(self):
""" Return a list of CUBA keys to use for restricted containers.
"""
def test_iter_nodes(self):
container = self.container
# number of nodes
number_of_nodes = sum(1 for node in container.iter(
item_type=CUBA.NODE))
self.assertEqual(number_of_nodes, numpy.prod(self.size))
# data
for node in container.iter(item_type=CUBA.NODE):
self.assertEqual(node.data, DataContainer())
# indexes
x, y, z = numpy.meshgrid(
range(self.size[0]), range(self.size[1]), range(self.size[2]))
expected = set(zip(x.flat, y.flat, z.flat))
indexes = {node.index for node in container.iter(item_type=CUBA.NODE)}
self.assertEqual(indexes, expected)
def test_iter_nodes_subset(self):
container = self.container
x, y, z = numpy.meshgrid(
range(2, self.size[0]),
range(self.size[1]-4),
range(3, self.size[2], 2))
expected = set(zip(x.flat, y.flat, z.flat))
# data
for node in container.iter(expected, item_type=CUBA.NODE):
self.assertEqual(node.data, DataContainer())
# indexes
indexes = {node.index for node in container.iter(expected,
item_type=CUBA.NODE)}
self.assertEqual(indexes, expected)
def test_get_node(self):
container = self.container
index = 2, 3, 4
node = container.get(index)
expected = LatticeNode(index)
self.assertEqual(node, expected)
# check that mutating the node does not change internal info
node.data = create_data_container()
self.assertNotEqual(container.get(index), node)
def test_get_node_with_invalid_index(self):
container = self.container
index = 2, 300, 4
with self.assertRaises(IndexError):
container.get(index)
index = 2, 3, -4
with self.assertRaises(IndexError):
container.get(index)
def test_update_nodes_with_invalid_index(self):
container = self.container
index = 2, 3, 4
node = container.get(index)
node.index = 2, 300, 4
with self.assertRaises(IndexError):
container.update((node,))
node.index = 2, 3, -4
with self.assertRaises(IndexError):
container.update((node,))
def test_update_nodes(self):
container = self.container
indices = ((2, 3, 4), (1, 2, 3))
nodes = [container.get(index) for index in indices]
for node in nodes:
node.data = create_data_container(restrict=self.supported_cuba())
container.update(nodes)
for n in xrange(len(indices)):
index = indices[n]
new_node = container.get(index)
self.assertEqual(new_node, nodes[n])
# Check that `new_node` is not the same instance as `node`
self.assertIsNot(new_node, nodes[n])
def test_update_nodes_with_extra_keywords(self):
container = self.container
indices = ((2, 3, 4), (1, 2, 3))
nodes = [container.get(index) for index in indices]
# Update with full DataContainer.
for node in nodes:
node.data = create_data_container()
container.update(nodes)
for n in xrange(len(indices)):
index = indices[n]
new_node = container.get(index)
# We expect only the supported CUBA to be stored.
expected = LatticeNode(
index=nodes[n].index,
data=create_data_container(restrict=self.supported_cuba()))
self.assertEqual(new_node, expected)
# Check that `new_node` is not the same instance as `node`
self.assertIsNot(new_node, nodes[n])
def test_count_of_nodes(self):
# given
container = self.container
# then
count_original = reduce(lambda x, y: x*y, self.size)
count_container = container.count_of(CUBA.NODE)
self.assertEqual(count_original, count_container)
def test_count_of_nodes_passing_unsupported_type(self):
# given
container = self.container
# then
with self.assertRaises(ValueError):
container.count_of(CUBA.EDGE)
class CheckLatticeNodeCoordinates(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def container_factory(self, name, primitive_cell, size, origin):
""" Create and return a lattice.
"""
@abc.abstractmethod
def supported_cuba(self):
""" Return a list of CUBA keys to use for restricted containers.
"""
def test_get_coordinate(self):
""" ABCLattice.get_coordinate is the same for all lattices, therefore
tested only once.
"""
default = make_triclinic_lattice(
'Lattice3', (0.2, 0.4, 0.9), (0.8, 0.4, 0.5), (5, 10, 15),
(-2.0, 0.0, 1.0))
container = self.container_factory(
default.name, default.primitive_cell, default.size,
default.origin)
p1 = default.primitive_cell.p1
p2 = default.primitive_cell.p2
p3 = default.primitive_cell.p3
x, y, z = numpy.meshgrid(range(
default.size[0]), range(default.size[1]), range(default.size[2]))
indexes = zip(x.flat, y.flat, z.flat)
expected = zip(x.ravel()*p1[0] + y.ravel()*p2[0] + z.ravel()*p3[0] +
default.origin[0], x.ravel()*p1[1] + y.ravel()*p2[1] +
z.ravel()*p3[1] + default.origin[1], x.ravel()*p1[2] +
y.ravel()*p2[2] + z.ravel()*p3[2] + default.origin[2])
for i, index in enumerate(indexes):
assert_array_almost_equal(container.get_coordinate(index),
expected[i])
|
|
import functools
from importlib import import_module
from django.utils.html import conditional_escape
from django.utils.inspect import getargspec
from django.utils.itercompat import is_iterable
from .base import Node, Template, token_kwargs
from .exceptions import TemplateSyntaxError
class InvalidTemplateLibrary(Exception):
pass
class Library:
"""
A class for registering template tags and filters. Compiled filter and
template tag functions are stored in the filters and tags attributes.
The filter, simple_tag, and inclusion_tag methods provide a convenient
way to register callables as tags.
"""
def __init__(self):
self.filters = {}
self.tags = {}
def tag(self, name=None, compile_function=None):
if name is None and compile_function is None:
# @register.tag()
return self.tag_function
elif name is not None and compile_function is None:
if callable(name):
# @register.tag
return self.tag_function(name)
else:
# @register.tag('somename') or @register.tag(name='somename')
def dec(func):
return self.tag(name, func)
return dec
elif name is not None and compile_function is not None:
# register.tag('somename', somefunc)
self.tags[name] = compile_function
return compile_function
else:
raise ValueError(
"Unsupported arguments to Library.tag: (%r, %r)" %
(name, compile_function),
)
def tag_function(self, func):
self.tags[getattr(func, "_decorated_function", func).__name__] = func
return func
def filter(self, name=None, filter_func=None, **flags):
"""
Register a callable as a template filter. Example:
@register.filter
def lower(value):
return value.lower()
"""
if name is None and filter_func is None:
# @register.filter()
def dec(func):
return self.filter_function(func, **flags)
return dec
elif name is not None and filter_func is None:
if callable(name):
# @register.filter
return self.filter_function(name, **flags)
else:
# @register.filter('somename') or @register.filter(name='somename')
def dec(func):
return self.filter(name, func, **flags)
return dec
elif name is not None and filter_func is not None:
# register.filter('somename', somefunc)
self.filters[name] = filter_func
for attr in ('expects_localtime', 'is_safe', 'needs_autoescape'):
if attr in flags:
value = flags[attr]
# set the flag on the filter for FilterExpression.resolve
setattr(filter_func, attr, value)
# set the flag on the innermost decorated function
# for decorators that need it, e.g. stringfilter
if hasattr(filter_func, "_decorated_function"):
setattr(filter_func._decorated_function, attr, value)
filter_func._filter_name = name
return filter_func
else:
raise ValueError(
"Unsupported arguments to Library.filter: (%r, %r)" %
(name, filter_func),
)
def filter_function(self, func, **flags):
name = getattr(func, "_decorated_function", func).__name__
return self.filter(name, func, **flags)
def simple_tag(self, func=None, takes_context=None, name=None):
"""
Register a callable as a compiled template tag. Example:
@register.simple_tag
def hello(*args, **kwargs):
return 'world'
"""
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
function_name = (name or getattr(func, '_decorated_function', func).__name__)
@functools.wraps(func)
def compile_func(parser, token):
bits = token.split_contents()[1:]
target_var = None
if len(bits) >= 2 and bits[-2] == 'as':
target_var = bits[-1]
bits = bits[:-2]
args, kwargs = parse_bits(
parser, bits, params, varargs, varkw, defaults,
takes_context, function_name
)
return SimpleNode(func, takes_context, args, kwargs, target_var)
self.tag(function_name, compile_func)
return func
if func is None:
# @register.simple_tag(...)
return dec
elif callable(func):
# @register.simple_tag
return dec(func)
else:
raise ValueError("Invalid arguments provided to simple_tag")
def inclusion_tag(self, filename, func=None, takes_context=None, name=None):
"""
Register a callable as an inclusion tag:
@register.inclusion_tag('results.html')
def show_results(poll):
choices = poll.choice_set.all()
return {'choices': choices}
"""
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
function_name = (name or getattr(func, '_decorated_function', func).__name__)
@functools.wraps(func)
def compile_func(parser, token):
bits = token.split_contents()[1:]
args, kwargs = parse_bits(
parser, bits, params, varargs, varkw, defaults,
takes_context, function_name,
)
return InclusionNode(
func, takes_context, args, kwargs, filename,
)
self.tag(function_name, compile_func)
return func
return dec
class TagHelperNode(Node):
"""
Base class for tag helper nodes such as SimpleNode and InclusionNode.
Manages the positional and keyword arguments to be passed to the decorated
function.
"""
def __init__(self, func, takes_context, args, kwargs):
self.func = func
self.takes_context = takes_context
self.args = args
self.kwargs = kwargs
def get_resolved_arguments(self, context):
resolved_args = [var.resolve(context) for var in self.args]
if self.takes_context:
resolved_args = [context] + resolved_args
resolved_kwargs = {k: v.resolve(context) for k, v in self.kwargs.items()}
return resolved_args, resolved_kwargs
class SimpleNode(TagHelperNode):
def __init__(self, func, takes_context, args, kwargs, target_var):
super(SimpleNode, self).__init__(func, takes_context, args, kwargs)
self.target_var = target_var
def render(self, context):
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
output = self.func(*resolved_args, **resolved_kwargs)
if self.target_var is not None:
context[self.target_var] = output
return ''
if context.autoescape:
output = conditional_escape(output)
return output
class InclusionNode(TagHelperNode):
def __init__(self, func, takes_context, args, kwargs, filename):
super(InclusionNode, self).__init__(func, takes_context, args, kwargs)
self.filename = filename
def render(self, context):
"""
Render the specified template and context. Cache the template object
in render_context to avoid reparsing and loading when used in a for
loop.
"""
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
_dict = self.func(*resolved_args, **resolved_kwargs)
t = context.render_context.get(self)
if t is None:
if isinstance(self.filename, Template):
t = self.filename
elif isinstance(getattr(self.filename, 'template', None), Template):
t = self.filename.template
elif not isinstance(self.filename, str) and is_iterable(self.filename):
t = context.template.engine.select_template(self.filename)
else:
t = context.template.engine.get_template(self.filename)
context.render_context[self] = t
new_context = context.new(_dict)
# Copy across the CSRF token, if present, because inclusion tags are
# often used for forms, and we need instructions for using CSRF
# protection to be as simple as possible.
csrf_token = context.get('csrf_token')
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
return t.render(new_context)
def parse_bits(parser, bits, params, varargs, varkw, defaults,
takes_context, name):
"""
Parse bits for template tag helpers simple_tag and inclusion_tag, in
particular by detecting syntax errors and by extracting positional and
keyword arguments.
"""
if takes_context:
if params[0] == 'context':
params = params[1:]
else:
raise TemplateSyntaxError(
"'%s' is decorated with takes_context=True so it must "
"have a first argument of 'context'" % name)
args = []
kwargs = {}
unhandled_params = list(params)
for bit in bits:
# First we try to extract a potential kwarg from the bit
kwarg = token_kwargs([bit], parser)
if kwarg:
# The kwarg was successfully extracted
param, value = kwarg.popitem()
if param not in params and varkw is None:
# An unexpected keyword argument was supplied
raise TemplateSyntaxError(
"'%s' received unexpected keyword argument '%s'" %
(name, param))
elif param in kwargs:
# The keyword argument has already been supplied once
raise TemplateSyntaxError(
"'%s' received multiple values for keyword argument '%s'" %
(name, param))
else:
# All good, record the keyword argument
kwargs[str(param)] = value
if param in unhandled_params:
# If using the keyword syntax for a positional arg, then
# consume it.
unhandled_params.remove(param)
else:
if kwargs:
raise TemplateSyntaxError(
"'%s' received some positional argument(s) after some "
"keyword argument(s)" % name)
else:
# Record the positional argument
args.append(parser.compile_filter(bit))
try:
# Consume from the list of expected positional arguments
unhandled_params.pop(0)
except IndexError:
if varargs is None:
raise TemplateSyntaxError(
"'%s' received too many positional arguments" %
name)
if defaults is not None:
# Consider the last n params handled, where n is the
# number of defaults.
unhandled_params = unhandled_params[:-len(defaults)]
if unhandled_params:
# Some positional arguments were not supplied
raise TemplateSyntaxError(
"'%s' did not receive value(s) for the argument(s): %s" %
(name, ", ".join("'%s'" % p for p in unhandled_params)))
return args, kwargs
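# For illustration (hypothetical tag): given ``def hello(greeting, name='world')``
# registered as a simple_tag and the invocation ``{% hello "hi" name=user %}``,
# parse_bits receives bits=['"hi"', 'name=user'] and params=['greeting', 'name'],
# and returns one positional FilterExpression for '"hi"' plus
# kwargs={'name': <FilterExpression for user>}.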
def import_library(name):
"""
Load a Library object from a template tag module.
"""
try:
module = import_module(name)
except ImportError as e:
raise InvalidTemplateLibrary(
"Invalid template library specified. ImportError raised when "
"trying to load '%s': %s" % (name, e)
)
try:
return module.register
except AttributeError:
raise InvalidTemplateLibrary(
"Module %s does not have a variable named 'register'" % name,
)
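# Usage sketch for this module (illustrative; the module path, tag, and filter
# names below are hypothetical). A templatetags module would typically contain:
#
#     from django import template
#
#     register = template.Library()
#
#     @register.filter
#     def lower(value):
#         return value.lower()
#
#     @register.inclusion_tag('results.html')
#     def show_results(poll):
#         return {'choices': poll.choice_set.all()}
#
# import_library('myapp.templatetags.mytags') would then return that
# ``register`` instance, provided the module defines one.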
|
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Command-line interface to the OpenStack Cinder API.
"""
from __future__ import print_function
import argparse
import glob
import imp
import itertools
import os
import pkgutil
import sys
import logging
import six
from cinderclient import client
from cinderclient import exceptions as exc
import cinderclient.extension
from cinderclient.openstack.common import strutils
from cinderclient import utils
from cinderclient.v1 import shell as shell_v1
from cinderclient.v2 import shell as shell_v2
DEFAULT_OS_VOLUME_API_VERSION = "1"
DEFAULT_CINDER_ENDPOINT_TYPE = 'publicURL'
DEFAULT_CINDER_SERVICE_TYPE = 'volume'
logger = logging.getLogger(__name__)
class CinderClientArgumentParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
super(CinderClientArgumentParser, self).__init__(*args, **kwargs)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
"""
self.print_usage(sys.stderr)
#FIXME(lzyeval): if changes occur in argparse.ArgParser._check_value
choose_from = ' (choose from'
progparts = self.prog.partition(' ')
self.exit(2, "error: %(errmsg)s\nTry '%(mainp)s help %(subp)s'"
" for more information.\n" %
{'errmsg': message.split(choose_from)[0],
'mainp': progparts[0],
'subp': progparts[2]})
class OpenStackCinderShell(object):
def get_base_parser(self):
parser = CinderClientArgumentParser(
prog='cinder',
description=__doc__.strip(),
epilog='See "cinder help COMMAND" '
'for help on a specific command.',
add_help=False,
formatter_class=OpenStackHelpFormatter,
)
# Global arguments
parser.add_argument('-h', '--help',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--version',
action='version',
version=cinderclient.__version__)
parser.add_argument('--debug',
action='store_true',
default=utils.env('CINDERCLIENT_DEBUG',
default=False),
help="Print debugging output")
parser.add_argument('--os-username',
metavar='<auth-user-name>',
default=utils.env('OS_USERNAME',
'CINDER_USERNAME'),
help='Defaults to env[OS_USERNAME].')
parser.add_argument('--os_username',
help=argparse.SUPPRESS)
parser.add_argument('--os-password',
metavar='<auth-password>',
default=utils.env('OS_PASSWORD',
'CINDER_PASSWORD'),
help='Defaults to env[OS_PASSWORD].')
parser.add_argument('--os_password',
help=argparse.SUPPRESS)
parser.add_argument('--os-tenant-name',
metavar='<auth-tenant-name>',
default=utils.env('OS_TENANT_NAME',
'CINDER_PROJECT_ID'),
help='Defaults to env[OS_TENANT_NAME].')
parser.add_argument('--os_tenant_name',
help=argparse.SUPPRESS)
parser.add_argument('--os-tenant-id',
metavar='<auth-tenant-id>',
default=utils.env('OS_TENANT_ID',
'CINDER_TENANT_ID'),
help='Defaults to env[OS_TENANT_ID].')
parser.add_argument('--os_tenant_id',
help=argparse.SUPPRESS)
parser.add_argument('--os-auth-url',
metavar='<auth-url>',
default=utils.env('OS_AUTH_URL',
'CINDER_URL'),
help='Defaults to env[OS_AUTH_URL].')
parser.add_argument('--os_auth_url',
help=argparse.SUPPRESS)
parser.add_argument('--os-region-name',
metavar='<region-name>',
default=utils.env('OS_REGION_NAME',
'CINDER_REGION_NAME'),
help='Defaults to env[OS_REGION_NAME].')
parser.add_argument('--os_region_name',
help=argparse.SUPPRESS)
parser.add_argument('--service-type',
metavar='<service-type>',
help='Defaults to volume for most actions')
parser.add_argument('--service_type',
help=argparse.SUPPRESS)
parser.add_argument('--service-name',
metavar='<service-name>',
default=utils.env('CINDER_SERVICE_NAME'),
help='Defaults to env[CINDER_SERVICE_NAME]')
parser.add_argument('--service_name',
help=argparse.SUPPRESS)
parser.add_argument('--volume-service-name',
metavar='<volume-service-name>',
default=utils.env('CINDER_VOLUME_SERVICE_NAME'),
help='Defaults to env[CINDER_VOLUME_SERVICE_NAME]')
parser.add_argument('--volume_service_name',
help=argparse.SUPPRESS)
parser.add_argument('--endpoint-type',
metavar='<endpoint-type>',
default=utils.env('CINDER_ENDPOINT_TYPE',
default=DEFAULT_CINDER_ENDPOINT_TYPE),
help='Defaults to env[CINDER_ENDPOINT_TYPE] or '
+ DEFAULT_CINDER_ENDPOINT_TYPE + '.')
parser.add_argument('--endpoint_type',
help=argparse.SUPPRESS)
parser.add_argument('--os-volume-api-version',
metavar='<volume-api-ver>',
default=utils.env('OS_VOLUME_API_VERSION',
default=DEFAULT_OS_VOLUME_API_VERSION),
                            help='Accepts 1 or 2, defaults '
'to env[OS_VOLUME_API_VERSION].')
parser.add_argument('--os_volume_api_version',
help=argparse.SUPPRESS)
parser.add_argument('--os-cacert',
metavar='<ca-certificate>',
default=utils.env('OS_CACERT', default=None),
help='Specify a CA bundle file to use in '
'verifying a TLS (https) server certificate. '
'Defaults to env[OS_CACERT]')
parser.add_argument('--insecure',
default=utils.env('CINDERCLIENT_INSECURE',
default=False),
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--retries',
metavar='<retries>',
type=int,
default=0,
help='Number of retries.')
# FIXME(dtroyer): The args below are here for diablo compatibility,
        # remove them in the folsom cycle
# alias for --os-username, left in for backwards compatibility
parser.add_argument('--username',
help=argparse.SUPPRESS)
# alias for --os-region_name, left in for backwards compatibility
parser.add_argument('--region_name',
help=argparse.SUPPRESS)
# alias for --os-password, left in for backwards compatibility
parser.add_argument('--apikey', '--password', dest='apikey',
default=utils.env('CINDER_API_KEY'),
help=argparse.SUPPRESS)
# alias for --os-tenant-name, left in for backward compatibility
parser.add_argument('--projectid', '--tenant_name', dest='projectid',
default=utils.env('CINDER_PROJECT_ID'),
help=argparse.SUPPRESS)
# alias for --os-auth-url, left in for backward compatibility
parser.add_argument('--url', '--auth_url', dest='url',
default=utils.env('CINDER_URL'),
help=argparse.SUPPRESS)
return parser
def get_subcommand_parser(self, version):
parser = self.get_base_parser()
self.subcommands = {}
subparsers = parser.add_subparsers(metavar='<subcommand>')
try:
actions_module = {
'1.1': shell_v1,
'2': shell_v2,
}[version]
except KeyError:
actions_module = shell_v1
self._find_actions(subparsers, actions_module)
self._find_actions(subparsers, self)
for extension in self.extensions:
self._find_actions(subparsers, extension.module)
self._add_bash_completion_subparser(subparsers)
return parser
def _discover_extensions(self, version):
extensions = []
for name, module in itertools.chain(
self._discover_via_python_path(version),
self._discover_via_contrib_path(version)):
extension = cinderclient.extension.Extension(name, module)
extensions.append(extension)
return extensions
def _discover_via_python_path(self, version):
for (module_loader, name, ispkg) in pkgutil.iter_modules():
if name.endswith('python_cinderclient_ext'):
if not hasattr(module_loader, 'load_module'):
# Python 2.6 compat: actually get an ImpImporter obj
module_loader = module_loader.find_module(name)
module = module_loader.load_module(name)
yield name, module
def _discover_via_contrib_path(self, version):
module_path = os.path.dirname(os.path.abspath(__file__))
version_str = "v%s" % version.replace('.', '_')
ext_path = os.path.join(module_path, version_str, 'contrib')
ext_glob = os.path.join(ext_path, "*.py")
for ext_path in glob.iglob(ext_glob):
name = os.path.basename(ext_path)[:-3]
if name == "__init__":
continue
module = imp.load_source(name, ext_path)
yield name, module
def _add_bash_completion_subparser(self, subparsers):
subparser = subparsers.add_parser(
'bash_completion',
add_help=False,
formatter_class=OpenStackHelpFormatter)
self.subcommands['bash_completion'] = subparser
subparser.set_defaults(func=self.do_bash_completion)
def _find_actions(self, subparsers, actions_module):
for attr in (a for a in dir(actions_module) if a.startswith('do_')):
            # I prefer commands to be hyphen-separated rather than
            # underscore-separated.
command = attr[3:].replace('_', '-')
callback = getattr(actions_module, attr)
desc = callback.__doc__ or ''
help = desc.strip().split('\n')[0]
arguments = getattr(callback, 'arguments', [])
subparser = subparsers.add_parser(
command,
help=help,
description=desc,
add_help=False,
formatter_class=OpenStackHelpFormatter)
subparser.add_argument('-h', '--help',
action='help',
help=argparse.SUPPRESS,)
self.subcommands[command] = subparser
for (args, kwargs) in arguments:
subparser.add_argument(*args, **kwargs)
subparser.set_defaults(func=callback)
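    # For example, a ``do_list`` method or extension function becomes the
    # ``list`` subcommand and ``do_type_create`` becomes ``type-create``; the
    # first line of its docstring is used as the help text, and any
    # ``@utils.arg(...)`` decorations become argparse arguments on that
    # subparser.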
def setup_debugging(self, debug):
if not debug:
return
streamhandler = logging.StreamHandler()
streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s"
streamhandler.setFormatter(logging.Formatter(streamformat))
logger.setLevel(logging.DEBUG)
logger.addHandler(streamhandler)
def main(self, argv):
# Parse args once to find version and debug settings
parser = self.get_base_parser()
(options, args) = parser.parse_known_args(argv)
self.setup_debugging(options.debug)
# build available subcommands based on version
self.extensions = self._discover_extensions(
options.os_volume_api_version)
self._run_extension_hooks('__pre_parse_args__')
subcommand_parser = self.get_subcommand_parser(
options.os_volume_api_version)
self.parser = subcommand_parser
if options.help or not argv:
subcommand_parser.print_help()
return 0
args = subcommand_parser.parse_args(argv)
self._run_extension_hooks('__post_parse_args__', args)
# Short-circuit and deal with help right away.
if args.func == self.do_help:
self.do_help(args)
return 0
elif args.func == self.do_bash_completion:
self.do_bash_completion(args)
return 0
(os_username, os_password, os_tenant_name, os_auth_url,
os_region_name, os_tenant_id, endpoint_type, insecure,
service_type, service_name, volume_service_name,
username, apikey, projectid, url, region_name, cacert) = (
args.os_username, args.os_password,
args.os_tenant_name, args.os_auth_url,
args.os_region_name, args.os_tenant_id,
args.endpoint_type, args.insecure,
args.service_type, args.service_name,
args.volume_service_name, args.username,
args.apikey, args.projectid,
args.url, args.region_name, args.os_cacert)
if not endpoint_type:
endpoint_type = DEFAULT_CINDER_ENDPOINT_TYPE
if not service_type:
service_type = DEFAULT_CINDER_SERVICE_TYPE
service_type = utils.get_service_type(args.func) or service_type
        # FIXME(usrleon): the project id should be restricted here in the same
        # way as os_username and os_password, but for compatibility it is not.
if not utils.isunauthenticated(args.func):
if not os_username:
if not username:
raise exc.CommandError(
"You must provide a username "
"via either --os-username or env[OS_USERNAME]")
else:
os_username = username
if not os_password:
if not apikey:
raise exc.CommandError("You must provide a password "
"via either --os-password or via "
"env[OS_PASSWORD]")
else:
os_password = apikey
if not (os_tenant_name or os_tenant_id):
if not projectid:
raise exc.CommandError("You must provide a tenant_id "
"via either --os-tenant-id or "
"env[OS_TENANT_ID]")
else:
os_tenant_name = projectid
if not os_auth_url:
if not url:
raise exc.CommandError(
"You must provide an auth url "
"via either --os-auth-url or env[OS_AUTH_URL]")
else:
os_auth_url = url
if not os_region_name and region_name:
os_region_name = region_name
if not (os_tenant_name or os_tenant_id):
raise exc.CommandError(
"You must provide a tenant_id "
"via either --os-tenant-id or env[OS_TENANT_ID]")
if not os_auth_url:
raise exc.CommandError(
"You must provide an auth url "
"via either --os-auth-url or env[OS_AUTH_URL]")
self.cs = client.Client(options.os_volume_api_version, os_username,
os_password, os_tenant_name, os_auth_url,
insecure, region_name=os_region_name,
tenant_id=os_tenant_id,
endpoint_type=endpoint_type,
extensions=self.extensions,
service_type=service_type,
service_name=service_name,
volume_service_name=volume_service_name,
retries=options.retries,
http_log_debug=args.debug,
cacert=cacert)
try:
if not utils.isunauthenticated(args.func):
self.cs.authenticate()
except exc.Unauthorized:
raise exc.CommandError("Invalid OpenStack Cinder credentials.")
except exc.AuthorizationFailure:
raise exc.CommandError("Unable to authorize user")
endpoint_api_version = self.cs.get_volume_api_version_from_endpoint()
if endpoint_api_version != options.os_volume_api_version:
msg = (("Volume API version is set to %s "
"but you are accessing a %s endpoint. "
"Change its value via either --os-volume-api-version "
"or env[OS_VOLUME_API_VERSION]")
% (options.os_volume_api_version, endpoint_api_version))
raise exc.InvalidAPIVersion(msg)
args.func(self.cs, args)
def _run_extension_hooks(self, hook_type, *args, **kwargs):
"""Run hooks for all registered extensions."""
for extension in self.extensions:
extension.run_hooks(hook_type, *args, **kwargs)
def do_bash_completion(self, args):
"""Print arguments for bash_completion.
Prints all of the commands and options to stdout so that the
cinder.bash_completion script doesn't have to hard code them.
"""
commands = set()
options = set()
for sc_str, sc in list(self.subcommands.items()):
commands.add(sc_str)
for option in list(sc._optionals._option_string_actions.keys()):
options.add(option)
commands.remove('bash-completion')
commands.remove('bash_completion')
print(' '.join(commands | options))
@utils.arg('command', metavar='<subcommand>', nargs='?',
help='Display help for <subcommand>')
def do_help(self, args):
"""
Display help about this program or one of its subcommands.
"""
if args.command:
if args.command in self.subcommands:
self.subcommands[args.command].print_help()
else:
raise exc.CommandError("'%s' is not a valid subcommand" %
args.command)
else:
self.parser.print_help()
# I'm picky about my shell help.
class OpenStackHelpFormatter(argparse.HelpFormatter):
def start_section(self, heading):
# Title-case the headings
heading = '%s%s' % (heading[0].upper(), heading[1:])
super(OpenStackHelpFormatter, self).start_section(heading)
def main():
try:
if sys.version_info >= (3, 0):
OpenStackCinderShell().main(sys.argv[1:])
else:
OpenStackCinderShell().main(map(strutils.safe_decode,
sys.argv[1:]))
except KeyboardInterrupt:
print("... terminating cinder client", file=sys.stderr)
sys.exit(130)
except Exception as e:
logger.debug(e, exc_info=1)
message = e.message
if not isinstance(message, six.string_types):
message = str(message)
print("ERROR: %s" % strutils.safe_encode(message), file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
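# Invocation sketch (illustrative; the credentials, endpoint, and subcommand
# below are placeholders):
#
#     shell = OpenStackCinderShell()
#     shell.main(['--os-username', 'admin',
#                 '--os-password', 'secret',
#                 '--os-tenant-name', 'demo',
#                 '--os-auth-url', 'http://keystone.example.com:5000/v2.0',
#                 'list'])
#
# The same values may instead come from OS_USERNAME, OS_PASSWORD,
# OS_TENANT_NAME, and OS_AUTH_URL in the environment, per the parser defaults
# defined in get_base_parser().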
|
|
from .models import Incident, Crisis, ResponseUnit, PokemonDB, Pokemon, Trainer, Shelter # noqa
from .serializers import IncidentSerializer, CrisisSerializer, \
ResponseUnitSerializer, PokemonSerializer, PokemonDBSerializer, \
TrainerSerializer, UserSerializer, ShelterSerializer, LoginSerializer # noqa
from rest_framework import generics
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.authtoken.models import Token
from rest_framework.authentication import TokenAuthentication, BasicAuthentication # noqa
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated # noqa
from rest_framework.views import APIView
from django.http import HttpResponse
from django.template import loader
from cms.email.email import send_mailv4_to_responseunit
@api_view(['GET'])
def api_root(request, format=None):
return Response({
'user': reverse('cms:user_list', request=request, format=format),
'incident': reverse('cms:incident_list', request=request, format=format), # noqa
'crisis': reverse('cms:crisis_list', request=request, format=format),
'responseunit': reverse('cms:responseunit_list', request=request, format=format), # noqa
'pokemon': reverse('cms:pokemon_list', request=request, format=format),
'pokemondb': reverse('cms:pokemondb_list', request=request, format=format), # noqa
'shelter': reverse('cms:shelter_list', request=request, format=format),
})
class Auth(APIView):
authentication_classes = (TokenAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
def get(self, request, format=None):
content = {
'user': str(request.user),
'auth': str(request.auth)
}
return Response(content)
class UserList(generics.ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class CurrentUser(APIView):
"""
Get current user from request (based on Authorization token).
=> Used to do re-login eg. after page reload.
"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get(self, request):
currentUser = request.user
groups = currentUser.groups.all().values('name')
if currentUser and groups:
            group_names = [g['name'] for g in groups]
return Response(
{
'username': currentUser.username,
'groups': group_names
},
status=status.HTTP_200_OK
)
return Response(
{'error': 'Could not get current user'},
status=status.HTTP_400_BAD_REQUEST
)
class LoginView(APIView):
def post(self, request, format=None):
serializer = LoginSerializer(data=request.data)
if serializer.is_valid():
user = authenticate(
username=serializer.validated_data['username'],
password=serializer.validated_data['password']
)
if user:
groups = user.groups.all().values('name')
                group_names = [g['name'] for g in groups]
token, created = Token.objects.get_or_create(user=user)
return Response(
{
'token': token.key,
'username': user.username,
'groups': group_names
},
status=status.HTTP_200_OK
)
return Response(
{'error': 'LOGIN_INVALID_CREDENTIALS'},
status=status.HTTP_400_BAD_REQUEST
)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
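# Login flow sketch (illustrative; the URL paths are assumptions, while the
# request fields and response keys match LoginView above):
#
#     import requests
#     resp = requests.post('http://localhost:8000/cms/login/',
#                          data={'username': 'ash', 'password': 'pikachu'})
#     token = resp.json()['token']
#     # Later requests authenticate via DRF's TokenAuthentication header:
#     requests.get('http://localhost:8000/cms/current_user/',
#                  headers={'Authorization': 'Token %s' % token})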
class UserDetail(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class IncidentList(generics.ListCreateAPIView):
queryset = Incident.objects.all()
serializer_class = IncidentSerializer
permission_classes = (IsAuthenticatedOrReadOnly,)
# permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
# def perform_create(self, serializer):
# serializer.save(owner=self.request.user)
class IncidentDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Incident.objects.all()
serializer_class = IncidentSerializer
permission_classes = (IsAuthenticatedOrReadOnly,)
class HandleIncident(APIView):
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get(self, request, pk, format=None):
incident = Incident.objects.get(pk=pk)
# First try to attach RU with same area and type
RUsByAreaAndType = ResponseUnit.objects.filter(area=incident.area, speciality=incident.type) # noqa
if RUsByAreaAndType:
incident.handle_by = RUsByAreaAndType[0]
incident.save()
try:
print('[EMAIL] send response unit email')
send_mailv4_to_responseunit(
incident, [incident.handle_by.email])
except Exception as e:
print(e)
print("response unit email not sending")
serializer = IncidentSerializer(incident)
return Response(serializer.data)
# Then by only type
RUsByType = ResponseUnit.objects.filter(speciality=incident.type) # noqa
if RUsByType:
incident.handle_by = RUsByType[0]
incident.save()
try:
print('[EMAIL] send response unit email')
send_mailv4_to_responseunit(
incident, [incident.handle_by.email])
except Exception as e:
print(e)
print("response unit email not sending")
serializer = IncidentSerializer(incident)
return Response(serializer.data)
return Response(
{'error': 'could not attach response unit'},
status=status.HTTP_400_BAD_REQUEST
)
class CrisisList(generics.ListCreateAPIView):
queryset = Crisis.objects.all()
serializer_class = CrisisSerializer
class CrisisDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Crisis.objects.all()
serializer_class = CrisisSerializer
class CurrentCrisis(APIView):
"""
Get the current crisis (inactive or active).
If crisis does not exist, create new current crisis.
"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get(self, request):
currentCrisis, created = Crisis.objects.get_or_create(
status='ACT',
defaults={
'title': 'crisis',
'description': 'automatically created crisis'
},
)
serializer = CrisisSerializer(currentCrisis)
return Response(serializer.data)
class ResponseUnitList(generics.ListCreateAPIView):
queryset = ResponseUnit.objects.all()
serializer_class = ResponseUnitSerializer
class ResponseUnitDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = ResponseUnit.objects.all()
serializer_class = ResponseUnitSerializer
class PokemonDBList(generics.ListCreateAPIView):
queryset = PokemonDB.objects.all()
serializer_class = PokemonDBSerializer
class PokemonDBDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = PokemonDB.objects.all()
serializer_class = PokemonDBSerializer
class PokemonList(generics.ListCreateAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
class PokemonDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
class TrainerList(generics.ListCreateAPIView):
queryset = Trainer.objects.all()
serializer_class = TrainerSerializer
class TrainerDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Trainer.objects.all()
serializer_class = TrainerSerializer
class ShelterList(generics.ListAPIView):
queryset = Shelter.objects.all()
serializer_class = ShelterSerializer
class ShelterDetails(generics.RetrieveUpdateDestroyAPIView):
queryset = Shelter.objects.all()
serializer_class = ShelterSerializer
def report(request):
incident_list = Incident.objects.all()
context = {
'incident_list': incident_list,
}
    # NOTE: get_template() requires a template name; the one below is assumed.
    template = loader.get_template('cms/report.html')
return HttpResponse(template.render(context, request))
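# URL wiring sketch (illustrative; assumes this module is importable as
# ``cms.views`` and that the app namespace is ``cms``, matching the reverse()
# names used in api_root above; the paths and detail routes are assumptions):
#
#     from django.urls import path
#     from cms import views
#
#     app_name = 'cms'
#     urlpatterns = [
#         path('', views.api_root),
#         path('login/', views.LoginView.as_view(), name='login'),
#         path('incident/', views.IncidentList.as_view(), name='incident_list'),
#         path('incident/<int:pk>/', views.IncidentDetail.as_view(),
#              name='incident_detail'),
#         path('crisis/', views.CrisisList.as_view(), name='crisis_list'),
#     ]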
|
|
import unittest
import merge_active_shadow
from tools.api_proto_plugin import type_context as api_type_context
from google.protobuf import descriptor_pb2
from google.protobuf import text_format
class MergeActiveShadowTest(unittest.TestCase):
# Dummy type context for tests that don't care about this.
def fakeTypeContext(self):
fake_source_code_info = descriptor_pb2.SourceCodeInfo()
source_code_info = api_type_context.SourceCodeInfo('fake', fake_source_code_info)
return api_type_context.TypeContext(source_code_info, 'fake_package')
  # Poor man's text proto equivalence. TensorFlow has better tools for this,
  # e.g. assertProto2Equal.
def assertTextProtoEq(self, lhs, rhs):
self.assertMultiLineEqual(lhs.strip(), rhs.strip())
def testAdjustReservedRange(self):
"""AdjustReservedRange removes specified skip_reserved_numbers."""
desc_pb_text = """
reserved_range {
start: 41
end: 41
}
reserved_range {
start: 42
end: 42
}
reserved_range {
start: 43
end: 44
}
reserved_range {
start: 50
end: 51
}
"""
desc = descriptor_pb2.DescriptorProto()
text_format.Merge(desc_pb_text, desc)
target = descriptor_pb2.DescriptorProto()
merge_active_shadow.AdjustReservedRange(target, desc.reserved_range, [42, 43])
target_pb_text = """
reserved_range {
start: 41
end: 41
}
reserved_range {
start: 50
end: 51
}
"""
self.assertTextProtoEq(target_pb_text, str(target))
def testMergeActiveShadowEnum(self):
"""MergeActiveShadowEnum recovers shadow values."""
active_pb_text = """
value {
number: 1
name: "foo"
}
value {
number: 0
name: "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE"
}
value {
number: 3
name: "bar"
}
reserved_name: "baz"
reserved_range {
start: 2
end: 3
}
"""
active_proto = descriptor_pb2.EnumDescriptorProto()
text_format.Merge(active_pb_text, active_proto)
shadow_pb_text = """
value {
number: 1
name: "foo"
}
value {
number: 0
name: "wow"
}
value {
number: 3
name: "bar"
}
value {
number: 2
name: "hidden_envoy_deprecated_baz"
}
value {
number: 4
name: "hidden_envoy_deprecated_huh"
}
"""
shadow_proto = descriptor_pb2.EnumDescriptorProto()
text_format.Merge(shadow_pb_text, shadow_proto)
target_proto = descriptor_pb2.EnumDescriptorProto()
merge_active_shadow.MergeActiveShadowEnum(active_proto, shadow_proto, target_proto)
target_pb_text = """
value {
name: "foo"
number: 1
}
value {
name: "wow"
number: 0
}
value {
name: "bar"
number: 3
}
value {
name: "hidden_envoy_deprecated_baz"
number: 2
}
"""
self.assertTextProtoEq(target_pb_text, str(target_proto))
def testMergeActiveShadowMessageComments(self):
"""MergeActiveShadowMessage preserves comment field correspondence."""
active_pb_text = """
field {
number: 9
name: "oneof_1_0"
oneof_index: 0
}
field {
number: 1
name: "simple_field_0"
}
field {
number: 0
name: "oneof_2_0"
oneof_index: 2
}
field {
number: 8
name: "oneof_2_1"
oneof_index: 2
}
field {
number: 3
name: "oneof_0_0"
oneof_index: 1
}
field {
number: 4
name: "newbie"
}
field {
number: 7
name: "oneof_3_0"
oneof_index: 3
}
reserved_name: "missing_oneof_field_0"
reserved_name: "missing_oneof_field_1"
reserved_name: "missing_oneof_field_2"
oneof_decl {
name: "oneof_0"
}
oneof_decl {
name: "oneof_1"
}
oneof_decl {
name: "oneof_2"
}
oneof_decl {
name: "oneof_3"
}
"""
active_proto = descriptor_pb2.DescriptorProto()
text_format.Merge(active_pb_text, active_proto)
active_source_code_info_text = """
location {
path: [4, 1, 2, 4]
leading_comments: "field_4"
}
location {
path: [4, 1, 2, 5]
leading_comments: "field_5"
}
location {
path: [4, 1, 2, 3]
leading_comments: "field_3"
}
location {
path: [4, 1, 2, 0]
leading_comments: "field_0"
}
location {
path: [4, 1, 2, 1]
leading_comments: "field_1"
}
location {
path: [4, 0, 2, 2]
leading_comments: "ignore_0"
}
location {
path: [4, 1, 2, 6]
leading_comments: "field_6"
}
location {
path: [4, 1, 2, 2]
leading_comments: "field_2"
}
location {
path: [3]
leading_comments: "ignore_1"
}
"""
active_source_code_info = descriptor_pb2.SourceCodeInfo()
text_format.Merge(active_source_code_info_text, active_source_code_info)
shadow_pb_text = """
field {
number: 10
name: "hidden_envoy_deprecated_missing_oneof_field_0"
oneof_index: 0
}
field {
number: 11
name: "hidden_envoy_deprecated_missing_oneof_field_1"
oneof_index: 3
}
field {
number: 11
name: "hidden_envoy_deprecated_missing_oneof_field_2"
oneof_index: 2
}
oneof_decl {
name: "oneof_0"
}
oneof_decl {
name: "oneof_1"
}
oneof_decl {
name: "oneof_2"
}
oneof_decl {
name: "some_removed_oneof"
}
oneof_decl {
name: "oneof_3"
}
"""
shadow_proto = descriptor_pb2.DescriptorProto()
text_format.Merge(shadow_pb_text, shadow_proto)
target_proto = descriptor_pb2.DescriptorProto()
source_code_info = api_type_context.SourceCodeInfo('fake', active_source_code_info)
fake_type_context = api_type_context.TypeContext(source_code_info, 'fake_package')
merge_active_shadow.MergeActiveShadowMessage(fake_type_context.ExtendMessage(1, "foo", False),
active_proto, shadow_proto, target_proto)
target_pb_text = """
field {
name: "oneof_1_0"
number: 9
oneof_index: 0
}
field {
name: "hidden_envoy_deprecated_missing_oneof_field_0"
number: 10
oneof_index: 0
}
field {
name: "simple_field_0"
number: 1
}
field {
name: "oneof_2_0"
number: 0
oneof_index: 2
}
field {
name: "oneof_2_1"
number: 8
oneof_index: 2
}
field {
name: "hidden_envoy_deprecated_missing_oneof_field_2"
number: 11
oneof_index: 2
}
field {
name: "oneof_0_0"
number: 3
oneof_index: 1
}
field {
name: "newbie"
number: 4
}
field {
name: "oneof_3_0"
number: 7
oneof_index: 3
}
field {
name: "hidden_envoy_deprecated_missing_oneof_field_1"
number: 11
oneof_index: 4
}
oneof_decl {
name: "oneof_0"
}
oneof_decl {
name: "oneof_1"
}
oneof_decl {
name: "oneof_2"
}
oneof_decl {
name: "oneof_3"
}
oneof_decl {
name: "some_removed_oneof"
}
"""
target_source_code_info_text = """
location {
path: 4
path: 1
path: 2
path: 6
leading_comments: "field_4"
}
location {
path: 4
path: 1
path: 2
path: 7
leading_comments: "field_5"
}
location {
path: 4
path: 1
path: 2
path: 4
leading_comments: "field_3"
}
location {
path: 4
path: 1
path: 2
path: 0
leading_comments: "field_0"
}
location {
path: 4
path: 1
path: 2
path: 2
leading_comments: "field_1"
}
location {
path: 4
path: 0
path: 2
path: 2
leading_comments: "ignore_0"
}
location {
path: 4
path: 1
path: 2
path: 8
leading_comments: "field_6"
}
location {
path: 4
path: 1
path: 2
path: 3
leading_comments: "field_2"
}
location {
path: 3
leading_comments: "ignore_1"
}
"""
self.maxDiff = None
self.assertTextProtoEq(target_pb_text, str(target_proto))
self.assertTextProtoEq(target_source_code_info_text,
str(fake_type_context.source_code_info.proto))
def testMergeActiveShadowMessage(self):
"""MergeActiveShadowMessage recovers shadow fields with oneofs."""
active_pb_text = """
field {
number: 1
name: "foo"
}
field {
number: 0
name: "bar"
oneof_index: 2
}
field {
number: 3
name: "baz"
}
field {
number: 4
name: "newbie"
}
reserved_name: "wow"
reserved_range {
start: 2
end: 3
}
oneof_decl {
name: "ign"
}
oneof_decl {
name: "ign2"
}
oneof_decl {
name: "some_oneof"
}
"""
active_proto = descriptor_pb2.DescriptorProto()
text_format.Merge(active_pb_text, active_proto)
shadow_pb_text = """
field {
number: 1
name: "foo"
}
field {
number: 0
name: "bar"
}
field {
number: 3
name: "baz"
}
field {
number: 2
name: "hidden_envoy_deprecated_wow"
oneof_index: 0
}
oneof_decl {
name: "some_oneof"
}
"""
shadow_proto = descriptor_pb2.DescriptorProto()
text_format.Merge(shadow_pb_text, shadow_proto)
target_proto = descriptor_pb2.DescriptorProto()
merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto,
target_proto)
target_pb_text = """
field {
name: "foo"
number: 1
}
field {
name: "bar"
number: 0
oneof_index: 2
}
field {
name: "hidden_envoy_deprecated_wow"
number: 2
oneof_index: 2
}
field {
name: "baz"
number: 3
}
field {
name: "newbie"
number: 4
}
oneof_decl {
name: "ign"
}
oneof_decl {
name: "ign2"
}
oneof_decl {
name: "some_oneof"
}
"""
self.assertTextProtoEq(target_pb_text, str(target_proto))
def testMergeActiveShadowMessageNoShadowMessage(self):
"""MergeActiveShadowMessage doesn't require a shadow message for new nested active messages."""
active_proto = descriptor_pb2.DescriptorProto()
shadow_proto = descriptor_pb2.DescriptorProto()
active_proto.nested_type.add().name = 'foo'
target_proto = descriptor_pb2.DescriptorProto()
merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto,
target_proto)
self.assertEqual(target_proto.nested_type[0].name, 'foo')
def testMergeActiveShadowMessageNoShadowEnum(self):
"""MergeActiveShadowMessage doesn't require a shadow enum for new nested active enums."""
active_proto = descriptor_pb2.DescriptorProto()
shadow_proto = descriptor_pb2.DescriptorProto()
active_proto.enum_type.add().name = 'foo'
target_proto = descriptor_pb2.DescriptorProto()
merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto,
target_proto)
self.assertEqual(target_proto.enum_type[0].name, 'foo')
def testMergeActiveShadowMessageMissing(self):
"""MergeActiveShadowMessage recovers missing messages from shadow."""
active_proto = descriptor_pb2.DescriptorProto()
shadow_proto = descriptor_pb2.DescriptorProto()
shadow_proto.nested_type.add().name = 'foo'
target_proto = descriptor_pb2.DescriptorProto()
merge_active_shadow.MergeActiveShadowMessage(self.fakeTypeContext(), active_proto, shadow_proto,
target_proto)
self.assertEqual(target_proto.nested_type[0].name, 'foo')
def testMergeActiveShadowFileMissing(self):
"""MergeActiveShadowFile recovers missing messages from shadow."""
active_proto = descriptor_pb2.FileDescriptorProto()
shadow_proto = descriptor_pb2.FileDescriptorProto()
shadow_proto.message_type.add().name = 'foo'
target_proto = descriptor_pb2.DescriptorProto()
target_proto = merge_active_shadow.MergeActiveShadowFile(active_proto, shadow_proto)
self.assertEqual(target_proto.message_type[0].name, 'foo')
def testMergeActiveShadowFileNoShadowMessage(self):
"""MergeActiveShadowFile doesn't require a shadow message for new active messages."""
active_proto = descriptor_pb2.FileDescriptorProto()
shadow_proto = descriptor_pb2.FileDescriptorProto()
active_proto.message_type.add().name = 'foo'
target_proto = descriptor_pb2.DescriptorProto()
target_proto = merge_active_shadow.MergeActiveShadowFile(active_proto, shadow_proto)
self.assertEqual(target_proto.message_type[0].name, 'foo')
def testMergeActiveShadowFileNoShadowEnum(self):
"""MergeActiveShadowFile doesn't require a shadow enum for new active enums."""
active_proto = descriptor_pb2.FileDescriptorProto()
shadow_proto = descriptor_pb2.FileDescriptorProto()
active_proto.enum_type.add().name = 'foo'
target_proto = descriptor_pb2.DescriptorProto()
target_proto = merge_active_shadow.MergeActiveShadowFile(active_proto, shadow_proto)
self.assertEqual(target_proto.enum_type[0].name, 'foo')
# TODO(htuch): add some test for recursion.
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import re, os, cStringIO, time, cgi, string, urlparse
from twisted import copyright
from twisted.python import htmlizer, text
from twisted.web import microdom, domhelpers
import process, latex, indexer, numberer, htmlbook
from twisted.python.util import InsensitiveDict
# relative links to html files
def fixLinks(document, ext):
"""
Rewrite links to XHTML lore input documents so they point to lore XHTML
output documents.
    Any node with an C{href} attribute whose value does not start with
    C{http}, C{https}, C{ftp}, or C{mailto}, whose C{class} attribute is
    neither C{absolute} nor one containing C{listing}, and which points to a
    URL ending with C{html} will have that attribute value rewritten so that
    the filename extension is C{ext} instead of C{html}.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@type ext: C{str}
@param ext: The extension to use when selecting an output file name. This
replaces the extension of the input file name.
@return: C{None}
"""
supported_schemes=['http', 'https', 'ftp', 'mailto']
for node in domhelpers.findElementsWithAttribute(document, 'href'):
href = node.getAttribute("href")
if urlparse.urlparse(href)[0] in supported_schemes:
continue
if node.getAttribute("class", "") == "absolute":
continue
if node.getAttribute("class", "").find('listing') != -1:
continue
# This is a relative link, so it should be munged.
if href.endswith('html') or href[:href.rfind('#')].endswith('html'):
fname, fext = os.path.splitext(href)
if '#' in fext:
fext = ext+'#'+fext.split('#', 1)[1]
else:
fext = ext
node.setAttribute("href", fname + fext)
def addMtime(document, fullpath):
"""
Set the last modified time of the given document.
@type document: A DOM Node or Document
@param document: The output template which defines the presentation of the
last modified time.
@type fullpath: C{str}
@param fullpath: The file name from which to take the last modified time.
@return: C{None}
"""
for node in domhelpers.findElementsWithAttribute(document, "class","mtime"):
node.appendChild(microdom.Text(time.ctime(os.path.getmtime(fullpath))))
def _getAPI(node):
"""
Retrieve the fully qualified Python name represented by the given node.
The name is represented by one or two aspects of the node: the value of the
node's first child forms the end of the name. If the node has a C{base}
attribute, that attribute's value is prepended to the node's value, with
C{.} separating the two parts.
@rtype: C{str}
@return: The fully qualified Python name.
"""
base = ""
if node.hasAttribute("base"):
base = node.getAttribute("base") + "."
return base+node.childNodes[0].nodeValue
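# For example (a sketch), the node parsed from
# <code class="API" base="twisted.web">microdom</code> yields
# 'twisted.web.microdom', and the same node without a base attribute yields
# just 'microdom'.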
def fixAPI(document, url):
"""
Replace API references with links to API documentation.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@type url: C{str}
@param url: A string which will be interpolated with the fully qualified
Python name of any API reference encountered in the input document, the
result of which will be used as a link to API documentation for that name
in the output document.
@return: C{None}
"""
# API references
for node in domhelpers.findElementsWithAttribute(document, "class", "API"):
fullname = _getAPI(node)
node2 = microdom.Element('a', {'href': url%fullname, 'title': fullname})
node2.childNodes = node.childNodes
node.childNodes = [node2]
node.removeAttribute('base')
def fontifyPython(document):
"""
Syntax color any node in the given document which contains a Python source
listing.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@return: C{None}
"""
def matcher(node):
return (node.nodeName == 'pre' and node.hasAttribute('class') and
node.getAttribute('class') == 'python')
for node in domhelpers.findElements(document, matcher):
fontifyPythonNode(node)
def fontifyPythonNode(node):
"""
Syntax color the given node containing Python source code.
@return: C{None}
"""
oldio = cStringIO.StringIO()
latex.getLatexText(node, oldio.write,
entities={'lt': '<', 'gt': '>', 'amp': '&'})
oldio = cStringIO.StringIO(oldio.getvalue().strip()+'\n')
newio = cStringIO.StringIO()
htmlizer.filter(oldio, newio, writer=htmlizer.SmallerHTMLWriter)
newio.seek(0)
newel = microdom.parse(newio).documentElement
newel.setAttribute("class", "python")
node.parentNode.replaceChild(newel, node)
def addPyListings(document, dir):
"""
Insert Python source listings into the given document from files in the
given directory based on C{py-listing} nodes.
Any node in C{document} with a C{class} attribute set to C{py-listing} will
have source lines taken from the file named in that node's C{href}
attribute (searched for in C{dir}) inserted in place of that node.
If a node has a C{skipLines} attribute, its value will be parsed as an
integer and that many lines will be skipped at the beginning of the source
file.
@type document: A DOM Node or Document
@param document: The document within which to make listing replacements.
@type dir: C{str}
@param dir: The directory in which to find source files containing the
referenced Python listings.
@return: C{None}
"""
for node in domhelpers.findElementsWithAttribute(document, "class",
"py-listing"):
filename = node.getAttribute("href")
outfile = cStringIO.StringIO()
lines = map(string.rstrip, open(os.path.join(dir, filename)).readlines())
data = '\n'.join(lines[int(node.getAttribute('skipLines', 0)):])
data = cStringIO.StringIO(text.removeLeadingTrailingBlanks(data))
htmlizer.filter(data, outfile, writer=htmlizer.SmallerHTMLWriter)
val = outfile.getvalue()
_replaceWithListing(node, val, filename, "py-listing")
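# A listing node in the input document looks roughly like this (a sketch with
# a hypothetical file name):
#
#     <a href="echoserver.py" class="py-listing" skipLines="2">echoserver.py</a>
#
# The referenced file is read from ``dir``, its first two lines are skipped,
# the source is coloured with htmlizer, and the node is replaced by a
# <div class="py-listing"> block whose caption links back to the file.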
def _replaceWithListing(node, val, filename, class_):
captionTitle = domhelpers.getNodeText(node)
if captionTitle == os.path.basename(filename):
captionTitle = 'Source listing'
text = ('<div class="%s">%s<div class="caption">%s - '
'<a href="%s"><span class="filename">%s</span></a></div></div>' %
(class_, val, captionTitle, filename, filename))
newnode = microdom.parseString(text).documentElement
node.parentNode.replaceChild(newnode, node)
def addHTMLListings(document, dir):
"""
Insert HTML source listings into the given document from files in the given
directory based on C{html-listing} nodes.
Any node in C{document} with a C{class} attribute set to C{html-listing}
will have source lines taken from the file named in that node's C{href}
attribute (searched for in C{dir}) inserted in place of that node.
@type document: A DOM Node or Document
@param document: The document within which to make listing replacements.
@type dir: C{str}
@param dir: The directory in which to find source files containing the
referenced HTML listings.
@return: C{None}
"""
for node in domhelpers.findElementsWithAttribute(document, "class",
"html-listing"):
filename = node.getAttribute("href")
val = ('<pre class="htmlsource">\n%s</pre>' %
cgi.escape(open(os.path.join(dir, filename)).read()))
_replaceWithListing(node, val, filename, "html-listing")
def addPlainListings(document, dir):
"""
Insert text listings into the given document from files in the given
directory based on C{listing} nodes.
Any node in C{document} with a C{class} attribute set to C{listing} will
have source lines taken from the file named in that node's C{href}
attribute (searched for in C{dir}) inserted in place of that node.
@type document: A DOM Node or Document
@param document: The document within which to make listing replacements.
@type dir: C{str}
@param dir: The directory in which to find source files containing the
referenced text listings.
@return: C{None}
"""
for node in domhelpers.findElementsWithAttribute(document, "class",
"listing"):
filename = node.getAttribute("href")
val = ('<pre>\n%s</pre>' %
cgi.escape(open(os.path.join(dir, filename)).read()))
_replaceWithListing(node, val, filename, "listing")
def getHeaders(document):
"""
Return all H2 and H3 nodes in the given document.
@type document: A DOM Node or Document
@rtype: C{list}
"""
return domhelpers.findElements(
document,
lambda n, m=re.compile('h[23]$').match: m(n.nodeName))
def generateToC(document):
"""
Create a table of contents for the given document.
@type document: A DOM Node or Document
@rtype: A DOM Node
@return: a Node containing a table of contents based on the headers of the
given document.
"""
toc, level, id = '\n<ol>\n', 0, 0
for element in getHeaders(document):
elementLevel = int(element.tagName[1])-2
toc += (level-elementLevel)*'</ul>\n'
toc += (elementLevel-level)*'<ul>'
toc += '<li><a href="#auto%d">' % id
toc += domhelpers.getNodeText(element)
toc += '</a></li>\n'
level = elementLevel
anchor = microdom.parseString('<a name="auto%d" />' % id).documentElement
element.childNodes.append(anchor)
id += 1
toc += '</ul>\n' * level
toc += '</ol>\n'
return microdom.parseString(toc).documentElement
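# The generated markup is roughly of this shape (a sketch for a document with
# one h2 followed by one h3):
#
#     <ol>
#     <li><a href="#auto0">First section</a></li>
#     <ul><li><a href="#auto1">A subsection</a></li>
#     </ul>
#     </ol>
#
# and a matching <a name="autoN" /> anchor is appended to each header in the
# document itself.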
def putInToC(document, toc):
"""
Insert the given table of contents into the given document.
The node with C{class} attribute set to C{toc} has its children replaced
with C{toc}.
@type document: A DOM Node or Document
@type toc: A DOM Node
"""
tocOrig = domhelpers.findElementsWithAttribute(document, 'class', 'toc')
if tocOrig:
        tocOrig = tocOrig[0]
tocOrig.childNodes = [toc]
def removeH1(document):
"""
Replace all C{h1} nodes in the given document with empty C{span} nodes.
C{h1} nodes mark up document sections and the output template is given an
opportunity to present this information in a different way.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@return: C{None}
"""
h1 = domhelpers.findNodesNamed(document, 'h1')
empty = microdom.Element('span')
for node in h1:
node.parentNode.replaceChild(empty, node)
def footnotes(document):
"""
Find footnotes in the given document, move them to the end of the body, and
generate links to them.
A footnote is any node with a C{class} attribute set to C{footnote}.
Footnote links are generated as superscript. Footnotes are collected in a
C{ol} node at the end of the document.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@return: C{None}
"""
footnotes = domhelpers.findElementsWithAttribute(document, "class",
"footnote")
if not footnotes:
return
footnoteElement = microdom.Element('ol')
id = 1
for footnote in footnotes:
href = microdom.parseString('<a href="#footnote-%(id)d">'
'<super>%(id)d</super></a>'
% vars()).documentElement
text = ' '.join(domhelpers.getNodeText(footnote).split())
href.setAttribute('title', text)
target = microdom.Element('a', attributes={'name': 'footnote-%d' % id})
target.childNodes = [footnote]
footnoteContent = microdom.Element('li')
footnoteContent.childNodes = [target]
footnoteElement.childNodes.append(footnoteContent)
footnote.parentNode.replaceChild(href, footnote)
id += 1
body = domhelpers.findNodesNamed(document, "body")[0]
header = microdom.parseString('<h2>Footnotes</h2>').documentElement
body.childNodes.append(header)
body.childNodes.append(footnoteElement)
def notes(document):
"""
Find notes in the given document and mark them up as such.
A note is any node with a C{class} attribute set to C{note}.
(I think this is a very stupid feature. When I found it I actually
exclaimed out loud. -exarkun)
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@return: C{None}
"""
notes = domhelpers.findElementsWithAttribute(document, "class", "note")
notePrefix = microdom.parseString('<strong>Note: </strong>').documentElement
for note in notes:
note.childNodes.insert(0, notePrefix)
def compareMarkPos(a, b):
"""
Perform in every way identically to L{cmp} for valid inputs.
XXX - replace this with L{cmp}
"""
linecmp = cmp(a[0], b[0])
if linecmp:
return linecmp
return cmp(a[1], b[1])
def comparePosition(firstElement, secondElement):
"""
Compare the two elements given by their position in the document or
documents they were parsed from.
@type firstElement: C{twisted.web.microdom.Element}
@type secondElement: C{twisted.web.microdom.Element}
@return: C{-1}, C{0}, or C{1}, with the same meanings as the return value
of L{cmp}.
"""
return compareMarkPos(firstElement._markpos, secondElement._markpos)
def findNodeJustBefore(target, nodes):
"""
Find the node in C{nodes} which appeared immediately before C{target} in
the input document.
@type target: L{twisted.web.microdom.Element}
@type nodes: C{list} of L{twisted.web.microdom.Element}
@return: An element from C{nodes}
"""
result = None
for node in nodes:
if comparePosition(target, node) < 0:
return result
result = node
return result
def getFirstAncestorWithSectionHeader(entry):
"""
Visit the ancestors of C{entry} until one with at least one C{h2} child
node is found, then return all of that node's C{h2} child nodes.
@type entry: A DOM Node
@param entry: The node from which to begin traversal. This node itself is
excluded from consideration.
@rtype: C{list} of DOM Nodes
@return: All C{h2} nodes of the ultimately selected parent node.
"""
for a in domhelpers.getParents(entry)[1:]:
headers = domhelpers.findNodesNamed(a, "h2")
if len(headers) > 0:
return headers
return []
def getSectionNumber(header):
"""
Retrieve the section number of the given node.
@type header: A DOM Node or L{None}
@param header: The section from which to extract a number. The section
number is the value of this node's first child.
@return: C{None} or a C{str} giving the section number.
"""
if not header:
return None
return header.childNodes[0].value.strip()
def getSectionReference(entry):
"""
Find the section number which contains the given node.
This function looks at the given node's ancestry until it finds a node
which defines a section, then returns that section's number.
@type entry: A DOM Node
@param entry: The node for which to determine the section.
@rtype: C{str}
@return: The section number, as returned by C{getSectionNumber} of the
first ancestor of C{entry} which defines a section, as determined by
L{getFirstAncestorWithSectionHeader}.
"""
headers = getFirstAncestorWithSectionHeader(entry)
myHeader = findNodeJustBefore(entry, headers)
return getSectionNumber(myHeader)
def index(document, filename, chapterReference):
"""
Extract index entries from the given document and store them for later use
and insert named anchors so that the index can link back to those entries.
Any node with a C{class} attribute set to C{index} is considered an index
entry.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@type filename: C{str}
@param filename: A link to the output for the given document which will be
included in the index to link to any index entry found here.
@type chapterReference: ???
@param chapterReference: ???
@return: C{None}
"""
entries = domhelpers.findElementsWithAttribute(document, "class", "index")
if not entries:
return
    i = 0
for entry in entries:
i += 1
anchor = 'index%02d' % i
if chapterReference:
ref = getSectionReference(entry) or chapterReference
else:
ref = 'link'
indexer.addEntry(filename, anchor, entry.attributes['value'], ref)
# does nodeName even affect anything?
entry.nodeName = entry.tagName = entry.endTagName = 'a'
entry.attributes = InsensitiveDict({'name': anchor})
def setIndexLink(template, indexFilename):
"""
Insert a link to an index document.
Any node with a C{class} attribute set to C{index-link} will have its tag
name changed to C{a} and its C{href} attribute set to C{indexFilename}.
@type template: A DOM Node or Document
@param template: The output template which defines the presentation of the
version information.
@type indexFilename: C{str}
@param indexFilename: The address of the index document to which to link.
If any C{False} value, this function will do nothing.
@return: C{None}
"""
if not indexFilename:
return
indexLinks = domhelpers.findElementsWithAttribute(template, "class", "index-link")
for link in indexLinks:
link.nodeName = link.tagName = link.endTagName = 'a'
link.attributes = InsensitiveDict({'href': indexFilename})
def numberDocument(document, chapterNumber):
"""
Number the sections of the given document.
A dot-separated chapter, section number is added to the beginning of each
section, as defined by C{h2} nodes.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@type chapterNumber: C{int}
@param chapterNumber: The chapter number of this content in an overall
document.
@return: C{None}
"""
i = 1
for node in domhelpers.findNodesNamed(document, "h2"):
node.childNodes = [microdom.Text("%s.%d " % (chapterNumber, i))] + node.childNodes
i += 1
def fixRelativeLinks(document, linkrel):
"""
    Replace relative links in C{src} and C{href} attributes with links relative
to C{linkrel}.
@type document: A DOM Node or Document
@param document: The output template.
@type linkrel: C{str}
    @param linkrel: A prefix to apply to all relative links in C{src} or
C{href} attributes in the input document when generating the output
document.
"""
for attr in 'src', 'href':
for node in domhelpers.findElementsWithAttribute(document, attr):
href = node.getAttribute(attr)
if not href.startswith('http') and not href.startswith('/'):
node.setAttribute(attr, linkrel+node.getAttribute(attr))
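# For example (a sketch), fixRelativeLinks(template, '../') turns
# <img src="images/logo.png"> into <img src="../images/logo.png">, while src
# and href values starting with 'http' or '/' are left alone.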
def setTitle(template, title, chapterNumber):
"""
Add title and chapter number information to the template document.
The title is added to the end of the first C{title} tag and the end of the
first tag with a C{class} attribute set to C{title}. If specified, the
chapter is inserted before the title.
@type template: A DOM Node or Document
@param template: The output template which defines the presentation of the
version information.
@type title: C{list} of DOM Nodes
@param title: Nodes from the input document defining its title.
@type chapterNumber: C{int}
@param chapterNumber: The chapter number of this content in an overall
document. If not applicable, any C{False} value will result in this
information being omitted.
@return: C{None}
"""
for nodeList in (domhelpers.findNodesNamed(template, "title"),
domhelpers.findElementsWithAttribute(template, "class",
'title')):
if nodeList:
if numberer.getNumberSections() and chapterNumber:
nodeList[0].childNodes.append(microdom.Text('%s. ' % chapterNumber))
nodeList[0].childNodes.extend(title)
def setAuthors(template, authors):
"""
Add author information to the template document.
Names and contact information for authors are added to each node with a
C{class} attribute set to C{authors} and to the template head as C{link}
nodes.
@type template: A DOM Node or Document
@param template: The output template which defines the presentation of the
version information.
@type authors: C{list} of two-tuples of C{str}
@param authors: List of names and contact information for the authors of
the input document.
@return: C{None}
"""
# First, similarly to setTitle, insert text into a <div class="authors">
text = ''
for name, href in authors:
# FIXME: Do proper quoting/escaping (is it ok to use
# xml.sax.saxutils.{escape,quoteattr}?)
anchor = '<a href="%s">%s</a>' % (href, name)
if (name, href) == authors[-1]:
if len(authors) == 1:
text = anchor
else:
text += 'and ' + anchor
else:
text += anchor + ', '
childNodes = microdom.parseString('<span>' + text +'</span>').childNodes
for node in domhelpers.findElementsWithAttribute(template,
"class", 'authors'):
node.childNodes.extend(childNodes)
# Second, add appropriate <link rel="author" ...> tags to the <head>.
head = domhelpers.findNodesNamed(template, 'head')[0]
authors = [microdom.parseString('<link rel="author" href="%s" title="%s"/>'
% (href, name)).childNodes[0]
for name, href in authors]
head.childNodes.extend(authors)
def setVersion(template, version):
"""
Add a version indicator to the given template.
@type template: A DOM Node or Document
@param template: The output template which defines the presentation of the
version information.
@type version: C{str}
@param version: The version string to add to the template.
@return: C{None}
"""
for node in domhelpers.findElementsWithAttribute(template, "class",
"version"):
node.appendChild(microdom.Text(version))
def getOutputFileName(originalFileName, outputExtension, index=None):
"""
Return a filename which is the same as C{originalFileName} except for the
extension, which is replaced with C{outputExtension}.
For example, if C{originalFileName} is C{'/foo/bar.baz'} and
C{outputExtension} is C{'.quux'}, the return value will be
C{'/foo/bar.quux'}.
@type originalFileName: C{str}
@type outputExtension: C{str}
@param index: ignored, never passed.
@rtype: C{str}
"""
return os.path.splitext(originalFileName)[0]+outputExtension
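# For example, getOutputFileName('/foo/bar.xhtml', '.html') returns
# '/foo/bar.html'; note that the extension argument must include the leading
# dot, since os.path.splitext() leaves the base name without one.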
def munge(document, template, linkrel, dir, fullpath, ext, url, config, outfileGenerator=getOutputFileName):
"""
Mutate C{template} until it resembles C{document}.
@type document: A DOM Node or Document
@param document: The input document which contains all of the content to be
presented.
@type template: A DOM Node or Document
@param template: The template document which defines the desired
presentation format of the content.
@type linkrel: C{str}
@param linkrel: A prefix to apply to all relative links in C{src} or
C{href} attributes in the input document when generating the output
document.
@type dir: C{str}
@param dir: The directory in which to search for source listing files.
@type fullpath: C{str}
@param fullpath: The file name which contained the input document.
@type ext: C{str}
@param ext: The extension to use when selecting an output file name. This
replaces the extension of the input file name.
@type url: C{str}
@param url: A string which will be interpolated with the fully qualified
Python name of any API reference encountered in the input document, the
result of which will be used as a link to API documentation for that name
in the output document.
@type config: C{dict}
@param config: Further specification of the desired form of the output.
Valid keys in this dictionary::
noapi: If present and set to a True value, links to API documentation
will not be generated.
version: A string which will be included in the output to indicate the
version of this documentation.
@type outfileGenerator: Callable of C{str}, C{str} returning C{str}
@param outfileGenerator: Output filename factory. This is invoked with the
input filename and C{ext}, and the output document is serialized to the
file with the name returned.
@return: C{None}
"""
fixRelativeLinks(template, linkrel)
addMtime(template, fullpath)
removeH1(document)
if not config.get('noapi', False):
fixAPI(document, url)
fontifyPython(document)
fixLinks(document, ext)
addPyListings(document, dir)
addHTMLListings(document, dir)
addPlainListings(document, dir)
putInToC(template, generateToC(document))
footnotes(document)
notes(document)
setIndexLink(template, indexer.getIndexFilename())
setVersion(template, config.get('version', ''))
# Insert the document into the template
chapterNumber = htmlbook.getNumber(fullpath)
title = domhelpers.findNodesNamed(document, 'title')[0].childNodes
setTitle(template, title, chapterNumber)
if numberer.getNumberSections() and chapterNumber:
numberDocument(document, chapterNumber)
index(document, outfileGenerator(os.path.split(fullpath)[1], ext),
htmlbook.getReference(fullpath))
authors = domhelpers.findNodesNamed(document, 'link')
authors = [(node.getAttribute('title',''), node.getAttribute('href', ''))
for node in authors if node.getAttribute('rel', '') == 'author']
setAuthors(template, authors)
body = domhelpers.findNodesNamed(document, "body")[0]
tmplbody = domhelpers.findElementsWithAttribute(template, "class",
"body")[0]
tmplbody.childNodes = body.childNodes
tmplbody.setAttribute("class", "content")
def parseFileAndReport(filename):
"""
Parse and return the contents of the given lore XHTML document.
@type filename: C{str}
@param filename: The name of a file containing a lore XHTML document to
load.
@raise process.ProcessingFailure: When the contents of the specified file
cannot be parsed.
@rtype: A DOM Document
@return: The document contained in C{filename}.
"""
try:
return microdom.parse(open(filename))
except microdom.MismatchedTags, e:
raise process.ProcessingFailure(
"%s:%s: begin mismatched tags <%s>/</%s>" %
(e.begLine, e.begCol, e.got, e.expect),
"%s:%s: end mismatched tags <%s>/</%s>" %
(e.endLine, e.endCol, e.got, e.expect))
except microdom.ParseError, e:
raise process.ProcessingFailure("%s:%s:%s" % (e.line, e.col, e.message))
except IOError, e:
raise process.ProcessingFailure(e.strerror + ", filename was '" + filename + "'")
def makeSureDirectoryExists(filename):
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if (not os.path.exists(dirname)):
os.makedirs(dirname)
def doFile(filename, linkrel, ext, url, templ, options={}, outfileGenerator=getOutputFileName):
"""
Process the input document at C{filename} and write an output document.
@type filename: C{str}
@param filename: The path to the input file which will be processed.
@type linkrel: C{str}
@param linkrel: A prefix to apply to all relative links in C{src} or
C{href} attributes in the input document when generating the output
document.
@type ext: C{str}
@param ext: The extension to use when selecting an output file name. This
replaces the extension of the input file name.
@type url: C{str}
@param url: A string which will be interpolated with the fully qualified
Python name of any API reference encountered in the input document, the
result of which will be used as a link to API documentation for that name
in the output document.
@type templ: A DOM Node or Document
@param templ: The template on which the output document will be based.
This is mutated and then serialized to the output file.
@type options: C{dict}
@param options: Further specification of the desired form of the output.
Valid keys in this dictionary::
noapi: If present and set to a True value, links to API documentation
will not be generated.
version: A string which will be included in the output to indicate the
version of this documentation.
@type outfileGenerator: Callable of C{str}, C{str} returning C{str}
@param outfileGenerator: Output filename factory. This is invoked with the
input filename and C{ext}, and the output document is serialized to the
file with the name returned.
@return: C{None}
"""
doc = parseFileAndReport(filename)
clonedNode = templ.cloneNode(1)
munge(doc, clonedNode, linkrel, os.path.dirname(filename), filename, ext,
url, options, outfileGenerator)
newFilename = outfileGenerator(filename, ext)
makeSureDirectoryExists(newFilename)
clonedNode.writexml(open(newFilename, 'wb'))
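# A minimal usage sketch (hypothetical paths and API URL; assumes the lore
# helpers above plus a template file on disk). It is defined but never called
# here.
def _exampleLoreRun():
    templ = microdom.parse(open('template.tpl'))
    doFile('howto/index.xhtml', linkrel='', ext='.html',
           url='http://example.com/api/%s.html', templ=templ)
# This parses the input document, clones the template, fills it in via
# munge(), and writes the result next to the input file with the '.html'
# extension chosen by getOutputFileName().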
|
|
#!/usr/bin/env python
# encoding: utf-8
"""Pyvona : an IVONA python library
Author: Zachary Bears
Contact Email: [email protected]
Note: Full operation of this library requires the requests and pygame libraries
"""
import datetime
import hashlib
import hmac
import json
import tempfile
import contextlib
class PyvonaException(Exception):
pass
try:
import pygame
except ImportError:
pygame_available = False
else:
pygame_available = True
try:
import requests
requests.packages.urllib3.disable_warnings()
except ImportError:
msg = 'The requests library is essential for Pyvona operation. '
msg += 'Without it, Pyvona will not function correctly.'
raise PyvonaException(msg)
_amazon_date_format = '%Y%m%dT%H%M%SZ'
_date_format = '%Y%m%d'
def create_voice(access_key, secret_key):
"""Creates and returns a voice object to interact with
"""
return Voice(access_key, secret_key)
class Voice(object):
"""An object that contains all the required methods for interacting
with the IVONA text-to-speech system
"""
voice_name = None
speech_rate = None
sentence_break = None
paragraph_break = None
_codec = "ogg"
region_options = {
'us-east': 'us-east-1',
'us-west': 'us-west-2',
'eu-west': 'eu-west-1',
}
access_key = None
secret_key = None
algorithm = 'AWS4-HMAC-SHA256'
signed_headers = 'content-type;host;x-amz-content-sha256;x-amz-date'
_region = None
_host = None
@property
def region(self):
return self._region
@region.setter
def region(self, region_name):
self._region = self.region_options.get(region_name, 'us-east-1')
self._host = 'tts.{}.ivonacloud.com'.format(self._region)
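# Unrecognised region names fall back to 'us-east-1'; the TTS host name is
# always derived from the resolved region.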
@property
def codec(self):
return self._codec
@codec.setter
def codec(self, codec):
if codec not in ["mp3", "ogg"]:
raise PyvonaException(
"Invalid codec specified. Please choose 'mp3' or 'ogg'")
self._codec = codec
@contextlib.contextmanager
def use_ogg_codec(self):
current_codec = self.codec
self.codec = "ogg"
try:
yield
finally:
self.codec = current_codec
def fetch_voice_ogg(self, text_to_speak, filename):
"""Fetch an ogg file for given text and save it to the given file name
"""
with self.use_ogg_codec():
self.fetch_voice(text_to_speak, filename)
def fetch_voice(self, text_to_speak, filename):
"""Fetch a voice file for given text and save it to the given file name
"""
file_extension = ".{codec}".format(codec=self.codec)
filename += file_extension if not filename.endswith(
file_extension) else ""
with open(filename, 'wb') as f:
self.fetch_voice_fp(text_to_speak, f)
def fetch_voice_fp(self, text_to_speak, fp):
"""Fetch a voice file for given text and save it to the given file pointer
"""
r = self._send_amazon_auth_packet_v4(
'POST', 'tts', 'application/json', '/CreateSpeech', '',
self._generate_payload(text_to_speak), self._region, self._host)
if r.content.startswith(b'{'):
raise PyvonaException('Error fetching voice: {}'.format(r.content))
else:
fp.write(r.content)
def speak(self, text_to_speak):
"""Speak a given text
"""
if not pygame_available:
raise PyvonaException(
"Pygame not installed. Please install to use speech.")
with tempfile.SpooledTemporaryFile() as f:
with self.use_ogg_codec():
self.fetch_voice_fp(text_to_speak, f)
f.seek(0)
if not pygame.mixer.get_init():
pygame.mixer.init()
channel = pygame.mixer.Channel(5)
sound = pygame.mixer.Sound(f)
channel.play(sound)
while channel.get_busy():
pass
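# Note: the busy-wait above means speak() blocks until pygame has finished
# playing the synthesized audio.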
def list_voices(self):
"""Returns all the possible voices
"""
r = self._send_amazon_auth_packet_v4(
'POST', 'tts', 'application/json', '/ListVoices', '', '',
self._region, self._host)
return r.json()
def _generate_payload(self, text_to_speak):
return json.dumps({
'Input': {
"Type":"application/ssml+xml",
'Data': text_to_speak
},
'OutputFormat': {
'Codec': self.codec.upper()
},
'Parameters': {
'Rate': self.speech_rate,
'SentenceBreak': self.sentence_break,
'ParagraphBreak': self.paragraph_break
},
'Voice': {
'Name': self.voice_name
}
})
def _send_amazon_auth_packet_v4(self, method, service, content_type,
canonical_uri, canonical_querystring,
request_parameters, region, host):
"""Send a packet to a given amazon server using Amazon's signature Version 4,
Returns the resulting response object
"""
# Create date for headers and the credential string
t = datetime.datetime.utcnow()
amazon_date = t.strftime(_amazon_date_format)
date_stamp = t.strftime(_date_format)
# Step 1: Create canonical request
payload_hash = self._sha_hash(request_parameters)
canonical_headers = 'content-type:{}\n'.format(content_type)
canonical_headers += 'host:{}\n'.format(host)
canonical_headers += 'x-amz-content-sha256:{}\n'.format(payload_hash)
canonical_headers += 'x-amz-date:{}\n'.format(amazon_date)
canonical_request = '\n'.join([
method, canonical_uri, canonical_querystring, canonical_headers,
self.signed_headers, payload_hash])
# Step 2: Create the string to sign
credential_scope = '{}/{}/{}/aws4_request'.format(
date_stamp, region, service)
string_to_sign = '\n'.join([
self.algorithm, amazon_date, credential_scope,
self._sha_hash(canonical_request)])
# Step 3: Calculate the signature
signing_key = self._get_signature_key(
self.secret_key, date_stamp, region, service)
signature = hmac.new(
signing_key, string_to_sign.encode('utf-8'),
hashlib.sha256).hexdigest()
# Step 4: Create the signed packet
endpoint = 'https://{}{}'.format(host, canonical_uri)
authorization_header = '{} Credential={}/{}, ' +\
'SignedHeaders={}, Signature={}'
authorization_header = authorization_header.format(
self.algorithm, self.access_key, credential_scope,
self.signed_headers, signature)
headers = {
'Host': host,
'Content-type': content_type,
'X-Amz-Date': amazon_date,
'Authorization': authorization_header,
'x-amz-content-sha256': payload_hash,
'Content-Length': len(request_parameters)
}
# Send the packet and return the response
return requests.post(endpoint, data=request_parameters,
headers=headers)
def _sha_hash(self, to_hash):
return hashlib.sha256(to_hash.encode('utf-8')).hexdigest()
def _sign(self, key, msg):
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
def _get_signature_key(self, key, date_stamp, region_name, service_name):
k_date = self._sign(('AWS4{}'.format(key)).encode('utf-8'), date_stamp)
k_region = self._sign(k_date, region_name)
k_service = self._sign(k_region, service_name)
k_signing = self._sign(k_service, 'aws4_request')
return k_signing
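# The chain above is the standard AWS Signature Version 4 key derivation:
# HMAC-SHA256 is applied successively to the date stamp, region, service name
# and the literal 'aws4_request', seeded with 'AWS4' + secret key; the final
# digest signs the string-to-sign built in _send_amazon_auth_packet_v4().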
def __init__(self, access_key, secret_key):
"""Set initial voice object parameters
"""
self.region = 'us-east'
self.voice_name = 'Brian'
self.access_key = access_key
self.secret_key = secret_key
self.speech_rate = 'medium'
self.sentence_break = 400
self.paragraph_break = 650
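# Minimal usage sketch (placeholder credentials; a real IVONA account is
# required, and speak() additionally needs pygame). Defined but never called.
def _example_pyvona_usage():
    v = create_voice('ACCESS_KEY', 'SECRET_KEY')
    v.codec = 'mp3'
    v.fetch_voice('Hello world', 'hello_world')  # writes hello_world.mp3
    v.speak('Hello world')  # plays the synthesized audio via pygame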
|
|
# Copyright 2013 Josh Durgin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import functools
from lxml import etree
import mock
from oslo_config import cfg
from oslo_utils import timeutils
import six
from six.moves import range
from six.moves import urllib
import webob
from cinder.api import common
from cinder.api import extensions
from cinder.api.v2 import volumes
from cinder import consistencygroup as consistencygroupAPI
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
from cinder.tests.unit import fake_notifier
from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit import utils
from cinder.volume import api as volume_api
CONF = cfg.CONF
NS = '{http://docs.openstack.org/api/openstack-block-storage/2.0/content}'
DEFAULT_AZ = "zone1:host1"
class VolumeApiTest(test.TestCase):
def setUp(self):
super(VolumeApiTest, self).setUp()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
fake_image.stub_out_image_service(self.stubs)
self.controller = volumes.VolumeController(self.ext_mgr)
self.flags(host='fake',
notification_driver=[fake_notifier.__name__])
self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all)
self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete)
self.stubs.Set(db, 'service_get_all_by_topic',
stubs.stub_service_get_all_by_topic)
self.maxDiff = None
self.ctxt = context.RequestContext('admin', 'fakeproject', True)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_create(self, mock_validate):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
vol = self._vol_in_request_body()
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
res_dict = self.controller.create(req, body)
ex = self._expected_vol_from_controller()
self.assertEqual(ex, res_dict)
self.assertTrue(mock_validate.called)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_create_with_type(self, mock_validate):
vol_type = db.volume_type_create(
context.get_admin_context(),
dict(name=CONF.default_volume_type, extra_specs={})
)
db_vol_type = db.volume_type_get(context.get_admin_context(),
vol_type.id)
vol = self._vol_in_request_body(volume_type="FakeTypeName")
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
# Raise 404 when type name isn't valid
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, body)
# Use correct volume type name
vol.update(dict(volume_type=CONF.default_volume_type))
body.update(dict(volume=vol))
res_dict = self.controller.create(req, body)
volume_id = res_dict['volume']['id']
self.assertEqual(1, len(res_dict))
# Use correct volume type id
vol.update(dict(volume_type=db_vol_type['id']))
body.update(dict(volume=vol))
res_dict = self.controller.create(req, body)
volume_id = res_dict['volume']['id']
self.assertEqual(1, len(res_dict))
self.stubs.Set(volume_api.API, 'get_all',
lambda *args, **kwargs:
[stubs.stub_volume(volume_id,
volume_type={'name': vol_type})])
req = fakes.HTTPRequest.blank('/v2/volumes/detail')
res_dict = self.controller.detail(req)
self.assertTrue(mock_validate.called)
def _vol_in_request_body(self,
size=stubs.DEFAULT_VOL_SIZE,
name=stubs.DEFAULT_VOL_NAME,
description=stubs.DEFAULT_VOL_DESCRIPTION,
availability_zone=DEFAULT_AZ,
snapshot_id=None,
source_volid=None,
source_replica=None,
consistencygroup_id=None,
volume_type=None,
image_ref=None,
image_id=None):
vol = {"size": size,
"name": name,
"description": description,
"availability_zone": availability_zone,
"snapshot_id": snapshot_id,
"source_volid": source_volid,
"source_replica": source_replica,
"consistencygroup_id": consistencygroup_id,
"volume_type": volume_type,
}
if image_id is not None:
vol['image_id'] = image_id
elif image_ref is not None:
vol['imageRef'] = image_ref
return vol
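# The helper above builds the inner 'volume' dict of a create request (the
# tests wrap it as {"volume": vol}), e.g. {'size': ..., 'name': ...,
# 'availability_zone': 'zone1:host1', 'snapshot_id': None, ...};
# 'image_id'/'imageRef' are only included when explicitly supplied.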
def _expected_vol_from_controller(
self,
size=stubs.DEFAULT_VOL_SIZE,
availability_zone=DEFAULT_AZ,
description=stubs.DEFAULT_VOL_DESCRIPTION,
name=stubs.DEFAULT_VOL_NAME,
consistencygroup_id=None,
source_volid=None,
snapshot_id=None,
metadata=None,
attachments=None,
volume_type=stubs.DEFAULT_VOL_TYPE,
status=stubs.DEFAULT_VOL_STATUS,
with_migration_status=False):
metadata = metadata or {}
attachments = attachments or []
volume = {'volume':
{'attachments': attachments,
'availability_zone': availability_zone,
'bootable': 'false',
'consistencygroup_id': consistencygroup_id,
'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
'description': description,
'id': stubs.DEFAULT_VOL_ID,
'links':
[{'href': 'http://localhost/v2/fakeproject/volumes/1',
'rel': 'self'},
{'href': 'http://localhost/fakeproject/volumes/1',
'rel': 'bookmark'}],
'metadata': metadata,
'name': name,
'replication_status': 'disabled',
'multiattach': False,
'size': size,
'snapshot_id': snapshot_id,
'source_volid': source_volid,
'status': status,
'user_id': 'fakeuser',
'volume_type': volume_type,
'encrypted': False}}
if with_migration_status:
volume['volume']['migration_status'] = None
return volume
def _expected_volume_api_create_kwargs(self, snapshot=None,
availability_zone=DEFAULT_AZ,
source_volume=None):
return {'metadata': None,
'snapshot': snapshot,
'source_volume': source_volume,
'source_replica': None,
'consistencygroup': None,
'availability_zone': availability_zone,
'scheduler_hints': None,
'multiattach': False,
}
@mock.patch.object(volume_api.API, 'get_snapshot', autospec=True)
@mock.patch.object(volume_api.API, 'create', autospec=True)
def test_volume_creation_from_snapshot(self, create, get_snapshot):
create.side_effect = stubs.stub_volume_create
get_snapshot.side_effect = stubs.stub_snapshot_get
snapshot_id = stubs.TEST_SNAPSHOT_UUID
vol = self._vol_in_request_body(snapshot_id=stubs.TEST_SNAPSHOT_UUID)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
res_dict = self.controller.create(req, body)
ex = self._expected_vol_from_controller(snapshot_id=snapshot_id)
self.assertEqual(ex, res_dict)
context = req.environ['cinder.context']
get_snapshot.assert_called_once_with(self.controller.volume_api,
context, snapshot_id)
kwargs = self._expected_volume_api_create_kwargs(
stubs.stub_snapshot(snapshot_id))
create.assert_called_once_with(self.controller.volume_api, context,
vol['size'], stubs.DEFAULT_VOL_NAME,
stubs.DEFAULT_VOL_DESCRIPTION, **kwargs)
@mock.patch.object(volume_api.API, 'get_snapshot', autospec=True)
def test_volume_creation_fails_with_invalid_snapshot(self, get_snapshot):
get_snapshot.side_effect = stubs.stub_snapshot_get
snapshot_id = "fake_id"
vol = self._vol_in_request_body(snapshot_id=snapshot_id)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
# Raise 404 when snapshot cannot be found.
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, body)
context = req.environ['cinder.context']
get_snapshot.assert_called_once_with(self.controller.volume_api,
context, snapshot_id)
@mock.patch.object(volume_api.API, 'get_volume', autospec=True)
@mock.patch.object(volume_api.API, 'create', autospec=True)
def test_volume_creation_from_source_volume(self, create, get_volume):
get_volume.side_effect = functools.partial(stubs.stub_volume_get,
viewable_admin_meta=True)
create.side_effect = stubs.stub_volume_create
source_volid = '2f49aa3a-6aae-488d-8b99-a43271605af6'
vol = self._vol_in_request_body(source_volid=source_volid)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
res_dict = self.controller.create(req, body)
ex = self._expected_vol_from_controller(source_volid=source_volid)
self.assertEqual(ex, res_dict)
context = req.environ['cinder.context']
get_volume.assert_called_once_with(self.controller.volume_api,
context, source_volid)
kwargs = self._expected_volume_api_create_kwargs(
source_volume=stubs.stub_volume(source_volid))
create.assert_called_once_with(self.controller.volume_api, context,
vol['size'], stubs.DEFAULT_VOL_NAME,
stubs.DEFAULT_VOL_DESCRIPTION, **kwargs)
@mock.patch.object(volume_api.API, 'get_volume', autospec=True)
def test_volume_creation_fails_with_invalid_source_volume(self,
get_volume):
get_volume.side_effect = stubs.stub_volume_get_notfound
source_volid = "fake_id"
vol = self._vol_in_request_body(source_volid=source_volid)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
# Raise 404 when source volume cannot be found.
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, body)
context = req.environ['cinder.context']
get_volume.assert_called_once_with(self.controller.volume_api,
context, source_volid)
@mock.patch.object(volume_api.API, 'get_volume', autospec=True)
def test_volume_creation_fails_with_invalid_source_replica(self,
get_volume):
get_volume.side_effect = stubs.stub_volume_get_notfound
source_replica = "fake_id"
vol = self._vol_in_request_body(source_replica=source_replica)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
# Raise 404 when source replica cannot be found.
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, body)
context = req.environ['cinder.context']
get_volume.assert_called_once_with(self.controller.volume_api,
context, source_replica)
@mock.patch.object(volume_api.API, 'get_volume', autospec=True)
def test_volume_creation_fails_with_invalid_source_replication_status(
self, get_volume):
get_volume.side_effect = stubs.stub_volume_get
source_replica = '2f49aa3a-6aae-488d-8b99-a43271605af6'
vol = self._vol_in_request_body(source_replica=source_replica)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
# Raise 400 when replication status is disabled.
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body)
context = req.environ['cinder.context']
get_volume.assert_called_once_with(self.controller.volume_api,
context, source_replica)
@mock.patch.object(consistencygroupAPI.API, 'get', autospec=True)
def test_volume_creation_fails_with_invalid_consistency_group(self,
get_cg):
get_cg.side_effect = stubs.stub_consistencygroup_get_notfound
consistencygroup_id = '4f49aa3a-6aae-488d-8b99-a43271605af6'
vol = self._vol_in_request_body(
consistencygroup_id=consistencygroup_id)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
# Raise 404 when consistency group is not found.
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, body)
context = req.environ['cinder.context']
get_cg.assert_called_once_with(self.controller.consistencygroup_api,
context, consistencygroup_id)
def test_volume_creation_fails_with_bad_size(self):
vol = self._vol_in_request_body(size="")
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(exception.InvalidInput,
self.controller.create,
req,
body)
def test_volume_creation_fails_with_bad_availability_zone(self):
vol = self._vol_in_request_body(availability_zone="zonen:hostn")
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(exception.InvalidInput,
self.controller.create,
req, body)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_create_with_image_ref(self, mock_validate):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = self._vol_in_request_body(
availability_zone="nova",
image_ref="c905cedb-7281-47e4-8a62-f26bc5fc4c77")
ex = self._expected_vol_from_controller(availability_zone="nova")
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
res_dict = self.controller.create(req, body)
self.assertEqual(ex, res_dict)
self.assertTrue(mock_validate.called)
def test_volume_create_with_image_ref_is_integer(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = self._vol_in_request_body(availability_zone="cinder",
image_ref=1234)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_ref_not_uuid_format(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = self._vol_in_request_body(availability_zone="cinder",
image_ref="12345")
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_ref_with_empty_string(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = self._vol_in_request_body(availability_zone="cinder",
image_ref="")
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_create_with_image_id(self, mock_validate):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = self._vol_in_request_body(
availability_zone="nova",
image_id="c905cedb-7281-47e4-8a62-f26bc5fc4c77")
ex = self._expected_vol_from_controller(availability_zone="nova")
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
res_dict = self.controller.create(req, body)
self.assertEqual(ex, res_dict)
self.assertTrue(mock_validate.called)
def test_volume_create_with_image_id_is_integer(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = self._vol_in_request_body(availability_zone="cinder",
image_id=1234)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_id_not_uuid_format(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = self._vol_in_request_body(availability_zone="cinder",
image_id="12345")
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_id_with_empty_string(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = self._vol_in_request_body(availability_zone="cinder",
image_id="")
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_create_with_image_name(self, mock_validate):
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.stubs.Set(fake_image._FakeImageService,
"detail",
stubs.stub_image_service_detail)
test_id = "Fedora-x86_64-20-20140618-sda"
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = self._vol_in_request_body(availability_zone="nova",
image_ref=test_id)
ex = self._expected_vol_from_controller(availability_zone="nova")
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
res_dict = self.controller.create(req, body)
self.assertEqual(ex, res_dict)
self.assertTrue(mock_validate.called)
def test_volume_create_with_image_name_has_multiple(self):
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.stubs.Set(fake_image._FakeImageService,
"detail",
stubs.stub_image_service_detail)
test_id = "multi"
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = self._vol_in_request_body(availability_zone="nova",
image_ref=test_id)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_name_no_match(self):
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.stubs.Set(fake_image._FakeImageService,
"detail",
stubs.stub_image_service_detail)
test_id = "MissingName"
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = self._vol_in_request_body(availability_zone="nova",
image_ref=test_id)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_update(self, mock_validate):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
updates = {
"name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v2/volumes/1')
self.assertEqual(0, len(self.notifier.notifications))
res_dict = self.controller.update(req, '1', body)
expected = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ, name="Updated Test Name",
metadata={'attached_mode': 'rw', 'readonly': 'False'})
self.assertEqual(expected, res_dict)
self.assertEqual(2, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_update_deprecation(self, mock_validate):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
updates = {
"display_name": "Updated Test Name",
"display_description": "Updated Test Description",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v2/volumes/1')
self.assertEqual(0, len(self.notifier.notifications))
res_dict = self.controller.update(req, '1', body)
expected = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ, name="Updated Test Name",
description="Updated Test Description",
metadata={'attached_mode': 'rw', 'readonly': 'False'})
self.assertEqual(expected, res_dict)
self.assertEqual(2, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_update_deprecation_key_priority(self, mock_validate):
"""Test current update keys have priority over deprecated keys."""
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
updates = {
"name": "New Name",
"description": "New Description",
"display_name": "Not Shown Name",
"display_description": "Not Shown Description",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v2/volumes/1')
self.assertEqual(0, len(self.notifier.notifications))
res_dict = self.controller.update(req, '1', body)
expected = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ,
name="New Name", description="New Description",
metadata={'attached_mode': 'rw', 'readonly': 'False'})
self.assertEqual(expected, res_dict)
self.assertEqual(2, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_update_metadata(self, mock_validate):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
updates = {
"metadata": {"qos_max_iops": 2000}
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v2/volumes/1')
self.assertEqual(0, len(self.notifier.notifications))
res_dict = self.controller.update(req, '1', body)
expected = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ,
metadata={'attached_mode': 'rw', 'readonly': 'False',
'qos_max_iops': 2000})
self.assertEqual(expected, res_dict)
self.assertEqual(2, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_volume_update_with_admin_metadata(self, mock_validate):
self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
volume = stubs.stub_volume("1")
del volume['name']
del volume['volume_type']
del volume['volume_type_id']
volume['metadata'] = {'key': 'value'}
db.volume_create(context.get_admin_context(), volume)
db.volume_admin_metadata_update(context.get_admin_context(), "1",
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
values = {'volume_id': '1', }
attachment = db.volume_attach(context.get_admin_context(), values)
db.volume_attached(context.get_admin_context(),
attachment['id'], stubs.FAKE_UUID, None, '/')
updates = {
"name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v2/volumes/1')
self.assertEqual(0, len(self.notifier.notifications))
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.update(req, '1', body)
expected = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ, volume_type=None,
status='in-use', name='Updated Test Name',
attachments=[{
'id': '1',
'attachment_id': attachment['id'],
'volume_id': stubs.DEFAULT_VOL_ID,
'server_id': stubs.FAKE_UUID,
'host_name': None,
'device': '/',
}],
metadata={'key': 'value', 'readonly': 'True'},
with_migration_status=True)
self.assertEqual(expected, res_dict)
self.assertEqual(2, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
def test_update_empty_body(self):
body = {}
req = fakes.HTTPRequest.blank('/v2/volumes/1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
req, '1', body)
def test_update_invalid_body(self):
body = {
'name': 'missing top level volume key'
}
req = fakes.HTTPRequest.blank('/v2/volumes/1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
req, '1', body)
def test_update_not_found(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
updates = {
"name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v2/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
req, '1', body)
def test_volume_list_summary(self):
self.stubs.Set(volume_api.API, 'get_all',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes')
res_dict = self.controller.index(req)
expected = {
'volumes': [
{
'name': stubs.DEFAULT_VOL_NAME,
'id': '1',
'links': [
{
'href': 'http://localhost/v2/fakeproject/volumes/'
'1',
'rel': 'self'
},
{
'href': 'http://localhost/fakeproject/volumes/1',
'rel': 'bookmark'
}
],
}
]
}
self.assertEqual(expected, res_dict)
# Finally test that we cached the returned volumes
self.assertEqual(1, len(req.cached_resource()))
def test_volume_list_detail(self):
self.stubs.Set(volume_api.API, 'get_all',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/detail')
res_dict = self.controller.detail(req)
exp_vol = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ,
metadata={'attached_mode': 'rw', 'readonly': 'False'})
expected = {'volumes': [exp_vol['volume']]}
self.assertEqual(expected, res_dict)
# Finally test that we cached the returned volumes
self.assertEqual(1, len(req.cached_resource()))
def test_volume_list_detail_with_admin_metadata(self):
volume = stubs.stub_volume("1")
del volume['name']
del volume['volume_type']
del volume['volume_type_id']
volume['metadata'] = {'key': 'value'}
db.volume_create(context.get_admin_context(), volume)
db.volume_admin_metadata_update(context.get_admin_context(), "1",
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
values = {'volume_id': '1', }
attachment = db.volume_attach(context.get_admin_context(), values)
db.volume_attached(context.get_admin_context(),
attachment['id'], stubs.FAKE_UUID, None, '/')
req = fakes.HTTPRequest.blank('/v2/volumes/detail')
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.detail(req)
exp_vol = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ,
status="in-use", volume_type=None,
attachments=[{'attachment_id': attachment['id'],
'device': '/',
'server_id': stubs.FAKE_UUID,
'host_name': None,
'id': '1',
'volume_id': stubs.DEFAULT_VOL_ID}],
metadata={'key': 'value', 'readonly': 'True'},
with_migration_status=True)
expected = {'volumes': [exp_vol['volume']]}
self.assertEqual(expected, res_dict)
def test_volume_index_with_marker(self):
def stub_volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False,
offset=0):
return [
stubs.stub_volume(1, display_name='vol1'),
stubs.stub_volume(2, display_name='vol2'),
]
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes?marker=1')
res_dict = self.controller.index(req)
volumes = res_dict['volumes']
self.assertEqual(2, len(volumes))
self.assertEqual(1, volumes[0]['id'])
self.assertEqual(2, volumes[1]['id'])
def test_volume_index_limit(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes'
'?limit=1&name=foo'
'&sort=id1:asc')
res_dict = self.controller.index(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
# Ensure that the next link is correctly formatted, it should
# contain the same limit, filter, and sort information as the
# original request as well as a marker; this ensures that the
# caller can simply use the "next" link and that they do not
# need to manually insert the limit and sort information.
links = res_dict['volumes_links']
self.assertEqual('next', links[0]['rel'])
href_parts = urllib.parse.urlparse(links[0]['href'])
self.assertEqual('/v2/fakeproject/volumes', href_parts.path)
params = urllib.parse.parse_qs(href_parts.query)
self.assertEqual(str(volumes[0]['id']), params['marker'][0])
self.assertEqual('1', params['limit'][0])
self.assertEqual('foo', params['name'][0])
self.assertEqual('id1:asc', params['sort'][0])
def test_volume_index_limit_negative(self):
req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
def test_volume_index_limit_non_int(self):
req = fakes.HTTPRequest.blank('/v2/volumes?limit=a')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
def test_volume_index_limit_marker(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes?marker=1&limit=1')
res_dict = self.controller.index(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
self.assertEqual('1', volumes[0]['id'])
def _create_db_volumes(self, num_volumes):
volumes = [utils.create_volume(self.ctxt, display_name='vol%s' % i)
for i in range(num_volumes)]
for vol in volumes:
self.addCleanup(db.volume_destroy, self.ctxt, vol.id)
volumes.reverse()
return volumes
def test_volume_index_limit_offset(self):
created_volumes = self._create_db_volumes(2)
req = fakes.HTTPRequest.blank('/v2/volumes?limit=2&offset=1')
res_dict = self.controller.index(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
self.assertEqual(created_volumes[1].id, volumes[0]['id'])
req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
req = fakes.HTTPRequest.blank('/v2/volumes?limit=a&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req)
def test_volume_detail_with_marker(self):
def stub_volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False,
offset=0):
return [
stubs.stub_volume(1, display_name='vol1'),
stubs.stub_volume(2, display_name='vol2'),
]
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1')
res_dict = self.controller.detail(req)
volumes = res_dict['volumes']
self.assertEqual(2, len(volumes))
self.assertEqual(1, volumes[0]['id'])
self.assertEqual(2, volumes[1]['id'])
def test_volume_detail_limit(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=1')
res_dict = self.controller.detail(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
# Ensure that the next link is correctly formatted
links = res_dict['volumes_links']
self.assertEqual('next', links[0]['rel'])
href_parts = urllib.parse.urlparse(links[0]['href'])
self.assertEqual('/v2/fakeproject/volumes/detail', href_parts.path)
params = urllib.parse.parse_qs(href_parts.query)
self.assertTrue('marker' in params)
self.assertEqual('1', params['limit'][0])
def test_volume_detail_limit_negative(self):
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail,
req)
def test_volume_detail_limit_non_int(self):
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail,
req)
def test_volume_detail_limit_marker(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1&limit=1')
res_dict = self.controller.detail(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
self.assertEqual('1', volumes[0]['id'])
def test_volume_detail_limit_offset(self):
created_volumes = self._create_db_volumes(2)
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1')
res_dict = self.controller.detail(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
self.assertEqual(created_volumes[1].id, volumes[0]['id'])
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1',
use_admin_context=True)
res_dict = self.controller.detail(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
self.assertEqual(created_volumes[1].id, volumes[0]['id'])
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail,
req)
req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a&offset=1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail,
req)
def test_volume_with_limit_zero(self):
def stub_volume_get_all(context, marker, limit, **kwargs):
return []
self.stubs.Set(db, 'volume_get_all', stub_volume_get_all)
req = fakes.HTTPRequest.blank('/v2/volumes?limit=0')
res_dict = self.controller.index(req)
expected = {'volumes': []}
self.assertEqual(expected, res_dict)
def _validate_next_link(self, detailed, item_count, osapi_max_limit, limit,
should_link_exist):
keys_fns = (('volumes', self.controller.index),
('volumes/detail', self.controller.detail))
key, fn = keys_fns[detailed]
req_string = '/v2/%s?all_tenants=1' % key
if limit:
req_string += '&limit=%s' % limit
req = fakes.HTTPRequest.blank(req_string, use_admin_context=True)
link_return = [{"rel": "next", "href": "fake_link"}]
self.flags(osapi_max_limit=osapi_max_limit)
def get_pagination_params(params, max_limit=CONF.osapi_max_limit,
original_call=common.get_pagination_params):
return original_call(params, max_limit)
def _get_limit_param(params, max_limit=CONF.osapi_max_limit,
original_call=common._get_limit_param):
return original_call(params, max_limit)
with mock.patch.object(common, 'get_pagination_params',
get_pagination_params), \
mock.patch.object(common, '_get_limit_param',
_get_limit_param), \
mock.patch.object(common.ViewBuilder, '_generate_next_link',
return_value=link_return):
res_dict = fn(req)
self.assertEqual(item_count, len(res_dict['volumes']))
self.assertEqual(should_link_exist, 'volumes_links' in res_dict)
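# The helper above encodes the pagination contract exercised below: a
# 'volumes_links' next link is expected exactly when the number of returned
# volumes equals the effective page size, i.e. min(limit, osapi_max_limit).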
def test_volume_default_limit(self):
self.stubs.UnsetAll()
self._create_db_volumes(3)
# Verify both the index and detail queries
for detailed in (True, False):
# Number of volumes less than max, do not include
self._validate_next_link(detailed, item_count=3, osapi_max_limit=4,
limit=None, should_link_exist=False)
# Number of volumes equals the max, next link will be included
self._validate_next_link(detailed, item_count=3, osapi_max_limit=3,
limit=None, should_link_exist=True)
# Number of volumes more than the max, include next link
self._validate_next_link(detailed, item_count=2, osapi_max_limit=2,
limit=None, should_link_exist=True)
# Limit lower than max but doesn't limit, no next link
self._validate_next_link(detailed, item_count=3, osapi_max_limit=5,
limit=4, should_link_exist=False)
# Limit lower than max and limits, we have next link
self._validate_next_link(detailed, item_count=2, osapi_max_limit=4,
limit=2, should_link_exist=True)
# Limit higher than max and max limits, we have next link
self._validate_next_link(detailed, item_count=2, osapi_max_limit=2,
limit=4, should_link_exist=True)
# Limit higher than max but none of them limiting, no next link
self._validate_next_link(detailed, item_count=3, osapi_max_limit=4,
limit=5, should_link_exist=False)
def test_volume_list_default_filters(self):
"""Tests that the default filters from volume.api.API.get_all are set.
1. 'no_migration_status'=True for non-admins and get_all_by_project is
invoked.
2. 'no_migration_status' is not included for admins.
3. When 'all_tenants' is not specified, then it is removed and
get_all_by_project is invoked for admins.
4. When 'all_tenants' is specified, then it is removed and get_all
is invoked for admins.
"""
# Non-admin, project function should be called with no_migration_status
def stub_volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False,
offset=0):
self.assertEqual(True, filters['no_migration_targets'])
self.assertFalse('all_tenants' in filters)
return [stubs.stub_volume(1, display_name='vol1')]
def stub_volume_get_all(context, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False, offset=0):
return []
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project)
self.stubs.Set(db, 'volume_get_all', stub_volume_get_all)
# all_tenants does not matter for non-admin
for params in ['', '?all_tenants=1']:
req = fakes.HTTPRequest.blank('/v2/volumes%s' % params)
resp = self.controller.index(req)
self.assertEqual(1, len(resp['volumes']))
self.assertEqual('vol1', resp['volumes'][0]['name'])
# Admin, all_tenants is not set, project function should be called
# without no_migration_status
def stub_volume_get_all_by_project2(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False,
offset=0):
self.assertFalse('no_migration_targets' in filters)
return [stubs.stub_volume(1, display_name='vol2')]
def stub_volume_get_all2(context, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False, offset=0):
return []
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project2)
self.stubs.Set(db, 'volume_get_all', stub_volume_get_all2)
req = fakes.HTTPRequest.blank('/v2/volumes', use_admin_context=True)
resp = self.controller.index(req)
self.assertEqual(1, len(resp['volumes']))
self.assertEqual('vol2', resp['volumes'][0]['name'])
# Admin, all_tenants is set, get_all function should be called
# without no_migration_status
def stub_volume_get_all_by_project3(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False,
offset=0):
return []
def stub_volume_get_all3(context, marker, limit,
sort_keys=None, sort_dirs=None,
filters=None,
viewable_admin_meta=False, offset=0):
self.assertFalse('no_migration_targets' in filters)
self.assertFalse('all_tenants' in filters)
return [stubs.stub_volume(1, display_name='vol3')]
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project3)
self.stubs.Set(db, 'volume_get_all', stub_volume_get_all3)
req = fakes.HTTPRequest.blank('/v2/volumes?all_tenants=1',
use_admin_context=True)
resp = self.controller.index(req)
self.assertEqual(1, len(resp['volumes']))
self.assertEqual('vol3', resp['volumes'][0]['name'])
def test_volume_show(self):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/1')
res_dict = self.controller.show(req, '1')
expected = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ,
metadata={'attached_mode': 'rw', 'readonly': 'False'})
self.assertEqual(expected, res_dict)
# Finally test that we cached the returned volume
self.assertIsNotNone(req.cached_resource_by_id('1'))
def test_volume_show_no_attachments(self):
def stub_volume_get(self, context, volume_id, **kwargs):
return stubs.stub_volume(volume_id, attach_status='detached')
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/1')
res_dict = self.controller.show(req, '1')
expected = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ,
metadata={'readonly': 'False'})
self.assertEqual(expected, res_dict)
def test_volume_show_no_volume(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v2/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1)
# Finally test that nothing was cached
self.assertIsNone(req.cached_resource_by_id('1'))
def test_volume_show_with_admin_metadata(self):
volume = stubs.stub_volume("1")
del volume['name']
del volume['volume_type']
del volume['volume_type_id']
volume['metadata'] = {'key': 'value'}
db.volume_create(context.get_admin_context(), volume)
db.volume_admin_metadata_update(context.get_admin_context(), "1",
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
values = {'volume_id': '1', }
attachment = db.volume_attach(context.get_admin_context(), values)
db.volume_attached(context.get_admin_context(),
attachment['id'], stubs.FAKE_UUID, None, '/')
req = fakes.HTTPRequest.blank('/v2/volumes/1')
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.show(req, '1')
expected = self._expected_vol_from_controller(
availability_zone=stubs.DEFAULT_AZ,
volume_type=None, status='in-use',
attachments=[{
'id': '1',
'attachment_id': attachment['id'],
'volume_id': stubs.DEFAULT_VOL_ID,
'server_id': stubs.FAKE_UUID,
'host_name': None,
'device': '/'}],
metadata={'key': 'value', 'readonly': 'True'},
with_migration_status=True)
self.assertEqual(expected, res_dict)
def test_volume_show_with_encrypted_volume(self):
def stub_volume_get(self, context, volume_id, **kwargs):
return stubs.stub_volume(volume_id, encryption_key_id='fake_id')
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(True, res_dict['volume']['encrypted'])
def test_volume_show_with_unencrypted_volume(self):
def stub_volume_get(self, context, volume_id, **kwargs):
return stubs.stub_volume(volume_id, encryption_key_id=None)
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(False, res_dict['volume']['encrypted'])
def test_volume_delete(self):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/1')
resp = self.controller.delete(req, 1)
self.assertEqual(202, resp.status_int)
def test_volume_delete_attached(self):
def stub_volume_attached(self, context, volume, force=False):
raise exception.VolumeAttached(volume_id=volume['id'])
self.stubs.Set(volume_api.API, "delete", stub_volume_attached)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/volumes/1')
exp = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete,
req, 1)
expect_msg = "Volume cannot be deleted while in attached state"
self.assertEqual(expect_msg, six.text_type(exp))
def test_volume_delete_no_volume(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v2/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1)
def test_admin_list_volumes_limited_to_project(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
req = fakes.HTTPRequest.blank('/v2/fake/volumes',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
def test_admin_list_volumes_all_tenants(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
req = fakes.HTTPRequest.blank('/v2/fake/volumes?all_tenants=1',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(3, len(res['volumes']))
def test_all_tenants_non_admin_gets_all_tenants(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/fake/volumes?all_tenants=1')
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
def test_non_admin_get_by_project(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
req = fakes.HTTPRequest.blank('/v2/fake/volumes')
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
def _create_volume_bad_request(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/volumes')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
def test_create_no_body(self):
self._create_volume_bad_request(body=None)
def test_create_missing_volume(self):
body = {'foo': {'a': 'b'}}
self._create_volume_bad_request(body=body)
def test_create_malformed_entity(self):
body = {'volume': 'string'}
self._create_volume_bad_request(body=body)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_string(self, get_all):
req = mock.MagicMock()
context = mock.Mock()
req.environ = {'cinder.context': context}
req.params = {'display_name': 'Volume-573108026'}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
context, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'display_name': 'Volume-573108026'},
viewable_admin_meta=True, offset=0)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_list(self, get_all):
req = mock.MagicMock()
context = mock.Mock()
req.environ = {'cinder.context': context}
req.params = {'id': "['1', '2', '3']"}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
context, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'id': ['1', '2', '3']}, viewable_admin_meta=True,
offset=0)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_expression(self, get_all):
req = mock.MagicMock()
context = mock.Mock()
req.environ = {'cinder.context': context}
req.params = {'name': "d-"}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
context, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'display_name': 'd-'}, viewable_admin_meta=True, offset=0)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_status(self, get_all):
req = mock.MagicMock()
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
req.environ = {'cinder.context': ctxt}
req.params = {'status': 'available'}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
ctxt, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'status': 'available'}, viewable_admin_meta=True,
offset=0)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_metadata(self, get_all):
req = mock.MagicMock()
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
req.environ = {'cinder.context': ctxt}
req.params = {'metadata': "{'fake_key': 'fake_value'}"}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
ctxt, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'metadata': {'fake_key': 'fake_value'}},
viewable_admin_meta=True, offset=0)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_availability_zone(self, get_all):
req = mock.MagicMock()
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
req.environ = {'cinder.context': ctxt}
req.params = {'availability_zone': 'nova'}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
ctxt, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'availability_zone': 'nova'}, viewable_admin_meta=True,
offset=0)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_invalid_filter(self, get_all):
req = mock.MagicMock()
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
req.environ = {'cinder.context': ctxt}
req.params = {'invalid_filter': 'invalid',
'availability_zone': 'nova'}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
ctxt, None, CONF.osapi_max_limit,
sort_keys=['created_at'], sort_dirs=['desc'],
filters={'availability_zone': 'nova'}, viewable_admin_meta=True,
offset=0)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_sort_by_name(self, get_all):
"""Name in client means display_name in database."""
req = mock.MagicMock()
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
req.environ = {'cinder.context': ctxt}
req.params = {'sort': 'name'}
self.controller._view_builder.detail_list = mock.Mock()
self.controller._get_volumes(req, True)
get_all.assert_called_once_with(
ctxt, None, CONF.osapi_max_limit,
sort_dirs=['desc'], viewable_admin_meta=True,
sort_keys=['display_name'], filters={}, offset=0)
def test_get_volume_filter_options_using_config(self):
self.override_config('query_volume_filters', ['name', 'status',
'metadata'])
self.assertEqual(['name', 'status', 'metadata'],
self.controller._get_volume_filter_options())
class VolumeSerializerTest(test.TestCase):
def _verify_volume_attachment(self, attach, tree):
for attr in ('id', 'volume_id', 'server_id', 'device'):
self.assertEqual(str(attach[attr]), tree.get(attr))
def _verify_volume(self, vol, tree):
self.assertEqual(NS + 'volume', tree.tag)
for attr in ('id', 'status', 'size', 'availability_zone', 'created_at',
'name', 'description', 'volume_type', 'bootable',
'snapshot_id', 'source_volid'):
self.assertEqual(str(vol[attr]), tree.get(attr))
for child in tree:
self.assertIn(child.tag, (NS + 'attachments', NS + 'metadata'))
            if child.tag == NS + 'attachments':
                self.assertEqual(1, len(child))
                self.assertEqual(NS + 'attachment', child[0].tag)
                self._verify_volume_attachment(vol['attachments'][0], child[0])
            elif child.tag == NS + 'metadata':
not_seen = set(vol['metadata'].keys())
for gr_child in child:
self.assertIn(gr_child.get("key"), not_seen)
self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
gr_child.text)
not_seen.remove(gr_child.get('key'))
self.assertEqual(0, len(not_seen))
def test_volume_show_create_serializer(self):
serializer = volumes.VolumeTemplate()
raw_volume = dict(
id='vol_id',
status='vol_status',
size=1024,
availability_zone='vol_availability',
bootable=False,
created_at=timeutils.utcnow(),
attachments=[
dict(
id='vol_id',
volume_id='vol_id',
server_id='instance_uuid',
device='/foo'
)
],
name='vol_name',
description='vol_desc',
volume_type='vol_type',
snapshot_id='snap_id',
source_volid='source_volid',
metadata=dict(
foo='bar',
baz='quux',
),
)
text = serializer.serialize(dict(volume=raw_volume))
tree = etree.fromstring(text)
self._verify_volume(raw_volume, tree)
def test_volume_index_detail_serializer(self):
serializer = volumes.VolumesTemplate()
raw_volumes = [
dict(
id='vol1_id',
status='vol1_status',
size=1024,
availability_zone='vol1_availability',
bootable=True,
created_at=timeutils.utcnow(),
attachments=[
dict(
id='vol1_id',
volume_id='vol1_id',
server_id='instance_uuid',
device='/foo1'
)
],
name='vol1_name',
description='vol1_desc',
volume_type='vol1_type',
snapshot_id='snap1_id',
source_volid=None,
metadata=dict(foo='vol1_foo',
bar='vol1_bar', ), ),
dict(
id='vol2_id',
status='vol2_status',
size=1024,
availability_zone='vol2_availability',
bootable=False,
created_at=timeutils.utcnow(),
attachments=[dict(id='vol2_id',
volume_id='vol2_id',
server_id='instance_uuid',
device='/foo2')],
name='vol2_name',
description='vol2_desc',
volume_type='vol2_type',
snapshot_id='snap2_id',
source_volid=None,
metadata=dict(foo='vol2_foo',
bar='vol2_bar', ), )]
text = serializer.serialize(dict(volumes=raw_volumes))
tree = etree.fromstring(text)
self.assertEqual(NS + 'volumes', tree.tag)
self.assertEqual(len(raw_volumes), len(tree))
for idx, child in enumerate(tree):
self._verify_volume(raw_volumes[idx], child)
class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
def setUp(self):
super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
self.deserializer = volumes.CreateDeserializer()
def test_minimal_volume(self):
self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
size="1"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
},
}
self.assertEqual(expected, request['body'])
def test_name(self):
self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
size="1"
name="Volume-xml"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"name": "Volume-xml",
},
}
self.assertEqual(expected, request['body'])
def test_description(self):
self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
size="1"
name="Volume-xml"
description="description"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"name": "Volume-xml",
"description": "description",
},
}
self.assertEqual(expected, request['body'])
def test_volume_type(self):
self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
size="1"
name="Volume-xml"
description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"name": "Volume-xml",
"description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
},
}
self.assertEqual(expected, request['body'])
def test_availability_zone(self):
self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
size="1"
name="Volume-xml"
description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
availability_zone="us-east1"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"name": "Volume-xml",
"description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
"availability_zone": "us-east1",
},
}
self.assertEqual(expected, request['body'])
def test_metadata(self):
self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
name="Volume-xml"
size="1">
<metadata><meta key="Type">work</meta></metadata></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"name": "Volume-xml",
"size": "1",
"metadata": {
"Type": "work",
},
},
}
self.assertEqual(expected, request['body'])
def test_full_volume(self):
self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
size="1"
name="Volume-xml"
description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
availability_zone="us-east1">
<metadata><meta key="Type">work</meta></metadata></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"name": "Volume-xml",
"description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
"availability_zone": "us-east1",
"metadata": {
"Type": "work",
},
},
}
self.assertEqual(expected, request['body'])
def test_imageref(self):
self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
size="1"
name="Volume-xml"
description="description"
imageRef="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"name": "Volume-xml",
"description": "description",
"imageRef": "4a90189d-d702-4c7c-87fc-6608c554d737",
},
}
self.assertEqual(expected, request['body'])
def test_snapshot_id(self):
self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
size="1"
name="Volume-xml"
description="description"
snapshot_id="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"name": "Volume-xml",
"description": "description",
"snapshot_id": "4a90189d-d702-4c7c-87fc-6608c554d737",
},
}
self.assertEqual(expected, request['body'])
def test_source_volid(self):
self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
size="1"
name="Volume-xml"
description="description"
source_volid="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"name": "Volume-xml",
"description": "description",
"source_volid": "4a90189d-d702-4c7c-87fc-6608c554d737",
},
}
self.assertEqual(expected, request['body'])
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import merchant_center_link
from google.ads.googleads.v8.services.types import merchant_center_link_service
from .transports.base import (
MerchantCenterLinkServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import MerchantCenterLinkServiceGrpcTransport
class MerchantCenterLinkServiceClientMeta(type):
"""Metaclass for the MerchantCenterLinkService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[MerchantCenterLinkServiceTransport]]
_transport_registry["grpc"] = MerchantCenterLinkServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[MerchantCenterLinkServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
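# Illustrative note on the registry above: because gRPC is the only transport
# registered, MerchantCenterLinkServiceClient.get_transport_class("grpc") and
# MerchantCenterLinkServiceClient.get_transport_class() (no label) both return
# MerchantCenterLinkServiceGrpcTransport.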
class MerchantCenterLinkServiceClient(
metaclass=MerchantCenterLinkServiceClientMeta
):
"""This service allows management of links between Google Ads
and Google Merchant Center.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
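    # Sketch of how the conversion above behaves (endpoints are illustrative):
    #   "googleads.googleapis.com"    -> "googleads.mtls.googleapis.com"
    #   "foo.sandbox.googleapis.com"  -> "foo.mtls.sandbox.googleapis.com"
    #   "foo.mtls.googleapis.com"     -> unchanged (already an mTLS endpoint)
    #   "example.com"                 -> unchanged (not a googleapis.com host)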
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MerchantCenterLinkServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MerchantCenterLinkServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> MerchantCenterLinkServiceTransport:
"""Return the transport used by the client instance.
Returns:
MerchantCenterLinkServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def merchant_center_link_path(
customer_id: str, merchant_center_id: str,
) -> str:
"""Return a fully-qualified merchant_center_link string."""
return "customers/{customer_id}/merchantCenterLinks/{merchant_center_id}".format(
customer_id=customer_id, merchant_center_id=merchant_center_id,
)
@staticmethod
def parse_merchant_center_link_path(path: str) -> Dict[str, str]:
"""Parse a merchant_center_link path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/merchantCenterLinks/(?P<merchant_center_id>.+?)$",
path,
)
return m.groupdict() if m else {}
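    # Round-trip sketch for the two helpers above (IDs are illustrative):
    #   merchant_center_link_path("1234567890", "9876543210")
    #     -> "customers/1234567890/merchantCenterLinks/9876543210"
    #   parse_merchant_center_link_path(
    #       "customers/1234567890/merchantCenterLinks/9876543210")
    #     -> {"customer_id": "1234567890", "merchant_center_id": "9876543210"}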
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, MerchantCenterLinkServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the merchant center link service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.MerchantCenterLinkServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, MerchantCenterLinkServiceTransport):
# transport is a MerchantCenterLinkServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = MerchantCenterLinkServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def list_merchant_center_links(
self,
request: merchant_center_link_service.ListMerchantCenterLinksRequest = None,
*,
customer_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> merchant_center_link_service.ListMerchantCenterLinksResponse:
r"""Returns Merchant Center links available for this customer.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.ListMerchantCenterLinksRequest`):
The request object. Request message for
[MerchantCenterLinkService.ListMerchantCenterLinks][google.ads.googleads.v8.services.MerchantCenterLinkService.ListMerchantCenterLinks].
customer_id (:class:`str`):
Required. The ID of the customer onto
which to apply the Merchant Center link
list operation.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.ListMerchantCenterLinksResponse:
Response message for
[MerchantCenterLinkService.ListMerchantCenterLinks][google.ads.googleads.v8.services.MerchantCenterLinkService.ListMerchantCenterLinks].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a merchant_center_link_service.ListMerchantCenterLinksRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, merchant_center_link_service.ListMerchantCenterLinksRequest
):
request = merchant_center_link_service.ListMerchantCenterLinksRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_merchant_center_links
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
def get_merchant_center_link(
self,
request: merchant_center_link_service.GetMerchantCenterLinkRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> merchant_center_link.MerchantCenterLink:
r"""Returns the Merchant Center link in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetMerchantCenterLinkRequest`):
The request object. Request message for
[MerchantCenterLinkService.GetMerchantCenterLink][google.ads.googleads.v8.services.MerchantCenterLinkService.GetMerchantCenterLink].
resource_name (:class:`str`):
Required. Resource name of the
Merchant Center link.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.MerchantCenterLink:
A data sharing connection, proposed
or in use, between a Google Ads Customer
and a Merchant Center account.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a merchant_center_link_service.GetMerchantCenterLinkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, merchant_center_link_service.GetMerchantCenterLinkRequest
):
request = merchant_center_link_service.GetMerchantCenterLinkRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_merchant_center_link
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
def mutate_merchant_center_link(
self,
request: merchant_center_link_service.MutateMerchantCenterLinkRequest = None,
*,
customer_id: str = None,
operation: merchant_center_link_service.MerchantCenterLinkOperation = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> merchant_center_link_service.MutateMerchantCenterLinkResponse:
r"""Updates status or removes a Merchant Center link.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `FieldMaskError <>`__
`HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__
`RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.MutateMerchantCenterLinkRequest`):
The request object. Request message for
[MerchantCenterLinkService.MutateMerchantCenterLink][google.ads.googleads.v8.services.MerchantCenterLinkService.MutateMerchantCenterLink].
customer_id (:class:`str`):
Required. The ID of the customer
being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operation (:class:`google.ads.googleads.v8.services.types.MerchantCenterLinkOperation`):
Required. The operation to perform on
the link
This corresponds to the ``operation`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.MutateMerchantCenterLinkResponse:
Response message for Merchant Center
link mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operation]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a merchant_center_link_service.MutateMerchantCenterLinkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
merchant_center_link_service.MutateMerchantCenterLinkRequest,
):
request = merchant_center_link_service.MutateMerchantCenterLinkRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operation is not None:
request.operation = operation
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_merchant_center_link
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("MerchantCenterLinkServiceClient",)
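# Minimal usage sketch, not a definitive recipe: the service-account path and
# customer ID are placeholders, valid Google Ads credentials are assumed, and
# the response/resource field names (``merchant_center_links``,
# ``resource_name``, ``status``) are assumed from the v8 proto definitions.
def _example_list_merchant_center_links():
    client = MerchantCenterLinkServiceClient.from_service_account_file(
        "path/to/service_account.json"
    )
    response = client.list_merchant_center_links(customer_id="1234567890")
    for link in response.merchant_center_links:
        print(link.resource_name, link.status)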
|
|
import datetime
import random
import re
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db import transaction
from django.template.loader import render_to_string
from django.utils.hashcompat import sha_constructor
from django.utils.translation import ugettext_lazy as _
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
"""
Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
To prevent reactivation of an account which has been
deactivated by site administrators, the activation key is
reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
profile.activation_key = self.model.ACTIVATED
profile.save()
return user
return False
def create_inactive_user(self, username, email, password,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
new_user = User.objects.create_user(username, email, password)
        # Allow settings.SEND_MAIL to override the ``send_email`` argument:
        # if True (the default), an activation key is generated and an
        # activation email with the activation link is sent; if False, the
        # user is activated immediately without email confirmation.
send_email = getattr(settings, 'SEND_MAIL', True)
if send_email:
new_user.is_active = False
registration_profile = self.create_profile(new_user)
registration_profile.send_activation_email(site)
else:
new_user.is_active = True
new_user.save()
return new_user
def create_profile(self, user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
salt = sha_constructor(str(random.random())).hexdigest()[:5]
username = user.username
if isinstance(username, unicode):
username = username.encode('utf-8')
activation_key = sha_constructor(salt+username).hexdigest()
return self.create(user=user,
activation_key=activation_key)
def delete_expired_users(self):
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
        1. It alleviates the occasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for profile in self.all():
if profile.activation_key_expired():
user = profile.user
if not user.is_active:
user.delete()
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key for use during
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
ACTIVATED = u"ALREADY_ACTIVATED"
user = models.ForeignKey(User, unique=True, verbose_name=_('user'))
activation_key = models.CharField(_('activation key'), max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _('registration profile')
verbose_name_plural = _('registration profiles')
def __unicode__(self):
return u"Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
return self.activation_key == self.ACTIVATED or \
(self.user.date_joined + expiration_date <= datetime.datetime.now())
activation_key_expired.boolean = True
def send_activation_email(self, site):
"""
Send an activation email to the user associated with this
``RegistrationProfile``.
The activation email will make use of two templates:
``registration/activation_email_subject.txt``
This template will be used for the subject line of the
email. Because it is used as the subject line of an email,
this template's output **must** be only a single line of
text; output longer than one line will be forcibly joined
into only a single line.
``registration/activation_email.txt``
This template will be used for the body of the email.
These templates will each receive the following context
variables:
``activation_key``
The activation key for the new account.
``expiration_days``
The number of days remaining during which the account may
be activated.
``site``
An object representing the site on which the user
registered; depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
"""
ctx_dict = {'activation_key': self.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site}
subject = render_to_string('registration/activation_email_subject.txt',
ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('registration/activation_email.txt',
ctx_dict)
self.user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
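# Minimal usage sketch of the registration flow above (names are placeholders
# and ``site`` is assumed to be a Site/RequestSite instance): create an
# inactive user, then exchange the emailed activation key for the active user.
def _example_registration_flow(site):
    new_user = RegistrationProfile.objects.create_inactive_user(
        "alice", "alice@example.com", "s3cret", site)
    profile = RegistrationProfile.objects.get(user=new_user)
    return RegistrationProfile.objects.activate_user(profile.activation_key)
# Worked example of the expiration rule in ``activation_key_expired`` (numbers
# assumed): with ACCOUNT_ACTIVATION_DAYS = 7, a user whose ``date_joined`` is
# 2024-01-01 00:00 may activate until 2024-01-08 00:00; at or after that
# moment ``date_joined + timedelta(days=7) <= now()`` holds and the key is
# treated as expired.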
|
|
from math import ceil
from dash.orgs.models import Org
from django.contrib.auth.models import User
from django.db import connection, models
from django.db.models import Index, Q, Sum
from django.utils.functional import SimpleLazyObject
from django.utils.translation import gettext_lazy as _
from casepro.cases.models import CaseAction, Partner
from casepro.msgs.models import Label
from casepro.utils import date_range
from casepro.utils.export import BaseExport
def datetime_to_date(dt, org):
"""
Convert a datetime to a date using the given org's timezone
"""
return dt.astimezone(org.timezone).date()
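# Example (values assumed): for an org whose timezone is Africa/Kigali (UTC+2),
# a datetime of 2024-01-01 23:30 UTC converts to the local date 2024-01-02.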
class BaseCount(models.Model):
"""
Tracks total counts of different items (e.g. replies, messages) in different scopes (e.g. org, user)
"""
TYPE_INCOMING = "I"
TYPE_INBOX = "N"
TYPE_ARCHIVED = "A"
TYPE_REPLIES = "R"
TYPE_CASE_OPENED = "C"
TYPE_CASE_CLOSED = "D"
id = models.BigAutoField(auto_created=True, primary_key=True)
squash_sql = """
WITH removed as (
DELETE FROM %(table_name)s WHERE %(delete_cond)s RETURNING "count"
)
INSERT INTO %(table_name)s(%(insert_cols)s, "count", "is_squashed")
VALUES (%(insert_vals)s, GREATEST(0, (SELECT SUM("count") FROM removed)), TRUE);"""
item_type = models.CharField(max_length=1)
scope = models.CharField(max_length=32)
count = models.IntegerField()
is_squashed = models.BooleanField(default=False)
@staticmethod
def encode_scope(*args):
types = []
for arg in args:
# request.user is actually a SimpleLazyObject proxy
if isinstance(arg, User) and isinstance(arg, SimpleLazyObject):
arg = User(pk=arg.pk)
types.append(type(arg))
if types == [Org]:
return "org:%d" % args[0].pk
elif types == [Partner]:
return "partner:%d" % args[0].pk
elif types == [Org, User]:
return "org:%d:user:%d" % (args[0].pk, args[1].pk)
elif types == [Label]:
return "label:%d" % args[0].pk
else: # pragma: no cover
raise ValueError("Unsupported scope: %s" % ",".join([t.__name__ for t in types]))
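    # Examples of the encoded scope strings produced above (primary keys are
    # illustrative): "org:3", "partner:5", "org:3:user:7", "label:2".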
@classmethod
def squash(cls):
"""
Squashes counts so that there is a single count per item_type + scope combination
"""
unsquashed_values = cls.objects.filter(is_squashed=False)
unsquashed_values = unsquashed_values.values(*cls.squash_over).distinct(*cls.squash_over)
for unsquashed in unsquashed_values:
with connection.cursor() as cursor:
sql = cls.squash_sql % {
"table_name": cls._meta.db_table,
"delete_cond": " AND ".join(['"%s" = %%s' % f for f in cls.squash_over]),
"insert_cols": ", ".join(['"%s"' % f for f in cls.squash_over]),
"insert_vals": ", ".join(["%s"] * len(cls.squash_over)),
}
params = [unsquashed[f] for f in cls.squash_over]
cursor.execute(sql, params + params)
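    # Sketch of the SQL rendered by squash() for TotalCount, whose squash_over
    # is ("item_type", "scope"). The table name below is an assumption; the
    # real name comes from cls._meta.db_table:
    #
    #   WITH removed as (
    #     DELETE FROM statistics_totalcount
    #     WHERE "item_type" = %s AND "scope" = %s RETURNING "count"
    #   )
    #   INSERT INTO statistics_totalcount("item_type", "scope", "count", "is_squashed")
    #   VALUES (%s, %s, GREATEST(0, (SELECT SUM("count") FROM removed)), TRUE);
    #
    # The same parameter list is passed twice: once for the DELETE condition
    # and once for the INSERT values.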
class CountSet(object):
"""
A queryset of counts which can be aggregated in different ways
"""
def __init__(self, counts, scopes):
self.counts = counts
self.scopes = scopes
def total(self):
"""
Calculates the overall total over a set of counts
"""
total = self.counts.aggregate(total=Sum("count"))
return total["total"] if total["total"] is not None else 0
def scope_totals(self):
"""
Calculates per-scope totals over a set of counts
"""
totals = list(self.counts.values_list("scope").annotate(replies=Sum("count")))
total_by_encoded_scope = {t[0]: t[1] for t in totals}
total_by_scope = {}
for encoded_scope, scope in self.scopes.items():
total_by_scope[scope] = total_by_encoded_scope.get(encoded_scope, 0)
return total_by_scope
class Meta:
abstract = True
class BaseSecondTotal(BaseCount):
"""
    Tracks total seconds and counts of different items (e.g. time since assigned) in different scopes (e.g. org, user)
"""
TYPE_TILL_REPLIED = "A"
TYPE_TILL_CLOSED = "C"
squash_sql = """
WITH removed as (
DELETE FROM %(table_name)s WHERE %(delete_cond)s RETURNING "count", "seconds"
)
INSERT INTO %(table_name)s(%(insert_cols)s, "count", "seconds", "is_squashed")
VALUES (
%(insert_vals)s,
GREATEST(0, (SELECT SUM("count") FROM removed)),
COALESCE((SELECT SUM("seconds") FROM removed), 0),
TRUE
);"""
seconds = models.BigIntegerField()
class CountSet(BaseCount.CountSet):
"""
A queryset of counts which can be aggregated in different ways
"""
def average(self):
"""
            Calculates the overall average number of seconds per item over a set of counts
"""
totals = self.counts.aggregate(total=Sum("count"), seconds=Sum("seconds"))
if totals["seconds"] is None or totals["total"] is None:
return 0
average = float(totals["seconds"]) / totals["total"]
return average
def seconds(self):
"""
Calculates the overall total of seconds over a set of counts
"""
total = self.counts.aggregate(total_seconds=Sum("seconds"))
return total["total_seconds"] if total["total_seconds"] is not None else 0
def scope_averages(self):
"""
Calculates per-scope averages over a set of counts
"""
totals = list(self.counts.values("scope").annotate(cases=Sum("count"), seconds=Sum("seconds")))
total_by_encoded_scope = {t["scope"]: (t["cases"], t["seconds"]) for t in totals}
average_by_scope = {}
for encoded_scope, scope in self.scopes.items():
cases, seconds = total_by_encoded_scope.get(encoded_scope, (1, 0))
average_by_scope[scope] = float(seconds) / cases
return average_by_scope
def day_totals(self):
"""
Calculates per-day totals over a set of counts
"""
return list(
self.counts.values_list("day").annotate(cases=Sum("count"), seconds=Sum("seconds")).order_by("day")
)
def month_totals(self):
"""
Calculates per-month totals over a set of counts
"""
counts = self.counts.extra(select={"month": 'EXTRACT(month FROM "day")'})
return list(
counts.values_list("month").annotate(cases=Sum("count"), seconds=Sum("seconds")).order_by("month")
)
class Meta:
abstract = True
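# Worked example of BaseSecondTotal.CountSet.average() above (rows assumed):
# given two unsquashed rows (count=2, seconds=600) and (count=1, seconds=300),
# the aggregate is Sum(count)=3 and Sum(seconds)=900, so average() = 300.0
# seconds per item.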
class TotalCount(BaseCount):
"""
Tracks total counts of different items (e.g. replies, messages) in different scopes (e.g. org, user)
"""
squash_over = ("item_type", "scope")
@classmethod
def record_item(cls, item_type, *scope_args):
cls.objects.create(item_type=item_type, scope=cls.encode_scope(*scope_args), count=1)
@classmethod
def get_by_org(cls, orgs, item_type):
return cls._get_count_set(item_type, {cls.encode_scope(o): o for o in orgs})
@classmethod
def get_by_partner(cls, partners, item_type):
return cls._get_count_set(item_type, {cls.encode_scope(p): p for p in partners})
@classmethod
def get_by_user(cls, org, users, item_type):
return cls._get_count_set(item_type, {cls.encode_scope(org, u): u for u in users})
@classmethod
def get_by_label(cls, labels, item_type):
return cls._get_count_set(item_type, {cls.encode_scope(l): l for l in labels})
@classmethod
def _get_count_set(cls, item_type, scopes):
counts = cls.objects.filter(item_type=item_type)
if scopes:
counts = counts.filter(scope__in=scopes.keys())
return BaseCount.CountSet(counts, scopes)
class Meta:
index_together = ("item_type", "scope")
indexes = [
Index(name="stats_totalcount_unsquashed", fields=("item_type", "scope"), condition=Q(is_squashed=False))
]
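# Minimal usage sketch for TotalCount (``org`` is assumed to be an existing
# Org instance): record a single incoming message and read back the total.
def _example_total_count(org):
    TotalCount.record_item(TotalCount.TYPE_INCOMING, org)
    TotalCount.squash()  # optional; collapses rows per item_type + scope
    return TotalCount.get_by_org([org], TotalCount.TYPE_INCOMING).total()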
class DailyCount(BaseCount):
"""
Tracks per-day counts of different items (e.g. replies, messages) in different scopes (e.g. org, user)
"""
day = models.DateField(help_text=_("The day this count is for"))
squash_over = ("day", "item_type", "scope")
@classmethod
def record_item(cls, day, item_type, *scope_args):
cls.objects.create(day=day, item_type=item_type, scope=cls.encode_scope(*scope_args), count=1)
@classmethod
def record_removal(cls, day, item_type, *scope_args):
cls.objects.create(day=day, item_type=item_type, scope=cls.encode_scope(*scope_args), count=-1)
@classmethod
def get_by_org(cls, orgs, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(o): o for o in orgs}, since, until)
@classmethod
def get_by_partner(cls, partners, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(p): p for p in partners}, since, until)
@classmethod
def get_by_user(cls, org, users, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(org, u): u for u in users}, since, until)
@classmethod
def get_by_label(cls, labels, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(l): l for l in labels}, since, until)
@classmethod
def _get_count_set(cls, item_type, scopes, since, until):
counts = cls.objects.filter(item_type=item_type)
if scopes:
counts = counts.filter(scope__in=scopes.keys())
if since:
counts = counts.filter(day__gte=since)
if until:
counts = counts.filter(day__lt=until)
return DailyCount.CountSet(counts, scopes)
class CountSet(BaseCount.CountSet):
"""
A queryset of counts which can be aggregated in different ways
"""
def day_totals(self):
"""
Calculates per-day totals over a set of counts
"""
return list(self.counts.values_list("day").annotate(total=Sum("count")).order_by("day"))
def month_totals(self):
"""
Calculates per-month totals over a set of counts
"""
counts = self.counts.extra(select={"month": 'EXTRACT(month FROM "day")'})
return list(counts.values_list("month").annotate(replies=Sum("count")).order_by("month"))
class Meta:
index_together = ("item_type", "scope", "day")
indexes = [
Index(
name="stats_dailycount_unsquashed",
fields=("item_type", "scope", "day"),
condition=Q(is_squashed=False),
)
]
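# Minimal usage sketch for DailyCount (``org``, ``day`` and the date range are
# assumed): record one reply for a given day, then fetch per-day totals.
def _example_daily_counts(org, day, since, until):
    DailyCount.record_item(day, DailyCount.TYPE_REPLIES, org)
    counts = DailyCount.get_by_org([org], DailyCount.TYPE_REPLIES, since, until)
    return counts.day_totals()  # e.g. [(day, 1)]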
class DailyCountExport(BaseExport):
"""
Exports based on daily counts. Each row is date and columns are different scopes.
"""
TYPE_LABEL = "L"
TYPE_PARTNER = "P"
TYPE_USER = "U"
type = models.CharField(max_length=1)
since = models.DateField()
until = models.DateField()
directory = "daily_count_export"
download_view = "statistics.dailycountexport_read"
@classmethod
def create(cls, org, user, of_type, since, until):
return cls.objects.create(org=org, created_by=user, type=of_type, since=since, until=until)
def render_book(self, book):
if self.type == self.TYPE_LABEL:
sheet = book.add_sheet(str(_("Incoming Messages")))
labels = list(Label.get_all(self.org).order_by("name"))
# get each label's day counts and organise by label and day
totals_by_label = {}
for label in labels:
totals = DailyCount.get_by_label(
[label], DailyCount.TYPE_INCOMING, self.since, self.until
).day_totals()
totals_by_label[label] = {t[0]: t[1] for t in totals}
self.write_row(sheet, 0, ["Date"] + [l.name for l in labels])
row = 1
for day in date_range(self.since, self.until):
totals = [totals_by_label.get(l, {}).get(day, 0) for l in labels]
self.write_row(sheet, row, [day] + totals)
row += 1
elif self.type == self.TYPE_USER:
replies_sheet = book.add_sheet(str(_("Replies Sent")))
cases_opened_sheet = book.add_sheet(str(_("Cases Opened")))
cases_closed_sheet = book.add_sheet(str(_("Cases Closed")))
users = self.org.get_org_users().order_by("profile__full_name")
replies_totals_by_user = {}
cases_opened_by_user = {}
cases_closed_by_user = {}
for user in users:
replies_totals = DailyCount.get_by_user(
self.org, [user], DailyCount.TYPE_REPLIES, self.since, self.until
).day_totals()
cases_opened_totals = DailyCount.get_by_user(
self.org, [user], DailyCount.TYPE_CASE_OPENED, self.since, self.until
).day_totals()
cases_closed_totals = DailyCount.get_by_user(
self.org, [user], DailyCount.TYPE_CASE_CLOSED, self.since, self.until
).day_totals()
replies_totals_by_user[user] = {t[0]: t[1] for t in replies_totals}
cases_opened_by_user[user] = {t[0]: t[1] for t in cases_opened_totals}
cases_closed_by_user[user] = {t[0]: t[1] for t in cases_closed_totals}
self.write_row(replies_sheet, 0, ["Date"] + [u.get_full_name() for u in users])
self.write_row(cases_opened_sheet, 0, ["Date"] + [u.get_full_name() for u in users])
self.write_row(cases_closed_sheet, 0, ["Date"] + [u.get_full_name() for u in users])
row = 1
for day in date_range(self.since, self.until):
replies_totals = [replies_totals_by_user.get(u, {}).get(day, 0) for u in users]
cases_opened_totals = [cases_opened_by_user.get(u, {}).get(day, 0) for u in users]
cases_closed_totals = [cases_closed_by_user.get(u, {}).get(day, 0) for u in users]
self.write_row(replies_sheet, row, [day] + replies_totals)
self.write_row(cases_opened_sheet, row, [day] + cases_opened_totals)
self.write_row(cases_closed_sheet, row, [day] + cases_closed_totals)
row += 1
elif self.type == self.TYPE_PARTNER:
replies_sheet = book.add_sheet(str(_("Replies Sent")))
ave_sheet = book.add_sheet(str(_("Average Reply Time")))
ave_closed_sheet = book.add_sheet(str(_("Average Closed Time")))
cases_opened_sheet = book.add_sheet(str(_("Cases Opened")))
cases_closed_sheet = book.add_sheet(str(_("Cases Closed")))
partners = list(Partner.get_all(self.org).order_by("name"))
# get each partner's day counts and organise by partner and day
replies_totals_by_partner = {}
cases_opened_by_partner = {}
cases_closed_by_partner = {}
replied_averages_by_partner = {}
closed_averages_by_partner = {}
for partner in partners:
replies_totals = DailyCount.get_by_partner(
[partner], DailyCount.TYPE_REPLIES, self.since, self.until
).day_totals()
cases_opened_totals = DailyCount.get_by_partner(
[partner], DailyCount.TYPE_CASE_OPENED, self.since, self.until
).day_totals()
cases_closed_totals = DailyCount.get_by_partner(
[partner], DailyCount.TYPE_CASE_CLOSED, self.since, self.until
).day_totals()
replies_totals_by_partner[partner] = {t[0]: t[1] for t in replies_totals}
cases_opened_by_partner[partner] = {t[0]: t[1] for t in cases_opened_totals}
cases_closed_by_partner[partner] = {t[0]: t[1] for t in cases_closed_totals}
replied_second_totals = DailySecondTotalCount.get_by_partner(
[partner], DailySecondTotalCount.TYPE_TILL_REPLIED, self.since, self.until
).day_totals()
replied_averages_by_partner[partner] = {t[0]: (float(t[2]) / t[1]) for t in replied_second_totals}
closed_second_totals = DailySecondTotalCount.get_by_partner(
[partner], DailySecondTotalCount.TYPE_TILL_CLOSED, self.since, self.until
).day_totals()
closed_averages_by_partner[partner] = {t[0]: (float(t[2]) / t[1]) for t in closed_second_totals}
self.write_row(replies_sheet, 0, ["Date"] + [p.name for p in partners])
self.write_row(cases_opened_sheet, 0, ["Date"] + [p.name for p in partners])
self.write_row(cases_closed_sheet, 0, ["Date"] + [p.name for p in partners])
self.write_row(ave_sheet, 0, ["Date"] + [p.name for p in partners])
self.write_row(ave_closed_sheet, 0, ["Date"] + [p.name for p in partners])
row = 1
for day in date_range(self.since, self.until):
replies_totals = [replies_totals_by_partner.get(p, {}).get(day, 0) for p in partners]
cases_opened_totals = [cases_opened_by_partner.get(p, {}).get(day, 0) for p in partners]
cases_closed_totals = [cases_closed_by_partner.get(p, {}).get(day, 0) for p in partners]
replied_averages = [replied_averages_by_partner.get(p, {}).get(day, 0) for p in partners]
closed_averages = [closed_averages_by_partner.get(p, {}).get(day, 0) for p in partners]
self.write_row(replies_sheet, row, [day] + replies_totals)
self.write_row(cases_opened_sheet, row, [day] + cases_opened_totals)
self.write_row(cases_closed_sheet, row, [day] + cases_closed_totals)
self.write_row(ave_sheet, row, [day] + replied_averages)
self.write_row(ave_closed_sheet, row, [day] + closed_averages)
row += 1
class DailySecondTotalCount(BaseSecondTotal):
"""
Tracks total seconds and count of different items in different scopes (e.g. org, user)
"""
day = models.DateField(help_text=_("The day this count is for"))
squash_over = ("day", "item_type", "scope")
@classmethod
def record_item(cls, day, seconds, item_type, *scope_args):
cls.objects.create(day=day, item_type=item_type, scope=cls.encode_scope(*scope_args), count=1, seconds=seconds)
@classmethod
def get_by_org(cls, orgs, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(o): o for o in orgs}, since, until)
@classmethod
def get_by_partner(cls, partners, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(p): p for p in partners}, since, until)
@classmethod
def get_by_user(cls, org, users, item_type, since=None, until=None):
return cls._get_count_set(item_type, {cls.encode_scope(org, u): u for u in users}, since, until)
@classmethod
def _get_count_set(cls, item_type, scopes, since, until):
counts = cls.objects.filter(item_type=item_type)
if scopes:
counts = counts.filter(scope__in=scopes.keys())
if since:
counts = counts.filter(day__gte=since)
if until:
counts = counts.filter(day__lt=until)
return DailySecondTotalCount.CountSet(counts, scopes)
def record_case_closed_time(close_action):
org = close_action.case.org
user = close_action.created_by
partner = close_action.case.assignee
case = close_action.case
day = datetime_to_date(close_action.created_on, close_action.case.org)
# count the time to close on an org level
td = close_action.created_on - case.opened_on
seconds_since_open = ceil(td.total_seconds())
DailySecondTotalCount.record_item(day, seconds_since_open, DailySecondTotalCount.TYPE_TILL_CLOSED, org)
# count the time since case was last assigned to this partner till it was closed
if user.partners.filter(id=partner.id).exists():
# count the time since this case was (re)assigned to this partner
try:
action = case.actions.filter(action=CaseAction.REASSIGN, assignee=partner).latest("created_on")
start_date = action.created_on
except CaseAction.DoesNotExist:
start_date = case.opened_on
td = close_action.created_on - start_date
seconds_since_open = ceil(td.total_seconds())
DailySecondTotalCount.record_item(day, seconds_since_open, DailySecondTotalCount.TYPE_TILL_CLOSED, partner)
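# Illustrative helper (not part of the original module): this is how the partner export above
# turns DailySecondTotalCount.day_totals() rows, assumed to be (day, count, total_seconds)
# tuples, into per-day averages.
def _averages_by_day(second_totals):
    """Map each day to total_seconds / count, mirroring the export code above."""
    return {day: float(total_seconds) / count for day, count, total_seconds in second_totals}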
|
|
import logging
import os
import re
from tempfile import mkdtemp
from os import getcwd
from simgen.utils import searchpath
GITHUB_URL_PATTERN = r'https://(?P<domain>.+?)/(?P<owner>.+?)/(?P<repo>.+?)(.git)?(?P<path>/.+)?$'
_PATTERN = re.compile(GITHUB_URL_PATTERN)
log = logging.getLogger(__file__)
class Error(Exception):
pass
# def _sync_repo(local_root_dir, repo_org, repo_name, parent_repo_org=None, parent_repo_name=None):
# """Clone github repo to local folder, or pull if a local repo already exists"""
# # adapted from https://gist.github.com/kennethreitz/619473
#
# repo_org = repo_org.lower()
# repo_name = repo_name.lower()
#
# # store the current directory
# cwd = os.getcwd()
#
# if not local_root_dir:
# local_root_dir = mkdtemp()
#
# # create local root directory (safely)
# try:
# os.makedirs(local_root_dir)
# except OSError:
# pass
# os.chdir(local_root_dir)
#
# # create org directory (safely)
# try:
# os.makedirs(repo_org)
# except OSError:
# pass
#
# # enter org dir
# os.chdir(repo_org)
#
# if os.path.exists(repo_name):
# # do a pull
# os.chdir(repo_name)
# repo_dir = os.getcwd()
# print('Updating repo: {}'.format(repo_name))
# os.system('git pull')
#
# if parent_repo_org and parent_repo_name:
# print('Adding upstream: {}/{}'.format(parent_repo_org,parent_repo_name))
# os.system('git remote add upstream [email protected]:{}/{}.git'.format(parent_repo_org,parent_repo_name))
#
# os.chdir('..')
#
# else:
# # do a clone
# print('Cloning repo: {}/{}'.format(repo_org, repo_name))
# os.system('git clone [email protected]:{}/{}.git'.format(repo_org, repo_name))
# print ('git clone [email protected]:{}/{}.git'.format(repo_org, repo_name))
#
# os.chdir(repo_name)
# repo_dir = os.getcwd()
#
# if parent_repo_org and parent_repo_name:
# print('Adding upstream: {}/{}'.format(parent_repo_org, parent_repo_name))
# os.system('git remote add upstream [email protected]:{}/{}.git'.format(parent_repo_org, parent_repo_name))
#
# os.chdir('..')
#
# # cd back to the original working directory
# os.chdir(cwd)
#
# # return the repo directory
# return repo_dir
#
#
# def sync_repo(local_root_dir, repo_url, parent_repo_url=None):
# """Clone github repo to local folder, or pull if a local repo already exists"""
#
# if not validate_github_url(repo_url):
# raise Error("Invalid github repo url: {}".format(repo_url))
#
# (owner, repo, path) = parse_github_url(repo_url)
# if not parent_repo_url:
# return _sync_repo(local_root_dir, owner, repo)
# else:
# if not validate_github_url(parent_repo_url):
# raise Error("Invalid github parent repo url: {}".format(parent_repo_url))
#
# (parent_owner, parent_repo, parent_path) = parse_github_url(parent_repo_url)
# return _sync_repo(local_root_dir, owner, repo, parent_repo_org=parent_owner, parent_repo_name=parent_repo)
def parse_github_url(gh_path):
"""Get owner, repo name, and optionally a path to a file therein from a github url"""
result = _PATTERN.match(gh_path)
if not result:
return None
else:
return (result.group('owner').lower(), result.group('repo').lower(), result.group('path'))
def validate_github_url(gh_path):
return parse_github_url(gh_path) is not None
def make_github_url(owner, repo, path=''):
return 'https://github.com/{owner}/{repo}.git{path}'.format(owner=owner.lower(), repo=repo.lower(), path=path)
def split_github_path(gh_path):
"""Given an url that points to a folder or file in github, split it to project URL and path"""
(owner, repo, path) = parse_github_url(gh_path)
return make_github_url(owner, repo), path
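# A few illustrative results (owner and repo are lower-cased; an optional ".git" suffix and a
# trailing path are both handled by GITHUB_URL_PATTERN):
# >>> parse_github_url('https://github.com/iModels/concept-creation.git/code')
# ('imodels', 'concept-creation', '/code')
# >>> split_github_path('https://github.com/iModels/concept-creation/code')
# ('https://github.com/imodels/concept-creation.git', '/code')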
# def mixed_to_local_path(mixed_path, local_root_dir=None):
# """Convert mixed path to local path, cloning/pulling github repos as necessary."""
#
# local_path = []
#
# for path_elem in mixed_path:
# if validate_github_url(path_elem):
#
# (repo_url, path_in_repo) = split_github_path(path_elem)
#
# local_repo_dir = sync_repo(local_root_dir, repo_url=repo_url)
# print('Local repo dir: {}'.format(local_repo_dir))
# if local_repo_dir:
# # clone/pull is successfule
# if path_in_repo:
# # make sure we remove the / from the beginning of the path in repo
# local_path.append(os.path.join(local_repo_dir, path_in_repo[1:]))
# else:
# local_path.append(local_repo_dir)
# else:
# # error cloning repo... print error message?
# pass
# else:
# # path element is a local directory
# local_path.append(path_elem)
#
# return local_path
# def find_file(seekName, mixed_path, implicitExt='', local_root_dir=None):
# """Given a pathsep-delimited path string or list of directories or github URLs, find seekName.
# Returns path to seekName if found, otherwise None.
# Also allows for files with implicit extensions (eg, .exe, or ['.yml','.yaml']),
# returning the absolute path of the file found.
# >>> find_file('ls', '/usr/bin:/bin', implicitExt='.exe')
# '/bin/ls'
# """
# local_path = mixed_to_local_path(mixed_path, local_root_dir)
# return searchpath.find_file(seekName, local_path, implicitExt)
class Loader(object):
def __init__(self, local_root_dir=None):
if not local_root_dir:
local_root_dir = mkdtemp()
self.local_root_dir = local_root_dir
log.info('local_root_dir is {}'.format(self.local_root_dir))
self.gh_to_local_map = dict()
def add_repo(self, repo_url, local_repo_path):
if not validate_github_url(repo_url):
raise Error('Not a valid github repo URL: {}'.format(repo_url))
if not os.path.exists(local_repo_path):
raise Error('Local path does not exist: {}'.format(local_repo_path))
(repo_org, repo_name, _) = parse_github_url(repo_url)
self.gh_to_local_map[make_github_url(repo_org.lower(), repo_name.lower())] = local_repo_path
def sync_repo(self, repo_url, local_repo_path=None):
if not validate_github_url(repo_url):
raise Error('Not a valid github repo URL: {}'.format(repo_url))
(repo_org, repo_name, _) = parse_github_url(repo_url)
# store the current directory so it can be restored after cloning/pulling
cwd = os.getcwd()
if local_repo_path is None:
# default to local_root_dir/owner/repo
if not self.local_root_dir:
self.local_root_dir = mkdtemp()
# create local root directory (safely)
try:
os.makedirs(self.local_root_dir)
except OSError:
pass
os.chdir(self.local_root_dir)
# create owner directory (safely)
try:
os.makedirs(repo_org)
except OSError:
pass
os.chdir(repo_org)
if os.path.exists(repo_name):
# do a pull
os.chdir(repo_name)
print('Updating repo: {}'.format(repo_name))
os.system('git pull')
else:
# do a clone
print('Cloning repo: {}/{}'.format(repo_org, repo_name))
os.system('git clone https://github.com/{}/{}.git'.format(repo_org, repo_name))
print ('git clone https://github.com/{}/{}.git'.format(repo_org, repo_name))
os.chdir(repo_name)
local_repo_path = os.getcwd()
else:
if os.path.exists(local_repo_path):
# do a pull
os.chdir(local_repo_path)
print('Updating repo: {}'.format(repo_name))
os.system('git pull')
else:
# create local_repo_path if it does not exist yet
os.makedirs(local_repo_path, exist_ok=True)
# do a clone
print('Cloning repo: {}/{} to {}'.format(repo_org, repo_name, local_repo_path))
os.system('git clone [email protected]:{}/{}.git {}'.format(repo_org, repo_name, local_repo_path))
print ('git clone [email protected]:{}/{}.git {}'.format(repo_org, repo_name), local_repo_path)
# remember the github to local path mapping
self.add_repo(repo_url, local_repo_path)
os.chdir(cwd)
return local_repo_path
def mixed_to_local_path(self, mixed_path):
"""Convert mixed path to local path, cloning/pulling github repos as necessary."""
if isinstance(mixed_path, (list, tuple)):
path_parts = mixed_path
else:
path_parts = mixed_path.split(os.pathsep)
log.debug("Converting mixed path to local: {}".format(path_parts))
local_path = []
for path_elem in path_parts:
if validate_github_url(path_elem):
# it's a github url
(repo_url, path_in_repo) = split_github_path(path_elem)
if repo_url in self.gh_to_local_map:
local_repo_dir = self.gh_to_local_map[repo_url]
else:
local_repo_dir = self.sync_repo(repo_url)
if path_in_repo:
local_repo_path = os.path.join(local_repo_dir, path_in_repo[1:])
else:
local_repo_path = local_repo_dir
# log.debug('Local repo dir of {} is {}'.format(path_elem, local_repo_path))
local_path.append(local_repo_path)
# log.debug("Local path is: {} ".format(local_path))
else:
# path element is a local directory
local_path.append(path_elem)
return local_path
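# For example (hypothetical paths), a mixed path such as
#     ['/opt/templates', 'https://github.com/iModels/concept-creation/code']
# comes back as
#     ['/opt/templates', '<local_root_dir>/imodels/concept-creation/code']
# with the repository cloned (or pulled) on first use and cached in gh_to_local_map afterwards.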
def find_file(self, file_name, mixed_path=None, implicit_ext=''):
# import pdb; pdb.set_trace()
if not mixed_path:
mixed_path = []
if validate_github_url(file_name):
# if file_name is an absolute github path, split it into base url and path, prepending mixed_path with base_url
repo_url, file_path = split_github_path(file_name)
mixed_path.insert(0, repo_url)
file_name = file_path[1:]
elif file_name.startswith('/'):
# if file_name is an absolute local path, split it into dir and file_name, prepending mixed_path with dir
abs_dir, file_name = os.path.split(file_name)
mixed_path.insert(0, abs_dir)
local_path = self.mixed_to_local_path(mixed_path)
log.debug('Mixed path is: {}'.format(mixed_path))
log.debug('Local path is: {}'.format(local_path))
return searchpath.find_file(file_name, local_path, implicit_ext)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=0)
r = split_github_path('https://github.com/iModels/concept-creation/n/o')
print(r)
r = split_github_path('https://github.com/iModels/concept-creation.git/n/o')
print(r)
r = split_github_path('https://github.com/iModels/concept-creation')
print(r)
session = Loader()
local_file = session.find_file('Default', ['https://github.com/iModels/concept-creation/code'], implicit_ext=['.yaml', '.yml'])
print('Local file: {}'.format(local_file))
|
|
from twisted.internet.error import ReactorAlreadyInstalledError
from zmq.eventloop import ioloop
ioloop.install()
from tornado.ioloop import IOLoop
import tornado.platform.twisted
try:
tornado.platform.twisted.install()
except ReactorAlreadyInstalledError:
pass
#from jupyter_client.blocking.client import BlockingKernelClient
from .client import BlockingKernelClient
from ipykernel.jsonutil import json_clean
from twisted.python import log
from twisted.internet import threads
from twisted.internet.defer import inlineCallbacks, returnValue, CancelledError, DeferredLock
from twisted.internet.task import LoopingCall
from twisted.internet.error import ConnectionRefusedError
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner, Service
from autobahn import wamp
from autobahn.wamp.exception import ApplicationError
from txzmq import ZmqEndpoint, ZmqFactory, ZmqSubConnection
import json
import sys
import argparse
from pprint import pformat
try:
from queue import Empty # Python 3
except ImportError:
from Queue import Empty # Python 2
if sys.version.startswith("3"):
unicode = str
_zmq_factory = ZmqFactory()
def cleanup(proto):
if hasattr(proto, '_session') and proto._session is not None:
if proto._session.is_attached():
return proto._session.leave()
elif proto._session.is_connected():
return proto._session.disconnect()
class ZmqProxyConnection(ZmqSubConnection):
def __init__(self, endpoint, wamp_session, prefix):
self._endpoint = endpoint
self._wamp = wamp_session
self._prefix = prefix
ZmqSubConnection.__init__(self, _zmq_factory, ZmqEndpoint('connect', endpoint.encode("utf-8")))
self.subscribe(b"")
def gotMessage(self, message, header=""):
# log.msg("[MachineConnection] {} {}".format(header, message))
self._wamp.publish(self._prefix, [str(header), json.loads(message.decode("utf-8"))])
def build_bridge_class(client):
_key = client.session.key.decode("utf-8")
class JupyterClientWampBridge(ApplicationSession):
iopub_deferred = None
prefix_list = set()
machine_connection = None
_lock = DeferredLock()
_has_been_pinged = False
_has_timedout = False
@wamp.register(u"io.timbr.kernel.{}.execute".format(_key))
@inlineCallbacks
def execute(self, *args, **kwargs):
result = yield client.execute(*args, **kwargs)
returnValue(result)
@wamp.register(u"io.timbr.kernel.{}.execute_interactive".format(_key))
@inlineCallbacks
def execute_interactive(self, *args, **kwargs):
result = yield self._lock.run(threads.deferToThread, client.execute_interactive, *args, **kwargs)
returnValue(json_clean(result))
@wamp.register(u"io.timbr.kernel.{}.complete_interactive".format(_key))
@inlineCallbacks
def complete_interactive(self, *args, **kwargs):
result = yield self._lock.run(threads.deferToThread, client.interactive, client.complete, *args, **kwargs)
returnValue(json_clean(result))
@wamp.register(u"io.timbr.kernel.{}.complete".format(_key))
@inlineCallbacks
def complete(self, *args, **kwargs):
result = yield client.complete(*args, **kwargs)
returnValue(result)
@wamp.register(u"io.timbr.kernel.{}.inspect".format(_key))
@inlineCallbacks
def inspect(self, *args, **kwargs):
result = yield client.inspect(*args, **kwargs)
returnValue(result)
@wamp.register(u"io.timbr.kernel.{}.history".format(_key))
@inlineCallbacks
def history(self, *args, **kwargs):
result = yield client.history(*args, **kwargs)
returnValue(result)
@wamp.register(u"io.timbr.kernel.{}.is_complete".format(_key))
@inlineCallbacks
def is_complete(self, *args, **kwargs):
result = yield client.is_complete(*args, **kwargs)
returnValue(result)
@wamp.register(u"io.timbr.kernel.{}.shutdown".format(_key))
@inlineCallbacks
def shutdown(self, *args, **kwargs):
result = yield client.shutdown(*args, **kwargs)
returnValue(result)
@wamp.register(u"io.timbr.kernel.{}.list".format(_key))
def list(self):
return list(self.prefix_list)
# This relies heavily on the shell_channel property of the client
# need to pay attention to Jupyter.client if/when this changes...
@wamp.register(u"io.timbr.kernel.{}.comm_msg".format(_key))
def comm_msg(self, *args, **kwargs):
msg = kwargs.get('msg', {})
log.msg("[comm_msg] {}".format(pformat(json_clean(msg))))
return client.shell_channel.send(msg)
@inlineCallbacks
def proxy_iopub_channel(self):
while True:
try:
msg = client.get_iopub_msg(block=False)
if(not msg["content"].get("metadata", {}).get("echo", False)):
log.msg("[iopub] {}".format(pformat(json_clean(msg))))
yield self.publish(u"io.timbr.kernel.{}.iopub".format(_key), json_clean(msg))
except ValueError as ve:
# This happens when an "invalid signature" is encountered which for us probably
# means that the message did not originate from this kernel
log.msg("ValueError")
except Empty:
yield sleep(0.1)
def proxy_machine_channel(self):
"""
If there is a timbr-machine zeromq pub channel present for this kernel_id it will be
proxied over the WAMP connection at io.timbr.kernel.<kernel_id>.machine
"""
ipc_endpoint = "ipc:///tmp/timbr-machine/{}".format(_key) # NOTE: Breaks Windows compatibility
prefix = "io.timbr.kernel.{}.machine".format(_key)
self.machine_connection = ZmqProxyConnection(ipc_endpoint, self, prefix)
@wamp.register(u"io.timbr.kernel.{}.ping".format(_key))
def ping(self):
self._has_been_pinged = True
return client.is_alive()
@inlineCallbacks
def is_active(self, prefix):
try:
response = yield self.call(u"{}.ping".format(prefix))
# log.msg("Ping response {}".format(response))
returnValue(response)
except ApplicationError:
returnValue(False)
def on_discovery(self, prefix):
self.prefix_list.add(prefix)
@inlineCallbacks
def update_discovery(self):
my_prefix = u"io.timbr.kernel.{}".format(_key)
yield self.publish(u"io.timbr.kernel.discovery", my_prefix)
prefix_list = list(self.prefix_list)
active_prefix_list = []
for prefix in prefix_list:
# log.msg("Checking prefix {}".format(prefix))
# NOTE: Don't think this works and may be sensitive to timeout
is_active = yield self.is_active(prefix)
# log.msg("is_active set to {}".format(is_active))
if is_active is True:
active_prefix_list.append(prefix)
self.prefix_list = set(active_prefix_list)
try:
yield self.register(self.list, u"io.timbr.kernel.list")
except ApplicationError:
pass
# log.msg("Prefix list is now {}".format(str(self.prefix_list)))
returnValue(self.prefix_list)
@inlineCallbacks
def onJoin(self, details):
log.msg("[onJoin] Registering WAMP methods...")
yield self.register(self)
log.msg("[onJoin] ...done.")
log.msg("[onJoin] Updating kernel discovery mechanism")
yield self.subscribe(self.on_discovery, u"io.timbr.kernel.discovery")
self.discovery_task = LoopingCall(self.update_discovery)
self.discovery_task.start(3) # loop every 3 seconds
log.msg("[onJoin] Establishing Pub/Sub Channels...")
try:
self.iopub_deferred.cancel()
except (CancelledError, AttributeError):
pass
finally:
self.iopub_deferred = self.proxy_iopub_channel()
try:
self.machine_connection.shutdown()
except AttributeError:
pass
#finally:
#self.proxy_machine_channel()
log.msg("[onJoin] ...done.")
log.msg(client.hb_channel._running)
@inlineCallbacks
def onLeave(self, details):
try:
yield self.machine_connection.shutdown()
except AttributeError:
pass
yield self.discovery_task.stop()
super(self.__class__, self).onLeave(details)
def onDisconnect(self):
log.msg("[onDisconnect] ...")
log.msg("Attempting to reconnect ...")
return JupyterClientWampBridge
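# A minimal consumer sketch (illustrative only, not part of the original bridge): it joins the
# same WAMP realm and prints every iopub message proxied by the bridge built above. KERNEL_ID
# stands in for the kernel session key used as _key; the URL and realm mirror main()'s defaults.
class ExampleIOPubListener(ApplicationSession):
    KERNEL_ID = u"00000000-0000-0000-0000-000000000000"  # hypothetical kernel session key

    def onJoin(self, details):
        def on_iopub(msg):
            log.msg("[example listener] {}".format(pformat(msg)))
        return self.subscribe(on_iopub, u"io.timbr.kernel.{}.iopub".format(self.KERNEL_ID))

# To run it (commented out so importing this module never starts a reactor):
# ApplicationRunner(url=u"ws://127.0.0.1:8123", realm=u"jupyter").run(ExampleIOPubListener)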
def main():
global _bridge_runner
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true", help="Enable debug output.")
# NOTE: all of these are placeholders
parser.add_argument("--wamp-realm", default=u"jupyter", help='Router realm')
parser.add_argument("--wamp-url", default=u"ws://127.0.0.1:8123", help="WAMP Websocket URL")
parser.add_argument("--token", type=unicode, help="OAuth token to connect to router")
parser.add_argument("--auto-shutdown", action="store_true",
default=False, help="When set, disconnect and clean up the WAMP session when the heartbeat times out, then stop the IOLoop")
parser.add_argument("--hb-interval", type=int, default=30, help="The heartbeat interval used when auto-shutdown is set")
parser.add_argument("file", help="Connection file")
args = parser.parse_args()
if args.debug:
try:
log.startLogging(open('/home/gremlin/wamp.log', 'w'), setStdout=False)
except IOError:
pass
with open(args.file) as f:
config = json.load(f)
client = BlockingKernelClient(connection_file=args.file)
client.load_connection_file()
client.start_channels()
_bridge_runner = ApplicationRunner(url=unicode(args.wamp_url), realm=unicode(args.wamp_realm),
headers={"Authorization": "Bearer {}".format(args.token),
"X-Kernel-ID": client.session.key})
log.msg("Connecting to router: %s" % args.wamp_url)
log.msg(" Project Realm: %s" % (args.wamp_realm))
def heartbeat(proto):
if hasattr(proto, '_session') and proto._session is not None:
if not proto._session._has_been_pinged:
proto._session._has_timedout = True
else:
proto._session._has_been_pinged = False
@inlineCallbacks
def reconnector(shutdown_on_timeout):
while True:
try:
hb = None
log.msg("Attempting to connect...")
wampconnection = yield _bridge_runner.run(build_bridge_class(client), start_reactor=False)
hb = LoopingCall(heartbeat, (wampconnection))
hb.start(args.hb_interval, now=False)
log.msg(wampconnection)
yield sleep(10.0) # Give the connection time to set _session
while wampconnection.isOpen():
if shutdown_on_timeout:
if wampconnection._session._has_timedout:
hb.stop()
res = yield cleanup(wampconnection)
returnValue(res)
yield sleep(5.0)
except ConnectionRefusedError as ce:
if hb is not None and hb.running:
hb.stop()
log.msg("ConnectionRefusedError: Trying to reconnect... ")
yield sleep(1.0)
def shutdown(result):
IOLoop.current().stop()
d = reconnector(args.auto_shutdown)
d.addCallback(shutdown)
# start the tornado io loop
IOLoop.current().start()
if __name__ == "__main__":
main()
|
|
from copy import deepcopy
from typing import Dict
from keras.layers import Input
from overrides import overrides
from ...common.models import get_submodel
from ...common.params import Params
from ...data.instances.reading_comprehension import McQuestionPassageInstance
from ...layers.attention import Attention
from ...layers.backend import Envelope
from ...layers.backend import Multiply
from ...layers.wrappers import EncoderWrapper
from ...layers.wrappers import TimeDistributedWithMask
from ...models.reading_comprehension import BidirectionalAttentionFlow
from ...training.models import DeepQaModel
from ...training.text_trainer import TextTrainer
class MultipleChoiceBidaf(TextTrainer):
"""
This class extends Minjoon Seo's `Bidirectional Attention Flow model
<https://www.semanticscholar.org/paper/Bidirectional-Attention-Flow-for-Machine-Seo-Kembhavi/7586b7cca1deba124af80609327395e613a20e9d>`_,
which was originally applied to predicting spans from a passage, to answering multiple choice
questions.
The approach we're going to take here is to load a BiDAF model directly (literally taking all
of the parameters we need to construct the ``BidirectionalAttentionFlow`` model class),
applying it to a question and passage, and then adding a few layers on top to try to match the
predicted span to the answer options we have.
To match the predicted span to the answer options, we'll first construct a weighted
representation of the passage, weighted by the likelihood of each word in the passage being a
part of the span. Then we'll compare that representation to a representation for each answer
option.
Input:
- a passage of shape ``(batch_size, num_passage_words)``
- a question of shape ``(batch_size, num_question_words)``
- a set of answer options of shape ``(batch_size, num_options, num_option_words)``
Output:
- a probability distribution over the answer options, of shape ``(batch_size, num_options)``
Parameters
----------
bidaf_params : Dict[str, Any]
These parameters get passed to a
:class:`~deep_qa.models.reading_comprehension.bidirectional_attention.BidirectionalAttentionFlow`
object, which we load. They should be exactly the same as the parameters used to train the
saved model. There is one parameter that must be consistent across the contained BiDAF
model and this ``TextTrainer`` object, so we copy that parameter from the BiDAF params,
overwriting any parameters that you set for this ``MultipleChoiceBidaf`` model. This
parameter is "tokenizer".
train_bidaf : bool, optional (default=``False``)
Should we optimize the weights in the contained BiDAF model, or just the weights that we
define here? TODO(matt): setting this to ``True`` is currently incompatible with saving
and loading the ``MultipleChoiceBidaf`` model. Getting that to work is not a high
priority, as we're assuming you have far less multiple choice data, so you want a smaller
model, anyway.
num_options : int, optional (default=``None``)
For padding. How many options should we pad the data to? If ``None``, this is set from
the data.
num_option_words : int, optional (default=``None``)
For padding. How many words are in each answer option? If ``None``, this is set from
the data.
similarity_function : Dict[str, Any], optional (default={'type': 'bilinear'})
This is the similarity function used to compare an encoded span representation with encoded
option representations. These parameters get passed to a similarity function (see
:mod:`deep_qa.tensors.similarity_functions` for more info on what's acceptable). The
default similarity function with no parameters is a set of linear weights on the
concatenated inputs. Note that the inputs to this similarity function will have `different
sizes`, so the set of functions you can use is constrained (i.e., no dot product, etc.).
Also note that you almost certainly want to have some kind of bilinear interaction, or
linear with a hidden layer, or something, because fundamentally we want to say whether two
vectors are close in some projected space, which can't really be captured by a simple
linear similarity function.
Notes
-----
Porting the code to Keras 2 made this break for some reason that I haven't been able to figure
out yet. I told py.test to skip the test we had for this, so I'm moving it to ``contrib``
until such time as I get the test to actually pass.
"""
# pylint: disable=protected-access
def __init__(self, params: Params):
bidaf_params = params.pop('bidaf_params')
params['tokenizer'] = deepcopy(bidaf_params.get('tokenizer', {}))
self._bidaf_model = BidirectionalAttentionFlow(bidaf_params)
self._bidaf_model.load_model()
self.train_bidaf = params.pop('train_bidaf', False)
self.num_options = params.pop('num_options', None)
self.num_option_words = params.pop('num_option_words', None)
self.similarity_function_params = params.pop('similarity_function', {'type': 'bilinear'})
super(MultipleChoiceBidaf, self).__init__(params)
self.data_indexer = self._bidaf_model.data_indexer
# We need to not add any more words to the vocabulary, or the model will crash, because
# we're using the same embedding layer as BiDAF. So we finalize the data indexer, which
# will give us some warnings when we try to fit the indexer to the training data, but won't
# actually add anything. Also note that this has to happen _after_ we call the superclass
# constructor, or self.data_indexer will get overwritten. TODO(matt): make it so you can
# expand the embedding size after the fact in a loaded model (though that seems really hard
# to do correctly, especially in this setting where we're working directly with a loaded
# Keras model). An alternative would be to have our own embedding layer that's initialized
# from BiDAF's, use that, then use BiDAF for the phrase layer... Either way is pretty
# complicated.
self.data_indexer.finalize()
@overrides
def _build_model(self):
"""
Our basic outline here will be to run the BiDAF model on the question and the passage, then
compute an envelope over the passage for what words BiDAF thought were in the answer span.
Then we'll weight the BiDAF passage, and use the BiDAF encoder to encode the answer
options. Then we'll have a simple similarity function on top to score the similarity
between each answer option and the predicted answer span.
Getting the right stuff out of the BiDAF model is a little tricky. We're going to use the
same approach as done in :meth:`TextTrainer._build_debug_model
<deep_qa.training.trainer.TextTrainer._build_debug_model>`: we won't modify the model at
all, but we'll construct a new model that just changes the outputs to be various layers of
the original model.
"""
question_shape = self._bidaf_model._get_sentence_shape(self._bidaf_model.num_question_words)
question_input = Input(shape=question_shape, dtype='int32', name="question_input")
passage_shape = self._bidaf_model._get_sentence_shape(self._bidaf_model.num_passage_words)
passage_input = Input(shape=passage_shape, dtype='int32', name="passage_input")
options_shape = (self.num_options,) + self._bidaf_model._get_sentence_shape(self.num_option_words)
options_input = Input(shape=options_shape, dtype='int32', name='options_input')
# First we compute a span envelope over the passage, then multiply that by the passage
# representation.
bidaf_passage_model = get_submodel(self._bidaf_model.model,
['question_input', 'passage_input'],
['final_merged_passage', 'span_begin_softmax', 'span_end_softmax'],
train_model=self.train_bidaf,
name="passage_model")
modeled_passage, span_begin, span_end = bidaf_passage_model([question_input, passage_input])
envelope = Envelope()([span_begin, span_end])
weighted_passage = Multiply()([modeled_passage, envelope])
# Then we encode the answer options the same way we encoded the question.
bidaf_question_model = get_submodel(self._bidaf_model.model,
['question_input'],
['phrase_encoder'],
train_model=self.train_bidaf,
name="phrase_encoder_model")
# Total hack to make this compatible with TimeDistributedWithMask. Ok, ok, python's duck
# typing is kind of nice sometimes... At least I can get this to work, even though it's
# not supported in Keras.
bidaf_question_model.get_output_mask_shape_for = self.bidaf_question_model_mask_shape
embedded_options = TimeDistributedWithMask(bidaf_question_model, keep_dims=True)(options_input)
# Then we compare the weighted passage to each of the encoded options, and get a
# distribution over answer options. We'll use an encoder to get a single vector for the
# passage and for each answer option, then do an "attention" to get a distribution over
# answer options. We can think of doing other similarity computations (e.g., a
# decomposable attention) later.
passage_encoder = self._get_encoder(name="similarity", fallback_behavior="use default params")
option_encoder = EncoderWrapper(passage_encoder)
encoded_passage = passage_encoder(weighted_passage)
encoded_options = option_encoder(embedded_options)
attention_layer = Attention(**deepcopy(self.similarity_function_params).as_dict())
option_scores = attention_layer([encoded_passage, encoded_options])
return DeepQaModel(inputs=[question_input, passage_input, options_input],
outputs=option_scores)
@staticmethod
def bidaf_question_model_mask_shape(input_shape):
return input_shape[:-1]
@overrides
def _instance_type(self): # pylint: disable=no-self-use
return McQuestionPassageInstance
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
padding_lengths = self._bidaf_model.get_padding_lengths()
padding_lengths['num_options'] = self.num_options
padding_lengths['num_option_words'] = self.num_option_words
return padding_lengths
@overrides
def _set_padding_lengths(self, padding_lengths: Dict[str, int]):
self._bidaf_model._set_padding_lengths(padding_lengths)
self.num_options = padding_lengths['num_options']
self.num_option_words = padding_lengths['num_option_words']
@overrides
def _set_padding_lengths_from_model(self):
self._bidaf_model._set_padding_lengths_from_model()
options_input_shape = self.model.get_input_shape_at(0)[2]
self.num_options = options_input_shape[1]
self.num_option_words = options_input_shape[2]
@classmethod
def _get_custom_objects(cls):
custom_objects = BidirectionalAttentionFlow._get_custom_objects()
custom_objects['Attention'] = Attention
custom_objects['EncoderWrapper'] = EncoderWrapper
custom_objects['Envelope'] = Envelope
custom_objects['Multiply'] = Multiply
custom_objects['TimeDistributedWithMask'] = TimeDistributedWithMask
# Above, in `_build_model`, we do a total hack to make the partial BiDAF model compatible
# with TimeDistributedWithMask. We need a similar hack here, because we need Keras to have
# this hacked `compute_output_mask_for` method when it loads the model from a config. This
# is really brittle...
class DeepQaModelWithOutputMaskFunction(DeepQaModel):
def get_output_mask_shape_for(self, input_shape): # pylint: disable=no-self-use
return input_shape[:-1]
custom_objects['DeepQaModel'] = DeepQaModelWithOutputMaskFunction
return custom_objects
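# A minimal configuration sketch (hypothetical values) mirroring the parameters documented in
# the MultipleChoiceBidaf docstring. "bidaf_params" must be exactly the parameters the saved
# BiDAF model was trained with; they are left as an empty placeholder here.
EXAMPLE_MULTIPLE_CHOICE_BIDAF_PARAMS = {
    "bidaf_params": {},                           # placeholder for the trained BiDAF parameters
    "train_bidaf": False,                         # only train the layers added on top of BiDAF
    "num_options": 4,                             # pad to four answer options per question
    "num_option_words": 10,                       # pad each answer option to ten words
    "similarity_function": {"type": "bilinear"},  # compares span and option encodings
}
# model = MultipleChoiceBidaf(Params(EXAMPLE_MULTIPLE_CHOICE_BIDAF_PARAMS))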
|
|
from datetime import date, datetime, time, tzinfo
from django.test import SimpleTestCase, override_settings
from django.test.utils import TZ_SUPPORT, requires_tz_support
from django.utils import dateformat, translation
from django.utils.dateformat import format
from django.utils.timezone import (
get_default_timezone, get_fixed_timezone, make_aware, utc,
)
@override_settings(TIME_ZONE='Europe/Copenhagen')
class DateFormatTests(SimpleTestCase):
def setUp(self):
self._orig_lang = translation.get_language()
translation.activate('en-us')
def tearDown(self):
translation.activate(self._orig_lang)
def test_date(self):
d = date(2009, 5, 16)
self.assertEqual(date.fromtimestamp(int(format(d, 'U'))), d)
def test_naive_datetime(self):
dt = datetime(2009, 5, 16, 5, 30, 30)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt)
def test_naive_ambiguous_datetime(self):
# dt is ambiguous in Europe/Copenhagen. pytz raises an exception for
# the ambiguity, which results in an empty string.
dt = datetime(2015, 10, 25, 2, 30, 0)
# Try all formatters that involve self.timezone.
self.assertEqual(format(dt, 'I'), '')
self.assertEqual(format(dt, 'O'), '')
self.assertEqual(format(dt, 'T'), '')
self.assertEqual(format(dt, 'Z'), '')
@requires_tz_support
def test_datetime_with_local_tzinfo(self):
ltz = get_default_timezone()
dt = make_aware(datetime(2009, 5, 16, 5, 30, 30), ltz)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.replace(tzinfo=None))
@requires_tz_support
def test_datetime_with_tzinfo(self):
tz = get_fixed_timezone(-510)
ltz = get_default_timezone()
dt = make_aware(datetime(2009, 5, 16, 5, 30, 30), ltz)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
# astimezone() is safe here because the target timezone doesn't have DST
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.astimezone(ltz).replace(tzinfo=None))
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz).timetuple(), dt.astimezone(tz).timetuple())
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz).timetuple(), dt.astimezone(ltz).timetuple())
def test_epoch(self):
udt = datetime(1970, 1, 1, tzinfo=utc)
self.assertEqual(format(udt, 'U'), '0')
def test_empty_format(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, ''), '')
def test_am_pm(self):
morning = time(7, 00)
evening = time(19, 00)
self.assertEqual(dateformat.format(morning, 'a'), 'a.m.')
self.assertEqual(dateformat.format(evening, 'a'), 'p.m.')
self.assertEqual(dateformat.format(morning, 'A'), 'AM')
self.assertEqual(dateformat.format(evening, 'A'), 'PM')
def test_microsecond(self):
# Regression test for #18951
dt = datetime(2009, 5, 16, microsecond=123)
self.assertEqual(dateformat.format(dt, 'u'), '000123')
def test_date_formats(self):
# Specifiers 'I', 'r', and 'U' are covered in test_timezones().
my_birthday = datetime(1979, 7, 8, 22, 00)
for specifier, expected in [
('b', 'jul'),
('d', '08'),
('D', 'Sun'),
('E', 'July'),
('F', 'July'),
('j', '8'),
('l', 'Sunday'),
('L', 'False'),
('m', '07'),
('M', 'Jul'),
('n', '7'),
('N', 'July'),
('o', '1979'),
('S', 'th'),
('t', '31'),
('w', '0'),
('W', '27'),
('y', '79'),
('Y', '1979'),
('z', '189'),
]:
with self.subTest(specifier=specifier):
self.assertEqual(dateformat.format(my_birthday, specifier), expected)
def test_date_formats_c_format(self):
timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)
self.assertEqual(dateformat.format(timestamp, 'c'), '2008-05-19T11:45:23.123456')
def test_time_formats(self):
# Specifiers 'I', 'r', and 'U' are covered in test_timezones().
my_birthday = datetime(1979, 7, 8, 22, 00)
for specifier, expected in [
('a', 'p.m.'),
('A', 'PM'),
('f', '10'),
('g', '10'),
('G', '22'),
('h', '10'),
('H', '22'),
('i', '00'),
('P', '10 p.m.'),
('s', '00'),
('u', '000000'),
]:
with self.subTest(specifier=specifier):
self.assertEqual(dateformat.format(my_birthday, specifier), expected)
def test_dateformat(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, r'Y z \C\E\T'), '1979 189 CET')
self.assertEqual(dateformat.format(my_birthday, r'jS \o\f F'), '8th of July')
def test_futuredates(self):
the_future = datetime(2100, 10, 25, 0, 00)
self.assertEqual(dateformat.format(the_future, r'Y'), '2100')
def test_day_of_year_leap(self):
self.assertEqual(dateformat.format(datetime(2000, 12, 31), 'z'), '366')
def test_timezones(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
summertime = datetime(2005, 10, 30, 1, 00)
wintertime = datetime(2005, 10, 30, 4, 00)
noon = time(12, 0, 0)
# 3h30m to the west of UTC
tz = get_fixed_timezone(-210)
aware_dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
if TZ_SUPPORT:
for specifier, expected in [
('e', ''),
('O', '+0100'),
('r', 'Sun, 08 Jul 1979 22:00:00 +0100'),
('T', 'CET'),
('U', '300315600'),
('Z', '3600'),
]:
with self.subTest(specifier=specifier):
self.assertEqual(dateformat.format(my_birthday, specifier), expected)
self.assertEqual(dateformat.format(aware_dt, 'e'), '-0330')
self.assertEqual(
dateformat.format(aware_dt, 'r'),
'Sat, 16 May 2009 05:30:30 -0330',
)
self.assertEqual(dateformat.format(summertime, 'I'), '1')
self.assertEqual(dateformat.format(summertime, 'O'), '+0200')
self.assertEqual(dateformat.format(wintertime, 'I'), '0')
self.assertEqual(dateformat.format(wintertime, 'O'), '+0100')
for specifier in ['e', 'O', 'T', 'Z']:
with self.subTest(specifier=specifier):
self.assertEqual(dateformat.time_format(noon, specifier), '')
# Ticket #16924 -- We don't need timezone support to test this
self.assertEqual(dateformat.format(aware_dt, 'O'), '-0330')
def test_invalid_time_format_specifiers(self):
my_birthday = date(1984, 8, 7)
for specifier in ['a', 'A', 'f', 'g', 'G', 'h', 'H', 'i', 'P', 'r', 's', 'u']:
with self.subTest(specifier=specifier):
msg = (
'The format for date objects may not contain time-related '
f'format specifiers (found {specifier!r}).'
)
with self.assertRaisesMessage(TypeError, msg):
dateformat.format(my_birthday, specifier)
@requires_tz_support
def test_e_format_with_named_time_zone(self):
dt = datetime(1970, 1, 1, tzinfo=utc)
self.assertEqual(dateformat.format(dt, 'e'), 'UTC')
@requires_tz_support
def test_e_format_with_time_zone_with_unimplemented_tzname(self):
class NoNameTZ(tzinfo):
"""Time zone without .tzname() defined."""
def utcoffset(self, dt):
return None
dt = datetime(1970, 1, 1, tzinfo=NoNameTZ())
self.assertEqual(dateformat.format(dt, 'e'), '')
def test_P_format(self):
for expected, t in [
('midnight', time(0)),
('noon', time(12)),
('4 a.m.', time(4)),
('8:30 a.m.', time(8, 30)),
('4 p.m.', time(16)),
('8:30 p.m.', time(20, 30)),
]:
with self.subTest(time=t):
self.assertEqual(dateformat.time_format(t, 'P'), expected)
def test_r_format_with_non_en_locale(self):
# Changing the locale doesn't change the "r" format.
dt = datetime(1979, 7, 8, 22, 00)
with translation.override('fr'):
self.assertEqual(
dateformat.format(dt, 'r'),
'Sun, 08 Jul 1979 22:00:00 +0100',
)
def test_S_format(self):
for expected, days in [
('st', [1, 21, 31]),
('nd', [2, 22]),
('rd', [3, 23]),
('th', (n for n in range(4, 31) if n not in [21, 22, 23])),
]:
for day in days:
dt = date(1970, 1, day)
with self.subTest(day=day):
self.assertEqual(dateformat.format(dt, 'S'), expected)
def test_y_format_year_before_1000(self):
tests = [
(476, '76'),
(42, '42'),
(4, '04'),
]
for year, expected_date in tests:
with self.subTest(year=year):
self.assertEqual(
dateformat.format(datetime(year, 9, 8, 5, 0), 'y'),
expected_date,
)
def test_Y_format_year_before_1000(self):
self.assertEqual(dateformat.format(datetime(1, 1, 1), 'Y'), '0001')
self.assertEqual(dateformat.format(datetime(999, 1, 1), 'Y'), '0999')
def test_twelve_hour_format(self):
tests = [
(0, '12', '12'),
(1, '1', '01'),
(11, '11', '11'),
(12, '12', '12'),
(13, '1', '01'),
(23, '11', '11'),
]
for hour, g_expected, h_expected in tests:
dt = datetime(2000, 1, 1, hour)
with self.subTest(hour=hour):
self.assertEqual(dateformat.format(dt, 'g'), g_expected)
self.assertEqual(dateformat.format(dt, 'h'), h_expected)
|
|
"""Utilities for input validation"""
# Authors: Olivier Grisel and Gael Varoquaux and others (please update me)
# License: BSD 3
import warnings
import numbers
import numpy as np
from scipy import sparse
from .fixes import safe_copy
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Array contains NaN or infinity.")
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to
# prevent false positives from overflow in the sum method.
_assert_all_finite(X.data if sparse.issparse(X) else X)
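# Illustration (not part of the original module) of why the two-step check in
# _assert_all_finite is used: X.sum() needs no extra memory, but a legitimate overflow can make
# the sum non-finite, so np.isfinite(X).all() is consulted before raising.
# >>> _assert_all_finite(np.array([1.0, np.nan]))   # raises ValueError
# >>> _assert_all_finite(np.full(2, 1e308))         # sum overflows to inf, yet every element
# ...                                               # is finite, so no error is raised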
def safe_asarray(X, dtype=None, order=None):
"""Convert X to an array or sparse matrix.
Prevents copying X when possible; sparse matrices are passed through."""
if sparse.issparse(X):
assert_all_finite(X.data)
else:
X = np.asarray(X, dtype, order)
assert_all_finite(X)
return X
def as_float_array(X, copy=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sparse.issparse(X)):
return safe_asarray(X, dtype=np.float64)
elif sparse.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
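# For example, an integer array is promoted to the matching float width (assuming the usual
# NumPy default of int64 for np.arange on 64-bit platforms):
# >>> as_float_array(np.arange(3, dtype=np.int32)).dtype
# dtype('float32')
# >>> as_float_array(np.arange(3)).dtype
# dtype('float64')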
def array2d(X, dtype=None, order=None, copy=False):
"""Returns at least 2-d array with data from X"""
if sparse.issparse(X):
raise TypeError('A sparse matrix was passed, but dense data '
'is required. Use X.toarray() to convert to dense.')
X_2d = np.asarray(np.atleast_2d(X), dtype=dtype, order=order)
_assert_all_finite(X_2d)
if X is X_2d and copy:
X_2d = safe_copy(X_2d)
return X_2d
def _atleast2d_or_sparse(X, dtype, order, copy, sparse_class, convmethod):
if sparse.issparse(X):
# Note: order is ignored because CSR matrices hold data in 1-d arrays
if dtype is None or X.dtype == dtype:
X = getattr(X, convmethod)()
else:
X = sparse_class(X, dtype=dtype)
_assert_all_finite(X.data)
else:
X = array2d(X, dtype=dtype, order=order, copy=copy)
_assert_all_finite(X)
return X
def atleast2d_or_csc(X, dtype=None, order=None, copy=False):
"""Like numpy.atleast_2d, but converts sparse matrices to CSC format.
Also, converts np.matrix to np.ndarray.
"""
return _atleast2d_or_sparse(X, dtype, order, copy, sparse.csc_matrix,
"tocsc")
def atleast2d_or_csr(X, dtype=None, order=None, copy=False):
"""Like numpy.atleast_2d, but converts sparse matrices to CSR format
Also, converts np.matrix to np.ndarray.
"""
return _atleast2d_or_sparse(X, dtype, order, copy, sparse.csr_matrix,
"tocsr")
def _num_samples(x):
"""Return number of samples in array-like x."""
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
raise TypeError("Expected sequence or array-like, got %r" % x)
return x.shape[0] if hasattr(x, 'shape') else len(x)
def check_arrays(*arrays, **options):
"""Checked that all arrays have consistent first dimensions.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
sparse_format : 'csr', 'csc' or 'dense', None by default
If not None, any scipy.sparse matrix is converted to
Compressed Sparse Rows or Compressed Sparse Columns representations.
If 'dense', an error is raised when a sparse array is
passed.
copy : boolean, False by default
If copy is True, ensure that returned arrays are copies of the original
(if not already converted to another format earlier in the process).
check_ccontiguous : boolean, False by default
Check that the arrays are C contiguous
dtype : a numpy dtype instance, None by default
Enforce a specific dtype.
allow_lists : bool
Allow lists of arbitrary objects as input, just check their length.
Not possible together with 'check_ccontiguous' or 'dtype'.
"""
sparse_format = options.pop('sparse_format', None)
if sparse_format not in (None, 'csr', 'csc', 'dense'):
raise ValueError('Unexpected sparse format: %r' % sparse_format)
copy = options.pop('copy', False)
check_ccontiguous = options.pop('check_ccontiguous', False)
dtype = options.pop('dtype', None)
allow_lists = options.pop('allow_lists', False)
if options:
raise TypeError("Unexpected keyword arguments: %r" % options.keys())
if len(arrays) == 0:
return None
n_samples = _num_samples(arrays[0])
checked_arrays = []
for array in arrays:
array_orig = array
if array is None:
# special case: ignore optional y=None kwarg pattern
checked_arrays.append(array)
continue
size = _num_samples(array)
if size != n_samples:
raise ValueError("Found array with dim %d. Expected %d"
% (size, n_samples))
if sparse.issparse(array):
if sparse_format == 'csr':
array = array.tocsr()
elif sparse_format == 'csc':
array = array.tocsc()
elif sparse_format == 'dense':
raise TypeError('A sparse matrix was passed, but dense data is'
' required. Use X.todense() to convert to'
' dense.')
if check_ccontiguous:
array.data = np.ascontiguousarray(array.data, dtype=dtype)
else:
array.data = np.asarray(array.data, dtype=dtype)
_assert_all_finite(array.data)
else:
if not allow_lists:
if check_ccontiguous:
array = np.ascontiguousarray(array, dtype=dtype)
else:
array = np.asarray(array, dtype=dtype)
_assert_all_finite(array)
if copy and array is array_orig:
array = array.copy()
checked_arrays.append(array)
return checked_arrays
def warn_if_not_float(X, estimator='This algorithm'):
"""Warning utility function to check that data type is floating point"""
if not isinstance(estimator, basestring):
estimator = estimator.__class__.__name__
if X.dtype.kind != 'f':
warnings.warn("%s assumes floating point values as input, "
"got %s" % (estimator, X.dtype))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, numbers.Integral):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
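# A small, self-contained usage sketch (hypothetical data, not part of the original module):
# check_arrays verifies that both inputs agree on shape[0] and converts the Python list to an
# ndarray, while check_random_state turns an int seed into a reusable RandomState instance.
if __name__ == '__main__':
    X_checked, y_checked = check_arrays(np.zeros((3, 2)), [0, 1, 2])
    print(X_checked.shape, y_checked.shape)   # (3, 2) (3,)
    rng = check_random_state(42)
    print(rng.rand())                         # deterministic because the seed is fixed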
|
|
# swift_build_support/cmake.py - Detect host machine's CMake -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# Find the path to a CMake executable on the host machine.
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import
import os
import platform
import re
from numbers import Number
from . import shell
class CMakeOptions(object):
"""List like object used to define cmake options
"""
def __init__(self, initial_options=None):
self._options = []
if initial_options is not None:
self.extend(initial_options)
def define(self, var, value):
"""Utility to define cmake options in this object.
opts.define("FOO", "BAR") # -> -DFOO=BAR
opts.define("FLAG:BOOL", True) # -> -FLAG:BOOL=TRUE
"""
if var.endswith(':BOOL') or isinstance(value, bool):
value = self.true_false(value)
if value is None:
value = ""
elif not isinstance(value, (str, Number)):
raise ValueError('define: invalid value for key %s: %s (%s)' %
(var, value, type(value)))
self._options.append('-D%s=%s' % (var, value))
def extend(self, tuples_or_options):
if isinstance(tuples_or_options, CMakeOptions):
self += tuples_or_options
else:
for (variable, value) in tuples_or_options:
self.define(variable, value)
@staticmethod
def true_false(value):
if hasattr(value, 'lower'):
value = value.lower()
if value in [True, 1, 'true', 'yes', '1']:
return 'TRUE'
if value in [False, 0, 'false', 'no', '0']:
return 'FALSE'
raise ValueError("true_false: invalid value: %s" % value)
def __len__(self):
return self._options.__len__()
def __iter__(self):
return self._options.__iter__()
def __contains__(self, item):
return self._options.__contains__(item)
def __add__(self, other):
ret = CMakeOptions()
ret._options += self._options
ret._options += list(other)
return ret
def __iadd__(self, other):
self._options += list(other)
return self
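# A short usage sketch (hypothetical values) showing how define(), extend() and += combine:
#     opts = CMakeOptions([('CMAKE_BUILD_TYPE', 'Release')])
#     opts.define('LLVM_ENABLE_ASSERTIONS:BOOL', True)
#     opts += ['-G', 'Ninja']
#     list(opts)  # ['-DCMAKE_BUILD_TYPE=Release', '-DLLVM_ENABLE_ASSERTIONS:BOOL=TRUE', '-G', 'Ninja']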
class CMake(object):
def __init__(self, args, toolchain):
self.args = args
self.toolchain = toolchain
def common_options(self):
"""Return options used for all products, including LLVM/Clang
"""
args = self.args
toolchain = self.toolchain
options = CMakeOptions()
define = options.define
options += ['-G', args.cmake_generator]
sanitizers = []
if args.enable_asan:
sanitizers.append('Address')
if args.enable_ubsan:
sanitizers.append('Undefined')
if args.enable_tsan:
sanitizers.append('Thread')
if args.enable_lsan:
sanitizers.append('Leaks')
if sanitizers:
define("LLVM_USE_SANITIZER", ";".join(sanitizers))
if args.enable_sanitize_coverage:
define("LLVM_USE_SANITIZE_COVERAGE", "ON")
if args.export_compile_commands:
define("CMAKE_EXPORT_COMPILE_COMMANDS", "ON")
if args.distcc:
define("CMAKE_C_COMPILER_LAUNCHER:PATH", toolchain.distcc)
define("CMAKE_CXX_COMPILER_LAUNCHER:PATH", toolchain.distcc)
if args.cmake_c_launcher:
define("CMAKE_C_COMPILER_LAUNCHER:PATH", args.cmake_c_launcher)
if args.cmake_cxx_launcher:
define("CMAKE_CXX_COMPILER_LAUNCHER:PATH", args.cmake_cxx_launcher)
define("CMAKE_C_COMPILER:PATH", toolchain.cc)
define("CMAKE_CXX_COMPILER:PATH", toolchain.cxx)
define("CMAKE_LIBTOOL:PATH", toolchain.libtool)
if args.cmake_generator == 'Xcode':
define("CMAKE_CONFIGURATION_TYPES",
"Debug;Release;MinSizeRel;RelWithDebInfo")
if args.clang_user_visible_version:
major, minor, patch = \
args.clang_user_visible_version.components[0:3]
define("LLVM_VERSION_MAJOR:STRING", major)
define("LLVM_VERSION_MINOR:STRING", minor)
define("LLVM_VERSION_PATCH:STRING", patch)
define("CLANG_VERSION_MAJOR:STRING", major)
define("CLANG_VERSION_MINOR:STRING", minor)
define("CLANG_VERSION_PATCH:STRING", patch)
if args.build_ninja and args.cmake_generator == 'Ninja':
define('CMAKE_MAKE_PROGRAM', toolchain.ninja)
elif args.cmake_generator == 'Ninja' and toolchain.ninja is not None:
define('CMAKE_MAKE_PROGRAM', toolchain.ninja)
return options
def build_args(self):
"""Return arguments to the build tool used for all products
"""
args = self.args
toolchain = self.toolchain
jobs = args.build_jobs
if args.distcc:
jobs = shell.capture([toolchain.distcc, '-j'],
dry_run=False, echo=False).rstrip()
build_args = list(args.build_args)
if args.cmake_generator == 'Ninja':
build_args += ['-j%s' % jobs]
if args.verbose_build:
build_args += ['-v']
elif args.cmake_generator == 'Unix Makefiles':
build_args += ['-j%s' % jobs]
if args.verbose_build:
build_args += ['VERBOSE=1']
elif args.cmake_generator == 'Xcode':
build_args += ['-parallelizeTargets',
'-jobs', str(jobs)]
return build_args
# Determine the version of the installed CMake binary.
def installed_cmake_version(self, cmake_binary):
version = shell.capture([cmake_binary, '--version'], dry_run=False,
echo=True, optional=True)
(c_major, c_minor, c_patch) = (0, 0, 0)
if version is not None:
x = re.findall(r'cmake version (\d+)\.(\d+)\.(\d+)',
version.rstrip())
if len(x) == 1:
(c_major, c_minor, c_patch) = map(int, x[0])
return (c_major, c_minor, c_patch)
# Determine the version of the checked out CMake source.
def cmake_source_version(self, cmake_source_dir):
cmake_version_file = os.path.join(cmake_source_dir, 'Source',
'CMakeVersion.cmake')
major = -1
minor = -1
patch = -1
file = open(cmake_version_file, "r")
for line in file.readlines():
m = re.findall(r'set\(CMake_VERSION_MAJOR (\d+)\)', line)
if len(m) == 1:
major = int(m[0])
continue
m = re.findall(r'set\(CMake_VERSION_MINOR (\d+)\)', line)
if len(m) == 1:
minor = int(m[0])
continue
m = re.findall(r'set\(CMake_VERSION_PATCH (\d+)\)', line)
if len(m) == 1:
patch = int(m[0])
continue
if major == -1 or minor == -1 or patch == -1:
raise RuntimeError("Cant determine CMake version from %s"
% cmake_version_file)
return (major, minor, patch)
# Build CMake from source.
def build_cmake(self, source_root, build_root):
cmake_bootstrap = os.path.join(source_root, 'cmake', 'bootstrap')
if hasattr(self.args, 'build_script_impl_args'):
for opt in self.args.build_script_impl_args:
m = re.findall('--build-dir=(.*)', opt)
if len(m) == 1:
build_root = m[0]
cmake_build_dir = os.path.join(build_root, 'cmake-%s' %
self.args.host_target)
if not os.path.isdir(cmake_build_dir):
os.makedirs(cmake_build_dir)
cwd = os.getcwd()
os.chdir(cmake_build_dir)
shell.call_without_sleeping([cmake_bootstrap], echo=True)
shell.call_without_sleeping(['make', '-j%s' % self.args.build_jobs],
echo=True)
os.chdir(cwd)
return os.path.join(cmake_build_dir, 'bin', 'cmake')
# For Linux only, determine the version of the installed CMake compared to
# the source and build the source if necessary. Returns the path to the
# cmake binary.
def check_cmake_version(self, source_root, build_root):
if platform.system() != 'Linux':
return
cmake_source_dir = os.path.join(source_root, 'cmake')
# If the source is not checked out then don't attempt to build cmake.
if not os.path.isdir(cmake_source_dir):
return
cmake_binary = 'cmake'
try:
if self.args.cmake is not None:
cmake_binary = self.args.cmake
except AttributeError:
cmake_binary = 'cmake'
installed_ver = self.installed_cmake_version(cmake_binary)
if installed_ver > self.cmake_source_version(cmake_source_dir):
return
else:
# Build CMake from source and return the path to the executable.
return self.build_cmake(source_root, build_root)
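# Minimal sketch (not part of the original build-script) of the property that
# check_cmake_version relies on: Python compares (major, minor, patch) tuples
# element by element, so plain tuple comparison matches version ordering.
def _example_cmake_version_at_least(installed, required):
    """Return True if tuple `installed` is at least tuple `required`."""
    return tuple(installed) >= tuple(required)

# e.g. _example_cmake_version_at_least((3, 4, 3), (2, 8, 12)) is True, while
# _example_cmake_version_at_least((2, 8, 12), (3, 4, 3)) is False.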
# Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import testtools
from neutron.agent.common import ovs_lib
from neutron.agent.common import utils
from neutron.common import exceptions
from neutron.plugins.common import constants
from neutron.tests import base
from neutron.tests import tools
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
# some test data for get_vif_port_to_ofport_map that exhibited bug 1444269
OVSLIST_WITH_UNSET_PORT = (
'{"data":[["patch-tun",["map",[]],1],["tap2ab72a72-44",["map",[["attached-'
'mac","fa:16:3e:b0:f8:38"],["iface-id","2ab72a72-4407-4ef3-806a-b2172f3e4d'
'c7"],["iface-status","active"]]],2],["tap6b108774-15",["map",[["attached-'
'mac","fa:16:3e:02:f5:91"],["iface-id","6b108774-1559-45e9-a7c3-b714f11722'
'cf"],["iface-status","active"]]],["set",[]]]],"headings":["name","externa'
'l_ids","ofport"]}')
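# Minimal sketch (not part of the original tests), reusing the jsonutils
# import above: it decodes OVSLIST_WITH_UNSET_PORT and maps each port name to
# its ofport, skipping rows whose ofport is still the unset marker
# ["set", []] -- the encoding quirk behind bug 1444269.
def _example_decode_ovslist(raw=OVSLIST_WITH_UNSET_PORT):
    decoded = jsonutils.loads(raw)
    result = {}
    for name, external_ids, ofport in decoded['data']:
        if isinstance(ofport, list):
            # ["set", []] means ovsdb has not assigned an ofport yet.
            continue
        result[name] = ofport
    return result

# _example_decode_ovslist() == {'patch-tun': 1, 'tap2ab72a72-44': 2}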
class OFCTLParamListMatcher(object):
def _parse(self, params):
actions_pos = params.find('actions')
return set(params[:actions_pos].split(',')), params[actions_pos:]
def __init__(self, params):
self.expected = self._parse(params)
def __eq__(self, other):
return self.expected == self._parse(other)
def __str__(self):
return 'ovs-ofctl parameters: %s, "%s"' % self.expected
__repr__ = __str__
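# Illustrative usage note (not part of the original tests): the matcher
# compares everything before 'actions' as an order-insensitive set, so
#   OFCTLParamListMatcher('priority=1,in_port=2,actions=drop') ==
#       'in_port=2,priority=1,actions=drop'
# evaluates to True, while the 'actions=...' suffix must match verbatim.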
class OVS_Lib_Test(base.BaseTestCase):
"""A test suite to exercise the OVS libraries shared by Neutron agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
"""
def setUp(self):
super(OVS_Lib_Test, self).setUp()
self.BR_NAME = "br-int"
self.br = ovs_lib.OVSBridge(self.BR_NAME)
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
@property
def TO(self):
return "--timeout=%s" % self.br.vsctl_timeout
def _vsctl_args(self, *args):
cmd = ['ovs-vsctl', self.TO, '--oneline', '--format=json', '--']
cmd += args
return cmd
def _vsctl_mock(self, *args):
cmd = self._vsctl_args(*args)
return mock.call(cmd, run_as_root=True, log_fail_as_error=False)
def _verify_vsctl_mock(self, *args):
cmd = self._vsctl_args(*args)
self.execute.assert_called_once_with(cmd, run_as_root=True,
log_fail_as_error=False)
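# Illustrative note (not part of the original tests), assuming the default
# vsctl_timeout of 10 seconds: _vsctl_args("list-ports", "br-int") builds
#   ['ovs-vsctl', '--timeout=10', '--oneline', '--format=json', '--',
#    'list-ports', 'br-int']
# and the helpers above expect it to be executed with run_as_root=True.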
def test_vifport(self):
"""Create and stringify vif port, confirm no exceptions."""
pname = "vif1.0"
ofport = 5
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
# test __init__
port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
self.assertEqual(port.port_name, pname)
self.assertEqual(port.ofport, ofport)
self.assertEqual(port.vif_id, vif_id)
self.assertEqual(port.vif_mac, mac)
self.assertEqual(port.switch.br_name, self.BR_NAME)
# test __str__
str(port)
def _build_timeout_opt(self, exp_timeout):
return "--timeout=%d" % exp_timeout if exp_timeout else self.TO
def test_add_flow(self):
ofport = "99"
vid = 4000
lsw_id = 18
cidr = '192.168.1.0/24'
flow_dict_1 = collections.OrderedDict([
('priority', 2),
('dl_src', 'ca:fe:de:ad:be:ef'),
('actions', 'strip_vlan,output:0')])
flow_dict_2 = collections.OrderedDict([
('priority', 1),
('actions', 'normal')])
flow_dict_3 = collections.OrderedDict([
('priority', 2),
('actions', 'drop')])
flow_dict_4 = collections.OrderedDict([
('priority', 2),
('in_port', ofport),
('actions', 'drop')])
flow_dict_5 = collections.OrderedDict([
('priority', 4),
('in_port', ofport),
('dl_vlan', vid),
('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))])
flow_dict_6 = collections.OrderedDict([
('priority', 3),
('tun_id', lsw_id),
('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))])
flow_dict_7 = collections.OrderedDict([
('priority', 4),
('nw_src', cidr),
('proto', 'arp'),
('actions', 'drop')])
self.br.add_flow(**flow_dict_1)
self.br.add_flow(**flow_dict_2)
self.br.add_flow(**flow_dict_3)
self.br.add_flow(**flow_dict_4)
self.br.add_flow(**flow_dict_5)
self.br.add_flow(**flow_dict_6)
self.br.add_flow(**flow_dict_7)
expected_calls = [
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,"
"priority=2,dl_src=ca:fe:de:ad:be:ef,"
"actions=strip_vlan,output:0")),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,"
"priority=1,actions=normal")),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,"
"priority=2,actions=drop")),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,priority=2,"
"in_port=%s,actions=drop" % ofport)),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,"
"priority=4,dl_vlan=%s,in_port=%s,"
"actions=strip_vlan,set_tunnel:%s,normal" %
(vid, ofport, lsw_id))),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,priority=3,"
"tun_id=%s,actions=mod_vlan_vid:%s,"
"output:%s" % (lsw_id, vid, ofport))),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,priority=4,"
"nw_src=%s,arp,actions=drop" % cidr)),
]
self.execute.assert_has_calls(expected_calls)
def _ofctl_args(self, cmd, *args):
cmd = ['ovs-ofctl', cmd]
cmd += args
return cmd
def _ofctl_mock(self, cmd, *args, **kwargs):
cmd = self._ofctl_args(cmd, *args)
return mock.call(cmd, run_as_root=True, **kwargs)
def _verify_ofctl_mock(self, cmd, *args, **kwargs):
cmd = self._ofctl_args(cmd, *args)
return self.execute.assert_called_once_with(cmd, run_as_root=True,
**kwargs)
def test_add_flow_timeout_set(self):
flow_dict = collections.OrderedDict([
('priority', 1),
('hard_timeout', 1000),
('idle_timeout', 2000),
('actions', 'normal')])
self.br.add_flow(**flow_dict)
self._verify_ofctl_mock(
"add-flows", self.BR_NAME, '-',
process_input="hard_timeout=1000,idle_timeout=2000,priority=1,"
"actions=normal")
def test_add_flow_default_priority(self):
flow_dict = collections.OrderedDict([('actions', 'normal')])
self.br.add_flow(**flow_dict)
self._verify_ofctl_mock(
"add-flows", self.BR_NAME, '-',
process_input="hard_timeout=0,idle_timeout=0,priority=1,"
"actions=normal")
def _test_get_port_ofport(self, ofport, expected_result):
pname = "tap99"
self.br.vsctl_timeout = 0 # Don't waste precious time retrying
self.execute.return_value = self._encode_ovs_json(
['ofport'], [[ofport]])
self.assertEqual(self.br.get_port_ofport(pname), expected_result)
self._verify_vsctl_mock("--columns=ofport", "list", "Interface", pname)
def test_get_port_ofport_succeeds_for_valid_ofport(self):
self._test_get_port_ofport(6, 6)
def test_get_port_ofport_returns_invalid_ofport_for_non_int(self):
self._test_get_port_ofport([], ovs_lib.INVALID_OFPORT)
def test_get_port_ofport_returns_invalid_for_invalid(self):
self._test_get_port_ofport(ovs_lib.INVALID_OFPORT,
ovs_lib.INVALID_OFPORT)
def test_count_flows(self):
self.execute.return_value = 'ignore\nflow-1\n'
# counts the number of flows as total lines of output - 2
self.assertEqual(self.br.count_flows(), 1)
self._verify_ofctl_mock("dump-flows", self.BR_NAME, process_input=None)
def test_delete_flow(self):
ofport = "5"
lsw_id = 40
vid = 39
self.br.delete_flows(in_port=ofport)
self.br.delete_flows(tun_id=lsw_id)
self.br.delete_flows(dl_vlan=vid)
expected_calls = [
self._ofctl_mock("del-flows", self.BR_NAME, '-',
process_input="in_port=" + ofport),
self._ofctl_mock("del-flows", self.BR_NAME, '-',
process_input="tun_id=%s" % lsw_id),
self._ofctl_mock("del-flows", self.BR_NAME, '-',
process_input="dl_vlan=%s" % vid),
]
self.execute.assert_has_calls(expected_calls)
def test_delete_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.delete_flows,
**params)
def test_dump_flows(self):
table = 23
nxst_flow = "NXST_FLOW reply (xid=0x4):"
flows = "\n".join([" cookie=0x0, duration=18042.514s, table=0, "
"n_packets=6, n_bytes=468, "
"priority=2,in_port=1 actions=drop",
" cookie=0x0, duration=18027.562s, table=0, "
"n_packets=0, n_bytes=0, "
"priority=3,in_port=1,dl_vlan=100 "
"actions=mod_vlan_vid:1,NORMAL",
" cookie=0x0, duration=18044.351s, table=0, "
"n_packets=9, n_bytes=594, priority=1 "
"actions=NORMAL", " cookie=0x0, "
"duration=18044.211s, table=23, n_packets=0, "
"n_bytes=0, priority=0 actions=drop"])
flow_args = '\n'.join([nxst_flow, flows])
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = [flow_args]
retflows = self.br.dump_flows_for_table(table)
self.assertEqual(flows, retflows)
def test_dump_flows_ovs_dead(self):
table = 23
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = ['']
retflows = self.br.dump_flows_for_table(table)
self.assertEqual(None, retflows)
def test_mod_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_mod_flow_no_actions_set(self):
params = {'in_port': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_add_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = 6
command = ["--may-exist", "add-port",
self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=gre", "options:df_default=true",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock(*command), None),
(self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
self._encode_ovs_json(['ofport'], [[ofport]])),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_vxlan_fragmented_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = 6
vxlan_udp_port = "9999"
dont_fragment = False
command = ["--may-exist", "add-port", self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=" + constants.TYPE_VXLAN,
"options:dst_port=" + vxlan_udp_port,
"options:df_default=false",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock(*command), None),
(self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
self._encode_ovs_json(['ofport'], [[ofport]])),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip,
constants.TYPE_VXLAN, vxlan_udp_port,
dont_fragment),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_vif_ports(self, is_xen=False):
pname = "tap99"
ofport = 6
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
id_field = 'xs-vif-uuid' if is_xen else 'iface-id'
external_ids = {"attached-mac": mac, id_field: vif_id}
self.br.get_ports_attributes = mock.Mock(return_value=[{
'name': pname, 'ofport': ofport, 'external_ids': external_ids}])
self.br.get_xapi_iface_id = mock.Mock(return_value=vif_id)
ports = self.br.get_vif_ports()
self.assertEqual(1, len(ports))
self.assertEqual(ports[0].port_name, pname)
self.assertEqual(ports[0].ofport, ofport)
self.assertEqual(ports[0].vif_id, vif_id)
self.assertEqual(ports[0].vif_mac, mac)
self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
self.br.get_ports_attributes.assert_called_once_with(
'Interface',
columns=['name', 'external_ids', 'ofport'],
if_exists=True)
def _encode_ovs_json(self, headings, data):
# See man ovs-vsctl(8) for the encoding details.
r = {"data": [],
"headings": headings}
for row in data:
ovs_row = []
r["data"].append(ovs_row)
for cell in row:
if isinstance(cell, (str, int, list)):
ovs_row.append(cell)
elif isinstance(cell, dict):
ovs_row.append(["map", cell.items()])
elif isinstance(cell, set):
ovs_row.append(["set", cell])
else:
raise TypeError('%r not int, str, list, set or dict' %
type(cell))
return jsonutils.dumps(r)
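# Illustrative note (not part of the original tests): for
#   headings=['name', 'ofport'] and data=[['tap99', ['set', []]]]
# the helper above returns JSON equivalent to
#   {"headings": ["name", "ofport"], "data": [["tap99", ["set", []]]]}
# mirroring the ovs-vsctl --format=json encoding described in ovs-vsctl(8).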
def _test_get_vif_port_set(self, is_xen):
if is_xen:
id_key = 'xs-vif-uuid'
else:
id_key = 'iface-id'
headings = ['name', 'external_ids', 'ofport']
data = [
# A vif port on this bridge:
['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1],
# A vif port on this bridge not yet configured
['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []],
# Another vif port on this bridge not yet configured
['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'},
['set', []]],
# Non-vif port on this bridge:
['bogus', {}, 2],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), 'tap99\\ntun22'),
(self._vsctl_mock("--if-exists",
"--columns=name,external_ids,ofport",
"list", "Interface", 'tap99', 'tun22'),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id = mock.patch.object(self.br,
'get_xapi_iface_id').start()
get_xapi_iface_id.return_value = 'tap99id'
port_set = self.br.get_vif_port_set()
self.assertEqual(set(['tap99id']), port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id.assert_called_once_with('tap99id')
def test_get_vif_port_to_ofport_map(self):
self.execute.return_value = OVSLIST_WITH_UNSET_PORT
results = self.br.get_vif_port_to_ofport_map()
expected = {'2ab72a72-4407-4ef3-806a-b2172f3e4dc7': 2, 'patch-tun': 1}
self.assertEqual(expected, results)
def test_get_vif_ports_nonxen(self):
self._test_get_vif_ports(is_xen=False)
def test_get_vif_ports_xen(self):
self._test_get_vif_ports(is_xen=True)
def test_get_vif_port_set_nonxen(self):
self._test_get_vif_port_set(False)
def test_get_vif_port_set_xen(self):
self._test_get_vif_port_set(True)
def test_get_vif_ports_list_ports_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_ports)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_ports_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_interface_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), 'tap99\n'),
(self._vsctl_mock("--if-exists",
"--columns=name,external_ids,ofport",
"list", "Interface", "tap99"), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_port_tag_dict(self):
headings = ['name', 'tag']
data = [
['int-br-eth2', set()],
['patch-tun', set()],
['qr-76d9e6b6-21', 1],
['tapce5318ff-78', 1],
['tape1400310-e6', 1],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME),
'\\n'.join((iface for iface, tag in data))),
(self._vsctl_mock("--columns=name,tag", "list", "Port"),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
port_tags = self.br.get_port_tag_dict()
self.assertEqual(
port_tags,
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
)
def test_clear_db_attribute(self):
pname = "tap77"
self.br.clear_db_attribute("Port", pname, "tag")
self._verify_vsctl_mock("clear", "Port", pname, "tag")
def _test_iface_to_br(self, exp_timeout=None):
iface = 'tap0'
br = 'br-int'
if exp_timeout:
self.br.vsctl_timeout = exp_timeout
self.execute.return_value = 'br-int'
self.assertEqual(self.br.get_bridge_for_iface(iface), br)
self._verify_vsctl_mock("iface-to-br", iface)
def test_iface_to_br(self):
self._test_iface_to_br()
def test_iface_to_br_non_default_timeout(self):
new_timeout = 5
self._test_iface_to_br(new_timeout)
def test_iface_to_br_handles_ovs_vsctl_exception(self):
iface = 'tap0'
self.execute.side_effect = Exception
self.assertIsNone(self.br.get_bridge_for_iface(iface))
self._verify_vsctl_mock("iface-to-br", iface)
def test_delete_all_ports(self):
with mock.patch.object(self.br, 'get_port_name_list',
return_value=['port1']) as get_port:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=True)
get_port.assert_called_once_with()
delete_port.assert_called_once_with('port1')
def test_delete_neutron_ports(self):
port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
'ca:fe:de:ad:be:ef', 'br')
port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
'ca:ee:de:ad:be:ef', 'br')
with mock.patch.object(self.br, 'get_vif_ports',
return_value=[port1, port2]) as get_ports:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=False)
get_ports.assert_called_once_with()
delete_port.assert_has_calls([
mock.call('tap1234'),
mock.call('tap5678')
])
def test_delete_neutron_ports_list_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_bridges_not_default_timeout(self):
bridges = ['br-int', 'br-ex']
self.br.vsctl_timeout = 5
self.execute.return_value = 'br-int\\nbr-ex\n'
self.assertEqual(self.br.get_bridges(), bridges)
self._verify_vsctl_mock("list-br")
def test_get_local_port_mac_succeeds(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address='foo')):
self.assertEqual('foo', self.br.get_local_port_mac())
def test_get_local_port_mac_raises_exception_for_missing_mac(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address=None)):
with testtools.ExpectedException(Exception):
self.br.get_local_port_mac()
def test_get_vifs_by_ids(self):
db_list_res = [
{'name': 'qvo1', 'ofport': 1,
'external_ids': {'iface-id': 'pid1', 'attached-mac': '11'}},
{'name': 'qvo2', 'ofport': 2,
'external_ids': {'iface-id': 'pid2', 'attached-mac': '22'}},
{'name': 'qvo4', 'ofport': -1,
'external_ids': {'iface-id': 'pid4', 'attached-mac': '44'}},
]
self.br.get_ports_attributes = mock.Mock(return_value=db_list_res)
self.br.ovsdb = mock.Mock()
self.br.ovsdb.list_ports.return_value.execute.return_value = [
'qvo1', 'qvo2', 'qvo4']
by_id = self.br.get_vifs_by_ids(['pid1', 'pid2', 'pid3', 'pid4'])
# pid3 isn't on bridge and pid4 doesn't have a valid ofport
self.assertIsNone(by_id['pid3'])
self.assertIsNone(by_id['pid4'])
self.assertEqual('pid1', by_id['pid1'].vif_id)
self.assertEqual('qvo1', by_id['pid1'].port_name)
self.assertEqual(1, by_id['pid1'].ofport)
self.assertEqual('pid2', by_id['pid2'].vif_id)
self.assertEqual('qvo2', by_id['pid2'].port_name)
self.assertEqual(2, by_id['pid2'].ofport)
def _test_get_vif_port_by_id(self, iface_id, data, br_name=None,
extra_calls_and_values=None):
headings = ['external_ids', 'name', 'ofport']
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("--columns=external_ids,name,ofport", "find",
"Interface",
'external_ids:iface-id=%s' % iface_id,
'external_ids:attached-mac!=""'),
self._encode_ovs_json(headings, data))]
if data:
if not br_name:
br_name = self.BR_NAME
# Only the last information list in 'data' is used, so if more
# than one vif is described in 'data', the expected calls for the
# rest must be passed in via 'extra_calls_and_values'.
if extra_calls_and_values:
expected_calls_and_values.extend(extra_calls_and_values)
expected_calls_and_values.append(
(self._vsctl_mock("iface-to-br",
data[-1][headings.index('name')]), br_name))
tools.setup_mock_calls(self.execute, expected_calls_and_values)
vif_port = self.br.get_vif_port_by_id(iface_id)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
return vif_port
def _assert_vif_port(self, vif_port, ofport=None, mac=None):
if not ofport or ofport == -1 or not mac:
self.assertIsNone(vif_port, "Got %s" % vif_port)
return
self.assertEqual('tap99id', vif_port.vif_id)
self.assertEqual(mac, vif_port.vif_mac)
self.assertEqual('tap99', vif_port.port_name)
self.assertEqual(ofport, vif_port.ofport)
def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"],
["attached-mac", mac]]
data = [[["map", external_ids], "tap99",
ofport if ofport else ["set", []]]]
vif_port = self._test_get_vif_port_by_id('tap99id', data)
self._assert_vif_port(vif_port, ofport, mac)
def test_get_vif_by_port_id_with_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_without_ofport(self):
self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_with_invalid_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=-1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_with_no_data(self):
self.assertIsNone(self._test_get_vif_port_by_id('whatever', []))
def test_get_vif_by_port_id_different_bridge(self):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
data = [[["map", external_ids], "tap99", 1]]
self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data,
"br-ext"))
def test_get_vif_by_port_id_multiple_vifs(self):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"],
["attached-mac", "de:ad:be:ef:13:37"]]
data = [[["map", external_ids], "dummytap", 1],
[["map", external_ids], "tap99", 1337]]
extra_calls_and_values = [
(self._vsctl_mock("iface-to-br", "dummytap"), "br-ext")]
vif_port = self._test_get_vif_port_by_id(
'tap99id', data, extra_calls_and_values=extra_calls_and_values)
self._assert_vif_port(vif_port, ofport=1337, mac="de:ad:be:ef:13:37")
class TestDeferredOVSBridge(base.BaseTestCase):
def setUp(self):
super(TestDeferredOVSBridge, self).setUp()
self.br = mock.Mock()
self.mocked_do_action_flows = mock.patch.object(
self.br, 'do_action_flows').start()
self.add_flow_dict1 = dict(in_port=11, actions='drop')
self.add_flow_dict2 = dict(in_port=12, actions='drop')
self.mod_flow_dict1 = dict(in_port=21, actions='drop')
self.mod_flow_dict2 = dict(in_port=22, actions='drop')
self.del_flow_dict1 = dict(in_port=31)
self.del_flow_dict2 = dict(in_port=32)
def test_right_allowed_passthroughs(self):
expected_passthroughs = ('add_port', 'add_tunnel_port', 'delete_port')
self.assertEqual(expected_passthroughs,
ovs_lib.DeferredOVSBridge.ALLOWED_PASSTHROUGHS)
def _verify_mock_call(self, expected_calls):
self.mocked_do_action_flows.assert_has_calls(expected_calls)
self.assertEqual(len(expected_calls),
len(self.mocked_do_action_flows.mock_calls))
def test_apply_on_exit(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1]),
]
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
self._verify_mock_call([])
self._verify_mock_call(expected_calls)
def test_apply_on_exit_with_errors(self):
try:
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
raise Exception()
except Exception:
self._verify_mock_call([])
else:
self.fail('Exception should have been re-raised')
def test_apply(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1]),
]
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
self._verify_mock_call([])
deferred_br.apply_flows()
self._verify_mock_call(expected_calls)
self._verify_mock_call(expected_calls)
def test_apply_order(self):
expected_calls = [
mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]),
mock.call('mod', [self.mod_flow_dict1, self.mod_flow_dict2]),
mock.call('add', [self.add_flow_dict1, self.add_flow_dict2]),
]
order = 'del', 'mod', 'add'
with ovs_lib.DeferredOVSBridge(self.br, order=order) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict2)
deferred_br.add_flow(**self.add_flow_dict2)
deferred_br.mod_flow(**self.mod_flow_dict2)
self._verify_mock_call(expected_calls)
def test_apply_full_ordered(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]),
mock.call('add', [self.add_flow_dict2]),
mock.call('mod', [self.mod_flow_dict2]),
]
with ovs_lib.DeferredOVSBridge(self.br,
full_ordered=True) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict2)
deferred_br.add_flow(**self.add_flow_dict2)
deferred_br.mod_flow(**self.mod_flow_dict2)
self._verify_mock_call(expected_calls)
def test_getattr_unallowed_attr(self):
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
self.assertEqual(self.br.add_port, deferred_br.add_port)
def test_getattr_unallowed_attr_failure(self):
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
self.assertRaises(AttributeError, getattr, deferred_br, 'failure')
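# Minimal usage sketch (not part of the original tests), reusing the mock and
# ovs_lib imports above: it mirrors what test_apply_on_exit exercises -- flows
# queued on a DeferredOVSBridge are only flushed to do_action_flows when the
# context manager exits without an exception.
def _example_deferred_bridge_usage():
    br = mock.Mock()
    with ovs_lib.DeferredOVSBridge(br) as deferred_br:
        deferred_br.add_flow(in_port=1, actions='drop')
        # Nothing has been pushed to the underlying bridge yet.
    # On exit the queued flow is applied in a single do_action_flows call.
    return br.do_action_flows.mock_calls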